diff --git a/.github/workflows/ansible-deploy.yml b/.github/workflows/ansible-deploy.yml new file mode 100644 index 0000000000..278b8c3e3a --- /dev/null +++ b/.github/workflows/ansible-deploy.yml @@ -0,0 +1,95 @@ +name: Ansible Deployment + +on: + push: + branches: [ lab06, master ] + paths: + - 'ansible/**' + - '.github/workflows/ansible-deploy.yml' + pull_request: + branches: [ lab06, master ] + paths: + - 'ansible/**' + - '.github/workflows/ansible-deploy.yml' + +jobs: + lint: + name: Ansible Lint + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install Ansible + ansible-lint + run: | + python -m pip install --upgrade pip + pip install ansible ansible-lint + + - name: Install Ansible collections + run: | + ansible-galaxy collection install -r ansible/requirements.yml + + - name: Run ansible-lint + run: | + cd ansible + ansible-lint -p playbooks/*.yml roles + + deploy: + name: Deploy Application + needs: lint + + runs-on: [self-hosted, linux] + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python venv + shell: bash + run: | + set -euo pipefail + python3 -m venv .venv + . .venv/bin/activate + python -m pip install --upgrade pip + python -m pip install ansible ansible-lint + + - name: Install Ansible collections + shell: bash + run: | + set -euo pipefail + . .venv/bin/activate + ansible-galaxy collection install -r ansible/requirements.yml + + - name: Deploy with Ansible + shell: bash + env: + ANSIBLE_VAULT_PASSWORD: ${{ secrets.ANSIBLE_VAULT_PASSWORD }} + run: | + set -euo pipefail + . 
.venv/bin/activate + export ANSIBLE_CONFIG="$GITHUB_WORKSPACE/ansible/ansible.cfg" + cd ansible + + if [[ -n "${ANSIBLE_VAULT_PASSWORD:-}" ]]; then + trap 'rm -f /tmp/vault_pass' EXIT + printf '%s' "${ANSIBLE_VAULT_PASSWORD}" > /tmp/vault_pass + ansible-playbook -i inventory/hosts.ini playbooks/deploy.yml --vault-password-file /tmp/vault_pass + else + ansible-playbook -i inventory/hosts.ini playbooks/deploy.yml + fi + + - name: Verify deployment + shell: bash + env: + VM_HOST: ${{ secrets.VM_HOST }} + run: | + set -euo pipefail + HOST="${VM_HOST:-127.0.0.1}" + curl --retry 10 --retry-delay 3 --retry-connrefused -fsS "http://${HOST}:8000/health" + curl --retry 10 --retry-delay 3 --retry-connrefused -fsS "http://${HOST}:8000/" \ No newline at end of file diff --git a/.github/workflows/python-ci.yml b/.github/workflows/python-ci.yml new file mode 100644 index 0000000000..b81f2f56a9 --- /dev/null +++ b/.github/workflows/python-ci.yml @@ -0,0 +1,114 @@ +name: python-ci + +on: + workflow_dispatch: + push: + branches: ["lab03", "lab05", "master"] + paths: + - "app_python/**" + - ".github/workflows/python-ci.yml" + pull_request: + branches: ["master"] + paths: + - "app_python/**" + - ".github/workflows/python-ci.yml" + +concurrency: + group: python-ci-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + +env: + IMAGE_NAME: devops-info-service + APP_DIR: app_python + +jobs: + test-and-lint: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" + cache: "pip" + cache-dependency-path: | + app_python/requirements.txt + app_python/requirements-dev.txt + + - name: Install deps + run: | + python -m pip install --upgrade pip + pip install -r app_python/requirements.txt + pip install -r app_python/requirements-dev.txt + + - name: Lint (ruff) + run: | + cd app_python + ruff check . 
+ + - name: Tests (pytest) + coverage + run: | + cd app_python + pytest -q tests --cov=. --cov-report=term-missing --cov-report=xml + + - name: Upload coverage artifact + uses: actions/upload-artifact@v4 + with: + name: coverage-xml + path: app_python/coverage.xml + + - name: Install Snyk CLI + run: npm install -g snyk + + - name: Snyk scan (dependencies) + continue-on-error: true + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + run: | + cd app_python + snyk test --severity-threshold=high --file=requirements.txt + + docker-build-and-push: + runs-on: ubuntu-latest + needs: test-and-lint + if: github.event_name == 'push' && (github.ref == 'refs/heads/master' || github.ref == 'refs/heads/lab03' || github.ref == 'refs/heads/lab05') + permissions: + contents: read + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Prepare CalVer tags + run: | + echo "CALVER_MONTH=$(date -u +'%Y.%m')" >> $GITHUB_ENV + echo "CALVER_BUILD=$(date -u +'%Y.%m').${{ github.run_number }}" >> $GITHUB_ENV + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push + uses: docker/build-push-action@v6 + with: + context: ${{ env.APP_DIR }} + file: ${{ env.APP_DIR }}/Dockerfile + push: true + tags: | + ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:${{ env.CALVER_BUILD }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:${{ env.CALVER_MONTH }} + ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.IMAGE_NAME }}:latest + cache-from: type=gha + cache-to: type=gha,mode=max diff --git a/.github/workflows/terraform-ci.yml b/.github/workflows/terraform-ci.yml new file mode 100644 index 0000000000..d6bc61d1a6 --- /dev/null +++ b/.github/workflows/terraform-ci.yml @@ -0,0 +1,57 @@ +name: Terraform CI + +on: + pull_request: + paths: 
+ - "terraform/**" + - ".github/workflows/terraform-ci.yml" + push: + branches: + - master + - lab04 + paths: + - "terraform/**" + - ".github/workflows/terraform-ci.yml" + +jobs: + terraform-check: + name: "fmt / validate / tflint" + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + workdir: + - terraform + - terraform/github + + defaults: + run: + working-directory: ${{ matrix.workdir }} + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Terraform + uses: hashicorp/setup-terraform@v3 + with: + terraform_wrapper: false + + - name: Terraform fmt + run: terraform fmt -check -recursive + + - name: Terraform init (no backend) + run: terraform init -backend=false + + - name: Terraform validate + run: terraform validate -no-color + + - name: Setup TFLint + uses: terraform-linters/setup-tflint@v3 + + - name: TFLint init + run: tflint --init + + - name: TFLint + run: tflint diff --git a/.gitignore b/.gitignore index 30d74d2584..45f14ccf09 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,19 @@ -test \ No newline at end of file +test +.env +.venv +actions-runner/ +_work/ +.runner/ +key.json +# --- Ansible --- +*.retry +.vault_pass +ansible/inventory/*.pyc +ansible/inventory/__pycache__/ +__pycache__/ + +# --- Vagrant --- +.vagrant/ + +# Do not commit real inventory with IPs if you don't want +# ansible/inventory/hosts.ini diff --git a/.vagrant/machines/default/virtualbox/action_set_name b/.vagrant/machines/default/virtualbox/action_set_name new file mode 100644 index 0000000000..77f6fb8046 --- /dev/null +++ b/.vagrant/machines/default/virtualbox/action_set_name @@ -0,0 +1 @@ +1773325600 \ No newline at end of file diff --git a/.vagrant/machines/default/virtualbox/creator_uid b/.vagrant/machines/default/virtualbox/creator_uid new file mode 100644 index 0000000000..c227083464 --- /dev/null +++ b/.vagrant/machines/default/virtualbox/creator_uid @@ -0,0 +1 @@ +0 \ No newline at end of file diff --git 
a/.vagrant/machines/default/virtualbox/id b/.vagrant/machines/default/virtualbox/id new file mode 100644 index 0000000000..9bd219e5e6 --- /dev/null +++ b/.vagrant/machines/default/virtualbox/id @@ -0,0 +1 @@ +5f4a1e02-46c6-4dcd-817a-01783b29dfd2 \ No newline at end of file diff --git a/.vagrant/machines/default/virtualbox/index_uuid b/.vagrant/machines/default/virtualbox/index_uuid new file mode 100644 index 0000000000..90907a7cd0 --- /dev/null +++ b/.vagrant/machines/default/virtualbox/index_uuid @@ -0,0 +1 @@ +41695340a0c0459bb1ec1e9674b9fcae \ No newline at end of file diff --git a/.vagrant/rgloader/loader.rb b/.vagrant/rgloader/loader.rb new file mode 100644 index 0000000000..b6c81bf31b --- /dev/null +++ b/.vagrant/rgloader/loader.rb @@ -0,0 +1,12 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +# This file loads the proper rgloader/loader.rb file that comes packaged +# with Vagrant so that encoded files can properly run with Vagrant. + +if ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"] + require File.expand_path( + "rgloader/loader", ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"]) +else + raise "Encoded files can't be read outside of the Vagrant installer." +end diff --git a/Vagrantfile b/Vagrantfile new file mode 100644 index 0000000000..53e2f6e4b0 --- /dev/null +++ b/Vagrantfile @@ -0,0 +1,28 @@ +Vagrant.configure("2") do |config| + config.vm.box = "ubuntu/jammy64" + config.vm.hostname = "lab09" + + # Disable project folder sharing inside the VM. + # This avoids common Windows path issues (spaces, Cyrillic characters) + # and is not needed for this lab workflow. + config.vm.synced_folder ".", "/vagrant", disabled: true + + # Forward ports to the Windows host. + # host_ip "0.0.0.0" lets WSL2 reach the forwarded ports through the host. 
+ config.vm.network "forwarded_port", guest: 22, host: 2222, host_ip: "0.0.0.0", id: "ssh", auto_correct: true + config.vm.network "forwarded_port", guest: 8000, host: 8000, host_ip: "0.0.0.0", id: "app", auto_correct: true + config.vm.network "forwarded_port", guest: 3000, host: 3000, host_ip: "0.0.0.0", id: "grafana", auto_correct: true + config.vm.network "forwarded_port", guest: 3100, host: 3100, host_ip: "0.0.0.0", id: "loki", auto_correct: true + config.vm.network "forwarded_port", guest: 9080, host: 9080, host_ip: "0.0.0.0", id: "promtail", auto_correct: true + config.vm.network "forwarded_port", guest: 9090, host: 9090, host_ip: "0.0.0.0", id: "prometheus", auto_correct: true + config.vm.network "forwarded_port", guest: 30080, host: 30080, host_ip: "0.0.0.0", id: "k8s-app1", auto_correct: true + config.vm.network "forwarded_port", guest: 30081, host: 30081, host_ip: "0.0.0.0", id: "k8s-app2", auto_correct: true + + config.ssh.insert_key = true + + config.vm.provider "virtualbox" do |vb| + vb.name = "lab09" + vb.memory = 3072 + vb.cpus = 2 + end +end diff --git a/ansible/.ansible-lint b/ansible/.ansible-lint new file mode 100644 index 0000000000..3ab93c253f --- /dev/null +++ b/ansible/.ansible-lint @@ -0,0 +1,3 @@ +skip_list: + - key-order + - var-naming[no-role-prefix] diff --git a/ansible/0 b/ansible/0 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ansible/README.md b/ansible/README.md new file mode 100644 index 0000000000..82a1582690 --- /dev/null +++ b/ansible/README.md @@ -0,0 +1,61 @@ +# Lab05/Lab06 — Ansible + +See: +- `labs/lab05.md` — assignment +- `ansible/docs/LAB05.md` — report template +- `labs/lab06.md` — assignment +- `ansible/docs/LAB06.md` — report +- `ansible/docs/LOCAL_VALIDATION_WINDOWS.md` — Windows + WSL2 local validation guide + +## Workflow badge + +After creating your GitHub repository, replace `OWNER/REPO` in the snippet below: + +```md +[![Ansible 
Deployment](https://github.com/OWNER/REPO/actions/workflows/ansible-deploy.yml/badge.svg)](https://github.com/OWNER/REPO/actions/workflows/ansible-deploy.yml) +``` + +## Quick start + +```bash +cd ansible + +# Install required collections +ansible-galaxy collection install -r requirements.yml + +# Connectivity test +ansible all -m ping + +# Provision the target VM (run twice to prove idempotency) +ansible-playbook playbooks/provision.yml +ansible-playbook playbooks/provision.yml + +# Deploy the application with Docker Compose +ansible-playbook playbooks/deploy.yml + +# Useful tag examples (Lab06) +ansible-playbook playbooks/provision.yml --list-tags +ansible-playbook playbooks/provision.yml --tags "docker_install" +ansible-playbook playbooks/provision.yml --skip-tags "common" +ansible-playbook playbooks/deploy.yml --tags "web_app" + +# Wipe only (double-gated: variable + tag) +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +``` + +## CI/CD (GitHub Actions) + +Workflow file: `.github/workflows/ansible-deploy.yml`. + +For Vagrant/VirtualBox setups behind NAT, prefer a **self-hosted Linux runner** on the same machine +where you run Ansible (for example, WSL2 Ubuntu). This avoids inbound SSH exposure and stays free. + +## Lab07 monitoring stack + +```bash +# Deploy Loki + Promtail + Grafana + app stack on the target VM +ansible-playbook -i inventory/hosts.ini playbooks/deploy-monitoring.yml +``` + +The monitoring role builds the Python app locally on the target VM, so you do not need to push a new +application image to Docker Hub for Lab07. diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000000..b6a8c1d248 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,14 @@ +[defaults] +inventory = inventory/hosts.ini +roles_path = roles +host_key_checking = False +# For Vagrant boxes the default SSH user is usually "vagrant". +# You can still override this per-host in inventory/hosts.ini. 
+remote_user = vagrant +retry_files_enabled = False +interpreter_python = auto_silent + +[privilege_escalation] +become = True +become_method = sudo +become_user = root diff --git a/ansible/docs/LAB05.md b/ansible/docs/LAB05.md new file mode 100644 index 0000000000..112d992ddc --- /dev/null +++ b/ansible/docs/LAB05.md @@ -0,0 +1,450 @@ +# LAB05 — Ansible Fundamentals (Report) + +## 1. Architecture Overview + +### Control node +- OS: Windows 11 + WSL (Ubuntu) +- Ansible is executed inside WSL +- Ansible version: + +```bash +ansible --version +``` + +```text +ansible [core 2.20.3] + config file = /home/dorley/projects/DevOps-Core-Course/ansible/ansible.cfg + configured module search path = ['/home/dorley/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] + ansible python module location = /home/dorley/.local/share/pipx/venvs/ansible-core/lib/python3.12/site-packages/ansible + ansible collection location = /home/dorley/.ansible/collections:/usr/share/ansible/collections + executable location = /home/dorley/.local/bin/ansible + python version = 3.12.3 (main, Jan 22 2026, 20:57:42) [GCC 13.3.0] (/home/dorley/.local/share/pipx/venvs/ansible-core/bin/python) + jinja version = 3.1.6 + pyyaml version = 6.0.3 (with libyaml v0.2.5) +``` + +### Target node +- Provisioned via: **Vagrant + VirtualBox** +- SSH access: forwarded port (guest 22 -> host 2222) +- OS (queried via Ansible): + +```bash +ansible -i inventory/hosts.ini webservers -a "lsb_release -a" +``` + +```text +vagrant1 | CHANGED | rc=0 >> +Distributor ID: Ubuntu +Description: Ubuntu 22.04.5 LTS +Release: 22.04 +Codename: jammyNo LSB modules are available. +``` + +### Networking note (WSL + Windows) +From WSL, SSH access to the VM uses the **Windows host LAN IP** (not 127.0.0.1). + +Example values used in this lab: +- Windows host IP: `192.168.31.32` +- SSH forwarded port: `2222` +- App forwarded port: `5000` + +> If your Windows host IP differs, replace `192.168.31.32` accordingly. + +--- + +## 2. 
Project Structure (Ansible) + +```text +ansible/ +├── ansible.cfg +├── inventory/ +│ └── hosts.ini +├── group_vars/ +│ └── webservers.yml # non-secret variables (public image) +├── playbooks/ +│ ├── provision.yml +│ └── deploy.yml +└── roles/ + ├── common/ + ├── docker/ + └── app_deploy/ +``` + +--- + +## 3. Roles Documentation + +### Role: `common` +**Purpose** +- Base OS provisioning: apt cache update + essential packages. +- Optional timezone configuration. + +**Key tasks** +- Update apt cache +- Install common packages +- Set timezone (optional) + +**Variables** +- `common_packages` (list) +- `common_timezone` (string) +- `common_set_timezone` (bool) + +--- + +### Role: `docker` +**Purpose** +- Install Docker Engine from the official Docker APT repository. +- Enable and start Docker. +- Add SSH user to the `docker` group. +- Install `python3-docker` for Ansible Docker modules. + +**Key tasks** +- Add Docker GPG key + repo +- Install Docker packages +- Ensure `docker` service is enabled and running +- Add user to docker group +- Install Docker SDK for Python (`python3-docker`) + +**Variables** +- `docker_user` +- `docker_packages` + +--- + +### Role: `app_deploy` +**Purpose** +- Pull the application image. +- Run the container with a stable name, port mapping and restart policy. +- Wait for readiness and verify `/health` and `/`. + +**Key tasks** +- Optional `docker_login` (executed only if password is provided) +- `docker_image` pull +- `docker_container` start +- `wait_for` + HTTP checks + +**Variables** +- `dockerhub_username` +- `dockerhub_password` (empty for public image) +- `docker_image` +- `docker_image_tag` +- `app_name` +- `app_container_name` +- `app_port` / `container_port` +- `app_restart_policy` +- `app_env` + +--- + +## 4. 
Idempotency Demonstration (Provisioning) + +### 4.1 First run + +```bash +ansible-playbook -i inventory/hosts.ini playbooks/provision.yml +``` + +```text + +PLAY [Provision web servers] ************************************************************************************************************************* + +TASK [Gathering Facts] ******************************************************************************************************************************* +ok: [vagrant1] + +TASK [common : Update apt cache] ********************************************************************************************************************* +ok: [vagrant1] + +TASK [common : Install common packages] ************************************************************************************************************** +ok: [vagrant1] + +TASK [common : Set timezone] ************************************************************************************************************************* +skipping: [vagrant1] + +TASK [docker : Install prerequisites for Docker repository] ****************************************************************************************** +ok: [vagrant1] + +TASK [docker : Ensure /etc/apt/keyrings exists] ****************************************************************************************************** +ok: [vagrant1] + +TASK [docker : Download Docker GPG key (ASCII)] ****************************************************************************************************** +ok: [vagrant1] + +TASK [docker : Check if Docker keyring already exists] *********************************************************************************************** +ok: [vagrant1] + +TASK [docker : Convert (dearmor) Docker GPG key to keyring] ****************************************************************************************** +skipping: [vagrant1] + +TASK [docker : Set correct permissions on Docker keyring] 
******************************************************************************************** +ok: [vagrant1] + +TASK [docker : Set Docker APT architecture mapping] ************************************************************************************************** +[WARNING]: Deprecation warnings can be disabled by setting `deprecation_warnings=False` in ansible.cfg. +[DEPRECATION WARNING]: INJECT_FACTS_AS_VARS default to `True` is deprecated, top-level facts will not be auto injected after the change. This feature will be removed from ansible-core version 2.24. +Origin: /home/dorley/projects/DevOps-Core-Course/ansible/roles/docker/tasks/main.yml:42:22 + +40 - name: Set Docker APT architecture mapping +41 ansible.builtin.set_fact: +42 docker_apt_arch: "{{ {'x86_64':'amd64','aarch64':'arm64'}.get(ansible_architecture, ansible_architecture) }}" + ^ column 22 + +Use `ansible_facts["fact_name"]` (no `ansible_` prefix) instead. + +ok: [vagrant1] + +TASK [docker : Add official Docker APT repository] *************************************************************************************************** +[DEPRECATION WARNING]: INJECT_FACTS_AS_VARS default to `True` is deprecated, top-level facts will not be auto injected after the change. This feature will be removed from ansible-core version 2.24. +Origin: /home/dorley/projects/DevOps-Core-Course/ansible/roles/docker/tasks/main.yml:46:11 + +44 - name: Add official Docker APT repository +45 ansible.builtin.apt_repository: +46 repo: "deb [arch={{ docker_apt_arch }} signed-by={{ docker_keyring_path }}] https://download.docker.com/linux/... + ^ column 11 + +Use `ansible_facts["fact_name"]` (no `ansible_` prefix) instead. 
+ +ok: [vagrant1] + +TASK [docker : Install Docker Engine packages] ******************************************************************************************************* +ok: [vagrant1] + +TASK [docker : Ensure Docker service is enabled and running] ***************************************************************************************** +ok: [vagrant1] + +TASK [docker : Ensure docker group exists] *********************************************************************************************************** +ok: [vagrant1] + +TASK [docker : Add user to docker group] ************************************************************************************************************* +ok: [vagrant1] + +TASK [docker : Install Docker SDK for Python on target (for Ansible docker modules)] ***************************************************************** +ok: [vagrant1] + +PLAY RECAP ******************************************************************************************************************************************* +vagrant1 : ok=15 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 +``` + +### 4.2 Second run + +```bash +ansible-playbook -i inventory/hosts.ini playbooks/provision.yml +``` + +```text +PLAY [Provision web servers] ************************************************************************************************************************* + +TASK [Gathering Facts] ******************************************************************************************************************************* +ok: [vagrant1] + +TASK [common : Update apt cache] ********************************************************************************************************************* +ok: [vagrant1] + +TASK [common : Install common packages] ************************************************************************************************************** +ok: [vagrant1] + +TASK [common : Set timezone] 
************************************************************************************************************************* +skipping: [vagrant1] + +TASK [docker : Install prerequisites for Docker repository] ****************************************************************************************** +ok: [vagrant1] + +TASK [docker : Ensure /etc/apt/keyrings exists] ****************************************************************************************************** +ok: [vagrant1] + +TASK [docker : Download Docker GPG key (ASCII)] ****************************************************************************************************** +ok: [vagrant1] + +TASK [docker : Check if Docker keyring already exists] *********************************************************************************************** +ok: [vagrant1] + +TASK [docker : Convert (dearmor) Docker GPG key to keyring] ****************************************************************************************** +skipping: [vagrant1] + +TASK [docker : Set correct permissions on Docker keyring] ******************************************************************************************** +ok: [vagrant1] + +TASK [docker : Set Docker APT architecture mapping] ************************************************************************************************** +[WARNING]: Deprecation warnings can be disabled by setting `deprecation_warnings=False` in ansible.cfg. +[DEPRECATION WARNING]: INJECT_FACTS_AS_VARS default to `True` is deprecated, top-level facts will not be auto injected after the change. This feature will be removed from ansible-core version 2.24. +Origin: /home/dorley/projects/DevOps-Core-Course/ansible/roles/docker/tasks/main.yml:42:22 + +40 - name: Set Docker APT architecture mapping +41 ansible.builtin.set_fact: +42 docker_apt_arch: "{{ {'x86_64':'amd64','aarch64':'arm64'}.get(ansible_architecture, ansible_architecture) }}" + ^ column 22 + +Use `ansible_facts["fact_name"]` (no `ansible_` prefix) instead. 
+ +ok: [vagrant1] + +TASK [docker : Add official Docker APT repository] *************************************************************************************************** +[DEPRECATION WARNING]: INJECT_FACTS_AS_VARS default to `True` is deprecated, top-level facts will not be auto injected after the change. This feature will be removed from ansible-core version 2.24. +Origin: /home/dorley/projects/DevOps-Core-Course/ansible/roles/docker/tasks/main.yml:46:11 + +44 - name: Add official Docker APT repository +45 ansible.builtin.apt_repository: +46 repo: "deb [arch={{ docker_apt_arch }} signed-by={{ docker_keyring_path }}] https://download.docker.com/linux/... + ^ column 11 + +Use `ansible_facts["fact_name"]` (no `ansible_` prefix) instead. + +ok: [vagrant1] + +TASK [docker : Install Docker Engine packages] ******************************************************************************************************* +ok: [vagrant1] + +TASK [docker : Ensure Docker service is enabled and running] ***************************************************************************************** +ok: [vagrant1] + +TASK [docker : Ensure docker group exists] *********************************************************************************************************** +ok: [vagrant1] + +TASK [docker : Add user to docker group] ************************************************************************************************************* +ok: [vagrant1] + +TASK [docker : Install Docker SDK for Python on target (for Ansible docker modules)] ***************************************************************** +ok: [vagrant1] + +PLAY RECAP ******************************************************************************************************************************************* +vagrant1 : ok=15 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 +``` + +### 4.3 Analysis +First run changes the system (packages, repositories, services). 
Second run converges to the desired state and should show `changed=0` (or close to it), proving idempotency. + +--- + +## 5. Secrets / Vault + +This lab uses a **public Docker Hub image**, therefore no registry password is required for `docker pull`. + +Variables are stored in `group_vars/webservers.yml` and `dockerhub_password` is set to an empty string. + +Optional: Ansible Vault can still be used for secrets (e.g., if using a private image), but is not required for this public-image setup. + +--- + +## 6. Deployment Verification + +### 6.1 Deploy run + +```bash +ansible-playbook -i inventory/hosts.ini playbooks/deploy.yml +``` + +```text + +PLAY [Deploy application] **************************************************************************************************************************** + +TASK [Gathering Facts] ******************************************************************************************************************************* +ok: [vagrant1] + +TASK [app_deploy : Ensure Docker SDK for Python is installed on target] ****************************************************************************** +ok: [vagrant1] + +TASK [app_deploy : Login to Docker registry] ********************************************************************************************************* +skipping: [vagrant1] + +TASK [app_deploy : Pull application image] *********************************************************************************************************** +ok: [vagrant1] + +TASK [app_deploy : Check current container (if exists)] ********************************************************************************************** +ok: [vagrant1] + +TASK [app_deploy : Decide if redeploy is needed] ***************************************************************************************************** +ok: [vagrant1] + +TASK [app_deploy : Stop existing container (only if redeploy needed)] ******************************************************************************** +skipping: 
[vagrant1] + +TASK [app_deploy : Remove existing container (only if redeploy needed)] ****************************************************************************** +skipping: [vagrant1] + +TASK [app_deploy : Run application container] ******************************************************************************************************** +ok: [vagrant1] + +TASK [app_deploy : Wait for the application port to become available] ******************************************************************************** +ok: [vagrant1] + +TASK [app_deploy : Health check (/health)] *********************************************************************************************************** +ok: [vagrant1] + +TASK [app_deploy : Verify main endpoint (/)] ********************************************************************************************************* +ok: [vagrant1] + +PLAY RECAP ******************************************************************************************************************************************* +vagrant1 : ok=9 changed=0 unreachable=0 failed=0 skipped=3 rescued=0 ignored=0 +``` + +### 6.2 Container status + +```bash +ansible -i inventory/hosts.ini webservers -a "docker ps" +``` + +```text +vagrant1 | CHANGED | rc=0 >> +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a0a73f77e763 dorley174/devops-info-service:latest "python app.py" 35 minutes ago Up 34 minutes 0.0.0.0:5000->5000/tcp devops-info-service +``` + +### 6.3 Health check + +From the target VM (via Ansible): + +```bash +ansible -i inventory/hosts.ini webservers -a "curl -i http://127.0.0.1:5000/health" +``` + +From the control node (WSL) via Windows host forwarding: + +```bash +curl -i "http://192.168.31.32:5000/health" +curl -i "http://192.168.31.32:5000/" +``` + +```text +curl -i "http://192.168.31.32:5000/" +HTTP/1.1 200 OK +Server: Werkzeug/3.1.5 Python/3.13.1 +Date: Thu, 26 Feb 2026 20:05:00 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 82 +Connection: close + 
+{"status":"healthy","timestamp":"2026-02-26T20:05:00.236Z","uptime_seconds":2111} +HTTP/1.1 200 OK +Server: Werkzeug/3.1.5 Python/3.13.1 +Date: Thu, 26 Feb 2026 20:05:00 GMT +Content-Type: application/json; charset=utf-8 +Content-Length: 675 +Connection: close + +{"endpoints":[{"description":"Service information","method":"GET","path":"/"},{"description":"Health check","method":"GET","path":"/health"}],"request":{"client_ip":"192.168.31.32","method":"GET","path":"/","user_agent":"curl/8.5.0"},"runtime":{"current_time":"2026-02-26T20:05:00.254Z","timezone":"UTC","uptime_human":"0 hours, 35 minutes","uptime_seconds":2111},"service":{"description":"DevOps course info service","framework":"Flask","name":"devops-info-service","version":"1.0.0"},"system":{"architecture":"x86_64","cpu_count":2,"hostname":"a0a73f77e763","platform":"Linux","platform_version":"Linux-5.15.0-170-generic-x86_64-with-glibc2.36","python_version":"3.13.1"}} +``` + +--- + +## 7. Key Decisions (Short Answers) + +1. **Why use roles instead of plain playbooks?** + Roles keep automation modular and reusable, making playbooks shorter and easier to maintain. + +2. **How do roles improve reusability?** + The same role can be applied to different hosts/projects by changing variables without copying tasks. + +3. **What makes a task idempotent?** + Stateful modules that only change the system if the current state differs from desired state. + +4. **How do handlers improve efficiency?** + Handlers run only when notified, avoiding unnecessary restarts. + +5. **Why would Ansible Vault be needed?** + To store sensitive credentials safely in version control when secrets are required. + +--- + +## 8. Challenges (Optional) + +- VirtualBox/Vagrant leftovers caused VM name conflicts; fixed by removing stale VMs from VirtualBox. +- WSL could not access `127.0.0.1:2222` forwarded port; fixed by using Windows LAN IP (e.g., `192.168.31.32`). 
diff --git a/ansible/docs/LAB06.md b/ansible/docs/LAB06.md new file mode 100644 index 0000000000..c851f78b3c --- /dev/null +++ b/ansible/docs/LAB06.md @@ -0,0 +1,323 @@ +# LAB06 — Advanced Ansible & CI/CD + +## Overview + +This lab upgrades the Lab05 Ansible project to a more production-ready setup: + +- Refactored roles to use **blocks**, **rescue/always**, and a clear **tag strategy**. +- Migrated the application deployment from `docker run` to **Docker Compose v2**. +- Added **role dependency** (`web_app` depends on `docker`) to guarantee execution order. +- Implemented **safe wipe logic** (double-gated: variable + tag). +- Added a **GitHub Actions** workflow for Ansible linting + deployment. + +Tech used: **Ansible 2.16+**, **community.docker**, **Docker Compose v2**, **GitHub Actions**, **Jinja2**. + +--- + +## Blocks & Tags + +### Tag strategy + +- `common` — the whole common role (set at playbook role level) +- `packages` — package management tasks inside `common` +- `users` — user management tasks inside `common` + +- `docker` — the whole docker role (set at playbook role level) +- `docker_install` — Docker installation + repo setup +- `docker_config` — Docker group/user configuration + +- `web_app` — the whole deployment role (set at playbook role level) +- `app_deploy` — deployment block inside `web_app` +- `compose` — Docker Compose related tasks in `web_app` +- `web_app_wipe` — wipe logic tasks + +### `common` role + +File: `roles/common/tasks/main.yml` + +What was done: + +1. **Packages block** tagged `packages`. +2. **Users block** tagged `users`. +3. Added **rescue** for APT cache update failures: + - best-effort `apt-get update --fix-missing` + - retry cache update. +4. Added an outer **always** section that writes `/tmp/ansible_common_role_completed.log`. + +Notes: + +- User management is **optional** and controlled by `common_users` (default empty list => no changes). 
+- SSH keys are managed with `ansible.posix.authorized_key` for better collection portability. + +### `docker` role + +File: `roles/docker/tasks/main.yml` + +What was done: + +1. **Install block** tagged `docker_install`. +2. **Config block** tagged `docker_config`. +3. Added **rescue** to handle transient Docker repository / GPG failures: + - wait 10 seconds + - retry `apt update` + - re-download and re-install the GPG key + - retry install. +4. Added an outer **always** section to ensure the Docker service is enabled and started. + +### How to test tags + +```bash +cd ansible + +# List all tags +ansible-playbook playbooks/provision.yml --list-tags + +# Run only docker role tasks +ansible-playbook playbooks/provision.yml --tags docker + +# Run only docker installation tasks +ansible-playbook playbooks/provision.yml --tags docker_install + +# Install packages only +ansible-playbook playbooks/provision.yml --tags packages + +# Skip common role +ansible-playbook playbooks/provision.yml --skip-tags common +``` + +--- + +## Docker Compose Migration + +### Role rename + +The deployment role was renamed from `app_deploy` to `web_app`. +The old `roles/app_deploy/` directory was removed from the final submission. + +### Compose template + +File: `roles/web_app/templates/docker-compose.yml.j2` + +Template supports: + +- `app_name` +- `docker_image` +- `docker_tag` +- `app_port` / `app_internal_port` +- `app_env` (dict of extra env vars) +- optional `app_secret_key` (Vault-friendly) + +It also sets sane defaults for the provided Flask app: + +- `HOST=0.0.0.0` +- `PORT={{ app_internal_port }}` + +### Role dependency + +File: `roles/web_app/meta/main.yml` + +`web_app` depends on `docker`, so a playbook that runs the whole `web_app` role will install Docker first. + +Important practical note: + +- `ansible-playbook playbooks/deploy.yml` is the safest default. +- `ansible-playbook playbooks/deploy.yml --tags web_app` also works for selective role execution. 
+- `ansible-playbook playbooks/deploy.yml --tags app_deploy` targets only the deployment block and is **not** the best choice if you expect role dependencies to be selected by tag filtering. + +### Deployment implementation + +File: `roles/web_app/tasks/main.yml` + +Deployment flow: + +1. Create `/opt/{{ app_name }}`. +2. Render `docker-compose.yml`. +3. Optional `docker login` (only if password is provided). +4. Pull the image using `community.docker.docker_image`. +5. Run `community.docker.docker_compose_v2` (`state: present`). +6. Verify app responds on `/health` and `/`. + +Idempotency test: + +```bash +ansible-playbook playbooks/deploy.yml +ansible-playbook playbooks/deploy.yml +``` + +The second run should show mostly `ok` tasks, and no container recreation unless inputs changed. + +--- + +## Wipe Logic + +Files: + +- `roles/web_app/defaults/main.yml` +- `roles/web_app/tasks/wipe.yml` +- `roles/web_app/tasks/main.yml` (includes wipe first) + +### Why variable + tag? + +This is a **double safety mechanism**: + +- Variable (`web_app_wipe=true`) prevents accidental wipe during normal runs. +- Tag (`--tags web_app_wipe`) enables a wipe-only run that does not deploy. + +### Test scenarios + +```bash +# 1) Normal deployment (wipe does NOT run) +ansible-playbook playbooks/deploy.yml + +# 2) Wipe only +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe + +# 3) Clean reinstall (wipe -> deploy) +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" + +# 4a) Safety check: tag without variable => wipe skipped +ansible-playbook playbooks/deploy.yml --tags web_app_wipe +``` + +--- + +## CI/CD Integration + +File: `.github/workflows/ansible-deploy.yml` + +### Workflow architecture + +Two-stage pipeline: + +1. **lint** (GitHub-hosted runner) + - installs Ansible + ansible-lint + - installs collections from `ansible/requirements.yml` + - runs `ansible-lint -p .` inside the `ansible/` directory. + +2. 
**deploy** (self-hosted Linux runner) + - runs on `runs-on: [self-hosted, linux]` + - installs Ansible on the runner + - installs collections from `ansible/requirements.yml` + - runs `ansible-playbook playbooks/deploy.yml` + - verifies the app with `curl /health` and `curl /`. + +### Why a Linux self-hosted runner? + +For a local Vagrant/VirtualBox environment behind NAT, a **Linux self-hosted runner** on the same machine is the simplest free option. +Using WSL2 Ubuntu is practical because: + +- the workflow already uses Bash and Linux paths +- Ansible is easier to maintain on Linux +- no public inbound SSH exposure is required +- it matches the local, free, Windows-friendly lab setup + +### Secrets + +Recommended secrets (Actions → Secrets): + +- `ANSIBLE_VAULT_PASSWORD` — only needed if you use Vault-encrypted files +- `VM_HOST` — host/IP used by the verification curl step (default fallback: `127.0.0.1`) + +If you later move to a public VM and a GitHub-hosted deploy job, add: + +- `SSH_PRIVATE_KEY` +- `VM_USER` + +--- + +## Testing Results Checklist + +Collect evidence for submission: + +- `ansible-playbook playbooks/provision.yml --list-tags` +- `ansible-playbook playbooks/provision.yml --tags docker_install` +- `ansible-playbook playbooks/deploy.yml` +- second deployment run for idempotency proof +- wipe-only run: + - `ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe` +- clean reinstall run: + - `ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true"` +- rendered file: + - `/opt/{{ app_name }}/docker-compose.yml` +- app responses: + - `curl http://:{{ app_port }}/health` + - `curl http://:{{ app_port }}/` +- GitHub Actions logs for `lint` and `deploy` + +--- + +## Challenges & Solutions + +- **Compose v2 on Ubuntu**: the docker role installs `docker-compose-plugin` so `docker compose` is available. +- **Idempotency vs pull policy**: image pulling is done explicitly via `docker_image`, while Compose uses `pull: never`. 
+- **Safety for destructive operations**: wipe logic requires both a boolean variable and, for wipe-only mode, an explicit tag.
+- **Collection portability**: `ansible.posix` was added explicitly so `authorized_key` does not depend on the full `ansible` meta-package.
+
+---
+
+## Research Answers
+
+### Blocks & Tags
+
+1. **What happens if the rescue block also fails?**
+   - The error propagates: the failing host is marked failed and removed from the play (unless errors are ignored); remaining hosts continue.
+   - The `always` section still runs.
+
+2. **Can you have nested blocks?**
+   - Yes. This lab uses an outer block with an `always` section, and inner blocks for `packages` / `users`.
+
+3. **How do tags inherit to tasks within blocks?**
+   - Tags defined at a block level are applied to all tasks in that block.
+   - Tags defined at role level (in playbooks) also apply to all tasks in the role.
+
+### Docker Compose
+
+1. **Difference between `restart: always` and `restart: unless-stopped`?**
+   - `always` always restarts the container on exit; even a manually stopped container comes back when the Docker daemon restarts.
+   - `unless-stopped` behaves the same, except a container explicitly stopped by an operator stays stopped — even after a daemon restart.
+
+2. **How do Compose networks differ from Docker bridge networks?**
+   - Compose creates and manages networks as part of an application project, with predictable naming.
+   - A classic Docker bridge network is a lower-level primitive not tied to a Compose project.
+
+3. **Can you reference Ansible Vault variables in the template?**
+   - Yes. Vault-encrypted variables are decrypted at runtime and can be used like normal variables in templates.
+
+### Wipe logic
+
+1. **Why use both variable AND tag?**
+   - The variable protects normal runs.
+   - The tag enables a wipe-only execution that skips deployment tasks.
+
+2. **Difference between `never` tag and this approach?**
+   - `never` requires explicit `--tags never` (or another selected tag on the same task) and can be confusing.
+ - Variable + tag is explicit and easier to reason about for safety-critical operations. + +3. **Why must wipe logic come BEFORE deployment?** + - It enables the clean reinstall flow: remove old state first, then deploy from scratch. + +4. **When do you want clean reinstall vs rolling update?** + - Clean reinstall: broken state, changed config/layout, major upgrades, testing fresh installs. + - Rolling update: keep service available, minimize downtime, gradual rollout. + +5. **How to extend wipe to remove images/volumes too?** + - Add Compose options such as `remove_volumes: true` when needed. + - Remove images via `community.docker.docker_image state: absent`. + - Remove named volumes explicitly with Docker modules or CLI commands. + +### CI/CD + +1. **Security implications of storing SSH keys in GitHub Secrets?** + - Secrets can be exfiltrated if workflows are compromised. + - Limit key scope, use dedicated deploy keys, restrict repo access, and rotate regularly. + +2. **How to implement staging → production pipeline?** + - Use separate environments, separate inventories, and manual approvals for production. + - Another option is separate workflows triggered on tags/releases. + +3. **What to add for rollbacks?** + - Pin image tags, keep the previous known-good tag, and add a rollback job that redeploys it. + +4. **How does a self-hosted runner improve security compared to GitHub-hosted?** + - SSH keys/secrets do not need to be sent to GitHub-hosted machines. + - The runner stays inside your controlled network perimeter. diff --git a/ansible/docs/LOCAL_VALIDATION_WINDOWS.md b/ansible/docs/LOCAL_VALIDATION_WINDOWS.md new file mode 100644 index 0000000000..7d7ba30340 --- /dev/null +++ b/ansible/docs/LOCAL_VALIDATION_WINDOWS.md @@ -0,0 +1,282 @@ +# Local Validation Guide (Windows + WSL2 + Vagrant + VirtualBox) + +This guide is designed for a **fully local and free** Lab06 workflow on **Windows**. 
+It assumes: + +- Windows 10/11 +- VirtualBox +- Vagrant +- WSL2 with Ubuntu +- GitHub repository (optional, only for CI/CD) + +--- + +## 1. Recommended Setup + +### Windows side + +Install and verify: + +```powershell +vagrant --version +VBoxManage --version +git --version +``` + +Recommended Vagrant plugins check: + +```powershell +vagrant plugin list +``` + +### WSL2 Ubuntu side + +Install and verify: + +```bash +wsl --status +python3 --version +pip3 --version +ssh -V +git --version +``` + +Install Ansible in WSL2 if missing: + +```bash +python3 -m pip install --user --upgrade pip +python3 -m pip install --user ansible ansible-lint +printf '\nexport PATH="$HOME/.local/bin:$PATH"\n' >> ~/.bashrc +source ~/.bashrc +ansible --version +ansible-lint --version +``` + +--- + +## 2. Start the VM + +Run in **PowerShell** from the repository root: + +```powershell +vagrant up +vagrant status +vagrant port +``` + +Expected important forwarded ports: + +- guest SSH `22` -> host `2222` +- guest app `8000` -> host `8000` + +If ports changed because of `auto_correct`, update the inventory accordingly. + +--- + +## 3. Prepare the SSH key for Ansible + +The Vagrant private key is usually stored under the project directory on Windows. +Copy it into WSL and fix permissions. + +Example from WSL: + +```bash +mkdir -p ~/.ssh +cp /mnt/c/path/to/your/repo/.vagrant/machines/default/virtualbox/private_key ~/.ssh/lab05_vagrant_key +chmod 600 ~/.ssh/lab05_vagrant_key +``` + +Check SSH connectivity: + +```bash +ssh -i ~/.ssh/lab05_vagrant_key -p 2222 vagrant@127.0.0.1 +``` + +If `127.0.0.1` does not work from WSL2, detect the Windows host IP and try again: + +```bash +WIN_HOST_IP=$(grep -m1 nameserver /etc/resolv.conf | awk '{print $2}') +echo "$WIN_HOST_IP" +ssh -i ~/.ssh/lab05_vagrant_key -p 2222 vagrant@"$WIN_HOST_IP" +``` + +--- + +## 4. 
Inventory Settings + +Default local-friendly inventory example: + +```ini +[webservers] +vagrant1 ansible_host=127.0.0.1 ansible_port=2222 ansible_user=vagrant ansible_ssh_private_key_file=~/.ssh/lab05_vagrant_key + +[webservers:vars] +ansible_python_interpreter=/usr/bin/python3 +``` + +If WSL2 cannot reach `127.0.0.1:2222`, replace `ansible_host` with the detected Windows host IP. + +--- + +## 5. Install Ansible collections + +Run in WSL2 from the repository root: + +```bash +cd ansible +ansible-galaxy collection install -r requirements.yml +``` + +Verify installed collections: + +```bash +ansible-galaxy collection list | grep -E 'community.docker|community.general|ansible.posix' +``` + +--- + +## 6. Basic validation commands + +### Connectivity + +```bash +cd ansible +ansible all -m ping +``` + +### Syntax and task listing + +```bash +ansible-playbook playbooks/provision.yml --syntax-check +ansible-playbook playbooks/deploy.yml --syntax-check +ansible-playbook playbooks/provision.yml --list-tags +ansible-playbook playbooks/deploy.yml --list-tasks +ansible-playbook playbooks/deploy.yml --list-tasks --tags web_app +``` + +### Lint + +```bash +ansible-lint -p . +``` + +--- + +## 7. Provisioning validation + +```bash +cd ansible +ansible-playbook playbooks/provision.yml +ansible-playbook playbooks/provision.yml +``` + +The second run should be mostly idempotent. + +Selective tag checks: + +```bash +ansible-playbook playbooks/provision.yml --tags docker +ansible-playbook playbooks/provision.yml --tags docker_install +ansible-playbook playbooks/provision.yml --tags packages +ansible-playbook playbooks/provision.yml --skip-tags common +``` + +--- + +## 8. 
Deployment validation + +```bash +cd ansible +ansible-playbook playbooks/deploy.yml +ansible-playbook playbooks/deploy.yml +``` + +Verify on the target VM: + +```bash +ssh -i ~/.ssh/lab05_vagrant_key -p 2222 vagrant@127.0.0.1 +sudo docker ps +sudo docker compose -f /opt/devops-info-service/docker-compose.yml ps +curl http://127.0.0.1:8000/health +curl http://127.0.0.1:8000/ +``` + +From WSL2 / host side: + +```bash +curl http://127.0.0.1:8000/health +curl http://127.0.0.1:8000/ +``` + +If WSL2 cannot reach Windows localhost, replace `127.0.0.1` with the Windows host IP. + +--- + +## 9. Wipe logic validation + +### Wipe only + +```bash +cd ansible +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +``` + +### Clean reinstall + +```bash +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" +``` + +### Safety check + +```bash +ansible-playbook playbooks/deploy.yml --tags web_app_wipe +``` + +Expected result: wipe tasks are selected by tag but skipped by the boolean guard. + +--- + +## 10. GitHub Actions self-hosted runner (free local option) + +Recommended approach: + +- Create the self-hosted runner in **WSL2 Ubuntu**, not in native Windows. +- Use labels that include `self-hosted` and `linux`. +- Run the runner only when you want to test CI locally. + +Why this is preferred: + +- the workflow uses Bash and Linux paths +- Ansible support is simpler on Linux +- no cloud VM is required +- no paid service is required + +Useful checks on the runner machine: + +```bash +python3 --version +ansible --version +ansible-galaxy collection list | grep -E 'community.docker|community.general|ansible.posix' +curl --version +``` + +Suggested repository secrets: + +- `ANSIBLE_VAULT_PASSWORD` (only if you really use Vault) +- `VM_HOST` with value `127.0.0.1` or the Windows host IP + +--- + +## 11. What to capture for submission + +Recommended evidence: + +1. `ansible-playbook playbooks/provision.yml --list-tags` +2. 
`ansible-playbook playbooks/provision.yml --tags docker_install` +3. First and second `ansible-playbook playbooks/deploy.yml` runs +4. Wipe-only run +5. Clean reinstall run +6. `docker compose ps` output on the VM +7. `curl /health` and `curl /` output +8. GitHub Actions `lint` and `deploy` job logs diff --git a/ansible/docs/lab06_overview.md b/ansible/docs/lab06_overview.md new file mode 100644 index 0000000000..ef0b078c1d --- /dev/null +++ b/ansible/docs/lab06_overview.md @@ -0,0 +1,605 @@ +# Local Validation Guide (Windows + WSL2 + Vagrant + VirtualBox) + +This guide is designed for a **fully local and free** Lab06 workflow on **Windows**. +It assumes: + +- Windows 10/11 +- VirtualBox +- Vagrant +- WSL2 with Ubuntu +- GitHub repository (optional, only for CI/CD) + +--- + +## 1. Recommended Setup + +### Windows side + +Install and verify: + +```powershell +vagrant --version +VBoxManage --version +git --version +``` + +Recommended Vagrant plugins check: + +```powershell +vagrant plugin list +``` + +### WSL2 Ubuntu side + +Install and verify: + +```bash +wsl --status +python3 --version +pip3 --version +ssh -V +git --version +``` + +Install Ansible in WSL2 if missing: + +```bash +python3 -m pip install --user --upgrade pip +python3 -m pip install --user ansible ansible-lint +printf '\nexport PATH="$HOME/.local/bin:$PATH"\n' >> ~/.bashrc +source ~/.bashrc +ansible --version +ansible-lint --version +``` + +--- + +## 2. Start the VM + +Run in **PowerShell** from the repository root: + +```powershell +vagrant up +vagrant status +vagrant port +``` + +Expected important forwarded ports: + +- guest SSH `22` -> host `2222` +- guest app `8000` -> host `8000` + +If ports changed because of `auto_correct`, update the inventory accordingly. + +--- + +## 3. Prepare the SSH key for Ansible + +The Vagrant private key is usually stored under the project directory on Windows. +Copy it into WSL and fix permissions. 
+ +Example from WSL: + +```bash +mkdir -p ~/.ssh +cp /mnt/c/path/to/your/repo/.vagrant/machines/default/virtualbox/private_key ~/.ssh/lab05_vagrant_key +chmod 600 ~/.ssh/lab05_vagrant_key +``` + +Check SSH connectivity: + +```bash +ssh -i ~/.ssh/lab05_vagrant_key -p 2222 vagrant@127.0.0.1 +``` + +If `127.0.0.1` does not work from WSL2, detect the Windows host IP and try again: + +```bash +WIN_HOST_IP=$(grep -m1 nameserver /etc/resolv.conf | awk '{print $2}') +echo "$WIN_HOST_IP" +ssh -i ~/.ssh/lab05_vagrant_key -p 2222 vagrant@"$WIN_HOST_IP" +``` + +--- + +## 4. Inventory Settings + +Default local-friendly inventory example: + +```ini +[webservers] +vagrant1 ansible_host=127.0.0.1 ansible_port=2222 ansible_user=vagrant ansible_ssh_private_key_file=~/.ssh/lab05_vagrant_key + +[webservers:vars] +ansible_python_interpreter=/usr/bin/python3 +``` + +If WSL2 cannot reach `127.0.0.1:2222`, replace `ansible_host` with the detected Windows host IP. + +--- + +## 5. Install Ansible collections + +Run in WSL2 from the repository root: + +```bash +cd ansible +ansible-galaxy collection install -r requirements.yml +``` + +Verify installed collections: + +```bash +ansible-galaxy collection list | grep -E 'community.docker|community.general|ansible.posix' +``` + +--- + +## 6. Basic validation commands + +### Connectivity + +```bash +cd ansible +ansible all -m ping +``` + +### Syntax and task listing + +```bash +ansible-playbook playbooks/provision.yml --syntax-check +ansible-playbook playbooks/deploy.yml --syntax-check +ansible-playbook playbooks/provision.yml --list-tags +ansible-playbook playbooks/deploy.yml --list-tasks +ansible-playbook playbooks/deploy.yml --list-tasks --tags web_app +``` + +### Lint + +```bash +ansible-lint -p . +``` + +--- + +## 7. Provisioning validation + +```bash +cd ansible +ansible-playbook playbooks/provision.yml +ansible-playbook playbooks/provision.yml +``` + +The second run should be mostly idempotent. 
+ +Selective tag checks: + +```bash +ansible-playbook playbooks/provision.yml --tags docker +ansible-playbook playbooks/provision.yml --tags docker_install +ansible-playbook playbooks/provision.yml --tags packages +ansible-playbook playbooks/provision.yml --skip-tags common +``` + +--- + +## 8. Deployment validation + +```bash +cd ansible +ansible-playbook playbooks/deploy.yml +ansible-playbook playbooks/deploy.yml +``` + +Verify on the target VM: + +```bash +ssh -i ~/.ssh/lab05_vagrant_key -p 2222 vagrant@127.0.0.1 +sudo docker ps +sudo docker compose -f /opt/devops-info-service/docker-compose.yml ps +curl http://127.0.0.1:8000/health +curl http://127.0.0.1:8000/ +``` + +From WSL2 / host side: + +```bash +curl http://127.0.0.1:8000/health +curl http://127.0.0.1:8000/ +``` + +If WSL2 cannot reach Windows localhost, replace `127.0.0.1` with the Windows host IP. + +--- + +## 9. Wipe logic validation + +### Wipe only + +```bash +cd ansible +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe +``` + +### Clean reinstall + +```bash +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" +``` + +### Safety check + +```bash +ansible-playbook playbooks/deploy.yml --tags web_app_wipe +``` + +Expected result: wipe tasks are selected by tag but skipped by the boolean guard. + +--- + +## 10. GitHub Actions self-hosted runner (free local option) + +Recommended approach: + +- Create the self-hosted runner in **WSL2 Ubuntu**, not in native Windows. +- Use labels that include `self-hosted` and `linux`. +- Run the runner only when you want to test CI locally. 
+ +Why this is preferred: + +- the workflow uses Bash and Linux paths +- Ansible support is simpler on Linux +- no cloud VM is required +- no paid service is required + +Useful checks on the runner machine: + +```bash +python3 --version +ansible --version +ansible-galaxy collection list | grep -E 'community.docker|community.general|ansible.posix' +curl --version +``` + +Suggested repository secrets: + +- `ANSIBLE_VAULT_PASSWORD` (only if you really use Vault) +- `VM_HOST` with value `127.0.0.1` or the Windows host IP + +--- + +## 11. What to capture for submission + +Recommended evidence: + +1. `ansible-playbook playbooks/provision.yml --list-tags` +2. `ansible-playbook playbooks/provision.yml --tags docker_install` +3. First and second `ansible-playbook playbooks/deploy.yml` runs +4. Wipe-only run +5. Clean reinstall run +6. `docker compose ps` output on the VM +7. `curl /health` and `curl /` output +8. GitHub Actions `lint` and `deploy` job logs + +## Overview + +This lab upgrades the Lab05 Ansible project to a more production-ready setup: + +- Refactored roles to use **blocks**, **rescue/always**, and a clear **tag strategy**. +- Migrated the application deployment from `docker run` to **Docker Compose v2**. +- Added **role dependency** (`web_app` depends on `docker`) to guarantee execution order. +- Implemented **safe wipe logic** (double-gated: variable + tag). +- Added a **GitHub Actions** workflow for Ansible linting + deployment. + +Tech used: **Ansible 2.16+**, **community.docker**, **Docker Compose v2**, **GitHub Actions**, **Jinja2**. 
+ +--- + +## Blocks & Tags + +### Tag strategy + +- `common` — the whole common role (set at playbook role level) +- `packages` — package management tasks inside `common` +- `users` — user management tasks inside `common` + +- `docker` — the whole docker role (set at playbook role level) +- `docker_install` — Docker installation + repo setup +- `docker_config` — Docker group/user configuration + +- `web_app` — the whole deployment role (set at playbook role level) +- `app_deploy` — deployment block inside `web_app` +- `compose` — Docker Compose related tasks in `web_app` +- `web_app_wipe` — wipe logic tasks + +### `common` role + +File: `roles/common/tasks/main.yml` + +What was done: + +1. **Packages block** tagged `packages`. +2. **Users block** tagged `users`. +3. Added **rescue** for APT cache update failures: + - best-effort `apt-get update --fix-missing` + - retry cache update. +4. Added an outer **always** section that writes `/tmp/ansible_common_role_completed.log`. + +Notes: + +- User management is **optional** and controlled by `common_users` (default empty list => no changes). +- SSH keys are managed with `ansible.posix.authorized_key` for better collection portability. + +### `docker` role + +File: `roles/docker/tasks/main.yml` + +What was done: + +1. **Install block** tagged `docker_install`. +2. **Config block** tagged `docker_config`. +3. Added **rescue** to handle transient Docker repository / GPG failures: + - wait 10 seconds + - retry `apt update` + - re-download and re-install the GPG key + - retry install. +4. Added an outer **always** section to ensure the Docker service is enabled and started. 
+ +### How to test tags + +```bash +cd ansible + +# List all tags +ansible-playbook playbooks/provision.yml --list-tags + +# Run only docker role tasks +ansible-playbook playbooks/provision.yml --tags docker + +# Run only docker installation tasks +ansible-playbook playbooks/provision.yml --tags docker_install + +# Install packages only +ansible-playbook playbooks/provision.yml --tags packages + +# Skip common role +ansible-playbook playbooks/provision.yml --skip-tags common +``` + +--- + +## Docker Compose Migration + +### Role rename + +The deployment role was renamed from `app_deploy` to `web_app`. +The old `roles/app_deploy/` directory was removed from the final submission. + +### Compose template + +File: `roles/web_app/templates/docker-compose.yml.j2` + +Template supports: + +- `app_name` +- `docker_image` +- `docker_tag` +- `app_port` / `app_internal_port` +- `app_env` (dict of extra env vars) +- optional `app_secret_key` (Vault-friendly) + +It also sets sane defaults for the provided Flask app: + +- `HOST=0.0.0.0` +- `PORT={{ app_internal_port }}` + +### Role dependency + +File: `roles/web_app/meta/main.yml` + +`web_app` depends on `docker`, so a playbook that runs the whole `web_app` role will install Docker first. + +Important practical note: + +- `ansible-playbook playbooks/deploy.yml` is the safest default. +- `ansible-playbook playbooks/deploy.yml --tags web_app` also works for selective role execution. +- `ansible-playbook playbooks/deploy.yml --tags app_deploy` targets only the deployment block and is **not** the best choice if you expect role dependencies to be selected by tag filtering. + +### Deployment implementation + +File: `roles/web_app/tasks/main.yml` + +Deployment flow: + +1. Create `/opt/{{ app_name }}`. +2. Render `docker-compose.yml`. +3. Optional `docker login` (only if password is provided). +4. Pull the image using `community.docker.docker_image`. +5. Run `community.docker.docker_compose_v2` (`state: present`). +6. 
Verify app responds on `/health` and `/`. + +Idempotency test: + +```bash +ansible-playbook playbooks/deploy.yml +ansible-playbook playbooks/deploy.yml +``` + +The second run should show mostly `ok` tasks, and no container recreation unless inputs changed. + +--- + +## Wipe Logic + +Files: + +- `roles/web_app/defaults/main.yml` +- `roles/web_app/tasks/wipe.yml` +- `roles/web_app/tasks/main.yml` (includes wipe first) + +### Why variable + tag? + +This is a **double safety mechanism**: + +- Variable (`web_app_wipe=true`) prevents accidental wipe during normal runs. +- Tag (`--tags web_app_wipe`) enables a wipe-only run that does not deploy. + +### Test scenarios + +```bash +# 1) Normal deployment (wipe does NOT run) +ansible-playbook playbooks/deploy.yml + +# 2) Wipe only +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe + +# 3) Clean reinstall (wipe -> deploy) +ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" + +# 4a) Safety check: tag without variable => wipe skipped +ansible-playbook playbooks/deploy.yml --tags web_app_wipe +``` + +--- + +## CI/CD Integration + +File: `.github/workflows/ansible-deploy.yml` + +### Workflow architecture + +Two-stage pipeline: + +1. **lint** (GitHub-hosted runner) + - installs Ansible + ansible-lint + - installs collections from `ansible/requirements.yml` + - runs `ansible-lint -p .` inside the `ansible/` directory. + +2. **deploy** (self-hosted Linux runner) + - runs on `runs-on: [self-hosted, linux]` + - installs Ansible on the runner + - installs collections from `ansible/requirements.yml` + - runs `ansible-playbook playbooks/deploy.yml` + - verifies the app with `curl /health` and `curl /`. + +### Why a Linux self-hosted runner? + +For a local Vagrant/VirtualBox environment behind NAT, a **Linux self-hosted runner** on the same machine is the simplest free option. 
+Using WSL2 Ubuntu is practical because: + +- the workflow already uses Bash and Linux paths +- Ansible is easier to maintain on Linux +- no public inbound SSH exposure is required +- it matches the local, free, Windows-friendly lab setup + +### Secrets + +Recommended secrets (Actions → Secrets): + +- `ANSIBLE_VAULT_PASSWORD` — only needed if you use Vault-encrypted files +- `VM_HOST` — host/IP used by the verification curl step (default fallback: `127.0.0.1`) + +If you later move to a public VM and a GitHub-hosted deploy job, add: + +- `SSH_PRIVATE_KEY` +- `VM_USER` + +--- + +## Testing Results Checklist + +Collect evidence for submission: + +- `ansible-playbook playbooks/provision.yml --list-tags` +- `ansible-playbook playbooks/provision.yml --tags docker_install` +- `ansible-playbook playbooks/deploy.yml` +- second deployment run for idempotency proof +- wipe-only run: + - `ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true" --tags web_app_wipe` +- clean reinstall run: + - `ansible-playbook playbooks/deploy.yml -e "web_app_wipe=true"` +- rendered file: + - `/opt/{{ app_name }}/docker-compose.yml` +- app responses: + - `curl http://:{{ app_port }}/health` + - `curl http://:{{ app_port }}/` +- GitHub Actions logs for `lint` and `deploy` + +--- + +## Challenges & Solutions + +- **Compose v2 on Ubuntu**: the docker role installs `docker-compose-plugin` so `docker compose` is available. +- **Idempotency vs pull policy**: image pulling is done explicitly via `docker_image`, while Compose uses `pull: never`. +- **Safety for destructive operations**: wipe logic requires both a boolean variable and, for wipe-only mode, an explicit tag. +- **Collection portability**: `ansible.posix` was added explicitly so `authorized_key` does not depend on the full `ansible` meta-package. + +--- + +## Research Answers + +### Blocks & Tags + +1. 
**What happens if the rescue block also fails?**
+   - The error propagates: the failing host is marked failed and removed from the play (unless errors are ignored); remaining hosts continue.
+   - The `always` section still runs.
+
+2. **Can you have nested blocks?**
+   - Yes. This lab uses an outer block with an `always` section, and inner blocks for `packages` / `users`.
+
+3. **How do tags inherit to tasks within blocks?**
+   - Tags defined at a block level are applied to all tasks in that block.
+   - Tags defined at role level (in playbooks) also apply to all tasks in the role.
+
+### Docker Compose
+
+1. **Difference between `restart: always` and `restart: unless-stopped`?**
+   - `always` always restarts the container on exit; even a manually stopped container comes back when the Docker daemon restarts.
+   - `unless-stopped` behaves the same, except a container explicitly stopped by an operator stays stopped — even after a daemon restart.
+
+2. **How do Compose networks differ from Docker bridge networks?**
+   - Compose creates and manages networks as part of an application project, with predictable naming.
+   - A classic Docker bridge network is a lower-level primitive not tied to a Compose project.
+
+3. **Can you reference Ansible Vault variables in the template?**
+   - Yes. Vault-encrypted variables are decrypted at runtime and can be used like normal variables in templates.
+
+### Wipe logic
+
+1. **Why use both variable AND tag?**
+   - The variable protects normal runs.
+   - The tag enables a wipe-only execution that skips deployment tasks.
+
+2. **Difference between `never` tag and this approach?**
+   - `never` requires explicit `--tags never` (or another selected tag on the same task) and can be confusing.
+   - Variable + tag is explicit and easier to reason about for safety-critical operations.
+
+3. **Why must wipe logic come BEFORE deployment?**
+   - It enables the clean reinstall flow: remove old state first, then deploy from scratch.
+
+4. **When do you want clean reinstall vs rolling update?**
+   - Clean reinstall: broken state, changed config/layout, major upgrades, testing fresh installs.
+ - Rolling update: keep service available, minimize downtime, gradual rollout. + +5. **How to extend wipe to remove images/volumes too?** + - Add Compose options such as `remove_volumes: true` when needed. + - Remove images via `community.docker.docker_image state: absent`. + - Remove named volumes explicitly with Docker modules or CLI commands. + +### CI/CD + +1. **Security implications of storing SSH keys in GitHub Secrets?** + - Secrets can be exfiltrated if workflows are compromised. + - Limit key scope, use dedicated deploy keys, restrict repo access, and rotate regularly. + +2. **How to implement staging → production pipeline?** + - Use separate environments, separate inventories, and manual approvals for production. + - Another option is separate workflows triggered on tags/releases. + +3. **What to add for rollbacks?** + - Pin image tags, keep the previous known-good tag, and add a rollback job that redeploys it. + +4. **How does a self-hosted runner improve security compared to GitHub-hosted?** + - SSH keys/secrets do not need to be sent to GitHub-hosted machines. + - The runner stays inside your controlled network perimeter. + diff --git a/ansible/group_vars/all.yml.example b/ansible/group_vars/all.yml.example new file mode 100644 index 0000000000..e82ce0e542 --- /dev/null +++ b/ansible/group_vars/all.yml.example @@ -0,0 +1,17 @@ +--- +# Example only. Store real values in an encrypted file, for example: +# ansible-vault create ansible/group_vars/all.yml + +dockerhub_username: "CHANGE_ME" +dockerhub_password: "CHANGE_ME" # Prefer a token instead of a password + +app_name: "devops-info-service" +docker_image: "{{ dockerhub_username }}/{{ app_name }}" +docker_tag: "latest" + +# Course default is 8000, but you can change it if your lab setup requires it. 
+app_port: 8000 +app_internal_port: 8000 + +# Extra environment variables for the container (dict) +app_env: {} diff --git a/ansible/group_vars/webservers.yml b/ansible/group_vars/webservers.yml new file mode 100644 index 0000000000..2d245cbfeb --- /dev/null +++ b/ansible/group_vars/webservers.yml @@ -0,0 +1,15 @@ +--- +dockerhub_username: "dorley174" +dockerhub_password: "" + +# Application / image +app_name: "devops-info-service" +docker_image: "{{ dockerhub_username }}/{{ app_name }}" +docker_tag: "latest" + +# Expose the app on the course-standard port. +app_port: 8000 +app_internal_port: 8000 + +# Extra environment variables for the container (optional) +app_env: {} diff --git a/ansible/inventory/group_vars/webservers.yml b/ansible/inventory/group_vars/webservers.yml new file mode 100644 index 0000000000..2d245cbfeb --- /dev/null +++ b/ansible/inventory/group_vars/webservers.yml @@ -0,0 +1,15 @@ +--- +dockerhub_username: "dorley174" +dockerhub_password: "" + +# Application / image +app_name: "devops-info-service" +docker_image: "{{ dockerhub_username }}/{{ app_name }}" +docker_tag: "latest" + +# Expose the app on the course-standard port. +app_port: 8000 +app_internal_port: 8000 + +# Extra environment variables for the container (optional) +app_env: {} diff --git a/ansible/inventory/hosts.ini b/ansible/inventory/hosts.ini new file mode 100644 index 0000000000..d521f32a26 --- /dev/null +++ b/ansible/inventory/hosts.ini @@ -0,0 +1,5 @@ +[webservers] +vagrant1 ansible_host=172.19.144.1 ansible_port=2222 ansible_user=vagrant ansible_ssh_private_key_file=/home/dorley/.ssh/lab05_vagrant_key + +[webservers:vars] +ansible_python_interpreter=/usr/bin/python3 diff --git a/ansible/inventory/yandex.yml.example b/ansible/inventory/yandex.yml.example new file mode 100644 index 0000000000..5d211343b3 --- /dev/null +++ b/ansible/inventory/yandex.yml.example @@ -0,0 +1,32 @@ +--- +# Пример dynamic inventory для Yandex Cloud (Bonus). 
+# Требует установленного Python SDK и/или ansible collection, в зависимости от плагина. +# Скопируйте в ansible/inventory/yandex.yml и заполните параметры. +# +# ВНИМАНИЕ: точное имя плагина и поля могут отличаться в зависимости от коллекции. +# См. `ansible-doc -t inventory -l | grep -i yandex`. + +plugin: yandex.cloud.yandex_compute + +# Один из вариантов аутентификации (пример): +# auth_kind: serviceaccount +# service_account_key_file: /home/<username>/.config/yandex-cloud/key.json + +folder_id: "CHANGE_ME_FOLDER_ID" + +# Группируем ВМ по label project=lab04, чтобы автоматически получить группу webservers +filters: + labels.project: "lab04" + +# Собираем ansible_host из публичного IP +compose: + ansible_host: network_interfaces[0].primary_v4_address.one_to_one_nat.address + ansible_user: "'ubuntu'"  # compose values are Jinja2 expressions; literals need inner quotes + +keyed_groups: + - key: labels.project + prefix: "project" + +# Можно дополнительно создать группу webservers +# groups: +# webservers: "labels.project == 'lab04'" diff --git a/ansible/playbooks/deploy-monitoring.yml b/ansible/playbooks/deploy-monitoring.yml new file mode 100644 index 0000000000..b36ea3a65a --- /dev/null +++ b/ansible/playbooks/deploy-monitoring.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy monitoring stack + hosts: webservers + become: true + + roles: + - role: monitoring + tags: + - monitoring diff --git a/ansible/playbooks/deploy.yml b/ansible/playbooks/deploy.yml new file mode 100644 index 0000000000..f9c3864e66 --- /dev/null +++ b/ansible/playbooks/deploy.yml @@ -0,0 +1,10 @@ +--- +- name: Deploy application + hosts: webservers + become: true + + roles: + # Docker is a dependency of web_app (see roles/web_app/meta/main.yml) + - role: web_app + tags: + - web_app diff --git a/ansible/playbooks/provision.yml b/ansible/playbooks/provision.yml new file mode 100644 index 0000000000..9f6954005f --- /dev/null +++ b/ansible/playbooks/provision.yml @@ -0,0 +1,13 @@ +--- +- name: Provision web servers + hosts: webservers + become: true + + roles: + - role: common + tags: 
+ - common + + - role: docker + tags: + - docker diff --git a/ansible/playbooks/site.yml b/ansible/playbooks/site.yml new file mode 100644 index 0000000000..6606e5aea3 --- /dev/null +++ b/ansible/playbooks/site.yml @@ -0,0 +1,14 @@ +--- +- name: Provision and deploy + hosts: webservers + become: true + + roles: + - role: common + tags: + - common + + # Docker role is executed automatically as a dependency of web_app. + - role: web_app + tags: + - web_app diff --git a/ansible/requirements.yml b/ansible/requirements.yml new file mode 100644 index 0000000000..ff73cd7f28 --- /dev/null +++ b/ansible/requirements.yml @@ -0,0 +1,5 @@ +--- +collections: + - name: community.docker + - name: community.general + - name: ansible.posix diff --git a/ansible/roles/common/defaults/main.yml b/ansible/roles/common/defaults/main.yml new file mode 100644 index 0000000000..f7648321e4 --- /dev/null +++ b/ansible/roles/common/defaults/main.yml @@ -0,0 +1,35 @@ +--- +# Packages that are useful on almost any Ubuntu server +common_packages: + - ca-certificates + - curl + - git + - vim + - htop + - jq + - unzip + - python3-pip + - python3-venv + - tzdata + +# Default timezone (change if needed) +common_timezone: "Europe/Moscow" + +# In WSL and some minimal environments, timedatectl may be unavailable. +# By default, timezone changes are disabled. +common_set_timezone: false + +# --------------------------------------------------------------------------- +# User management (optional) +# --------------------------------------------------------------------------- +# Lab06 requires a "users" block. To avoid unexpected changes on shared hosts, +# this is disabled by default (empty list => no-op). +# +# Example: +# common_users: +# - name: devops +# groups: sudo +# sudo_nopasswd: true +# authorized_keys: +# - "ssh-ed25519 AAAA... 
your_key" +common_users: [] diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml new file mode 100644 index 0000000000..514aa4e705 --- /dev/null +++ b/ansible/roles/common/tasks/main.yml @@ -0,0 +1,101 @@ +--- + +# This role demonstrates advanced Ansible features (Lab06): +# - nested blocks +# - rescue/always error handling +# - tag strategy for selective execution + +- name: Common role tasks + block: + # ----------------------------------------------------------------------- + # Packages + # ----------------------------------------------------------------------- + - name: Install common packages + block: + - name: Update apt cache + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + + - name: Install common packages + ansible.builtin.apt: + name: "{{ common_packages }}" + state: present + + rescue: + # NOTE: If this rescue block fails too, Ansible will stop the play. + # The outer 'always' section will still run. + - name: Fix APT metadata issues (best-effort) # noqa command-instead-of-module + ansible.builtin.command: apt-get update --fix-missing + changed_when: false + + - name: Retry apt cache update + ansible.builtin.apt: + update_cache: true + cache_valid_time: 3600 + + become: true + tags: + - packages + + # ----------------------------------------------------------------------- + # Users + # ----------------------------------------------------------------------- + - name: Manage users (optional) + block: + - name: Ensure requested users exist + ansible.builtin.user: + name: "{{ item.name }}" + groups: "{{ item.groups | default(omit) }}" + shell: "{{ item.shell | default('/bin/bash') }}" + create_home: true + state: present + loop: "{{ common_users }}" + loop_control: + label: "{{ item.name }}" + + - name: Configure passwordless sudo (optional) + ansible.builtin.copy: + dest: "/etc/sudoers.d/{{ item.name }}" + content: "{{ item.name }} ALL=(ALL) NOPASSWD:ALL\n" + mode: "0440" + validate: "visudo -cf %s" + 
when: item.sudo_nopasswd | default(false) | bool + loop: "{{ common_users }}" + loop_control: + label: "{{ item.name }}" + + - name: Add SSH authorized keys (optional) + ansible.posix.authorized_key: + user: "{{ item.0.name }}" + key: "{{ item.1 }}" + state: present + loop: "{{ common_users | subelements('authorized_keys', skip_missing=True) }}" + loop_control: + label: "{{ item.0.name }}" + + become: true + tags: + - users + + # ----------------------------------------------------------------------- + # Other system settings + # ----------------------------------------------------------------------- + - name: Set timezone + community.general.timezone: + name: "{{ common_timezone }}" + when: common_set_timezone | bool + become: true + + always: + # A simple completion marker that can be used as "evidence". + - name: Log common role completion + ansible.builtin.copy: + dest: /tmp/ansible_common_role_completed.log + content: "common role completed at {{ ansible_date_time.iso8601 }}\n" + mode: "0644" + become: true + changed_when: false + tags: + - packages + - users diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 0000000000..7c7861e59f --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,19 @@ +--- +# User to be added to docker group. +# By default uses the SSH user (Vagrant boxes usually use "vagrant"). +docker_user: "{{ ansible_user | default('vagrant') }}" + +# Official Docker repository key and repo +# (Ubuntu) +docker_gpg_key_url: "https://download.docker.com/linux/ubuntu/gpg" +docker_keyring_path: "/etc/apt/keyrings/docker.gpg" +docker_repo_filename: "docker" + +# Docker packages to install +# docker-compose-plugin gives `docker compose` command. 
+docker_packages: + - docker-ce + - docker-ce-cli + - containerd.io + - docker-buildx-plugin + - docker-compose-plugin diff --git a/ansible/roles/docker/handlers/main.yml b/ansible/roles/docker/handlers/main.yml new file mode 100644 index 0000000000..0162ba52da --- /dev/null +++ b/ansible/roles/docker/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart Docker + ansible.builtin.service: + name: docker + state: restarted diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 0000000000..b6734814c6 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,155 @@ +--- + +# Lab06: this role is refactored to use blocks, tags and error handling. +# Tags: +# - docker (role-level tag, set in playbooks) +# - docker_install (installation only) +# - docker_config (configuration only) + +- name: Docker role tasks + block: + # ----------------------------------------------------------------------- + # Installation + # ----------------------------------------------------------------------- + - name: Install Docker Engine + block: + - name: Install prerequisites for Docker repository + ansible.builtin.apt: + name: + - ca-certificates + - curl + - gnupg + state: present + update_cache: true + + - name: Ensure /etc/apt/keyrings exists + ansible.builtin.file: + path: /etc/apt/keyrings + state: directory + mode: "0755" + + - name: Download Docker GPG key (ASCII) + ansible.builtin.get_url: + url: "{{ docker_gpg_key_url }}" + dest: /tmp/docker.gpg + mode: "0644" + register: docker_gpg_download + + - name: Check if Docker keyring already exists + ansible.builtin.stat: + path: "{{ docker_keyring_path }}" + register: docker_keyring_stat + + - name: Convert (dearmor) Docker GPG key to keyring + ansible.builtin.command: + cmd: "gpg --dearmor -o {{ docker_keyring_path }} /tmp/docker.gpg" + when: docker_gpg_download.changed or (not docker_keyring_stat.stat.exists) + changed_when: true + notify: Restart Docker + + - name: 
Set correct permissions on Docker keyring + ansible.builtin.file: + path: "{{ docker_keyring_path }}" + mode: "0644" + + - name: Set Docker APT architecture mapping + ansible.builtin.set_fact: + docker_apt_arch: "{{ {'x86_64': 'amd64', 'aarch64': 'arm64'}.get(ansible_architecture, ansible_architecture) }}" + + - name: Add official Docker APT repository + ansible.builtin.apt_repository: + repo: >- + deb [arch={{ docker_apt_arch }} signed-by={{ docker_keyring_path }}] + https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable + state: present + filename: "{{ docker_repo_filename }}" + + - name: Install Docker Engine packages + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + update_cache: true + notify: Restart Docker + + - name: Install Docker SDK for Python on target (for Ansible docker modules) + ansible.builtin.apt: + name: python3-docker + state: present + + rescue: + # Docker repository setup can occasionally fail (GPG/network hiccups). + # We do a best-effort retry after a short wait. 
+ - name: Wait 10 seconds before retrying Docker repository setup + ansible.builtin.pause: + seconds: 10 + + - name: Retry apt cache update + ansible.builtin.apt: + update_cache: true + + - name: Re-download Docker GPG key (force) + ansible.builtin.get_url: + url: "{{ docker_gpg_key_url }}" + dest: /tmp/docker.gpg + mode: "0644" + force: true + + - name: Retry converting (dearmor) Docker GPG key + ansible.builtin.command: + cmd: "gpg --dearmor -o {{ docker_keyring_path }} /tmp/docker.gpg" + changed_when: true + notify: Restart Docker + + - name: Retry adding Docker APT repository + ansible.builtin.apt_repository: + repo: >- + deb [arch={{ docker_apt_arch | default('amd64') }} signed-by={{ docker_keyring_path }}] + https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable + state: present + filename: "{{ docker_repo_filename }}" + + - name: Retry installing Docker Engine packages + ansible.builtin.apt: + name: "{{ docker_packages }}" + state: present + update_cache: true + notify: Restart Docker + + become: true + tags: + - docker_install + + # ----------------------------------------------------------------------- + # Configuration + # ----------------------------------------------------------------------- + - name: Configure Docker + block: + - name: Ensure docker group exists + ansible.builtin.group: + name: docker + state: present + + - name: Add user to docker group + ansible.builtin.user: + name: "{{ docker_user }}" + groups: docker + append: true + + become: true + tags: + - docker_config + + always: + # Lab06 requirement: always ensure Docker service is enabled & started. + # If Docker is not installed yet, we do not fail the whole play. 
+ - name: Ensure Docker service is enabled and running + ansible.builtin.service: + name: docker + state: started + enabled: true + become: true + register: docker_service_result + failed_when: false + tags: + - docker_install + - docker_config diff --git a/ansible/roles/monitoring/defaults/main.yml b/ansible/roles/monitoring/defaults/main.yml new file mode 100644 index 0000000000..2524c6e8fe --- /dev/null +++ b/ansible/roles/monitoring/defaults/main.yml @@ -0,0 +1,76 @@ +--- +monitoring_project_dir: /opt/monitoring +monitoring_compose_project_name: devops-monitoring + +monitoring_loki_version: "3.0.0" +monitoring_promtail_version: "3.0.0" +monitoring_grafana_version: "12.3.1" +monitoring_prometheus_version: "v3.9.0" + +monitoring_loki_port: 3100 +monitoring_promtail_port: 9080 +monitoring_grafana_port: 3000 +monitoring_prometheus_port: 9090 +monitoring_app_port: 8000 +monitoring_app_internal_port: 8000 + +monitoring_loki_retention_period: "168h" +monitoring_prometheus_retention_days: 15 +monitoring_prometheus_retention_size: "10GB" +monitoring_prometheus_scrape_interval: "15s" + +monitoring_grafana_admin_user: admin +monitoring_grafana_admin_password: ChangeMe_Lab08! 
+monitoring_loki_datasource_uid: loki +monitoring_loki_datasource_name: Loki +monitoring_prometheus_datasource_uid: prometheus +monitoring_prometheus_datasource_name: Prometheus + +monitoring_app_service_name: app-python +monitoring_app_container_name: devops-python +monitoring_app_label: devops-python +monitoring_app_image: devops-info-service:lab08 +monitoring_app_source_dir: "{{ playbook_dir }}/../../app_python" +monitoring_app_source_files: + - .dockerignore + - Dockerfile + - requirements.txt + - app.py +monitoring_app_env: + HOST: "0.0.0.0" + PORT: "{{ monitoring_app_internal_port | string }}" + DEBUG: "false" + +monitoring_prometheus_targets: + - job: prometheus + targets: + - localhost:9090 + - job: app + targets: + - "{{ monitoring_app_service_name }}:{{ monitoring_app_internal_port }}" + path: /metrics + - job: loki + targets: + - loki:3100 + path: /metrics + - job: grafana + targets: + - grafana:3000 + path: /metrics + +monitoring_resource_limits: + loki: + limits: { cpus: '1.0', memory: 1G } + reservations: { cpus: '0.25', memory: 256M } + promtail: + limits: { cpus: '0.5', memory: 512M } + reservations: { cpus: '0.10', memory: 128M } + grafana: + limits: { cpus: '0.5', memory: 512M } + reservations: { cpus: '0.10', memory: 128M } + prometheus: + limits: { cpus: '1.0', memory: 1G } + reservations: { cpus: '0.25', memory: 256M } + app_python: + limits: { cpus: '0.5', memory: 256M } + reservations: { cpus: '0.10', memory: 128M } diff --git a/ansible/roles/monitoring/files/lab08-metrics.json b/ansible/roles/monitoring/files/lab08-metrics.json new file mode 100644 index 0000000000..b05c0fa86a --- /dev/null +++ b/ansible/roles/monitoring/files/lab08-metrics.json @@ -0,0 +1,454 @@ +{ + "id": null, + "uid": "lab08-prometheus-metrics", + "title": "Lab08 - Prometheus Metrics Overview", + "tags": [ + "lab08", + "prometheus", + "metrics", + "observability" + ], + "timezone": "browser", + "schemaVersion": 39, + "version": 1, + "refresh": "10s", + "time": { + 
"from": "now-30m", + "to": "now" + }, + "panels": [ + { + "id": 1, + "type": "timeseries", + "title": "Request Rate by Endpoint", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (endpoint) (rate(http_requests_total{endpoint!=\"/metrics\"}[5m]))", + "legendFormat": "{{endpoint}}", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + } + }, + { + "id": 2, + "type": "timeseries", + "title": "Error Rate (5xx)", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "targets": [ + { + "refId": "A", + "expr": "sum(rate(http_requests_total{status_code=~\"5..\"}[5m]))", + "legendFormat": "5xx errors/sec", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + } + }, + { + "id": 3, + "type": "timeseries", + "title": "Request Duration p95 by Endpoint", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "targets": [ + { + "refId": "A", + "expr": "histogram_quantile(0.95, sum by (le, endpoint) (rate(http_request_duration_seconds_bucket{endpoint!=\"/metrics\"}[5m])))", + "legendFormat": "{{endpoint}} p95", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + 
"tooltip": { + "mode": "multi" + } + } + }, + { + "id": 4, + "type": "stat", + "title": "Active Requests", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 8 + }, + "targets": [ + { + "refId": "A", + "expr": "sum(http_requests_in_progress)", + "legendFormat": "in-flight", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 5 + }, + { + "color": "red", + "value": 10 + } + ] + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + }, + "textMode": "auto" + } + }, + { + "id": 5, + "type": "stat", + "title": "Application Uptime", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 8 + }, + "targets": [ + { + "refId": "A", + "expr": "max(devops_info_uptime_seconds)", + "legendFormat": "uptime", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + }, + "textMode": "auto" + } + }, + { + "id": 6, + "type": "heatmap", + "title": "Request Duration Heatmap", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 16 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (le) (rate(http_request_duration_seconds_bucket{endpoint!=\"/metrics\"}[5m]))", + "legendFormat": "{{le}}", + "datasource": { + "type": "prometheus", + "uid": 
"prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "calculate": false, + "legend": { + "show": false + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "unit": "s" + } + } + }, + { + "id": 7, + "type": "piechart", + "title": "Status Code Distribution", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 16 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (status_code) (rate(http_requests_total[5m]))", + "legendFormat": "{{status_code}}", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "options": { + "legend": { + "displayMode": "list", + "placement": "right" + }, + "pieType": "pie", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + } + } + }, + { + "id": 8, + "type": "stat", + "title": "App Target Uptime", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 25 + }, + "targets": [ + { + "refId": "A", + "expr": "up{job=\"app\"}", + "legendFormat": "app", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "mappings": [ + { + "type": "value", + "options": { + "0": { + "text": "DOWN" + }, + "1": { + "text": "UP" + } + } + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "center", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + }, + "textMode": "auto" + } + }, + { + "id": 9, + "type": "timeseries", + "title": "System Info Collection p95", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 
7, + "w": 16, + "x": 8, + "y": 25 + }, + "targets": [ + { + "refId": "A", + "expr": "histogram_quantile(0.95, sum by (le) (rate(devops_info_system_collection_seconds_bucket[5m])))", + "legendFormat": "system info p95", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + } + } + ], + "templating": { + "list": [] + }, + "annotations": { + "list": [] + } +} diff --git a/ansible/roles/monitoring/meta/main.yml b/ansible/roles/monitoring/meta/main.yml new file mode 100644 index 0000000000..cb7d8e0460 --- /dev/null +++ b/ansible/roles/monitoring/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: docker diff --git a/ansible/roles/monitoring/tasks/main.yml b/ansible/roles/monitoring/tasks/main.yml new file mode 100644 index 0000000000..a0d3f2b674 --- /dev/null +++ b/ansible/roles/monitoring/tasks/main.yml @@ -0,0 +1,155 @@ +--- +- name: Deploy monitoring stack + block: + - name: Create monitoring directory structure + ansible.builtin.file: + path: "{{ item }}" + state: directory + owner: root + group: root + mode: "0755" + loop: + - "{{ monitoring_project_dir }}" + - "{{ monitoring_project_dir }}/loki" + - "{{ monitoring_project_dir }}/promtail" + - "{{ monitoring_project_dir }}/prometheus" + - "{{ monitoring_project_dir }}/grafana" + - "{{ monitoring_project_dir }}/grafana/provisioning" + - "{{ monitoring_project_dir }}/grafana/provisioning/datasources" + - "{{ monitoring_project_dir }}/grafana/provisioning/dashboards" + - "{{ monitoring_project_dir }}/grafana/dashboards" + - "{{ monitoring_project_dir }}/app-python" + + - name: Copy application source files for local image build + ansible.builtin.copy: + src: "{{ monitoring_app_source_dir }}/{{ item }}" + dest: "{{ monitoring_project_dir }}/app-python/{{ item }}" + owner: root + group: root + 
mode: "0644" + loop: "{{ monitoring_app_source_files }}" + + - name: Template monitoring environment file + ansible.builtin.template: + src: env.j2 + dest: "{{ monitoring_project_dir }}/.env" + owner: root + group: root + mode: "0600" + + - name: Template monitoring stack configuration files + ansible.builtin.template: + src: "{{ item.src }}" + dest: "{{ monitoring_project_dir }}/{{ item.dest }}" + owner: root + group: root + mode: "0644" + loop: + - { src: 'docker-compose.yml.j2', dest: 'docker-compose.yml' } + - { src: 'loki-config.yml.j2', dest: 'loki/config.yml' } + - { src: 'promtail-config.yml.j2', dest: 'promtail/config.yml' } + - { src: 'prometheus-config.yml.j2', dest: 'prometheus/prometheus.yml' } + - { src: 'grafana-datasource.yml.j2', dest: 'grafana/provisioning/datasources/loki.yml' } + - { src: 'grafana-dashboard-provider.yml.j2', dest: 'grafana/provisioning/dashboards/dashboard-provider.yml' } + - { src: 'grafana-dashboard.json.j2', dest: 'grafana/dashboards/lab07-logging.json' } + + - name: Copy Lab08 Grafana metrics dashboard + ansible.builtin.copy: + src: lab08-metrics.json + dest: "{{ monitoring_project_dir }}/grafana/dashboards/lab08-metrics.json" + owner: root + group: root + mode: "0644" + + - name: Deploy monitoring stack with Docker Compose v2 + community.docker.docker_compose_v2: + project_src: "{{ monitoring_project_dir }}" + project_name: "{{ monitoring_compose_project_name }}" + state: present + build: always + recreate: auto + register: monitoring_compose + + - name: Wait for Loki to become ready + ansible.builtin.uri: + url: "http://127.0.0.1:{{ monitoring_loki_port }}/ready" + method: GET + status_code: 200 + register: loki_ready + retries: 20 + delay: 3 + until: loki_ready.status == 200 + + - name: Wait for Prometheus to become healthy + ansible.builtin.uri: + url: "http://127.0.0.1:{{ monitoring_prometheus_port }}/-/healthy" + method: GET + status_code: 200 + register: prometheus_ready + retries: 20 + delay: 3 + until: 
prometheus_ready.status == 200 + + - name: Wait for Grafana API health endpoint + ansible.builtin.uri: + url: "http://127.0.0.1:{{ monitoring_grafana_port }}/api/health" + method: GET + user: "{{ monitoring_grafana_admin_user }}" + password: "{{ monitoring_grafana_admin_password }}" + force_basic_auth: true + status_code: 200 + register: grafana_health + retries: 20 + delay: 3 + until: grafana_health.status == 200 + + - name: Wait for monitored application health endpoint + ansible.builtin.uri: + url: "http://127.0.0.1:{{ monitoring_app_port }}/health" + method: GET + status_code: 200 + register: monitoring_app_health + retries: 20 + delay: 3 + until: monitoring_app_health.status == 200 + + - name: Wait for monitored application metrics endpoint + ansible.builtin.uri: + url: "http://127.0.0.1:{{ monitoring_app_port }}/metrics" + method: GET + status_code: 200 + return_content: true + register: monitoring_app_metrics + retries: 20 + delay: 3 + until: + - monitoring_app_metrics.status == 200 + - "'http_requests_total' in monitoring_app_metrics.content" + + - name: Verify provisioned Grafana data sources + ansible.builtin.uri: + url: "http://127.0.0.1:{{ monitoring_grafana_port }}/api/datasources/uid/{{ item.uid }}" + method: GET + user: "{{ monitoring_grafana_admin_user }}" + password: "{{ monitoring_grafana_admin_password }}" + force_basic_auth: true + status_code: 200 + loop: + - { name: '{{ monitoring_loki_datasource_name }}', uid: '{{ monitoring_loki_datasource_uid }}' } + - { name: '{{ monitoring_prometheus_datasource_name }}', uid: '{{ monitoring_prometheus_datasource_uid }}' } + loop_control: + label: "{{ item.name }}" + + rescue: + - name: Monitoring deployment failure hint + ansible.builtin.debug: + msg: | + Monitoring stack deployment failed. 
+ Helpful follow-up commands on the target host: + cd {{ monitoring_project_dir }} + docker compose ps + docker compose logs --no-color --tail=200 + + tags: + - monitoring + - monitoring_deploy diff --git a/ansible/roles/monitoring/templates/docker-compose.yml.j2 b/ansible/roles/monitoring/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..972a969a60 --- /dev/null +++ b/ansible/roles/monitoring/templates/docker-compose.yml.j2 @@ -0,0 +1,196 @@ +name: {{ monitoring_compose_project_name }} + +services: + loki: + image: grafana/loki:{{ monitoring_loki_version }} + container_name: devops-loki + command: + - -config.file=/etc/loki/config.yml + ports: + - "{{ monitoring_loki_port }}:3100" + volumes: + - ./loki/config.yml:/etc/loki/config.yml:ro + - loki-data:/loki + networks: + - logging + labels: + logging: "promtail" + app: "devops-loki" + restart: unless-stopped + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3100/ready"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "{{ monitoring_resource_limits.loki.limits.cpus }}" + memory: {{ monitoring_resource_limits.loki.limits.memory }} + reservations: + cpus: "{{ monitoring_resource_limits.loki.reservations.cpus }}" + memory: {{ monitoring_resource_limits.loki.reservations.memory }} + + promtail: + image: grafana/promtail:{{ monitoring_promtail_version }} + container_name: devops-promtail + command: + - -config.file=/etc/promtail/config.yml + ports: + - "{{ monitoring_promtail_port }}:9080" + volumes: + - ./promtail/config.yml:/etc/promtail/config.yml:ro + - promtail-data:/var/lib/promtail + - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/run/docker.sock:/var/run/docker.sock:ro + networks: + - logging + labels: + logging: "promtail" + app: "devops-promtail" + restart: unless-stopped + depends_on: + loki: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", 
"--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9080/ready"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "{{ monitoring_resource_limits.promtail.limits.cpus }}" + memory: {{ monitoring_resource_limits.promtail.limits.memory }} + reservations: + cpus: "{{ monitoring_resource_limits.promtail.reservations.cpus }}" + memory: {{ monitoring_resource_limits.promtail.reservations.memory }} + + grafana: + image: grafana/grafana:{{ monitoring_grafana_version }} + container_name: devops-grafana + env_file: + - .env + environment: + GF_SECURITY_ADMIN_USER: "${GRAFANA_ADMIN_USER:-{{ monitoring_grafana_admin_user }}}" + GF_SECURITY_ADMIN_PASSWORD: "${GRAFANA_ADMIN_PASSWORD}" + GF_AUTH_ANONYMOUS_ENABLED: "false" + GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer + GF_SECURITY_ALLOW_EMBEDDING: "false" + GF_METRICS_ENABLED: "true" + ports: + - "{{ monitoring_grafana_port }}:3000" + volumes: + - grafana-data:/var/lib/grafana + - ./grafana/provisioning:/etc/grafana/provisioning:ro + - ./grafana/dashboards:/etc/grafana/dashboards:ro + networks: + - logging + labels: + logging: "promtail" + app: "devops-grafana" + restart: unless-stopped + depends_on: + loki: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3000/api/health"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + deploy: + resources: + limits: + cpus: "{{ monitoring_resource_limits.grafana.limits.cpus }}" + memory: {{ monitoring_resource_limits.grafana.limits.memory }} + reservations: + cpus: "{{ monitoring_resource_limits.grafana.reservations.cpus }}" + memory: {{ monitoring_resource_limits.grafana.reservations.memory }} + + prometheus: + image: prom/prometheus:{{ monitoring_prometheus_version }} + container_name: devops-prometheus + command: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.path=/prometheus + - --storage.tsdb.retention.time={{ 
monitoring_prometheus_retention_days }}d + - --storage.tsdb.retention.size={{ monitoring_prometheus_retention_size }} + - --web.enable-lifecycle + ports: + - "{{ monitoring_prometheus_port }}:9090" + volumes: + - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - prometheus-data:/prometheus + networks: + - logging + labels: + logging: "promtail" + app: "devops-prometheus" + restart: unless-stopped + depends_on: + loki: + condition: service_healthy + grafana: + condition: service_healthy + {{ monitoring_app_service_name }}: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9090/-/healthy"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "{{ monitoring_resource_limits.prometheus.limits.cpus }}" + memory: {{ monitoring_resource_limits.prometheus.limits.memory }} + reservations: + cpus: "{{ monitoring_resource_limits.prometheus.reservations.cpus }}" + memory: {{ monitoring_resource_limits.prometheus.reservations.memory }} + + {{ monitoring_app_service_name }}: + build: + context: ./app-python + dockerfile: Dockerfile + image: {{ monitoring_app_image }} + container_name: {{ monitoring_app_container_name }} + environment: +{% for key, value in monitoring_app_env.items() %} + {{ key }}: {{ value | quote }} +{% endfor %} + ports: + - "{{ monitoring_app_port }}:{{ monitoring_app_internal_port }}" + networks: + - logging + labels: + logging: "promtail" + app: "{{ monitoring_app_label }}" + restart: unless-stopped + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:{{ monitoring_app_internal_port }}/health')"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "{{ monitoring_resource_limits.app_python.limits.cpus }}" + memory: {{ monitoring_resource_limits.app_python.limits.memory }} + reservations: + cpus: "{{ 
monitoring_resource_limits.app_python.reservations.cpus }}" + memory: {{ monitoring_resource_limits.app_python.reservations.memory }} + +networks: + logging: + driver: bridge + +volumes: + loki-data: + grafana-data: + promtail-data: + prometheus-data: diff --git a/ansible/roles/monitoring/templates/env.j2 b/ansible/roles/monitoring/templates/env.j2 new file mode 100644 index 0000000000..c922f823ea --- /dev/null +++ b/ansible/roles/monitoring/templates/env.j2 @@ -0,0 +1,7 @@ +GRAFANA_ADMIN_USER={{ monitoring_grafana_admin_user }} +GRAFANA_ADMIN_PASSWORD={{ monitoring_grafana_admin_password }} +GRAFANA_PORT={{ monitoring_grafana_port }} +LOKI_PORT={{ monitoring_loki_port }} +PROMTAIL_PORT={{ monitoring_promtail_port }} +PROMETHEUS_PORT={{ monitoring_prometheus_port }} +APP_PORT={{ monitoring_app_port }} diff --git a/ansible/roles/monitoring/templates/grafana-dashboard-provider.yml.j2 b/ansible/roles/monitoring/templates/grafana-dashboard-provider.yml.j2 new file mode 100644 index 0000000000..e85b723a5c --- /dev/null +++ b/ansible/roles/monitoring/templates/grafana-dashboard-provider.yml.j2 @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: lab07-logging + orgId: 1 + folder: Lab07 Logging + type: file + disableDeletion: false + allowUiUpdates: true + updateIntervalSeconds: 30 + options: + path: /etc/grafana/dashboards diff --git a/ansible/roles/monitoring/templates/grafana-dashboard.json.j2 b/ansible/roles/monitoring/templates/grafana-dashboard.json.j2 new file mode 100644 index 0000000000..e2b3426b9d --- /dev/null +++ b/ansible/roles/monitoring/templates/grafana-dashboard.json.j2 @@ -0,0 +1,163 @@ +{ + "id": null, + "uid": "lab07-logging-overview", + "title": "Lab07 - Loki Logging Overview", + "tags": [ + "lab07", + "loki", + "logging" + ], + "timezone": "browser", + "schemaVersion": 39, + "version": 1, + "refresh": "10s", + "time": { + "from": "now-30m", + "to": "now" + }, + "panels": [ + { + "id": 1, + "type": "logs", + "title": "Recent Logs (all apps)", + 
"datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 0 + }, + "targets": [ + { + "refId": "A", + "expr": "{app=~\"devops-.*\"}", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "options": { + "showTime": true, + "showLabels": true, + "sortOrder": "Descending" + } + }, + { + "id": 2, + "type": "timeseries", + "title": "Request Rate by App", + "datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (app) (rate({app=~\"devops-.*\"}[1m]))", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + } + }, + { + "id": 3, + "type": "logs", + "title": "Error Logs Only", + "datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "targets": [ + { + "refId": "A", + "expr": "{app=~\"devops-.*\"} | json | level=\"ERROR\"", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "options": { + "showTime": true, + "showLabels": true, + "sortOrder": "Descending" + } + }, + { + "id": 4, + "type": "piechart", + "title": "Log Level Distribution (last 5m)", + "datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 17 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (level) (count_over_time({app=~\"devops-.*\"} | json [5m]))", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "options": { + "legend": { + "displayMode": "list", + "placement": "right" + }, + "pieType": "pie", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + 
"fields": "" + } + } + } + ] +} \ No newline at end of file diff --git a/ansible/roles/monitoring/templates/grafana-datasource.yml.j2 b/ansible/roles/monitoring/templates/grafana-datasource.yml.j2 new file mode 100644 index 0000000000..a19cd5ec5c --- /dev/null +++ b/ansible/roles/monitoring/templates/grafana-datasource.yml.j2 @@ -0,0 +1,33 @@ +apiVersion: 1 + +deleteDatasources: + - name: {{ monitoring_loki_datasource_name }} + orgId: 1 + - name: {{ monitoring_prometheus_datasource_name }} + orgId: 1 + +prune: true + +datasources: + - name: {{ monitoring_loki_datasource_name }} + uid: {{ monitoring_loki_datasource_uid }} + type: loki + access: proxy + url: http://loki:3100 + isDefault: true + editable: false + jsonData: + maxLines: 1000 + timeout: 60 + + - name: {{ monitoring_prometheus_datasource_name }} + uid: {{ monitoring_prometheus_datasource_uid }} + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: false + editable: false + jsonData: + httpMethod: POST + prometheusType: Prometheus + timeInterval: {{ monitoring_prometheus_scrape_interval }} diff --git a/ansible/roles/monitoring/templates/loki-config.yml.j2 b/ansible/roles/monitoring/templates/loki-config.yml.j2 new file mode 100644 index 0000000000..dc0ada6071 --- /dev/null +++ b/ansible/roles/monitoring/templates/loki-config.yml.j2 @@ -0,0 +1,45 @@ +auth_enabled: false + +server: + http_listen_port: 3100 + grpc_listen_port: 9096 + +common: + path_prefix: /loki + replication_factor: 1 + ring: + kvstore: + store: inmemory + storage: + filesystem: + chunks_directory: /loki/chunks + rules_directory: /loki/rules + +schema_config: + configs: + - from: "2024-01-01" + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h + +storage_config: + tsdb_shipper: + active_index_directory: /loki/tsdb-index + cache_location: /loki/tsdb-cache + +compactor: + working_directory: /loki/compactor + compaction_interval: 10m + retention_enabled: true + 
retention_delete_delay: 2h + delete_request_store: filesystem + +limits_config: + retention_period: {{ monitoring_loki_retention_period }} + volume_enabled: true + +analytics: + reporting_enabled: false diff --git a/ansible/roles/monitoring/templates/prometheus-config.yml.j2 b/ansible/roles/monitoring/templates/prometheus-config.yml.j2 new file mode 100644 index 0000000000..f3ffd4378e --- /dev/null +++ b/ansible/roles/monitoring/templates/prometheus-config.yml.j2 @@ -0,0 +1,16 @@ +global: + scrape_interval: {{ monitoring_prometheus_scrape_interval }} + evaluation_interval: {{ monitoring_prometheus_scrape_interval }} + +scrape_configs: +{% for target in monitoring_prometheus_targets %} + - job_name: {{ target.job | quote }} +{% if target.path is defined %} + metrics_path: {{ target.path | quote }} +{% endif %} + static_configs: + - targets: +{% for endpoint in target.targets %} + - {{ endpoint | quote }} +{% endfor %} +{% endfor %} diff --git a/ansible/roles/monitoring/templates/promtail-config.yml.j2 b/ansible/roles/monitoring/templates/promtail-config.yml.j2 new file mode 100644 index 0000000000..8db835ba55 --- /dev/null +++ b/ansible/roles/monitoring/templates/promtail-config.yml.j2 @@ -0,0 +1,30 @@ +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /var/lib/promtail/positions.yml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: docker + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: [__meta_docker_container_name] + regex: '/(.*)' + target_label: container + - source_labels: [__meta_docker_container_label_app] + target_label: app + - source_labels: [__meta_docker_container_log_stream] + target_label: stream + - target_label: job + replacement: docker + pipeline_stages: + - docker: {} diff --git a/ansible/roles/web_app/defaults/main.yml 
b/ansible/roles/web_app/defaults/main.yml new file mode 100644 index 0000000000..57d9c2ea83 --- /dev/null +++ b/ansible/roles/web_app/defaults/main.yml @@ -0,0 +1,45 @@ +--- + +# --------------------------------------------------------------------------- +# Web application deployment (Docker Compose) +# --------------------------------------------------------------------------- + +# Service/container name +app_name: devops-app + +# Docker image reference (repository part, without tag) +# Example: "your_dockerhub_username/devops-info-service" +docker_image: "{{ (dockerhub_username is defined and dockerhub_username | length > 0) | ternary(dockerhub_username ~ '/' ~ app_name, app_name) }}" + +# Docker image tag/version +# Backward compatible with Lab05 variable name `docker_image_tag`. +docker_tag: "{{ docker_image_tag | default('latest') }}" + +# Host -> container port mapping +app_port: 8000 + +# Internal container port. Backward compatible with Lab05 variable `container_port`. +app_internal_port: "{{ container_port | default(8000) }}" + +# Where docker-compose.yml will be stored on the target host +compose_project_dir: "/opt/{{ app_name }}" + +# Compose file version header (mainly for readability; Compose v2 doesn't require it) +docker_compose_version: "3.8" + +# Extra environment variables passed to the container (merged in the template) +app_env: {} + +# Optional registry URL (empty means Docker Hub) +docker_registry: "" + +# --------------------------------------------------------------------------- +# Wipe Logic (Lab06 Task 3) +# --------------------------------------------------------------------------- +# Double-gated safety: +# 1) variable web_app_wipe=true +# 2) tag web_app_wipe (for wipe-only runs) +web_app_wipe: false + +# Optional: remove the image during wipe (useful to save disk space) +web_app_wipe_remove_image: false diff --git a/ansible/roles/web_app/meta/main.yml b/ansible/roles/web_app/meta/main.yml new file mode 100644 index 
0000000000..6ab81a8714 --- /dev/null +++ b/ansible/roles/web_app/meta/main.yml @@ -0,0 +1,7 @@ +--- + +# Role dependencies ensure correct execution order. +# Lab06 requirement: Docker must be installed before deploying the web app. + +dependencies: + - role: docker diff --git a/ansible/roles/web_app/tasks/main.yml b/ansible/roles/web_app/tasks/main.yml new file mode 100644 index 0000000000..a173d708f8 --- /dev/null +++ b/ansible/roles/web_app/tasks/main.yml @@ -0,0 +1,89 @@ +--- + +# Lab06 Task 3: wipe logic runs first (only when variable is true). +- name: Include wipe tasks + ansible.builtin.include_tasks: wipe.yml + tags: + - web_app_wipe + + +# Lab06 Task 2: deploy with Docker Compose v2. +- name: Deploy application with Docker Compose + block: + - name: Create application directory + ansible.builtin.file: + path: "{{ compose_project_dir }}" + state: directory + owner: root + group: root + mode: "0755" + + - name: Template docker-compose.yml + ansible.builtin.template: + src: docker-compose.yml.j2 + dest: "{{ compose_project_dir }}/docker-compose.yml" + mode: "0644" + + # Optional: if the image is private, login must be done on the target host. + - name: Login to Docker registry (optional) + community.docker.docker_login: + registry_url: "{{ (docker_registry | length > 0) | ternary(docker_registry, omit) }}" + username: "{{ dockerhub_username }}" + password: "{{ dockerhub_password }}" + no_log: true + when: + - dockerhub_username is defined + - dockerhub_password is defined + - dockerhub_password | length > 0 + + # Keep idempotency: pull only if image changed. 
+ - name: Pull application image + community.docker.docker_image: + name: "{{ docker_image }}:{{ docker_tag }}" + source: pull + register: web_app_image_pull + + - name: Deploy (docker compose up) + community.docker.docker_compose_v2: + project_src: "{{ compose_project_dir }}" + state: present + pull: never + recreate: auto + register: web_app_compose + + - name: Wait for the application port to become available + ansible.builtin.wait_for: + host: "127.0.0.1" + port: "{{ app_port }}" + timeout: 60 + + - name: Health check (/health) + ansible.builtin.uri: + url: "http://127.0.0.1:{{ app_port }}/health" + method: GET + status_code: 200 + return_content: true + register: web_app_health + retries: 10 + delay: 3 + until: web_app_health.status == 200 + + - name: Verify main endpoint (/) + ansible.builtin.uri: + url: "http://127.0.0.1:{{ app_port }}/" + method: GET + status_code: 200 + return_content: false + + rescue: + - name: Deployment failure hint + ansible.builtin.debug: + msg: | + Web app deployment failed. + Try checking logs on the target host: + cd {{ compose_project_dir }} + docker compose logs --no-color --tail=200 + + tags: + - app_deploy + - compose diff --git a/ansible/roles/web_app/tasks/wipe.yml b/ansible/roles/web_app/tasks/wipe.yml new file mode 100644 index 0000000000..df8a348e82 --- /dev/null +++ b/ansible/roles/web_app/tasks/wipe.yml @@ -0,0 +1,55 @@ +--- + +# Lab06 Task 3: Safe wipe logic. 
+# Double-gating: +# 1) web_app_wipe=true +# 2) tag web_app_wipe (for wipe-only runs) + +- name: Wipe web application + block: + - name: Check if app directory exists + ansible.builtin.stat: + path: "{{ compose_project_dir }}" + register: web_app_dir + + - name: Stop and remove containers (docker compose down) + community.docker.docker_compose_v2: + project_src: "{{ compose_project_dir }}" + state: absent + remove_orphans: true + when: web_app_dir.stat.exists + register: web_app_compose_down + failed_when: false + + - name: Remove docker-compose.yml + ansible.builtin.file: + path: "{{ compose_project_dir }}/docker-compose.yml" + state: absent + + - name: Remove application directory + ansible.builtin.file: + path: "{{ compose_project_dir }}" + state: absent + + - name: Remove application image (optional) + community.docker.docker_image: + name: "{{ docker_image }}:{{ docker_tag }}" + state: absent + when: web_app_wipe_remove_image | bool + register: web_app_image_remove + failed_when: false + + - name: Log wipe completion + ansible.builtin.copy: + dest: /tmp/ansible_web_app_wipe.log + content: "web app {{ app_name }} wiped at {{ ansible_date_time.iso8601 }}\n" + mode: "0644" + changed_when: false + + - name: Wipe summary + ansible.builtin.debug: + msg: "Application {{ app_name }} wiped successfully" + + when: web_app_wipe | bool + tags: + - web_app_wipe diff --git a/ansible/roles/web_app/templates/docker-compose.yml.j2 b/ansible/roles/web_app/templates/docker-compose.yml.j2 new file mode 100644 index 0000000000..5ae69e2951 --- /dev/null +++ b/ansible/roles/web_app/templates/docker-compose.yml.j2 @@ -0,0 +1,26 @@ +version: "{{ docker_compose_version }}" + +services: + {{ app_name }}: + image: "{{ docker_image }}:{{ docker_tag }}" + container_name: "{{ app_name }}" + ports: + - "{{ app_port }}:{{ app_internal_port }}" + environment: + HOST: "0.0.0.0" + PORT: "{{ app_internal_port }}" +{% if app_env is defined and app_env | length > 0 %} +{% for key, value in 
app_env.items() %} + {{ key }}: {{ value | quote }} +{% endfor %} +{% endif %} +{% if app_secret_key is defined %} + APP_SECRET_KEY: {{ app_secret_key | quote }} +{% endif %} + restart: unless-stopped + networks: + - {{ app_name }}_net + +networks: + {{ app_name }}_net: + driver: bridge diff --git a/app_python/.dockerignore b/app_python/.dockerignore new file mode 100644 index 0000000000..a253025893 --- /dev/null +++ b/app_python/.dockerignore @@ -0,0 +1,23 @@ +# Python bytecode / cache +__pycache__/ +*.pyc +*.pyo +*.pyd + +# Virtualenvs +.venv/ +venv/ + +# VCS +.git/ +.gitignore + +# IDEs / OS junk +.vscode/ +.idea/ +.DS_Store + +# Docs & tests are not needed at runtime +docs/ +tests/ +README.md diff --git a/app_python/.gitignore b/app_python/.gitignore new file mode 100644 index 0000000000..061f19a9c0 --- /dev/null +++ b/app_python/.gitignore @@ -0,0 +1,13 @@ +# Python +__pycache__/ +*.py[cod] +venv/ +*.log +.env + +# IDE +.vscode/ +.idea/ + +# OS +.DS_Store diff --git a/app_python/Dockerfile b/app_python/Dockerfile new file mode 100644 index 0000000000..7548f6608b --- /dev/null +++ b/app_python/Dockerfile @@ -0,0 +1,29 @@ +# Production-oriented image for a small Flask app. +# Pin a specific Python version for reproducible builds. +FROM python:3.13.1-slim + +# Python runtime defaults: no .pyc files, unbuffered logs (better for containers) +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 + +WORKDIR /app + +# Create a dedicated non-root user with numeric UID/GID. +RUN addgroup --system --gid 10001 app \ + && adduser --system --uid 10001 --ingroup app --no-create-home app + +# Install dependencies first to leverage Docker layer caching. +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt + +# Copy only the application code needed at runtime. +COPY app.py ./ + +# Drop privileges using a numeric user for Kubernetes runAsNonRoot validation. 
+USER 10001:10001 + +# Document the port (Flask defaults to 5000 in this repo) +EXPOSE 5000 + +# Start the application +CMD ["python", "app.py"] diff --git a/app_python/README.md b/app_python/README.md new file mode 100644 index 0000000000..d418c0fd0e --- /dev/null +++ b/app_python/README.md @@ -0,0 +1,168 @@ +# DevOps Info Service + +[![python-ci](https://github.com/dorley174/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg)](https://github.com/dorley174/DevOps-Core-Course/actions/workflows/python-ci.yml) + +## Overview +DevOps Info Service is a production-ready starter web service for the DevOps course. +It reports service metadata, runtime details, and basic system information. + +The service exposes two endpoints: +- `GET /` — service + system + runtime + request information +- `GET /health` — liveness health endpoint +- `GET /ready` — readiness health endpoint for Kubernetes + +## Prerequisites +- Python **3.11+** +- `pip` +- (Recommended) Virtual environment (`venv`) +- **Windows:** Python Launcher `py` is recommended + +## Installation + +```bash +python -m venv venv +# Windows: .\venv\Scripts\activate +# Linux/macOS: source venv/bin/activate + +pip install -r requirements.txt +``` + +## Running the Application + +### Default run (port 5000) +> If `PORT` is not set, the application runs on **0.0.0.0:5000**. +```bash +python app.py +``` + +### Custom configuration + +**Linux/Mac:** +```bash +HOST=127.0.0.1 PORT=8080 DEBUG=True python app.py +``` + +**Windows (PowerShell):** +```powershell +$env:HOST="127.0.0.1" +$env:PORT="8080" +$env:DEBUG="True" +python app.py +``` + +**Windows (CMD):** +```bat +set HOST=127.0.0.1 +set PORT=8080 +set DEBUG=True +python app.py +``` + +## API Endpoints + +### `GET /` +Returns service metadata, system information, runtime details, request info, and a list of available endpoints. 
+ +Example: +```bash +curl http://127.0.0.1:5000/ +``` + +### `GET /health` +Returns a minimal liveness response for monitoring and Kubernetes liveness probes. + +Example (includes HTTP status): +```bash +curl -i http://127.0.0.1:5000/health +``` + +### `GET /ready` +Returns readiness information for Kubernetes readiness probes. + +Example: +```bash +curl -i http://127.0.0.1:5000/ready +``` + +## Testing / Pretty Output + +### Pretty-printed JSON +**Windows PowerShell tip:** use `curl.exe`. +```bash +curl -s http://127.0.0.1:5000/ | python -m json.tool +``` + +## Testing & Linting (LAB03) + +> Dev dependencies live in `requirements-dev.txt` (pytest, coverage, ruff). + +Install dev deps: +```bash +pip install -r requirements-dev.txt +``` + +Run linter: +```bash +ruff check . +``` + +Run tests + coverage: +```bash +pytest -q tests --cov=. --cov-report=term-missing +``` + +## CI/CD Secrets (GitHub Actions) + +In your GitHub repository: +**Settings → Secrets and variables → Actions → New repository secret** + +Add: +- `DOCKERHUB_USERNAME` — your Docker Hub username +- `DOCKERHUB_TOKEN` — Docker Hub Access Token (Account Settings → Security) +- `SNYK_TOKEN` — Snyk API token (Account settings → API token) + +## Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| HOST | 0.0.0.0 | Bind address | +| PORT | 5000 | HTTP port | +| DEBUG | False | Flask debug mode | + +--- + +## Docker + +> Examples below use placeholders like `` and ``. + +### Build (local) + +```bash +docker build -t : . 
+``` + +### Run + +```bash +docker run --rm -p 5000:5000 : +``` + +(Optional: override env vars) + +```bash +docker run --rm -p 5000:5000 -e PORT=5000 -e DEBUG=false : +``` + +### Pull from Docker Hub + +```bash +docker pull /: +docker run --rm -p 5000:5000 /: +``` + +### Quick test + +```bash +curl http://localhost:5000/health +curl http://localhost:5000/ +``` diff --git a/app_python/app.py b/app_python/app.py new file mode 100644 index 0000000000..36e505df46 --- /dev/null +++ b/app_python/app.py @@ -0,0 +1,459 @@ +""" +DevOps Info Service +Main application module (Flask) + +Endpoints: +- GET / : service + system + runtime + request info +- GET /health : health check (for probes/monitoring) +- GET /metrics : Prometheus metrics endpoint +""" + +from __future__ import annotations + +import json +import logging +import os +import platform +import socket +import sys +import time +from datetime import datetime, timezone +from typing import Any, Dict + +from flask import Flask, Response, g, jsonify, request +from prometheus_client import CONTENT_TYPE_LATEST, Counter, Gauge, Histogram, generate_latest + +# ----------------------------------------------------------------------------- +# App & Config +# ----------------------------------------------------------------------------- + +app = Flask(__name__) + +HOST: str = os.getenv("HOST", "0.0.0.0") +PORT: int = int(os.getenv("PORT", "5000")) +DEBUG: bool = os.getenv("DEBUG", "False").strip().lower() == "true" + +SERVICE_NAME = os.getenv("SERVICE_NAME", "devops-info-service") +SERVICE_VERSION = os.getenv("SERVICE_VERSION", "1.1.0") +SERVICE_DESCRIPTION = os.getenv("SERVICE_DESCRIPTION", "DevOps course info service") +SERVICE_FRAMEWORK = "Flask" +APP_VARIANT = os.getenv("APP_VARIANT", "primary") +APP_MESSAGE = os.getenv("APP_MESSAGE", "running") + +START_TIME_UTC = datetime.now(timezone.utc) + + +# ----------------------------------------------------------------------------- +# Prometheus metrics +# 
----------------------------------------------------------------------------- + +HTTP_REQUESTS_TOTAL = Counter( + "http_requests_total", + "Total HTTP requests processed by the service.", + ["method", "endpoint", "status_code"], +) + +HTTP_REQUEST_DURATION_SECONDS = Histogram( + "http_request_duration_seconds", + "HTTP request duration in seconds.", + ["method", "endpoint"], +) + +HTTP_REQUESTS_IN_PROGRESS = Gauge( + "http_requests_in_progress", + "HTTP requests currently being processed.", +) + +DEVOPS_INFO_ENDPOINT_CALLS_TOTAL = Counter( + "devops_info_endpoint_calls_total", + "Total endpoint calls for the DevOps info service.", + ["endpoint"], +) + +DEVOPS_INFO_SYSTEM_COLLECTION_SECONDS = Histogram( + "devops_info_system_collection_seconds", + "Time spent collecting system information.", +) + +DEVOPS_INFO_UPTIME_SECONDS = Gauge( + "devops_info_uptime_seconds", + "Current service uptime in seconds.", +) + + +# ----------------------------------------------------------------------------- +# Logging +# ----------------------------------------------------------------------------- + + +def iso_utc_now_z() -> str: + """Return current UTC time in ISO format with 'Z' suffix.""" + return datetime.now(timezone.utc).isoformat(timespec="milliseconds").replace("+00:00", "Z") + + +class JSONFormatter(logging.Formatter): + """Small JSON formatter for container-friendly structured logs.""" + + EXTRA_FIELDS = ( + "event", + "service", + "version", + "method", + "path", + "status_code", + "client_ip", + "duration_ms", + "user_agent", + ) + + def format(self, record: logging.LogRecord) -> str: + payload: Dict[str, Any] = { + "timestamp": iso_utc_now_z(), + "level": record.levelname, + "logger": record.name, + "message": record.getMessage(), + } + + for field in self.EXTRA_FIELDS: + value = getattr(record, field, None) + if value is not None: + payload[field] = value + + if record.exc_info: + payload["exception"] = self.formatException(record.exc_info) + + return 
json.dumps(payload, ensure_ascii=False) + + + +def configure_logging() -> logging.Logger: + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(JSONFormatter()) + + root_logger = logging.getLogger() + root_logger.handlers.clear() + root_logger.addHandler(handler) + root_logger.setLevel(logging.DEBUG if DEBUG else logging.INFO) + + for logger_name in ("werkzeug", "gunicorn.error", "gunicorn.access"): + current_logger = logging.getLogger(logger_name) + current_logger.handlers.clear() + current_logger.propagate = True + current_logger.setLevel(logging.DEBUG if DEBUG else logging.INFO) + + return logging.getLogger(SERVICE_NAME) + + +logger = configure_logging() + + +# ----------------------------------------------------------------------------- +# Request hooks +# ----------------------------------------------------------------------------- + + +def get_client_ip() -> str: + """ + Best-effort client IP resolution. + Prefers X-Forwarded-For (common behind reverse proxies). + """ + forwarded_for = request.headers.get("X-Forwarded-For", "") + if forwarded_for: + # "client, proxy1, proxy2" + return forwarded_for.split(",")[0].strip() + return request.remote_addr or "unknown" + + +def normalize_endpoint() -> str: + """ + Keep endpoint labels low-cardinality for Prometheus. + Uses the Flask route template when available and groups unknown paths. 
+ """ + if request.url_rule and request.url_rule.rule: + return request.url_rule.rule + + if request.path == "/": + return "/" + + return "unmatched" + + +@app.before_request +def log_request_started() -> None: + g.request_started_at = time.perf_counter() + g.normalized_endpoint = normalize_endpoint() + g.skip_http_metrics = request.path == "/metrics" + g.active_request_metric_registered = False + + if not g.skip_http_metrics: + HTTP_REQUESTS_IN_PROGRESS.inc() + g.active_request_metric_registered = True + + logger.debug( + "request_started", + extra={ + "event": "request_started", + "service": SERVICE_NAME, + "version": SERVICE_VERSION, + "method": request.method, + "path": request.path, + "client_ip": get_client_ip(), + "user_agent": request.headers.get("User-Agent", "unknown"), + }, + ) + + +@app.teardown_request +def track_request_finished(_error: Exception | None) -> None: + if getattr(g, "active_request_metric_registered", False): + HTTP_REQUESTS_IN_PROGRESS.dec() + g.active_request_metric_registered = False + + +@app.after_request +def add_headers(response): + endpoint = getattr(g, "normalized_endpoint", normalize_endpoint()) + duration_seconds = time.perf_counter() - getattr(g, "request_started_at", time.perf_counter()) + duration_ms = round(duration_seconds * 1000, 2) + + if not getattr(g, "skip_http_metrics", False): + HTTP_REQUESTS_TOTAL.labels( + method=request.method, + endpoint=endpoint, + status_code=str(response.status_code), + ).inc() + HTTP_REQUEST_DURATION_SECONDS.labels( + method=request.method, + endpoint=endpoint, + ).observe(duration_seconds) + DEVOPS_INFO_ENDPOINT_CALLS_TOTAL.labels(endpoint=endpoint).inc() + + DEVOPS_INFO_UPTIME_SECONDS.set(get_uptime()["seconds"]) + + if response.mimetype == "application/json": + response.headers["Content-Type"] = "application/json; charset=utf-8" + + logger.info( + "request_completed", + extra={ + "event": "request_completed", + "service": SERVICE_NAME, + "version": SERVICE_VERSION, + "method": 
request.method, + "path": request.path, + "status_code": response.status_code, + "client_ip": get_client_ip(), + "duration_ms": duration_ms, + "user_agent": request.headers.get("User-Agent", "unknown"), + }, + ) + return response + + +# ----------------------------------------------------------------------------- +# Helpers +# ----------------------------------------------------------------------------- + + +def get_uptime() -> Dict[str, Any]: + """Calculate service uptime since START_TIME_UTC.""" + delta = datetime.now(timezone.utc) - START_TIME_UTC + seconds = int(delta.total_seconds()) + hours = seconds // 3600 + minutes = (seconds % 3600) // 60 + + hours_part = f"{hours} hour" + ("" if hours == 1 else "s") + minutes_part = f"{minutes} minute" + ("" if minutes == 1 else "s") + + return { + "seconds": seconds, + "human": f"{hours_part}, {minutes_part}", + } + + + +def get_system_info() -> Dict[str, Any]: + """Collect system information using Python standard library.""" + started_at = time.perf_counter() + try: + return { + "hostname": socket.gethostname(), + "platform": platform.system(), + "platform_version": platform.platform(), + "architecture": platform.machine(), + "cpu_count": os.cpu_count() or 0, + "python_version": platform.python_version(), + } + finally: + DEVOPS_INFO_SYSTEM_COLLECTION_SECONDS.observe(time.perf_counter() - started_at) + + + +def build_endpoints() -> list[Dict[str, str]]: + return [ + {"path": "/", "method": "GET", "description": "Service information"}, + {"path": "/health", "method": "GET", "description": "Liveness health check"}, + {"path": "/ready", "method": "GET", "description": "Readiness health check"}, + {"path": "/metrics", "method": "GET", "description": "Prometheus metrics"}, + ] + + +# ----------------------------------------------------------------------------- +# Routes +# ----------------------------------------------------------------------------- + +@app.get("/") +def index(): + """Main endpoint - service and system 
information.""" + uptime = get_uptime() + + payload: Dict[str, Any] = { + "service": { + "name": SERVICE_NAME, + "version": SERVICE_VERSION, + "description": SERVICE_DESCRIPTION, + "framework": SERVICE_FRAMEWORK, + "variant": APP_VARIANT, + "message": APP_MESSAGE, + }, + "system": get_system_info(), + "runtime": { + "uptime_seconds": uptime["seconds"], + "uptime_human": uptime["human"], + "current_time": iso_utc_now_z(), + "timezone": "UTC", + }, + "request": { + "client_ip": get_client_ip(), + "user_agent": request.headers.get("User-Agent", "unknown"), + "method": request.method, + "path": request.path, + }, + "endpoints": build_endpoints(), + } + + return jsonify(payload), 200 + + +@app.get("/health") +def health(): + """Health check endpoint (for probes/monitoring).""" + uptime = get_uptime() + return jsonify( + { + "status": "healthy", + "timestamp": iso_utc_now_z(), + "uptime_seconds": uptime["seconds"], + "variant": APP_VARIANT, + } + ), 200 + + +@app.get("/ready") +def ready(): + """Readiness endpoint used by Kubernetes readiness probes.""" + uptime = get_uptime() + return jsonify( + { + "status": "ready", + "timestamp": iso_utc_now_z(), + "uptime_seconds": uptime["seconds"], + "variant": APP_VARIANT, + "message": APP_MESSAGE, + } + ), 200 + + +@app.get("/metrics") +def metrics() -> Response: + """Expose Prometheus metrics for scraping.""" + DEVOPS_INFO_UPTIME_SECONDS.set(get_uptime()["seconds"]) + return Response(generate_latest(), content_type=CONTENT_TYPE_LATEST) + + +# ----------------------------------------------------------------------------- +# Error Handlers +# ----------------------------------------------------------------------------- + +@app.errorhandler(404) +def not_found(_error): + logger.warning( + "endpoint_not_found", + extra={ + "event": "endpoint_not_found", + "service": SERVICE_NAME, + "version": SERVICE_VERSION, + "method": request.method, + "path": request.path, + "status_code": 404, + "client_ip": get_client_ip(), + }, + ) + return ( 
+ jsonify( + { + "error": "Not Found", + "message": "Endpoint does not exist", + "timestamp": iso_utc_now_z(), + } + ), + 404, + ) + + +@app.errorhandler(500) +def internal_error(_error): + logger.exception( + "unhandled_error", + extra={ + "event": "unhandled_error", + "service": SERVICE_NAME, + "version": SERVICE_VERSION, + "method": request.method, + "path": request.path, + "status_code": 500, + "client_ip": get_client_ip(), + }, + ) + return ( + jsonify( + { + "error": "Internal Server Error", + "message": "An unexpected error occurred", + "timestamp": iso_utc_now_z(), + } + ), + 500, + ) + + +# ----------------------------------------------------------------------------- +# Entrypoint +# ----------------------------------------------------------------------------- + + +def main() -> None: + logger.info( + "service_starting", + extra={ + "event": "service_starting", + "service": SERVICE_NAME, + "version": SERVICE_VERSION, + }, + ) + logger.info( + "runtime_configuration host=%s port=%s debug=%s", + HOST, + PORT, + DEBUG, + extra={ + "event": "runtime_configuration", + "service": SERVICE_NAME, + "version": SERVICE_VERSION, + }, + ) + app.run(host=HOST, port=PORT, debug=DEBUG) + + +if __name__ == "__main__": + main() diff --git a/app_python/docs/LAB01.md b/app_python/docs/LAB01.md new file mode 100644 index 0000000000..1bce3a2986 --- /dev/null +++ b/app_python/docs/LAB01.md @@ -0,0 +1,150 @@ +# LAB01 — DevOps Info Service (Python) + +## 1) Framework Selection + +### Chosen Framework: Flask + +I chose **Flask** because: +- It is lightweight and easy to set up for a small service with 2 endpoints. +- Minimal boilerplate: perfect for a “foundation” lab where the focus is DevOps workflow. +- Great learning curve and widely used in industry for microservices. 
+ +### Comparison Table + +| Framework | Pros | Cons | Fit for this Lab | +|---|---|---|---| +| **Flask** | Simple, minimal, fast to implement, huge ecosystem | No built-in async, fewer built-ins than Django/FastAPI | Quick & clean | +| FastAPI | Modern, async-ready, auto Swagger/OpenAPI docs | Slightly more setup, concepts (pydantic/models) | Also good | +| Django | Full-featured (ORM, auth, admin, etc.) | Overkill for 2 endpoints | Too heavy | + +## 2) Best Practices Applied + +### 2.1 Clean Code Organization (PEP 8, structure) + +- Imports grouped. +- Constants for configuration and service metadata. + + +**Example (helpers):** +```python +def get_uptime() -> Dict[str, Any]: + delta = datetime.now(timezone.utc) - START_TIME_UTC + seconds = int(delta.total_seconds()) + ... +``` + +### 2.2 Configuration via Environment Variables + +Implemented: +- `HOST` (default `0.0.0.0`) +- `PORT` (default `5000`) +- `DEBUG` (default `False`) + +**Example:** +```python +HOST = os.getenv("HOST", "0.0.0.0") +PORT = int(os.getenv("PORT", "5000")) +DEBUG = os.getenv("DEBUG", "False").lower() == "true" +``` + +Why it matters: +- Same artifact runs in different environments without code changes. + +### 2.3 Error Handling + +Added: +- `404` handler returning JSON +- `500` handler returning JSON + logging exception + +**Example:** +```python +@app.errorhandler(404) +def not_found(_error): + return jsonify({"error": "Not Found", ...}), 404 +``` + +Why it matters: +- Predictable API responses +- Easier debugging and monitoring + +### 2.4 Logging + +- `logging.basicConfig(...)` +- Debug request logging in `@app.before_request` +- Startup logs show config + +Why it matters: +- Logs are essential for observability (containers, CI/CD, production troubleshooting). 
+ +## 3) API Documentation + +### 3.1 `GET /` + +Returns: +- `service`: name/version/description/framework +- `system`: hostname/platform/platform_version/architecture/cpu_count/python_version +- `runtime`: uptime_seconds/uptime_human/current_time/timezone +- `request`: client_ip/user_agent/method/path +- `endpoints`: list of available endpoints + +**Test command:** +```bash +curl -s http://127.0.0.1:5000/ | python -m json.tool +``` + +### 3.2 `GET /health` + +Returns: +```json +{ + "status": "healthy", + "timestamp": "....Z", + "uptime_seconds": 123 +} +``` + +**Test command:** +```bash +curl -s http://127.0.0.1:5000/health | python -m json.tool +``` + +### 3.3 Configuration Tests + +```bash +python app.py +PORT=8080 python app.py +HOST=127.0.0.1 PORT=3000 python app.py +DEBUG=true python app.py +``` + +## 4) Testing Evidence (Screenshots) + +Screenshots are stored in: +`app_python/docs/screenshots/` + +Required screenshots: +1. `01-main-endpoint.png` — Browser/terminal showing full JSON from `GET /` +2. `02-health-check.png` — Response from `GET /health` +3. `03-formatted-output.png` — Pretty-printed JSON output (example: `python -m json.tool`) + +## 5) Challenges & Solutions + +### Challenge 1: Correct client IP behind proxy + +**Problem:** `request.remote_addr` may show proxy IP. +**Solution:** Prefer `X-Forwarded-For` header when available: +```python +forwarded_for = request.headers.get("X-Forwarded-For", "") +``` + +### Challenge 2: UTC timestamp format with `Z` + +**Problem:** `datetime.isoformat()` returns `+00:00`. +**Solution:** Convert to `Z` suffix: +```python +datetime.now(timezone.utc).isoformat(...).replace("+00:00", "Z") +``` + +## 6) GitHub Community + +Starring repositories is a useful way to bookmark valuable projects and also signals appreciation/support to maintainers, improving a project’s visibility in GitHub search. 
Following developers (professor/TAs/classmates) helps with networking, discovering useful code patterns, and makes collaboration easier by tracking teammates’ activity and progress. diff --git a/app_python/docs/LAB02.md b/app_python/docs/LAB02.md new file mode 100644 index 0000000000..cf73086f00 --- /dev/null +++ b/app_python/docs/LAB02.md @@ -0,0 +1,188 @@ +# LAB02 — Docker Containerization + +This document describes how the Flask app from Lab 1 was containerized using Docker best practices. + +--- + +## 1) Docker Best Practices Applied + +### 1.1 Pinned base image version +Using a specific tag makes builds reproducible and avoids unexpected changes when a floating tag updates. + +```dockerfile +FROM python:3.13.1-slim +``` + +### 1.2 Non-root user +Containers should not run as root to reduce the blast radius if the process is compromised. + +```dockerfile +RUN addgroup --system app \ + && adduser --system --ingroup app --no-create-home app +USER app +``` + +### 1.3 Layer caching (install deps before copying source) +Copy `requirements.txt` first and install dependencies. When you change only `app.py`, Docker can reuse the cached dependency layer. + +```dockerfile +COPY requirements.txt ./ +RUN pip install --no-cache-dir -r requirements.txt +COPY app.py ./ +``` + +### 1.4 Minimal copy & build context (.dockerignore) +`.dockerignore` keeps your build context small and avoids shipping dev artifacts, docs, and VCS metadata. + +Examples excluded: +- `.git/` +- `__pycache__/` +- `docs/`, `tests/` + +--- + +## 2) Image Information & Decisions + +### 2.1 Base image choice +- **Chosen:** `python:3.13.1-slim` +- **Why:** small Debian-based runtime, good compatibility for Python wheels, smaller than full images. 
+ +### 2.2 Image size +Record final size: + +```bash +docker images | grep +``` + +**Result:** +```bash +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +9b452f420205 dorley174/devops-info-service:lab02 "python app.py" 28 seconds ago Up 28 seconds 0.0.0.0:8080->5000/tcp, [::]:8080->5000/tcp keen_cannon +``` + +### 2.3 Layer structure +Explain key layers: +1. Base runtime layer (Python slim) +2. User creation (security) +3. Dependencies install (cached) +4. App code copy + +### 2.4 Optimizations +- `pip install --no-cache-dir` to avoid keeping pip cache in the image +- `.dockerignore` to reduce build context and speed up builds + +--- + +## 3) Build & Run Process + +### 3.1 Build output +```bash +docker build -t devops-info-service:lab02 . +``` + +**Output:** +```text +[+] Building 141.0s (13/13) FINISHED docker:desktop-linux + => [internal] load build definition from Dockerfile 0.1s + => => transferring dockerfile: 860B 0.0s + => resolve image config for docker-image://docker.io/docker/dockerfile:1 4.3s + => docker-image://docker.io/docker/dockerfile:1@sha256:b6afd42430b15f2d2a4c5a02b919e98a525b785b1aaff16747d2f623364e39b6 48.9s + => => resolve docker.io/docker/dockerfile:1@sha256:b6afd42430b15f2d2a4c5a02b919e98a525b785b1aaff16747d2f623364e39b6 0.0s + => => sha256:77246a01651da592b7bae79e0e20ed3b4f2e4c00a1b54b7c921c91ae3fa9ef07 13.57MB / 13.57MB 48.7s + => => extracting sha256:77246a01651da592b7bae79e0e20ed3b4f2e4c00a1b54b7c921c91ae3fa9ef07 0.1s + => [internal] load metadata for docker.io/library/python:3.13.1-slim 2.4s + => [internal] load .dockerignore 0.0s + => => transferring context: 260B 0.0s + => [1/6] FROM docker.io/library/python:3.13.1-slim@sha256:031ebf3cde9f3719d2db385233bcb18df5162038e9cda20e64e08f49f4b47a 73.5s + => => resolve docker.io/library/python:3.13.1-slim@sha256:031ebf3cde9f3719d2db385233bcb18df5162038e9cda20e64e08f49f4b47a2 0.0s + => => sha256:e18acc4841d040a12b49e06abbb2c9096bb559fa8d853543ff7ddc2e410531ff 249B / 249B 0.5s + => => 
sha256:a9910b4c71585ea2d9ca0acc308ab5ed7cc807819405bb23b5de7538142e4f36 12.58MB / 12.58MB 29.6s + => => sha256:ad24708d5ee00d90d30e51e59ffce29e5764775c877a74798e85bcc3176cde76 3.51MB / 3.51MB 15.2s + => => sha256:c29f5b76f736a8b555fd191c48d6581bb918bcd605a7cbcc76205dd6acff3260 28.21MB / 28.21MB 69.4s + => => extracting sha256:c29f5b76f736a8b555fd191c48d6581bb918bcd605a7cbcc76205dd6acff3260 0.7s + => => extracting sha256:ad24708d5ee00d90d30e51e59ffce29e5764775c877a74798e85bcc3176cde76 0.0s + => => extracting sha256:a9910b4c71585ea2d9ca0acc308ab5ed7cc807819405bb23b5de7538142e4f36 3.2s + => => extracting sha256:e18acc4841d040a12b49e06abbb2c9096bb559fa8d853543ff7ddc2e410531ff 0.0s + => [internal] load build context 0.1s + => => transferring context: 6.51kB 0.0s + => [2/6] WORKDIR /app 0.3s + => [3/6] RUN addgroup --system app && adduser --system --ingroup app --no-create-home app 0.4s + => [4/6] COPY requirements.txt ./ 0.0s + => [5/6] RUN pip install --no-cache-dir -r requirements.txt 9.2s + => [6/6] COPY app.py ./ 0.0s + => exporting to image 0.6s + => => exporting layers 0.4s + => => exporting manifest sha256:409e654da6806d719714cca291fe908afa3a7a1f78a444630451c3b6aea4fd52 0.0s + => => exporting config sha256:09310faf41ce8b42a107e1ec5a0e880b29a6aa7664b862574ecfa84c92183f4a 0.0s + => => exporting attestation manifest sha256:fccefc3ee604d7bca7e605be0deb0341e96a260a53534af34245a6e2013a34ec 0.0s + +View build details: docker-desktop://dashboard/build/desktop-linux/desktop-linux/vg7v11zlyt4npch7bw4olkcat +``` + +### 3.2 Run output +```bash +docker run --rm -p 8080:5000 --name devops-info devops-info-service:lab02 +``` + +**Output:** +```text + * Serving Flask app 'app' + * Debug mode: off +2026-02-04 19:29:00,312 - devops-info-service - INFO - Starting devops-info-service v1.0.0 (Flask) +2026-02-04 19:29:00,312 - devops-info-service - INFO - Config: HOST=0.0.0.0 PORT=5000 DEBUG=False +2026-02-04 19:29:00,322 - werkzeug - INFO - WARNING: This is a development server. 
Do not use it in a production deployment. Use a production WSGI server instead. + * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:5000 + * Running on http://172.17.0.2:5000 +2026-02-04 19:29:00,322 - werkzeug - INFO - Press CTRL+C to quit +2026-02-04 19:30:45,863 - werkzeug - INFO - 172.17.0.1 - - [04/Feb/2026 19:30:45] "GET / HTTP/1.1" 200 - +2026-02-04 19:30:59,367 - werkzeug - INFO - 172.17.0.1 - - [04/Feb/2026 19:30:59] "GET /health HTTP/1.1" 200 - +``` + +### 3.3 Endpoint tests +```bash +curl http://localhost:8080/health +curl http://localhost:8080/ +``` + +**Output:** +```text +TODO: paste the actual curl output here (JSON responses from / and /health) +``` + +### 3.4 Docker Hub +- **Repo URL:** `https://hub.docker.com/r/dorley174/devops-info-service` +- **Tags pushed:** `latest, lab02` + +--- + +## 4) Technical Analysis + +### Why does this Dockerfile work? +- The app listens on `0.0.0.0` and port `5000` by default, so it is reachable from outside the container when a port mapping such as `-p 8080:5000` is used. +- Dependencies are installed before source code to maximize cache hits. + +### What happens if you change layer order? +If you copy `app.py` before installing dependencies, any change in source invalidates the cache and forces dependency reinstall, making rebuilds slower. + +### Security considerations implemented +- Non-root user with `USER app` +- Minimal base image (`slim`) +- No extra tools installed + +### How does `.dockerignore` help? +- Smaller build context → faster builds +- Prevents shipping irrelevant files into the image (security + size) + +--- + +## 5) Challenges & Solutions + +Document what happened on your machine. Typical examples: +- **Port not reachable:** fixed by ensuring the app binds to `0.0.0.0` (already default in `app.py`) and using `-p host:container`. +- **Permission issues:** ensured runtime user exists and app files are readable by it. 
+ +What you learned: +- How Docker layer caching changes rebuild speed +- Why non-root matters +- How `.dockerignore` affects build context and image size diff --git a/app_python/docs/LAB03.md b/app_python/docs/LAB03.md new file mode 100644 index 0000000000..cd7976fa9b --- /dev/null +++ b/app_python/docs/LAB03.md @@ -0,0 +1,74 @@ +# LAB03 — CI/CD (GitHub Actions) + +## 1) Overview + +### Testing framework +This project uses **pytest** because it provides: +- clean assertions and fixtures +- Flask test client without running a live server +- easy coverage reporting via `pytest-cov` + +### Test coverage (what is tested) +- `GET /` — validates the JSON structure and required fields +- `GET /health` — validates the health-check response +- `GET /does-not-exist` — validates the JSON 404 error handler +- 500-case — forces an internal error and validates the JSON 500 error handler + +### CI triggers +The workflow runs on `push` and `pull_request` for the selected branches, **only when** these paths change: +- `app_python/**` +- `.github/workflows/python-ci.yml` + +### Versioning strategy +We use **CalVer** for Docker image tags: +- monthly tag: `YYYY.MM` (e.g., `2026.02`) +- build tag: `YYYY.MM.` (e.g., `2026.02.31`) +- plus `latest` + +This makes it easy to see *when* an image was built and to find the most recent build. + +--- + +## 2) How to run locally + +From the `app_python` directory: + +```bash +python -m venv .venv +# Windows: .\.venv\Scripts\activate +# Linux/macOS: source .venv/bin/activate + +pip install -r requirements.txt +pip install -r requirements-dev.txt + +ruff check . +pytest -q tests --cov=. 
 --cov-report=term-missing +``` + +--- + +## 3) Workflow evidence (paste links here) + +Add links to prove the pipeline works: + +- ✅ Successful workflow run: [https://github.com/dorley174/DevOps-Core-Course/actions/runs/21917089826/job/63287177794](https://github.com/dorley174/DevOps-Core-Course/actions/runs/21917089826/job/63287177794) +- ✅ Docker Hub image/repo: [https://hub.docker.com/repository/docker/dorley174/devops-info-service/general](https://hub.docker.com/repository/docker/dorley174/devops-info-service/general) +- ✅ Status badge in README: see `README.md` +or use the badge below: +[![python-ci](https://github.com/dorley174/DevOps-Core-Course/actions/workflows/python-ci.yml/badge.svg)](https://github.com/dorley174/DevOps-Core-Course/actions/workflows/python-ci.yml) + +--- + +## 4) Best practices implemented + +- **Path filters**: CI does not run if changes are outside `app_python/**` +- **Job dependency**: Docker push runs only if lint + tests succeeded +- **Concurrency**: cancels outdated runs on the same branch +- **Least privileges**: `permissions: contents: read` +- **Caching**: + - pip cache via `actions/setup-python` + - Docker layer cache via `cache-to/cache-from type=gha` +- **Snyk scan**: + - scans dependencies from `requirements.txt` + - severity threshold = high + - `continue-on-error: true` (learning mode; does not block pipeline) diff --git a/app_python/docs/LAB04.md b/app_python/docs/LAB04.md new file mode 100644 index 0000000000..32c243bde7 --- /dev/null +++ b/app_python/docs/LAB04.md @@ -0,0 +1,173 @@ +# LAB04 — Infrastructure as Code (Terraform + Pulumi) + +## 0) Why Yandex Cloud (I am based in Russia) + +From Russia it is easier to work with **Yandex Cloud** (registry, billing, accessibility). As the zone I use `ru-central1-a`. + +Alternatives: VK Cloud / Selectel + +--- + +## 1) Terraform + +### 1.1. 
hat to create + +- VPC Network +- Subnet +- Security Group с inbound rules: + - **22/tcp** (SSH) — using my IP (`allowed_ssh_cidr`) + - **80/tcp** — open out + - **5000/tcp** — open out +- VM (Compute Instance) with public IP (NAT) + +### 1.2. Repo structure + +``` +terraform/ + main.tf + providers.tf + variables.tf + locals.tf + outputs.tf + versions.tf + .gitignore + terraform.tfvars.example + .tflint.hcl +``` + +### 1.3. Prepare + +1) Download Terraform. +2) Download `yc` CLI. +3) Create an SSH key: + +```bash +ssh-keygen -t ed25519 -C "lab04" -f ~/.ssh/lab04_ed25519 +``` + +4) How to know my IP (example): + +```bash +curl ifconfig.me +``` + +### 1.4. Start + +```bash +cd terraform +cp terraform.tfvars.example terraform.tfvars +# filling terraform.tfvars + +terraform init +terraform fmt +terraform validate +terraform plan +terraform apply +``` + +**Output after apply** (вставьте сюда ваш вывод): + +- `terraform version` +- `terraform plan` output +- `terraform apply` output +- `public_ip` / `ssh_command` + +### 1.5. Access check + +```bash +ssh -i ~/.ssh/lab04_ed25519 ubuntu@ +``` + +### 1.6. Delete resourses + +```bash +terraform destroy +``` + +--- + +## 2) Pulumi + +### 2.1. What to create + +Same is in Terraform (Network/Subnet/SG/VM). + +### 2.2. Structure + +``` +pulumi/ + Pulumi.yaml + requirements.txt + __main__.py + README.md + .gitignore +``` + +### 2.3. Authorization YC + +Using env (`YC_TOKEN`, `YC_CLOUD_ID`, `YC_FOLDER_ID`, `YC_ZONE`) or using `pulumi config` keys `yandex:*`. + +### 2.4. 
Start + +```bash +cd pulumi +python -m venv venv +# Windows: +# .\venv\Scripts\activate +# Linux/macOS: +# source venv/bin/activate +pip install -r requirements.txt + +pulumi login +pulumi stack init dev + +# Provider config (example): +pulumi config set yandex:cloudId <cloud_id> +pulumi config set yandex:folderId <folder_id> +pulumi config set yandex:zone ru-central1-a +pulumi config set --secret yandex:token <oauth_token> + +# Project config: +pulumi config set zone ru-central1-a +pulumi config set subnetCidr 10.10.0.0/24 +pulumi config set allowedSshCidr "<your_IP>/32" +pulumi config set sshUser ubuntu +pulumi config set sshPublicKeyPath "~/.ssh/lab04_ed25519.pub" + +pulumi preview +pulumi up +``` + +**Output after up**: + +- `pulumi version` +- `pulumi up` output +- `public_ip` / `ssh_command` + +### 2.5. Delete resources + +```bash +pulumi destroy +``` + +--- + +## 3) Comparison Terraform vs Pulumi + +| Criteria | Terraform | Pulumi | +|---|---|---| +| Language | HCL | Python | +| Reuse | modules | full language abstraction | +| Entry barrier | lower | higher (programming experience needed) | +| State | local / remote backend | Pulumi Cloud / local | + +--- + +## Bonus 1 — Terraform CI (fmt/validate/tflint) + +There is a workflow: `.github/workflows/terraform-ci.yml`. 
+ +all logs in lab04 folder + +--- + diff --git a/app_python/docs/screenshots/01-main-endpoint.png b/app_python/docs/screenshots/01-main-endpoint.png new file mode 100644 index 0000000000..37f9f4b206 Binary files /dev/null and b/app_python/docs/screenshots/01-main-endpoint.png differ diff --git a/app_python/docs/screenshots/02-health-check.png b/app_python/docs/screenshots/02-health-check.png new file mode 100644 index 0000000000..dbaf3f82e2 Binary files /dev/null and b/app_python/docs/screenshots/02-health-check.png differ diff --git a/app_python/docs/screenshots/03-formatted-output.png b/app_python/docs/screenshots/03-formatted-output.png new file mode 100644 index 0000000000..e806eddcf2 Binary files /dev/null and b/app_python/docs/screenshots/03-formatted-output.png differ diff --git a/app_python/requirements-dev.txt b/app_python/requirements-dev.txt new file mode 100644 index 0000000000..3365ea3398 --- /dev/null +++ b/app_python/requirements-dev.txt @@ -0,0 +1,3 @@ +pytest==8.3.4 +pytest-cov==6.0.0 +ruff==0.9.7 \ No newline at end of file diff --git a/app_python/requirements.txt b/app_python/requirements.txt new file mode 100644 index 0000000000..46c776bf8d --- /dev/null +++ b/app_python/requirements.txt @@ -0,0 +1,2 @@ +Flask==3.1.0 +prometheus-client==0.23.1 diff --git a/app_python/tests/__init__.py b/app_python/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/app_python/tests/conftest.py b/app_python/tests/conftest.py new file mode 100644 index 0000000000..29d0202acc --- /dev/null +++ b/app_python/tests/conftest.py @@ -0,0 +1,9 @@ +import pytest +from app import app as flask_app + + +@pytest.fixture() +def client(): + flask_app.config.update(TESTING=True) + with flask_app.test_client() as client: + yield client diff --git a/app_python/tests/test_endpoints.py b/app_python/tests/test_endpoints.py new file mode 100644 index 0000000000..8a6e199c1d --- /dev/null +++ b/app_python/tests/test_endpoints.py @@ -0,0 +1,40 @@ +import app 
as app_module +from app import app as flask_app + + +def test_root_ok_structure(client): + resp = client.get("/", headers={"User-Agent": "pytest"}) + assert resp.status_code == 200 + data = resp.get_json() + assert isinstance(data, dict) + for key in ("service", "system", "runtime", "request", "endpoints"): + assert key in data + + +def test_health_ok(client): + resp = client.get("/health") + assert resp.status_code == 200 + data = resp.get_json() + assert data["status"] == "healthy" + + +def test_404_error_shape(client): + resp = client.get("/does-not-exist") + assert resp.status_code == 404 + data = resp.get_json() + assert data["error"] == "Not Found" + + +def test_500_error_shape(monkeypatch): + flask_app.config.update(TESTING=False, PROPAGATE_EXCEPTIONS=False) + + def boom(): + raise RuntimeError("boom") + + monkeypatch.setattr(app_module, "get_system_info", boom) + + with flask_app.test_client() as client: + resp = client.get("/") + assert resp.status_code == 500 + data = resp.get_json() + assert data["error"] == "Internal Server Error" diff --git a/app_python/tests/test_logging.py b/app_python/tests/test_logging.py new file mode 100644 index 0000000000..4e25aa23bc --- /dev/null +++ b/app_python/tests/test_logging.py @@ -0,0 +1,35 @@ +import json +import logging + +from app import JSONFormatter + + +def test_json_formatter_renders_expected_fields(): + formatter = JSONFormatter() + record = logging.LogRecord( + name="devops-info-service", + level=logging.INFO, + pathname=__file__, + lineno=10, + msg="request_completed", + args=(), + exc_info=None, + ) + record.event = "request_completed" + record.service = "devops-info-service" + record.version = "1.1.0" + record.method = "GET" + record.path = "/health" + record.status_code = 200 + record.client_ip = "127.0.0.1" + record.duration_ms = 3.14 + + payload = json.loads(formatter.format(record)) + + assert payload["message"] == "request_completed" + assert payload["level"] == "INFO" + assert payload["event"] == 
"request_completed" + assert payload["service"] == "devops-info-service" + assert payload["method"] == "GET" + assert payload["path"] == "/health" + assert payload["status_code"] == 200 diff --git a/get-docker.sh b/get-docker.sh new file mode 100644 index 0000000000..9a7bddb001 --- /dev/null +++ b/get-docker.sh @@ -0,0 +1,764 @@ +#!/bin/sh +set -e +# Docker Engine for Linux installation script. +# +# This script is intended as a convenient way to configure docker's package +# repositories and to install Docker Engine, This script is not recommended +# for production environments. Before running this script, make yourself familiar +# with potential risks and limitations, and refer to the installation manual +# at https://docs.docker.com/engine/install/ for alternative installation methods. +# +# The script: +# +# - Requires `root` or `sudo` privileges to run. +# - Attempts to detect your Linux distribution and version and configure your +# package management system for you. +# - Doesn't allow you to customize most installation parameters. +# - Installs dependencies and recommendations without asking for confirmation. +# - Installs the latest stable release (by default) of Docker CLI, Docker Engine, +# Docker Buildx, Docker Compose, containerd, and runc. When using this script +# to provision a machine, this may result in unexpected major version upgrades +# of these packages. Always test upgrades in a test environment before +# deploying to your production systems. +# - Isn't designed to upgrade an existing Docker installation. When using the +# script to update an existing installation, dependencies may not be updated +# to the expected version, resulting in outdated versions. +# +# Source code is available at https://github.com/docker/docker-install/ +# +# Usage +# ============================================================================== +# +# To install the latest stable versions of Docker CLI, Docker Engine, and their +# dependencies: +# +# 1. 
download the script +# +# $ curl -fsSL https://get.docker.com -o install-docker.sh +# +# 2. verify the script's content +# +# $ cat install-docker.sh +# +# 3. run the script with --dry-run to verify the steps it executes +# +# $ sh install-docker.sh --dry-run +# +# 4. run the script either as root, or using sudo to perform the installation. +# +# $ sudo sh install-docker.sh +# +# Command-line options +# ============================================================================== +# +# --version +# Use the --version option to install a specific version, for example: +# +# $ sudo sh install-docker.sh --version 23.0 +# +# --channel +# +# Use the --channel option to install from an alternative installation channel. +# The following example installs the latest versions from the "test" channel, +# which includes pre-releases (alpha, beta, rc): +# +# $ sudo sh install-docker.sh --channel test +# +# Alternatively, use the script at https://test.docker.com, which uses the test +# channel as default. +# +# --mirror +# +# Use the --mirror option to install from a mirror supported by this script. +# Available mirrors are "Aliyun" (https://mirrors.aliyun.com/docker-ce), and +# "AzureChinaCloud" (https://mirror.azure.cn/docker-ce), for example: +# +# $ sudo sh install-docker.sh --mirror AzureChinaCloud +# +# --setup-repo +# +# Use the --setup-repo option to configure Docker's package repositories without +# installing Docker packages. This is useful when you want to add the repository +# but install packages separately: +# +# $ sudo sh install-docker.sh --setup-repo +# +# Automatic Service Start +# +# By default, this script automatically starts the Docker daemon and enables the docker +# service after installation if systemd is used as init. +# +# If you prefer to start the service manually, use the --no-autostart option: +# +# $ sudo sh install-docker.sh --no-autostart +# +# Note: Starting the service requires appropriate privileges to manage system services. 
+# +# ============================================================================== + + +# Git commit from https://github.com/docker/docker-install when +# the script was uploaded (Should only be modified by upload job): +SCRIPT_COMMIT_SHA="f381ee68b32e515bb4dc034b339266aff1fbc460" + +# strip "v" prefix if present +VERSION="${VERSION#v}" + +# The channel to install from: +# * stable +# * test +DEFAULT_CHANNEL_VALUE="stable" +if [ -z "$CHANNEL" ]; then + CHANNEL=$DEFAULT_CHANNEL_VALUE +fi + +DEFAULT_DOWNLOAD_URL="https://download.docker.com" +if [ -z "$DOWNLOAD_URL" ]; then + DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL +fi + +DEFAULT_REPO_FILE="docker-ce.repo" +if [ -z "$REPO_FILE" ]; then + REPO_FILE="$DEFAULT_REPO_FILE" + # Automatically default to a staging repo fora + # a staging download url (download-stage.docker.com) + case "$DOWNLOAD_URL" in + *-stage*) REPO_FILE="docker-ce-staging.repo";; + esac +fi + +mirror='' +DRY_RUN=${DRY_RUN:-} +REPO_ONLY=${REPO_ONLY:-0} +NO_AUTOSTART=${NO_AUTOSTART:-0} +while [ $# -gt 0 ]; do + case "$1" in + --channel) + CHANNEL="$2" + shift + ;; + --dry-run) + DRY_RUN=1 + ;; + --mirror) + mirror="$2" + shift + ;; + --version) + VERSION="${2#v}" + shift + ;; + --setup-repo) + REPO_ONLY=1 + shift + ;; + --no-autostart) + NO_AUTOSTART=1 + ;; + --*) + echo "Illegal option $1" + ;; + esac + shift $(( $# > 0 ? 1 : 0 )) +done + +case "$mirror" in + Aliyun) + DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce" + ;; + AzureChinaCloud) + DOWNLOAD_URL="https://mirror.azure.cn/docker-ce" + ;; + "") + ;; + *) + >&2 echo "unknown mirror '$mirror': use either 'Aliyun', or 'AzureChinaCloud'." + exit 1 + ;; +esac + +case "$CHANNEL" in + stable|test) + ;; + *) + >&2 echo "unknown CHANNEL '$CHANNEL': use either stable or test." 
+ exit 1 + ;; +esac + +command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +# version_gte checks if the version specified in $VERSION is at least the given +# SemVer (Maj.Minor[.Patch]), or CalVer (YY.MM) version.It returns 0 (success) +# if $VERSION is either unset (=latest) or newer or equal than the specified +# version, or returns 1 (fail) otherwise. +# +# examples: +# +# VERSION=23.0 +# version_gte 23.0 // 0 (success) +# version_gte 20.10 // 0 (success) +# version_gte 19.03 // 0 (success) +# version_gte 26.1 // 1 (fail) +version_gte() { + if [ -z "$VERSION" ]; then + return 0 + fi + version_compare "$VERSION" "$1" +} + +# version_compare compares two version strings (either SemVer (Major.Minor.Path), +# or CalVer (YY.MM) version strings. It returns 0 (success) if version A is newer +# or equal than version B, or 1 (fail) otherwise. Patch releases and pre-release +# (-alpha/-beta) are not taken into account +# +# examples: +# +# version_compare 23.0.0 20.10 // 0 (success) +# version_compare 23.0 20.10 // 0 (success) +# version_compare 20.10 19.03 // 0 (success) +# version_compare 20.10 20.10 // 0 (success) +# version_compare 19.03 20.10 // 1 (fail) +version_compare() ( + set +x + + yy_a="$(echo "$1" | cut -d'.' -f1)" + yy_b="$(echo "$2" | cut -d'.' -f1)" + if [ "$yy_a" -lt "$yy_b" ]; then + return 1 + fi + if [ "$yy_a" -gt "$yy_b" ]; then + return 0 + fi + mm_a="$(echo "$1" | cut -d'.' -f2)" + mm_b="$(echo "$2" | cut -d'.' 
-f2)" + + # trim leading zeros to accommodate CalVer + mm_a="${mm_a#0}" + mm_b="${mm_b#0}" + + if [ "${mm_a:-0}" -lt "${mm_b:-0}" ]; then + return 1 + fi + + return 0 +) + +is_dry_run() { + if [ -z "$DRY_RUN" ]; then + return 1 + else + return 0 + fi +} + +is_wsl() { + case "$(uname -r)" in + *microsoft* ) true ;; # WSL 2 + *Microsoft* ) true ;; # WSL 1 + * ) false;; + esac +} + +is_darwin() { + case "$(uname -s)" in + *darwin* ) true ;; + *Darwin* ) true ;; + * ) false;; + esac +} + +deprecation_notice() { + distro=$1 + distro_version=$2 + echo + printf "\033[91;1mDEPRECATION WARNING\033[0m\n" + printf " This Linux distribution (\033[1m%s %s\033[0m) reached end-of-life and is no longer supported by this script.\n" "$distro" "$distro_version" + echo " No updates or security fixes will be released for this distribution, and users are recommended" + echo " to upgrade to a currently maintained version of $distro." + echo + printf "Press \033[1mCtrl+C\033[0m now to abort this script, or wait for the installation to continue." + echo + sleep 10 +} + +get_distribution() { + lsb_dist="" + # Every system that we officially support has /etc/os-release + if [ -r /etc/os-release ]; then + lsb_dist="$(. /etc/os-release && echo "$ID")" + fi + # Returning an empty string here should be alright since the + # case statements don't act unless you provide an actual value + echo "$lsb_dist" +} + +start_docker_daemon() { + # Use systemctl if available (for systemd-based systems) + if command_exists systemctl; then + is_dry_run || >&2 echo "Using systemd to manage Docker service" + if ( + is_dry_run || set -x + $sh_c systemctl enable --now docker.service 2>/dev/null + ); then + is_dry_run || echo "INFO: Docker daemon enabled and started" >&2 + else + is_dry_run || echo "WARNING: unable to enable the docker service" >&2 + fi + else + # No service management available (container environment) + if ! 
is_dry_run; then + >&2 echo "Note: Running in a container environment without service management" + >&2 echo "Docker daemon cannot be started automatically in this environment" + >&2 echo "The Docker packages have been installed successfully" + fi + fi + >&2 echo +} + +echo_docker_as_nonroot() { + if is_dry_run; then + return + fi + if command_exists docker && [ -e /var/run/docker.sock ]; then + ( + set -x + $sh_c 'docker version' + ) || true + fi + + # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output + echo + echo "================================================================================" + echo + if version_gte "20.10"; then + echo "To run Docker as a non-privileged user, consider setting up the" + echo "Docker daemon in rootless mode for your user:" + echo + echo " dockerd-rootless-setuptool.sh install" + echo + echo "Visit https://docs.docker.com/go/rootless/ to learn about rootless mode." + echo + fi + echo + echo "To run the Docker daemon as a fully privileged service, but granting non-root" + echo "users access, refer to https://docs.docker.com/go/daemon-access/" + echo + echo "WARNING: Access to the remote API on a privileged Docker daemon is equivalent" + echo " to root access on the host. Refer to the 'Docker daemon attack surface'" + echo " documentation for details: https://docs.docker.com/go/attack-surface/" + echo + echo "================================================================================" + echo +} + +# Check if this is a forked Linux distro +check_forked() { + + # Check for lsb_release command existence, it usually exists in forked distros + if command_exists lsb_release; then + # Check if the `-u` option is supported + set +e + lsb_release -a -u > /dev/null 2>&1 + lsb_release_exit_code=$? 
+ set -e + + # Check if the command has exited successfully, it means we're in a forked distro + if [ "$lsb_release_exit_code" = "0" ]; then + # Print info about current distro + cat <<-EOF + You're using '$lsb_dist' version '$dist_version'. + EOF + + # Get the upstream release info + lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]') + dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]') + + # Print info about upstream distro + cat <<-EOF + Upstream release is '$lsb_dist' version '$dist_version'. + EOF + else + if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then + if [ "$lsb_dist" = "osmc" ]; then + # OSMC runs Raspbian + lsb_dist=raspbian + else + # We're Debian and don't even know it! + lsb_dist=debian + fi + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 13|14|forky) + dist_version="trixie" + ;; + 12) + dist_version="bookworm" + ;; + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + fi + fi + fi +} + +do_install() { + echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA" + + if command_exists docker; then + cat >&2 <<-'EOF' + Warning: the "docker" command appears to already exist on this system. + + If you already have Docker installed, this script can cause trouble, which is + why we're displaying this warning and provide the opportunity to cancel the + installation. + + If you installed the current Docker package using this script and are using it + again to update Docker, you can ignore this message, but be aware that the + script resets any custom changes in the deb and rpm repo configuration + files to match the parameters passed to the script. + + You may press Ctrl+C now to abort this script. 
+ EOF + ( set -x; sleep 20 ) + fi + + user="$(id -un 2>/dev/null || true)" + + sh_c='sh -c' + if [ "$user" != 'root' ]; then + if command_exists sudo; then + sh_c='sudo -E sh -c' + elif command_exists su; then + sh_c='su -c' + else + cat >&2 <<-'EOF' + Error: this installer needs the ability to run commands as root. + We are unable to find either "sudo" or "su" available to make this happen. + EOF + exit 1 + fi + fi + + if is_dry_run; then + sh_c="echo" + fi + + # perform some very rudimentary platform detection + lsb_dist=$( get_distribution ) + lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" + + if is_wsl; then + echo + echo "WSL DETECTED: We recommend using Docker Desktop for Windows." + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop/" + echo + cat >&2 <<-'EOF' + + You may press Ctrl+C now to abort this script. + EOF + ( set -x; sleep 20 ) + fi + + case "$lsb_dist" in + + ubuntu) + if command_exists lsb_release; then + dist_version="$(lsb_release --codename | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then + dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")" + fi + ;; + + debian|raspbian) + dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')" + case "$dist_version" in + 13) + dist_version="trixie" + ;; + 12) + dist_version="bookworm" + ;; + 11) + dist_version="bullseye" + ;; + 10) + dist_version="buster" + ;; + 9) + dist_version="stretch" + ;; + 8) + dist_version="jessie" + ;; + esac + ;; + + centos|rhel) + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. /etc/os-release && echo "$VERSION_ID")" + fi + ;; + + *) + if command_exists lsb_release; then + dist_version="$(lsb_release --release | cut -f2)" + fi + if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then + dist_version="$(. 
/etc/os-release && echo "$VERSION_ID")" + fi + ;; + + esac + + # Check if this is a forked Linux distro + check_forked + + # Print deprecation warnings for distro versions that recently reached EOL, + # but may still be commonly used (especially LTS versions). + case "$lsb_dist.$dist_version" in + centos.8|centos.7|rhel.7) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + debian.buster|debian.stretch|debian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + raspbian.buster|raspbian.stretch|raspbian.jessie) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + ubuntu.focal|ubuntu.bionic|ubuntu.xenial|ubuntu.trusty) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + ubuntu.oracular|ubuntu.mantic|ubuntu.lunar|ubuntu.kinetic|ubuntu.impish|ubuntu.hirsute|ubuntu.groovy|ubuntu.eoan|ubuntu.disco|ubuntu.cosmic) + deprecation_notice "$lsb_dist" "$dist_version" + ;; + fedora.*) + if [ "$dist_version" -lt 41 ]; then + deprecation_notice "$lsb_dist" "$dist_version" + fi + ;; + esac + + # Run setup for each distro accordingly + case "$lsb_dist" in + ubuntu|debian|raspbian) + pre_reqs="ca-certificates curl" + apt_repo="deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL" + ( + if ! 
is_dry_run; then + set -x + fi + $sh_c 'apt-get -qq update >/dev/null' + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get -y -qq install $pre_reqs >/dev/null" + $sh_c 'install -m 0755 -d /etc/apt/keyrings' + $sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" -o /etc/apt/keyrings/docker.asc" + $sh_c "chmod a+r /etc/apt/keyrings/docker.asc" + $sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list" + $sh_c 'apt-get -qq update >/dev/null' + ) + + if [ "$REPO_ONLY" = "1" ]; then + exit 0 + fi + + pkg_version="" + if [ -n "$VERSION" ]; then + if is_dry_run; then + echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + # Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel + pkg_pattern="$(echo "$VERSION" | sed 's/-ce-/~ce~.*/g' | sed 's/-/.*/g')" + search_command="apt-cache madison docker-ce | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst apt-cache madison results" + echo + exit 1 + fi + if version_gte "18.09"; then + search_command="apt-cache madison docker-ce-cli | grep '$pkg_pattern' | head -1 | awk '{\$1=\$1};1' | cut -d' ' -f 3" + echo "INFO: $search_command" + cli_pkg_version="=$($sh_c "$search_command")" + fi + pkg_version="=$pkg_version" + fi + fi + ( + pkgs="docker-ce${pkg_version%=}" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + pkgs="$pkgs docker-ce-cli${cli_pkg_version%=} containerd.io" + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin" + fi + if version_gte "28.2"; then + pkgs="$pkgs docker-model-plugin" + fi + if ! 
is_dry_run; then + set -x + fi + $sh_c "DEBIAN_FRONTEND=noninteractive apt-get -y -qq install $pkgs >/dev/null" + ) + if [ "$NO_AUTOSTART" != "1" ]; then + start_docker_daemon + fi + echo_docker_as_nonroot + exit 0 + ;; + centos|fedora|rhel) + if [ "$(uname -m)" = "s390x" ]; then + echo "Effective v27.5, please consult RHEL distro statement for s390x support." + exit 1 + fi + repo_file_url="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE" + ( + if ! is_dry_run; then + set -x + fi + if command_exists dnf5; then + $sh_c "dnf -y -q --setopt=install_weak_deps=False install dnf-plugins-core" + $sh_c "dnf5 config-manager addrepo --overwrite --save-filename=docker-ce.repo --from-repofile='$repo_file_url'" + + if [ "$CHANNEL" != "stable" ]; then + $sh_c "dnf5 config-manager setopt \"docker-ce-*.enabled=0\"" + $sh_c "dnf5 config-manager setopt \"docker-ce-$CHANNEL.enabled=1\"" + fi + $sh_c "dnf makecache" + elif command_exists dnf; then + $sh_c "dnf -y -q --setopt=install_weak_deps=False install dnf-plugins-core" + $sh_c "rm -f /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce-staging.repo" + $sh_c "dnf config-manager --add-repo $repo_file_url" + + if [ "$CHANNEL" != "stable" ]; then + $sh_c "dnf config-manager --set-disabled \"docker-ce-*\"" + $sh_c "dnf config-manager --set-enabled \"docker-ce-$CHANNEL\"" + fi + $sh_c "dnf makecache" + else + $sh_c "yum -y -q install yum-utils" + $sh_c "rm -f /etc/yum.repos.d/docker-ce.repo /etc/yum.repos.d/docker-ce-staging.repo" + $sh_c "yum-config-manager --add-repo $repo_file_url" + + if [ "$CHANNEL" != "stable" ]; then + $sh_c "yum-config-manager --disable \"docker-ce-*\"" + $sh_c "yum-config-manager --enable \"docker-ce-$CHANNEL\"" + fi + $sh_c "yum makecache" + fi + ) + + if [ "$REPO_ONLY" = "1" ]; then + exit 0 + fi + + pkg_version="" + if command_exists dnf; then + pkg_manager="dnf" + pkg_manager_flags="-y -q --best" + else + pkg_manager="yum" + pkg_manager_flags="-y -q" + fi + if [ -n "$VERSION" ]; then + if is_dry_run; then + 
echo "# WARNING: VERSION pinning is not supported in DRY_RUN" + else + if [ "$lsb_dist" = "fedora" ]; then + pkg_suffix="fc$dist_version" + else + pkg_suffix="el" + fi + pkg_pattern="$(echo "$VERSION" | sed 's/-ce-/\\\\.ce.*/g' | sed 's/-/.*/g').*$pkg_suffix" + search_command="$pkg_manager list --showduplicates docker-ce | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + pkg_version="$($sh_c "$search_command")" + echo "INFO: Searching repository for VERSION '$VERSION'" + echo "INFO: $search_command" + if [ -z "$pkg_version" ]; then + echo + echo "ERROR: '$VERSION' not found amongst $pkg_manager list results" + echo + exit 1 + fi + if version_gte "18.09"; then + # older versions don't support a cli package + search_command="$pkg_manager list --showduplicates docker-ce-cli | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'" + cli_pkg_version="$($sh_c "$search_command" | cut -d':' -f 2)" + fi + # Cut out the epoch and prefix with a '-' + pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)" + fi + fi + ( + pkgs="docker-ce$pkg_version" + if version_gte "18.09"; then + # older versions didn't ship the cli and containerd as separate packages + if [ -n "$cli_pkg_version" ]; then + pkgs="$pkgs docker-ce-cli-$cli_pkg_version containerd.io" + else + pkgs="$pkgs docker-ce-cli containerd.io" + fi + fi + if version_gte "20.10"; then + pkgs="$pkgs docker-compose-plugin docker-ce-rootless-extras$pkg_version" + fi + if version_gte "23.0"; then + pkgs="$pkgs docker-buildx-plugin docker-model-plugin" + fi + if ! is_dry_run; then + set -x + fi + $sh_c "$pkg_manager $pkg_manager_flags install $pkgs" + ) + if [ "$NO_AUTOSTART" != "1" ]; then + start_docker_daemon + fi + echo_docker_as_nonroot + exit 0 + ;; + sles) + echo "Effective v27.5, please consult SLES distro statement for s390x support." 
+ exit 1 + ;; + *) + if [ -z "$lsb_dist" ]; then + if is_darwin; then + echo + echo "ERROR: Unsupported operating system 'macOS'" + echo "Please get Docker Desktop from https://www.docker.com/products/docker-desktop" + echo + exit 1 + fi + fi + echo + echo "ERROR: Unsupported distribution '$lsb_dist'" + echo + exit 1 + ;; + esac + exit 1 +} + +# wrapped up in a function so that we have some protection against only getting +# half the file during "curl | sh" +do_install diff --git a/k8s/README.md b/k8s/README.md new file mode 100644 index 0000000000..1fade15d84 --- /dev/null +++ b/k8s/README.md @@ -0,0 +1,442 @@ +# Lab 09 — Kubernetes Fundamentals + +This report documents the Kubernetes deployment of the Flask-based `devops-info-service` application. + +--- + +## 1. Architecture Overview + +### 1.1 Selected local Kubernetes tool +I selected **minikube** with the **Docker driver** on **Windows + VS Code + WSL + Docker Desktop**. + +**Why this option fits my environment:** +1. It runs completely locally and does not require a cloud provider. +2. It works well with my Windows + VS Code + WSL workflow. +3. Docker Desktop provides the container runtime, and minikube uses the Docker driver directly from WSL. +4. It is practical in my region because the lab can be completed locally after the required images are downloaded. + +### 1.2 Deployment architecture +The main application is deployed with a Kubernetes **Deployment** and exposed with a **NodePort Service**. + +**Main application path:** +- `Deployment/devops-info-service` +- `Service/devops-info-service` +- `3 replicas` by default +- `NodePort 30080` +- container port `5000` + +**Bonus manifests prepared in the repository:** +- `Deployment/devops-info-service-app2` +- `Service/devops-info-service-app2` +- `Ingress/devops-course-ingress` +- host `local.example.com` +- routes `/app1` and `/app2` + +### 1.3 Networking flow +#### Base task +1. A client sends a request to the local cluster. +2. 
The NodePort Service exposes the application on port `30080`. +3. The Service selects Pods by label `app.kubernetes.io/name=devops-info-service`. +4. Traffic is forwarded to container port `5000`. + +#### Local verification flow actually used +In my WSL + Docker Desktop setup, direct NodePort access through `minikube ip` was not reliable. I verified the application with: + +```bash +kubectl port-forward -n devops-lab09 service/devops-info-service 8080:80 +``` + +This mapped local port `8080` to Service port `80` and allowed stable validation with `curl`. + +### 1.4 Resource allocation strategy +Each container defines conservative lab-friendly resources: +- **Requests:** `100m CPU`, `128Mi memory` +- **Limits:** `250m CPU`, `256Mi memory` + +These values are appropriate for a lightweight Flask service on a local minikube cluster running through WSL and Docker Desktop. + +--- + +## 2. Manifest Files + +### 2.1 `k8s/namespace.yml` +Creates a dedicated namespace `devops-lab09` for logical isolation of all lab resources. + +### 2.2 `k8s/deployment.yml` +Creates the primary Deployment. + +**Key implementation choices:** +1. `replicas: 3` satisfies the requirement for at least three Pod replicas. +2. `RollingUpdate` uses `maxSurge: 1` and `maxUnavailable: 0` to avoid downtime during updates. +3. `readinessProbe` checks `/ready`. +4. `livenessProbe` checks `/health`. +5. Resource requests and limits are defined. +6. Runtime hardening is enabled with `runAsNonRoot`, dropped capabilities, disabled privilege escalation, and `RuntimeDefault` seccomp. + +### 2.3 `k8s/service.yml` +Creates a `NodePort` Service. + +**Key implementation choices:** +1. Service port `80` is user-friendly. +2. `targetPort: http` forwards traffic to container port `5000`. +3. `nodePort: 30080` is fixed explicitly to simplify local testing. + +### 2.4 `k8s/deployment-app2.yml` +Creates a second Deployment for the bonus part, using the same image with different environment values. 
+ +### 2.5 `k8s/service-app2.yml` +Creates a second NodePort Service on `30081` for the bonus application. + +### 2.6 `k8s/ingress.yml` +Defines nginx Ingress with path-based routing and TLS. + +### 2.7 Helper scripts +- `k8s/deploy.sh` — deploys the namespace, main Deployment, and Service. +- `k8s/deploy-bonus.sh` — deploys the bonus resources. +- `k8s/collect-evidence.sh` — saves Kubernetes evidence into `k8s/evidence/`. + +### 2.8 Docker image security fix +The initial deployment failed because Kubernetes could not validate a named non-root user with `runAsNonRoot: true`. + +I fixed this by updating `app_python/Dockerfile` to use a **numeric UID/GID**: + +```dockerfile +RUN addgroup --system --gid 10001 app \ + && adduser --system --uid 10001 --ingroup app --no-create-home app + +USER 10001:10001 +``` + +--- + +## 3. Deployment Evidence + +### 3.1 Cluster setup evidence + +```text +$ kubectl cluster-info +Kubernetes control plane is running at https://127.0.0.1:60412 +CoreDNS is running at https://127.0.0.1:60412/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy + +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 
+``` + +```text +$ kubectl get nodes -o wide +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +minikube Ready control-plane 14m v1.35.1 192.168.49.2 Debian GNU/Linux 12 (bookworm) 5.15.153.1-microsoft-standard-WSL2 docker://29.2.1 +``` + +### 3.2 Deployment evidence + +```text +$ kubectl get all -n devops-lab09 -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +pod/devops-info-service-7b48589b6b-2cf77 1/1 Running 0 5m26s 10.244.0.6 minikube +pod/devops-info-service-7b48589b6b-52j4f 1/1 Running 0 5m9s 10.244.0.8 minikube +pod/devops-info-service-7b48589b6b-wrvvj 1/1 Running 0 5m19s 10.244.0.7 minikube + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +service/devops-info-service NodePort 10.100.203.165 80:30080/TCP 13m app.kubernetes.io/name=devops-info-service + +NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR +deployment.apps/devops-info-service 3/3 3 3 13m devops-info-service dorley174/devops-info-service:latest app.kubernetes.io/name=devops-info-service +``` + +```text +$ kubectl get pods,svc -n devops-lab09 -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +pod/devops-info-service-7b48589b6b-2cf77 1/1 Running 0 5m27s 10.244.0.6 minikube +pod/devops-info-service-7b48589b6b-52j4f 1/1 Running 0 5m10s 10.244.0.8 minikube +pod/devops-info-service-7b48589b6b-wrvvj 1/1 Running 0 5m20s 10.244.0.7 minikube + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +service/devops-info-service NodePort 10.100.203.165 80:30080/TCP 13m app.kubernetes.io/name=devops-info-service +``` + +```text +$ kubectl describe deployment devops-info-service -n devops-lab09 +Name: devops-info-service +Namespace: devops-lab09 +Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable +StrategyType: RollingUpdate +RollingUpdateStrategy: 0 max unavailable, 1 max surge +Image: dorley174/devops-info-service:latest +Liveness: http-get http://:http/health 
delay=15s timeout=2s period=10s #success=1 #failure=3 +Readiness: http-get http://:http/ready delay=5s timeout=2s period=5s #success=1 #failure=3 +Environment: + PORT: 5000 + DEBUG: False + APP_VARIANT: app1 + APP_MESSAGE: Lab 09 primary deployment + SERVICE_VERSION: lab09-v1 +``` + +### 3.3 Application verification +I verified the running Service with `kubectl port-forward`. + +```bash +kubectl port-forward -n devops-lab09 service/devops-info-service 8080:80 +``` + +```text +$ curl http://127.0.0.1:8080/health +{"status":"healthy","timestamp":"2026-03-26T19:47:24.216Z","uptime_seconds":264,"variant":"app1"} +``` + +```text +$ curl http://127.0.0.1:8080/ready +{"message":"Lab 09 primary deployment","status":"ready","timestamp":"2026-03-26T19:47:24.232Z","uptime_seconds":264,"variant":"app1"} +``` + +```json +$ curl http://127.0.0.1:8080/ | python3 -m json.tool +{ + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "message": "Lab 09 primary deployment", + "name": "devops-info-service", + "variant": "app1", + "version": "lab09-v1" + } +} +``` + +### 3.4 Evidence collection helper + +```text +$ ./k8s/collect-evidence.sh +Evidence saved to k8s/evidence +``` + +The raw evidence files are included in `k8s/evidence/`. + +--- + +## 4. Operations Performed + +### 4.1 Deploy the application +I deployed the namespace, Deployment, and Service declaratively with `kubectl apply` and confirmed that the Deployment reached `3/3` available replicas. + +### 4.2 Scale the Deployment to 5 replicas + +```bash +kubectl scale deployment/devops-info-service -n devops-lab09 --replicas=5 +kubectl rollout status deployment/devops-info-service -n devops-lab09 +kubectl get pods -n devops-lab09 -o wide +``` + +**Result:** the Deployment was successfully scaled from 3 to 5 replicas. + +```text +deployment.apps/devops-info-service scaled +Waiting for deployment "devops-info-service" rollout to finish: 3 of 5 updated replicas are available... 
+Waiting for deployment "devops-info-service" rollout to finish: 4 of 5 updated replicas are available... +deployment "devops-info-service" successfully rolled out +``` + +```text +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +devops-info-service-7b48589b6b-2b97b 1/1 Running 0 8m32s 10.244.0.14 minikube +devops-info-service-7b48589b6b-7fttq 1/1 Running 0 8m25s 10.244.0.15 minikube +devops-info-service-7b48589b6b-jjtsm 1/1 Running 0 8s 10.244.0.17 minikube +devops-info-service-7b48589b6b-wmf6g 1/1 Running 0 7s 10.244.0.18 minikube +devops-info-service-7b48589b6b-zhkpz 1/1 Running 0 8m18s 10.244.0.16 minikube +``` + +### 4.3 Demonstrate a rolling update +Instead of editing the YAML by hand during the live test, I used `kubectl set env` to trigger a Deployment rollout by changing the Pod template environment variables. + +```bash +kubectl set env deployment/devops-info-service -n devops-lab09 \ + APP_MESSAGE="Lab 09 rolling update" \ + SERVICE_VERSION="lab09-v2" +kubectl rollout status deployment/devops-info-service -n devops-lab09 +kubectl rollout history deployment/devops-info-service -n devops-lab09 +``` + +**Result:** rollout completed successfully and the Deployment spec reflected the updated values. + +```text +deployment.apps/devops-info-service env updated +Waiting for deployment "devops-info-service" rollout to finish: 1 out of 5 new replicas have been updated... +... 
+deployment "devops-info-service" successfully rolled out +``` + +```text +$ kubectl rollout history deployment/devops-info-service -n devops-lab09 +REVISION CHANGE-CAUSE +1 +4 +5 +``` + +```text +$ kubectl get deployment devops-info-service -n devops-lab09 -o yaml | grep -A1 -E 'APP_MESSAGE|SERVICE_VERSION' +- name: APP_MESSAGE + value: Lab 09 rolling update +- name: SERVICE_VERSION + value: lab09-v2 +``` + +### 4.4 Demonstrate rollback + +```bash +kubectl rollout undo deployment/devops-info-service -n devops-lab09 +kubectl rollout status deployment/devops-info-service -n devops-lab09 +kubectl rollout history deployment/devops-info-service -n devops-lab09 +``` + +**Result:** rollback completed successfully and the Deployment returned to the original values. + +```text +deployment.apps/devops-info-service rolled back +Waiting for deployment "devops-info-service" rollout to finish: 1 out of 5 new replicas have been updated... +... +deployment "devops-info-service" successfully rolled out +``` + +```text +$ kubectl rollout history deployment/devops-info-service -n devops-lab09 +REVISION CHANGE-CAUSE +1 +5 +6 +``` + +```text +$ kubectl get deployment devops-info-service -n devops-lab09 -o yaml | grep -A1 -E 'APP_MESSAGE|SERVICE_VERSION' +- name: APP_MESSAGE + value: Lab 09 primary deployment +- name: SERVICE_VERSION + value: lab09-v1 +``` + +Rollback response check: + +```json +$ curl http://127.0.0.1:8082/ | python3 -m json.tool +{ + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "message": "Lab 09 primary deployment", + "name": "devops-info-service", + "variant": "app1", + "version": "lab09-v1" + } +} +``` + +### 4.5 Bonus status +The repository includes bonus manifests for the second application and Ingress, but I did not complete final runtime verification for the bonus part during this execution session. + +--- + +## 5. Production Considerations + +### 5.1 Health checks +Two probes are implemented: +1. 
**Liveness probe** on `/health` checks whether the process is alive. +2. **Readiness probe** on `/ready` checks whether the Pod is ready to receive traffic. + +### 5.2 Resource limits rationale +The application is lightweight, so the selected requests and limits are sufficient for local development while still demonstrating correct Kubernetes resource configuration. + +### 5.3 Security choices +1. Non-root execution is enforced. +2. Privilege escalation is disabled. +3. All Linux capabilities are dropped. +4. `RuntimeDefault` seccomp is enabled. +5. The Docker image now uses a numeric UID/GID to satisfy Kubernetes non-root validation. + +### 5.4 Suggested production improvements +For a real production deployment, I would additionally introduce: +1. `ConfigMap` and `Secret` resources. +2. `HorizontalPodAutoscaler`. +3. `NetworkPolicy` rules. +4. `PodDisruptionBudget`. +5. immutable image tags instead of `latest`. +6. CI/CD-driven promotion between environments. +7. centralized monitoring, logging, and alerting. + +--- + +## 6. Challenges & Solutions + +### 6.1 Vagrant was more complex than necessary +I initially tried the lab with Vagrant, but the workflow was slower and more fragile than needed for this local setup. I switched to **WSL + Docker Desktop + minikube**, which was simpler and more reliable. + +### 6.2 Docker Desktop / WSL networking behavior +Direct access through `minikube ip` and the NodePort was not reliable in my environment. The stable solution was to verify the application with `kubectl port-forward`. + +### 6.3 `CreateContainerConfigError` +The first deployment failed with: + +```text +Error: container has runAsNonRoot and image has non-numeric user (app), cannot verify user is non-root +``` + +I fixed this by updating the Docker image to use a numeric UID/GID (`10001:10001`) and rebuilding the image inside minikube's Docker environment. 
+ +### 6.4 Rolling update evidence capture +During update verification, one local `port-forward` session was interrupted. To keep the evidence trustworthy, I documented the successful rollout with: +- rollout status +- rollout history +- Deployment environment values after update +- rollback verification with a successful application response + +--- + +## 7. Recommended Local Execution Order + +### 7.1 Prerequisites +- Windows host +- Docker Desktop +- WSL +- `kubectl` installed in WSL +- `minikube` installed in WSL + +### 7.2 Cluster startup +```bash +minikube start --driver=docker +kubectl cluster-info +kubectl get nodes -o wide +``` + +### 7.3 Build and deploy +```bash +eval $(minikube -p minikube docker-env) +docker build -t dorley174/devops-info-service:latest ./app_python +./k8s/deploy.sh +``` + +### 7.4 Verify the application +```bash +kubectl port-forward -n devops-lab09 service/devops-info-service 8080:80 +curl http://127.0.0.1:8080/health +curl http://127.0.0.1:8080/ready +curl http://127.0.0.1:8080/ | python3 -m json.tool +``` + +### 7.5 Scale, update, rollback +Use the commands from Section 4. + +--- + +## 8. Conclusion + +The lab requirements for the base task were completed: +- local Kubernetes cluster started successfully +- application deployed with 3 replicas +- Service exposed through NodePort +- readiness and liveness probes implemented +- resource requests and limits configured +- Deployment scaled to 5 replicas +- rolling update demonstrated +- rollback demonstrated +- evidence collected into `k8s/evidence/` + +The repository also contains prepared bonus manifests for a second app and Ingress. 
diff --git a/k8s/collect-evidence.sh b/k8s/collect-evidence.sh new file mode 100644 index 0000000000..22d70da958 --- /dev/null +++ b/k8s/collect-evidence.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -euo pipefail + +OUT_DIR="k8s/evidence" +NS="devops-lab09" +APP="devops-info-service" + +mkdir -p "$OUT_DIR" + +kubectl cluster-info > "$OUT_DIR/01-cluster-info.txt" +kubectl get nodes -o wide > "$OUT_DIR/02-get-nodes.txt" +kubectl get all -n "$NS" -o wide > "$OUT_DIR/03-get-all.txt" +kubectl get pods,svc -n "$NS" -o wide > "$OUT_DIR/04-get-pods-svc.txt" +kubectl describe deployment "$APP" -n "$NS" > "$OUT_DIR/05-describe-deployment.txt" +kubectl rollout history deployment/"$APP" -n "$NS" > "$OUT_DIR/06-rollout-history.txt" +kubectl get ingress -n "$NS" -o wide > "$OUT_DIR/07-get-ingress.txt" 2>/dev/null || true +kubectl get events -n "$NS" --sort-by=.metadata.creationTimestamp > "$OUT_DIR/08-events.txt" + +echo "Evidence saved to $OUT_DIR" diff --git a/k8s/curl_result.txt b/k8s/curl_result.txt new file mode 100644 index 0000000000..8725f74d33 --- /dev/null +++ b/k8s/curl_result.txt @@ -0,0 +1,55 @@ + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed +100 917 100 917 0 0 20928 0 --:--:-- --:--:-- --:--:-- 21325 +{ + "endpoints": [ + { + "description": "Service information", + "method": "GET", + "path": "/" + }, + { + "description": "Liveness health check", + "method": "GET", + "path": "/health" + }, + { + "description": "Readiness health check", + "method": "GET", + "path": "/ready" + }, + { + "description": "Prometheus metrics", + "method": "GET", + "path": "/metrics" + } + ], + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.5.0" + }, + "runtime": { + "current_time": "2026-03-26T19:57:23.089Z", + "timezone": "UTC", + "uptime_human": "0 hours, 1 minute", + "uptime_seconds": 61 + }, + "service": { + "description": "DevOps course info service", + "framework": "Flask", 
+ "message": "Lab 09 primary deployment", + "name": "devops-info-service", + "variant": "app1", + "version": "lab09-v1" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 20, + "hostname": "devops-info-service-7b48589b6b-2b97b", + "platform": "Linux", + "platform_version": "Linux-5.15.153.1-microsoft-standard-WSL2-x86_64-with-glibc2.36", + "python_version": "3.13.1" + } +} \ No newline at end of file diff --git a/k8s/deploy-bonus.sh b/k8s/deploy-bonus.sh new file mode 100644 index 0000000000..f099f39332 --- /dev/null +++ b/k8s/deploy-bonus.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail + +kubectl apply -f k8s/deployment-app2.yml +kubectl apply -f k8s/service-app2.yml +kubectl apply -f k8s/ingress.yml +kubectl rollout status deployment/devops-info-service-app2 -n devops-lab09 +kubectl get ingress,pods,svc -n devops-lab09 -o wide diff --git a/k8s/deploy.sh b/k8s/deploy.sh new file mode 100644 index 0000000000..baef96f8f4 --- /dev/null +++ b/k8s/deploy.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -euo pipefail + +kubectl apply -f k8s/namespace.yml +kubectl apply -f k8s/deployment.yml +kubectl apply -f k8s/service.yml +kubectl rollout status deployment/devops-info-service -n devops-lab09 +kubectl get pods,svc -n devops-lab09 -o wide diff --git a/k8s/deployment-app2.yml b/k8s/deployment-app2.yml new file mode 100644 index 0000000000..4a0b472fde --- /dev/null +++ b/k8s/deployment-app2.yml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: devops-info-service-app2 + namespace: devops-lab09 + labels: + app.kubernetes.io/name: devops-info-service-app2 + app.kubernetes.io/component: web + app.kubernetes.io/part-of: devops-core-course +spec: + replicas: 2 + revisionHistoryLimit: 10 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + selector: + matchLabels: + app.kubernetes.io/name: devops-info-service-app2 + template: + metadata: + labels: + app.kubernetes.io/name: devops-info-service-app2 + 
app.kubernetes.io/component: web + app.kubernetes.io/part-of: devops-core-course + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: devops-info-service-app2 + image: dorley174/devops-info-service:latest + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 5000 + protocol: TCP + env: + - name: PORT + value: "5000" + - name: DEBUG + value: "False" + - name: APP_VARIANT + value: "app2" + - name: APP_MESSAGE + value: "Lab 09 bonus deployment" + - name: SERVICE_VERSION + value: "lab09-bonus" + resources: + requests: + cpu: "100m" + memory: "128Mi" + limits: + cpu: "250m" + memory: "256Mi" + readinessProbe: + httpGet: + path: /ready + port: http + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 2 + failureThreshold: 3 + livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true diff --git a/k8s/deployment.yml b/k8s/deployment.yml new file mode 100644 index 0000000000..283a808676 --- /dev/null +++ b/k8s/deployment.yml @@ -0,0 +1,78 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: devops-info-service + namespace: devops-lab09 + labels: + app.kubernetes.io/name: devops-info-service + app.kubernetes.io/component: web + app.kubernetes.io/part-of: devops-core-course +spec: + replicas: 3 + revisionHistoryLimit: 10 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + selector: + matchLabels: + app.kubernetes.io/name: devops-info-service + template: + metadata: + labels: + app.kubernetes.io/name: devops-info-service + app.kubernetes.io/component: web + app.kubernetes.io/part-of: devops-core-course + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: devops-info-service + image: dorley174/devops-info-service:latest + imagePullPolicy: 
IfNotPresent + ports: + - name: http + containerPort: 5000 + protocol: TCP + env: + - name: PORT + value: "5000" + - name: DEBUG + value: "False" + - name: APP_VARIANT + value: "app1" + - name: APP_MESSAGE + value: "Lab 09 primary deployment" + - name: SERVICE_VERSION + value: "lab09-v1" + resources: + requests: + cpu: "100m" + memory: "128Mi" + limits: + cpu: "250m" + memory: "256Mi" + readinessProbe: + httpGet: + path: /ready + port: http + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 2 + failureThreshold: 3 + livenessProbe: + httpGet: + path: /health + port: http + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 2 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true diff --git a/k8s/evidence/01-cluster-info.txt b/k8s/evidence/01-cluster-info.txt new file mode 100644 index 0000000000..378eadb21b --- /dev/null +++ b/k8s/evidence/01-cluster-info.txt @@ -0,0 +1,4 @@ +Kubernetes control plane is running at https://127.0.0.1:60412 +CoreDNS is running at https://127.0.0.1:60412/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy + +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 
diff --git a/k8s/evidence/02-get-nodes.txt b/k8s/evidence/02-get-nodes.txt new file mode 100644 index 0000000000..d9a2b26a89 --- /dev/null +++ b/k8s/evidence/02-get-nodes.txt @@ -0,0 +1,2 @@ +NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME +minikube Ready control-plane 14m v1.35.1 192.168.49.2 Debian GNU/Linux 12 (bookworm) 5.15.153.1-microsoft-standard-WSL2 docker://29.2.1 diff --git a/k8s/evidence/03-get-all.txt b/k8s/evidence/03-get-all.txt new file mode 100644 index 0000000000..ae2d1d6915 --- /dev/null +++ b/k8s/evidence/03-get-all.txt @@ -0,0 +1,14 @@ +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +pod/devops-info-service-7b48589b6b-2cf77 1/1 Running 0 5m26s 10.244.0.6 minikube +pod/devops-info-service-7b48589b6b-52j4f 1/1 Running 0 5m9s 10.244.0.8 minikube +pod/devops-info-service-7b48589b6b-wrvvj 1/1 Running 0 5m19s 10.244.0.7 minikube + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +service/devops-info-service NodePort 10.100.203.165 80:30080/TCP 13m app.kubernetes.io/name=devops-info-service + +NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR +deployment.apps/devops-info-service 3/3 3 3 13m devops-info-service dorley174/devops-info-service:latest app.kubernetes.io/name=devops-info-service + +NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR +replicaset.apps/devops-info-service-7b48589b6b 3 3 3 5m26s devops-info-service dorley174/devops-info-service:latest app.kubernetes.io/name=devops-info-service,pod-template-hash=7b48589b6b +replicaset.apps/devops-info-service-8689cb4bbc 0 0 0 13m devops-info-service dorley174/devops-info-service:latest app.kubernetes.io/name=devops-info-service,pod-template-hash=8689cb4bbc diff --git a/k8s/evidence/04-get-pods-svc.txt b/k8s/evidence/04-get-pods-svc.txt new file mode 100644 index 0000000000..037a7c981e --- /dev/null +++ b/k8s/evidence/04-get-pods-svc.txt @@ -0,0 +1,7 @@ +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED 
NODE READINESS GATES +pod/devops-info-service-7b48589b6b-2cf77 1/1 Running 0 5m27s 10.244.0.6 minikube +pod/devops-info-service-7b48589b6b-52j4f 1/1 Running 0 5m10s 10.244.0.8 minikube +pod/devops-info-service-7b48589b6b-wrvvj 1/1 Running 0 5m20s 10.244.0.7 minikube + +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +service/devops-info-service NodePort 10.100.203.165 80:30080/TCP 13m app.kubernetes.io/name=devops-info-service diff --git a/k8s/evidence/05-describe-deployment.txt b/k8s/evidence/05-describe-deployment.txt new file mode 100644 index 0000000000..1efc8774b5 --- /dev/null +++ b/k8s/evidence/05-describe-deployment.txt @@ -0,0 +1,57 @@ +Name: devops-info-service +Namespace: devops-lab09 +CreationTimestamp: Thu, 26 Mar 2026 22:35:19 +0300 +Labels: app.kubernetes.io/component=web + app.kubernetes.io/name=devops-info-service + app.kubernetes.io/part-of=devops-core-course +Annotations: deployment.kubernetes.io/revision: 2 +Selector: app.kubernetes.io/name=devops-info-service +Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable +StrategyType: RollingUpdate +MinReadySeconds: 0 +RollingUpdateStrategy: 0 max unavailable, 1 max surge +Pod Template: + Labels: app.kubernetes.io/component=web + app.kubernetes.io/name=devops-info-service + app.kubernetes.io/part-of=devops-core-course + Annotations: kubectl.kubernetes.io/restartedAt: 2026-03-26T22:42:54+03:00 + Containers: + devops-info-service: + Image: dorley174/devops-info-service:latest + Port: 5000/TCP (http) + Host Port: 0/TCP (http) + Limits: + cpu: 250m + memory: 256Mi + Requests: + cpu: 100m + memory: 128Mi + Liveness: http-get http://:http/health delay=15s timeout=2s period=10s #success=1 #failure=3 + Readiness: http-get http://:http/ready delay=5s timeout=2s period=5s #success=1 #failure=3 + Environment: + PORT: 5000 + DEBUG: False + APP_VARIANT: app1 + APP_MESSAGE: Lab 09 primary deployment + SERVICE_VERSION: lab09-v1 + Mounts: + Volumes: + Node-Selectors: + Tolerations: 
+Conditions: + Type Status Reason + ---- ------ ------ + Available True MinimumReplicasAvailable + Progressing True NewReplicaSetAvailable +OldReplicaSets: devops-info-service-8689cb4bbc (0/0 replicas created) +NewReplicaSet: devops-info-service-7b48589b6b (3/3 replicas created) +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal ScalingReplicaSet 13m deployment-controller Scaled up replica set devops-info-service-8689cb4bbc from 0 to 3 + Normal ScalingReplicaSet 5m27s deployment-controller Scaled up replica set devops-info-service-7b48589b6b from 0 to 1 + Normal ScalingReplicaSet 5m20s deployment-controller Scaled down replica set devops-info-service-8689cb4bbc from 3 to 2 + Normal ScalingReplicaSet 5m20s deployment-controller Scaled up replica set devops-info-service-7b48589b6b from 1 to 2 + Normal ScalingReplicaSet 5m10s deployment-controller Scaled down replica set devops-info-service-8689cb4bbc from 2 to 1 + Normal ScalingReplicaSet 5m10s deployment-controller Scaled up replica set devops-info-service-7b48589b6b from 2 to 3 + Normal ScalingReplicaSet 5m2s deployment-controller Scaled down replica set devops-info-service-8689cb4bbc from 1 to 0 diff --git a/k8s/evidence/06-rollout-history.txt b/k8s/evidence/06-rollout-history.txt new file mode 100644 index 0000000000..4e3de9fb60 --- /dev/null +++ b/k8s/evidence/06-rollout-history.txt @@ -0,0 +1,5 @@ +deployment.apps/devops-info-service +REVISION CHANGE-CAUSE +1 +2 + diff --git a/k8s/evidence/07-get-ingress.txt b/k8s/evidence/07-get-ingress.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/k8s/evidence/08-events.txt b/k8s/evidence/08-events.txt new file mode 100644 index 0000000000..953684f826 --- /dev/null +++ b/k8s/evidence/08-events.txt @@ -0,0 +1,38 @@ +LAST SEEN TYPE REASON OBJECT MESSAGE +13m Normal SuccessfulCreate replicaset/devops-info-service-8689cb4bbc Created pod: devops-info-service-8689cb4bbc-49c68 +13m Normal Scheduled 
pod/devops-info-service-8689cb4bbc-gbntr Successfully assigned devops-lab09/devops-info-service-8689cb4bbc-gbntr to minikube +13m Normal ScalingReplicaSet deployment/devops-info-service Scaled up replica set devops-info-service-8689cb4bbc from 0 to 3 +13m Normal Scheduled pod/devops-info-service-8689cb4bbc-49c68 Successfully assigned devops-lab09/devops-info-service-8689cb4bbc-49c68 to minikube +13m Normal SuccessfulCreate replicaset/devops-info-service-8689cb4bbc Created pod: devops-info-service-8689cb4bbc-gbntr +13m Normal SuccessfulCreate replicaset/devops-info-service-8689cb4bbc Created pod: devops-info-service-8689cb4bbc-tmcsb +13m Normal Scheduled pod/devops-info-service-8689cb4bbc-tmcsb Successfully assigned devops-lab09/devops-info-service-8689cb4bbc-tmcsb to minikube +7m19s Warning Failed pod/devops-info-service-8689cb4bbc-49c68 Error: container has runAsNonRoot and image has non-numeric user (app), cannot verify user is non-root (pod: "devops-info-service-8689cb4bbc-49c68_devops-lab09(73e9a23a-7db6-421c-b843-970354f1e086)", container: devops-info-service) +7m19s Normal Pulled pod/devops-info-service-8689cb4bbc-49c68 Container image "dorley174/devops-info-service:latest" already present on machine and can be accessed by the pod +7m21s Normal Pulled pod/devops-info-service-8689cb4bbc-gbntr Container image "dorley174/devops-info-service:latest" already present on machine and can be accessed by the pod +7m18s Warning Failed pod/devops-info-service-8689cb4bbc-tmcsb Error: container has runAsNonRoot and image has non-numeric user (app), cannot verify user is non-root (pod: "devops-info-service-8689cb4bbc-tmcsb_devops-lab09(67a638c8-08d0-4a24-a638-9a1e6b758032)", container: devops-info-service) +7m18s Normal Pulled pod/devops-info-service-8689cb4bbc-tmcsb Container image "dorley174/devops-info-service:latest" already present on machine and can be accessed by the pod +7m21s Warning Failed pod/devops-info-service-8689cb4bbc-gbntr Error: container has runAsNonRoot 
and image has non-numeric user (app), cannot verify user is non-root (pod: "devops-info-service-8689cb4bbc-gbntr_devops-lab09(6221b7e7-e23d-42bd-9957-c4efe897415f)", container: devops-info-service) +5m29s Normal ScalingReplicaSet deployment/devops-info-service Scaled up replica set devops-info-service-7b48589b6b from 0 to 1 +5m29s Normal SuccessfulCreate replicaset/devops-info-service-7b48589b6b Created pod: devops-info-service-7b48589b6b-2cf77 +5m29s Normal Scheduled pod/devops-info-service-7b48589b6b-2cf77 Successfully assigned devops-lab09/devops-info-service-7b48589b6b-2cf77 to minikube +5m28s Normal Started pod/devops-info-service-7b48589b6b-2cf77 Container started +5m28s Normal Created pod/devops-info-service-7b48589b6b-2cf77 Container created +5m28s Normal Pulled pod/devops-info-service-7b48589b6b-2cf77 Container image "dorley174/devops-info-service:latest" already present on machine and can be accessed by the pod +5m22s Normal SuccessfulDelete replicaset/devops-info-service-8689cb4bbc Deleted pod: devops-info-service-8689cb4bbc-49c68 +5m22s Normal Scheduled pod/devops-info-service-7b48589b6b-wrvvj Successfully assigned devops-lab09/devops-info-service-7b48589b6b-wrvvj to minikube +5m22s Normal SuccessfulCreate replicaset/devops-info-service-7b48589b6b Created pod: devops-info-service-7b48589b6b-wrvvj +5m22s Normal ScalingReplicaSet deployment/devops-info-service Scaled up replica set devops-info-service-7b48589b6b from 1 to 2 +5m22s Normal ScalingReplicaSet deployment/devops-info-service Scaled down replica set devops-info-service-8689cb4bbc from 3 to 2 +5m21s Normal Pulled pod/devops-info-service-7b48589b6b-wrvvj Container image "dorley174/devops-info-service:latest" already present on machine and can be accessed by the pod +5m21s Normal Created pod/devops-info-service-7b48589b6b-wrvvj Container created +5m21s Normal Started pod/devops-info-service-7b48589b6b-wrvvj Container started +5m12s Normal SuccessfulCreate replicaset/devops-info-service-7b48589b6b 
Created pod: devops-info-service-7b48589b6b-52j4f +5m12s Normal SuccessfulDelete replicaset/devops-info-service-8689cb4bbc Deleted pod: devops-info-service-8689cb4bbc-gbntr +5m12s Normal Scheduled pod/devops-info-service-7b48589b6b-52j4f Successfully assigned devops-lab09/devops-info-service-7b48589b6b-52j4f to minikube +5m12s Normal ScalingReplicaSet deployment/devops-info-service Scaled down replica set devops-info-service-8689cb4bbc from 2 to 1 +5m12s Normal ScalingReplicaSet deployment/devops-info-service Scaled up replica set devops-info-service-7b48589b6b from 2 to 3 +5m11s Normal Started pod/devops-info-service-7b48589b6b-52j4f Container started +5m11s Normal Created pod/devops-info-service-7b48589b6b-52j4f Container created +5m11s Normal Pulled pod/devops-info-service-7b48589b6b-52j4f Container image "dorley174/devops-info-service:latest" already present on machine and can be accessed by the pod +5m4s Normal SuccessfulDelete replicaset/devops-info-service-8689cb4bbc Deleted pod: devops-info-service-8689cb4bbc-tmcsb +5m4s Normal ScalingReplicaSet deployment/devops-info-service Scaled down replica set devops-info-service-8689cb4bbc from 1 to 0 diff --git a/k8s/evidence/09-deployment-before.txt b/k8s/evidence/09-deployment-before.txt new file mode 100644 index 0000000000..1564dc93c0 --- /dev/null +++ b/k8s/evidence/09-deployment-before.txt @@ -0,0 +1,2 @@ +NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR +devops-info-service 3/3 3 3 29m devops-info-service dorley174/devops-info-service:latest app.kubernetes.io/name=devops-info-service diff --git a/k8s/evidence/10-pods-before.txt b/k8s/evidence/10-pods-before.txt new file mode 100644 index 0000000000..69ceb653d0 --- /dev/null +++ b/k8s/evidence/10-pods-before.txt @@ -0,0 +1,4 @@ +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +devops-info-service-7b48589b6b-2b97b 1/1 Running 0 8m24s 10.244.0.14 minikube +devops-info-service-7b48589b6b-7fttq 1/1 Running 0 8m17s 10.244.0.15 
minikube +devops-info-service-7b48589b6b-zhkpz 1/1 Running 0 8m10s 10.244.0.16 minikube diff --git a/k8s/evidence/11-pods-after-scale.txt b/k8s/evidence/11-pods-after-scale.txt new file mode 100644 index 0000000000..5936628d54 --- /dev/null +++ b/k8s/evidence/11-pods-after-scale.txt @@ -0,0 +1,6 @@ +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +devops-info-service-7b48589b6b-2b97b 1/1 Running 0 8m32s 10.244.0.14 minikube +devops-info-service-7b48589b6b-7fttq 1/1 Running 0 8m25s 10.244.0.15 minikube +devops-info-service-7b48589b6b-jjtsm 1/1 Running 0 8s 10.244.0.17 minikube +devops-info-service-7b48589b6b-wmf6g 1/1 Running 0 7s 10.244.0.18 minikube +devops-info-service-7b48589b6b-zhkpz 1/1 Running 0 8m18s 10.244.0.16 minikube diff --git a/k8s/evidence/12-deployment-after-scale.txt b/k8s/evidence/12-deployment-after-scale.txt new file mode 100644 index 0000000000..6f3d84e203 --- /dev/null +++ b/k8s/evidence/12-deployment-after-scale.txt @@ -0,0 +1,2 @@ +NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR +devops-info-service 5/5 5 5 29m devops-info-service dorley174/devops-info-service:latest app.kubernetes.io/name=devops-info-service diff --git a/k8s/evidence/13-rollout-history-after-update.txt b/k8s/evidence/13-rollout-history-after-update.txt new file mode 100644 index 0000000000..76380fc135 --- /dev/null +++ b/k8s/evidence/13-rollout-history-after-update.txt @@ -0,0 +1,5 @@ +deployment.apps/devops-info-service +REVISION CHANGE-CAUSE +1 +4 +5 diff --git a/k8s/evidence/14-env-after-update.txt b/k8s/evidence/14-env-after-update.txt new file mode 100644 index 0000000000..c4222427cf --- /dev/null +++ b/k8s/evidence/14-env-after-update.txt @@ -0,0 +1,4 @@ + - name: APP_MESSAGE + value: Lab 09 rolling update + - name: SERVICE_VERSION + value: lab09-v2 diff --git a/k8s/evidence/15-rollout-history-after-rollback.txt b/k8s/evidence/15-rollout-history-after-rollback.txt new file mode 100644 index 0000000000..e2feb8609c --- /dev/null +++ 
b/k8s/evidence/15-rollout-history-after-rollback.txt @@ -0,0 +1,5 @@ +deployment.apps/devops-info-service +REVISION CHANGE-CAUSE +1 +5 +6 diff --git a/k8s/evidence/16-env-after-rollback.txt b/k8s/evidence/16-env-after-rollback.txt new file mode 100644 index 0000000000..abd5fafc86 --- /dev/null +++ b/k8s/evidence/16-env-after-rollback.txt @@ -0,0 +1,4 @@ + - name: APP_MESSAGE + value: Lab 09 primary deployment + - name: SERVICE_VERSION + value: lab09-v1 diff --git a/k8s/evidence/17-health.txt b/k8s/evidence/17-health.txt new file mode 100644 index 0000000000..2f5438bd07 --- /dev/null +++ b/k8s/evidence/17-health.txt @@ -0,0 +1 @@ +{"status":"healthy","timestamp":"2026-03-26T19:47:24.216Z","uptime_seconds":264,"variant":"app1"} diff --git a/k8s/evidence/18-ready.txt b/k8s/evidence/18-ready.txt new file mode 100644 index 0000000000..422d3a0176 --- /dev/null +++ b/k8s/evidence/18-ready.txt @@ -0,0 +1 @@ +{"message":"Lab 09 primary deployment","status":"ready","timestamp":"2026-03-26T19:47:24.232Z","uptime_seconds":264,"variant":"app1"} diff --git a/k8s/evidence/19-root-after-rollback.json b/k8s/evidence/19-root-after-rollback.json new file mode 100644 index 0000000000..e618153f29 --- /dev/null +++ b/k8s/evidence/19-root-after-rollback.json @@ -0,0 +1,52 @@ +{ + "endpoints": [ + { + "description": "Service information", + "method": "GET", + "path": "/" + }, + { + "description": "Liveness health check", + "method": "GET", + "path": "/health" + }, + { + "description": "Readiness health check", + "method": "GET", + "path": "/ready" + }, + { + "description": "Prometheus metrics", + "method": "GET", + "path": "/metrics" + } + ], + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.5.0" + }, + "runtime": { + "current_time": "2026-03-26T19:57:23.089Z", + "timezone": "UTC", + "uptime_human": "0 hours, 1 minute", + "uptime_seconds": 61 + }, + "service": { + "description": "DevOps course info service", + "framework": "Flask", + 
"message": "Lab 09 primary deployment", + "name": "devops-info-service", + "variant": "app1", + "version": "lab09-v1" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 20, + "hostname": "devops-info-service-7b48589b6b-2b97b", + "platform": "Linux", + "platform_version": "Linux-5.15.153.1-microsoft-standard-WSL2-x86_64-with-glibc2.36", + "python_version": "3.13.1" + } +} diff --git a/k8s/ingress.yml b/k8s/ingress.yml new file mode 100644 index 0000000000..2b6e7d130a --- /dev/null +++ b/k8s/ingress.yml @@ -0,0 +1,32 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: devops-course-ingress + namespace: devops-lab09 + annotations: + nginx.ingress.kubernetes.io/use-regex: "true" + nginx.ingress.kubernetes.io/rewrite-target: /$2 +spec: + ingressClassName: nginx + tls: + - hosts: + - local.example.com + secretName: apps-ingress-tls + rules: + - host: local.example.com + http: + paths: + - path: /app1(/|$)(.*) + pathType: ImplementationSpecific + backend: + service: + name: devops-info-service + port: + number: 80 + - path: /app2(/|$)(.*) + pathType: ImplementationSpecific + backend: + service: + name: devops-info-service-app2 + port: + number: 80 diff --git a/k8s/namespace.yml b/k8s/namespace.yml new file mode 100644 index 0000000000..3bc418d3e7 --- /dev/null +++ b/k8s/namespace.yml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: devops-lab09 + labels: + app.kubernetes.io/part-of: devops-core-course + app.kubernetes.io/managed-by: kubectl diff --git a/k8s/service-app2.yml b/k8s/service-app2.yml new file mode 100644 index 0000000000..14b9aba0df --- /dev/null +++ b/k8s/service-app2.yml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: devops-info-service-app2 + namespace: devops-lab09 + labels: + app.kubernetes.io/name: devops-info-service-app2 + app.kubernetes.io/component: web +spec: + type: NodePort + selector: + app.kubernetes.io/name: devops-info-service-app2 + ports: + - name: http + protocol: TCP + 
port: 80 + targetPort: http + nodePort: 30081 diff --git a/k8s/service.yml b/k8s/service.yml new file mode 100644 index 0000000000..c27e336f28 --- /dev/null +++ b/k8s/service.yml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: devops-info-service + namespace: devops-lab09 + labels: + app.kubernetes.io/name: devops-info-service + app.kubernetes.io/component: web +spec: + type: NodePort + selector: + app.kubernetes.io/name: devops-info-service + ports: + - name: http + protocol: TCP + port: 80 + targetPort: http + nodePort: 30080 diff --git a/lab04_proofs/proof.png b/lab04_proofs/proof.png new file mode 100644 index 0000000000..35724a8b29 Binary files /dev/null and b/lab04_proofs/proof.png differ diff --git a/lab04_proofs/pulumi_config_log.txt b/lab04_proofs/pulumi_config_log.txt new file mode 100644 index 0000000000..94e3e04dd9 --- /dev/null +++ b/lab04_proofs/pulumi_config_log.txt @@ -0,0 +1,282 @@ +PS C:\DevOps\DevOps-Core-Course> git add . +PS C:\DevOps\DevOps-Core-Course> git commit -m "feat: added terraform lab part" +[lab04 4686605] feat: added terraform lab part + 30 files changed, 1767 insertions(+), 1 deletion(-) + create mode 100644 .github/workflows/terraform-ci.yml + create mode 100644 app_python/docs/LAB04.md + create mode 100644 lab04_proofs/proof.png + create mode 100644 lab04_proofs/terraform_intalling_log.txt + create mode 100644 lab04_proofs/ubuntu_logs.txt + create mode 100644 pulumi/.gitignore + create mode 100644 pulumi/Pulumi.yaml + create mode 100644 pulumi/README.md + create mode 100644 pulumi/__main__.py + create mode 100644 pulumi/requirements.txt + create mode 100644 scripts/load-env.ps1 + create mode 100644 terraform/.gitignore + create mode 100644 terraform/.tflint.hcl + create mode 100644 terraform/README.md + create mode 100644 terraform/github/.gitignore + create mode 100644 terraform/github/.tflint.hcl + create mode 100644 terraform/github/main.tf + create mode 100644 terraform/github/outputs.tf + create mode 
100644 terraform/github/providers.tf + create mode 100644 terraform/github/terraform.tfvars.example + create mode 100644 terraform/github/variables.tf + create mode 100644 terraform/github/versions.tf + create mode 100644 terraform/locals.tf + create mode 100644 terraform/main.tf + create mode 100644 terraform/outputs.tf + create mode 100644 terraform/providers.tf + create mode 100644 terraform/terraform.tfvars.example + create mode 100644 terraform/variables.tf + create mode 100644 terraform/versions.tf +PS C:\DevOps\DevOps-Core-Course> git push origin lab04 +Enumerating objects: 47, done. +Counting objects: 100% (47/47), done. +Delta compression using up to 20 threads +Compressing objects: 100% (36/36), done. +Writing objects: 100% (41/41), 113.24 KiB | 5.66 MiB/s, done. +Total 41 (delta 2), reused 0 (delta 0), pack-reused 0 (from 0) +remote: Resolving deltas: 100% (2/2), completed with 2 local objects. +remote: +remote: Create a pull request for 'lab04' on GitHub by visiting: +remote: https://github.com/dorley174/DevOps-Core-Course/pull/new/lab04 +remote: +To https://github.com/dorley174/DevOps-Core-Course + * [new branch] lab04 -> lab04 +PS C:\DevOps\DevOps-Core-Course> .\scripts\load-env.ps1 +Loaded .env OK +YC_CLOUD_ID=b1gca960emnn9qqikne9 +YC_FOLDER_ID=b1g82kdcn5grlmu79ano +YC_ZONE=ru-central1-a +YC_SERVICE_ACCOUNT_KEY_FILE=C:\DevOps\DevOps-Core-Course\key.json +PS C:\DevOps\DevOps-Core-Course> winget install -e --id Pulumi.Pulumi +Найдено Pulumi [Pulumi.Pulumi] Версия 3.222.0 +Лицензия на это приложение предоставлена вам владельцем. +Корпорация Майкрософт не несет ответственность за сторонние пакеты и не предоставляет для них никакие лицензии. +Скачивание https://github.com/pulumi/pulumi-winget/releases/download/v3.222.0/pulumi-3.222.0-windows-x64.msi + ██████████████████████████████ 87.8 MB / 87.8 MB +Хэш установщика успешно проверен +Запуск установки пакета... 
+Успешно установлено +PS C:\DevOps\DevOps-Core-Course> pulumi version +pulumi : Имя "pulumi" не распознано как имя командлета, функции, файла сценария или выполняемой программы. Проверьте прави +льность написания имени, а также наличие и правильность пути, после чего повторите попытку. +строка:1 знак:1 ++ pulumi version ++ ~~~~~~ + + CategoryInfo : ObjectNotFound: (pulumi:String) [], CommandNotFoundException + + FullyQualifiedErrorId : CommandNotFoundException + +PS C:\DevOps\DevOps-Core-Course> + * Журнал восстановлен + + +PS C:\DevOps\DevOps-Core-Course> .\scripts\load-env.ps1 +Loaded .env OK +YC_CLOUD_ID=b1gca960emnn9qqikne9 +YC_FOLDER_ID=b1g82kdcn5grlmu79ano +YC_ZONE=ru-central1-a +YC_SERVICE_ACCOUNT_KEY_FILE=C:\DevOps\DevOps-Core-Course\key.json +PS C:\DevOps\DevOps-Core-Course> pulumi version +v3.222.0 +PS C:\DevOps\DevOps-Core-Course> mkdir pulumi +mkdir : Элемент с указанным именем C:\DevOps\DevOps-Core-Course\pulumi уже существует. +строка:1 знак:1 ++ mkdir pulumi ++ ~~~~~~~~~~~~ + + CategoryInfo : ResourceExists: (C:\DevOps\DevOps-Core-Course\pulumi:String) [New-Item], IOException + + FullyQualifiedErrorId : DirectoryExist,Microsoft.PowerShell.Commands.NewItemCommand + +PS C:\DevOps\DevOps-Core-Course> cd pulumi +PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi new python +error: C:\DevOps\DevOps-Core-Course\pulumi is not empty; use --force to continue and overwrite existing files, or use --dir to specify an empty directory. +PS C:\DevOps\DevOps-Core-Course\pulumi> tree /f +Структура папок +Серийный номер тома: 56C3-7760 +C:. 
+│ .gitignore +│ Pulumi.yaml +│ README.md +│ requirements.txt +│ __main__.py +│ +└───__pycache__ + __main__.cpython-311.pyc + +PS C:\DevOps\DevOps-Core-Course\pulumi> python -m venv venv +PS C:\DevOps\DevOps-Core-Course\pulumi> .\venv\Scripts\activate +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pip install -r requirements.txt +Collecting pulumi<4.0.0,>=3.0.0 (from -r requirements.txt (line 1)) + Downloading pulumi-3.222.0-py3-none-any.whl.metadata (3.8 kB) +Collecting pulumi-yandex (from -r requirements.txt (line 2)) + Downloading pulumi_yandex-0.13.0.tar.gz (425 kB) + Installing build dependencies ... done + Getting requirements to build wheel ... done + Preparing metadata (pyproject.toml) ... done +Collecting debugpy~=1.8.7 (from pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) + Downloading debugpy-1.8.20-cp313-cp313-win_amd64.whl.metadata (1.5 kB) +Collecting dill~=0.4 (from pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) + Downloading dill-0.4.1-py3-none-any.whl.metadata (10 kB) +Collecting grpcio<2,>=1.68.1 (from pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) + Downloading grpcio-1.78.0-cp313-cp313-win_amd64.whl.metadata (3.9 kB) +Requirement already satisfied: pip>=24.3.1 in c:\devops\devops-core-course\pulumi\venv\lib\site-packages (from pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) (24.3.1) +Collecting protobuf<7,>=3.20.3 (from pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) + Downloading protobuf-6.33.5-cp310-abi3-win_amd64.whl.metadata (593 bytes) +Collecting pyyaml~=6.0 (from pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) + Downloading pyyaml-6.0.3-cp313-cp313-win_amd64.whl.metadata (2.4 kB) +Collecting semver~=3.0 (from pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) + Downloading semver-3.0.4-py3-none-any.whl.metadata (6.8 kB) +Collecting parver>=0.2.1 (from pulumi-yandex->-r requirements.txt (line 2)) + Downloading parver-0.5-py3-none-any.whl.metadata (2.7 kB) +Collecting typing-extensions~=4.12 (from 
grpcio<2,>=1.68.1->pulumi<4.0.0,>=3.0.0->-r requirements.txt (line 1)) + Using cached typing_extensions-4.15.0-py3-none-any.whl.metadata (3.3 kB) +Collecting arpeggio>=1.7 (from parver>=0.2.1->pulumi-yandex->-r requirements.txt (line 2)) + Downloading Arpeggio-2.0.3-py2.py3-none-any.whl.metadata (2.4 kB) +Collecting attrs>=19.2 (from parver>=0.2.1->pulumi-yandex->-r requirements.txt (line 2)) + Using cached attrs-25.4.0-py3-none-any.whl.metadata (10 kB) +Downloading pulumi-3.222.0-py3-none-any.whl (390 kB) +Downloading debugpy-1.8.20-cp313-cp313-win_amd64.whl (5.4 MB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.4/5.4 MB 7.1 MB/s eta 0:00:00 +Downloading dill-0.4.1-py3-none-any.whl (120 kB) +Downloading grpcio-1.78.0-cp313-cp313-win_amd64.whl (4.8 MB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 4.8/4.8 MB 7.6 MB/s eta 0:00:00 +Downloading parver-0.5-py3-none-any.whl (15 kB) +Downloading protobuf-6.33.5-cp310-abi3-win_amd64.whl (437 kB) +Downloading pyyaml-6.0.3-cp313-cp313-win_amd64.whl (154 kB) +Downloading semver-3.0.4-py3-none-any.whl (17 kB) +Downloading Arpeggio-2.0.3-py2.py3-none-any.whl (54 kB) +Using cached attrs-25.4.0-py3-none-any.whl (67 kB) +Using cached typing_extensions-4.15.0-py3-none-any.whl (44 kB) +Building wheels for collected packages: pulumi-yandex + Building wheel for pulumi-yandex (pyproject.toml) ... 
done + Created wheel for pulumi-yandex: filename=pulumi_yandex-0.13.0-py3-none-any.whl size=606675 sha256=8441929cfc6b1f5855695216aff4c9946e00cbdecca3d15f56ed027966eaa71e + Stored in directory: c:\users\данил\appdata\local\pip\cache\wheels\d0\b7\cf\9a6c587521036e8cbd68fa735d2f5d352bb7fb4f71d8d5aaac +Successfully built pulumi-yandex +Installing collected packages: arpeggio, typing-extensions, semver, pyyaml, protobuf, dill, debugpy, attrs, parver, grpcio, pulumi, pulumi-yandex +Successfully installed arpeggio-2.0.3 attrs-25.4.0 debugpy-1.8.20 dill-0.4.1 grpcio-1.78.0 parver-0.5 protobuf-6.33.5 pulumi-3.222.0 pulumi-yandex-0.13.0 pyyaml-6.0.3 semver-3.0.4 typing-extensions-4.15.0 + +[notice] A new release of pip is available: 24.3.1 -> 26.0.1 +[notice] To update, run: python.exe -m pip install --upgrade pip +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi version +v3.222.0 +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> python --version +Python 3.13.1 +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi login file://C:\DevOps\DevOps-Core-Course\pulumi\.pulumi-state +error: problem logging in: unable to open bucket file:///C:/DevOps/DevOps-Core-Course/pulumi/.pulumi-state?no_tmp_dir=true: GetFileAttributesEx C:\DevOps\DevOps-Core-Course\pulumi\.pulumi-state: The system cannot find the file specified. 
+(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> New-Item -ItemType Directory -Force .pulumi-state | Out-Null +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi login file://C:\DevOps\DevOps-Core-Course\pulumi\.pulumi-state +Logged in to Dorley as Dorley\Данил (file://C:/DevOps/DevOps-Core-Course/pulumi/.pulumi-state) +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi whoami +Dorley\Данил +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi stack ls +NAME LAST UPDATE RESOURCE COUNT +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi stack init dev +Enter your passphrase to protect config/secrets: +Re-enter your passphrase to confirm: +Created stack 'dev' +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> cd C:\DevOps\DevOps-Core-Course +(venv) PS C:\DevOps\DevOps-Core-Course> .\scripts\load-env.ps1 +Loaded .env OK +YC_CLOUD_ID=b1gca960emnn9qqikne9 +YC_FOLDER_ID=b1g82kdcn5grlmu79ano +YC_ZONE=ru-central1-a +YC_SERVICE_ACCOUNT_KEY_FILE=C:\DevOps\DevOps-Core-Course\key.json +(venv) PS C:\DevOps\DevOps-Core-Course> cd pulumi +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> .\venv\Scripts\activate +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi config set zone ru-central1-a +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi config set subnetCidr 10.10.0.0/24 +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi config set allowedSshCidr "95.111.204.70/32" +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi config set sshUser ubuntu +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi config set sshPublicKeyPath "~/.ssh/lab04_ed25519.pub" +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi config set imageFamily ubuntu-2404-lts +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi preview +Enter your passphrase to unlock config/secrets + (set PULUMI_CONFIG_PASSPHRASE or PULUMI_CONFIG_PASSPHRASE_FILE to remember): +Enter your passphrase to unlock config/secrets +Previewing update (dev): + Type Name Plan Info + + pulumi:pulumi:Stack 
lab04-pulumi-yc-dev create 1 error +Diagnostics: + pulumi:pulumi:Stack (lab04-pulumi-yc-dev): + error: Program failed with an unhandled exception: + Traceback (most recent call last): + File "C:\DevOps\DevOps-Core-Course\pulumi\__main__.py", line 4, in + import pulumi_yandex as yandex + File "C:\DevOps\DevOps-Core-Course\pulumi\venv\Lib\site-packages\pulumi_yandex\__init__.py", line 5, in + from . import _utilities + File "C:\DevOps\DevOps-Core-Course\pulumi\venv\Lib\site-packages\pulumi_yandex\_utilities.py", line 10, in + import pkg_resources + ModuleNotFoundError: No module named 'pkg_resources' + +Resources: + + 1 to create + 1 errored + +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> python -m pip install --upgrade pip setuptools wheel +Requirement already satisfied: pip in c:\devops\devops-core-course\pulumi\venv\lib\site-packages (24.3.1) +Collecting pip + Downloading pip-26.0.1-py3-none-any.whl.metadata (4.7 kB) +Collecting setuptools + Using cached setuptools-82.0.0-py3-none-any.whl.metadata (6.6 kB) +Collecting wheel + Downloading wheel-0.46.3-py3-none-any.whl.metadata (2.4 kB) +Collecting packaging>=24.0 (from wheel) + Using cached packaging-26.0-py3-none-any.whl.metadata (3.3 kB) +Downloading pip-26.0.1-py3-none-any.whl (1.8 MB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.8/1.8 MB 8.3 MB/s eta 0:00:00 +Using cached setuptools-82.0.0-py3-none-any.whl (1.0 MB) +Downloading wheel-0.46.3-py3-none-any.whl (30 kB) +Using cached packaging-26.0-py3-none-any.whl (74 kB) +Installing collected packages: setuptools, pip, packaging, wheel + Attempting uninstall: pip + Found existing installation: pip 24.3.1 + Uninstalling pip-24.3.1: + Successfully uninstalled pip-24.3.1 +Successfully installed packaging-26.0 pip-26.0.1 setuptools-82.0.0 wheel-0.46.3 +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> python -c "import pkg_resources; print('pkg_resources OK')" +Traceback (most recent call last): + File "", line 1, in + import pkg_resources; print('pkg_resources OK') + 
^^^^^^^^^^^^^^^^^^^^ +ModuleNotFoundError: No module named 'pkg_resources' +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> winget install -e --id Python.Python.3.12 +Найден существующий установленный пакет. Попытка обновления установленного пакета... +Найдено Python 3.12 [Python.Python.3.12] Версия 3.12.10 +Лицензия на это приложение предоставлена вам владельцем. +Корпорация Майкрософт не несет ответственность за сторонние пакеты и не предоставляет для них никакие лицензии. +Скачивание https://www.python.org/ftp/python/3.12.10/python-3.12.10-amd64.exe + ██████████████████████████████ 25.7 MB / 25.7 MB +Хэш установщика успешно проверен +Запуск установки пакета... +Вы отменили установку. +Сбой установки с кодом выхода: 1602 +Журнал установщика доступен по адресу: C:\Users\Данил\AppData\Local\Packages\Microsoft.DesktopAppInstaller_8wekyb3d8bbwe\LocalState\DiagOutputDir\Python.Python.3.12.3.12.10-26-02-19-23-43-29.log +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pip uninstall -y setuptools +Found existing installation: setuptools 82.0.0 +Uninstalling setuptools-82.0.0: + Successfully uninstalled setuptools-82.0.0 +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pip install setuptools==80.9.0 +Collecting setuptools==80.9.0 + Downloading setuptools-80.9.0-py3-none-any.whl.metadata (6.6 kB) +Downloading setuptools-80.9.0-py3-none-any.whl (1.2 MB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 6.8 MB/s 0:00:00 +Installing collected packages: setuptools +Successfully installed setuptools-80.9.0 +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> python -c "import pkg_resources; print('pkg_resources OK')" +:1: UserWarning: pkg_resources is deprecated as an API. See https://setuptools.pypa.io/en/latest/pkg_resources.html. The pkg_resources package is slated for removal as early as 2025-11-30. Refrain from using this package or pin to Setuptools<81. 
+ import pkg_resources; print('pkg_resources OK') +pkg_resources OK +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi stack ls +NAME LAST UPDATE RESOURCE COUNT +dev* 11 minutes ago 0 +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi stack select dev +(venv) PS C:\DevOps\DevOps-Core-Course\pulumi> pulumi config +KEY VALUE +allowedSshCidr 95.111.204.70/32 +imageFamily ubuntu-2404-lts +sshPublicKeyPath ~/.ssh/lab04_ed25519.pub +sshUser ubuntu +subnetCidr 10.10.0.0/24 +zone ru-central1-a \ No newline at end of file diff --git a/lab04_proofs/pulumi_result.txt b/lab04_proofs/pulumi_result.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lab04_proofs/terraform_intalling_log.txt b/lab04_proofs/terraform_intalling_log.txt new file mode 100644 index 0000000000..ae90e9f9bf --- /dev/null +++ b/lab04_proofs/terraform_intalling_log.txt @@ -0,0 +1,282 @@ +PS C:\DevOps\DevOps-Core-Course> .\scripts\load-env.ps1 +Loaded .env OK +YC_CLOUD_ID=b1gca960emnn9qqikne9 +YC_FOLDER_ID=b1g82kdcn5grlmu79ano +YC_ZONE=ru-central1-a +YC_SERVICE_ACCOUNT_KEY_FILE=C:\DevOps\DevOps-Core-Course\key.json +PS C:\DevOps\DevOps-Core-Course> terraform -chdir=terraform apply +data.yandex_compute_image.os: Reading... +data.yandex_compute_image.os: Read complete after 1s [id=fd8lt661chfo5i13a40d] + +Terraform used the selected providers to generate the following execution plan. 
Resource actions are indicated +with the following symbols: + + create + +Terraform will perform the following actions: + + # yandex_compute_instance.vm will be created + + resource "yandex_compute_instance" "vm" { + + allow_stopping_for_update = true + + created_at = (known after apply) + + folder_id = (known after apply) + + fqdn = (known after apply) + + gpu_cluster_id = (known after apply) + + hardware_generation = (known after apply) + + hostname = (known after apply) + + id = (known after apply) + + labels = { + + "project" = "lab04" + } + + maintenance_grace_period = (known after apply) + + maintenance_policy = (known after apply) + + metadata = { + + "ssh-keys" = "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + } + + name = "lab04-vm" + + network_acceleration_type = "standard" + + platform_id = "standard-v2" + + status = (known after apply) + + zone = "ru-central1-a" + + + boot_disk { + + auto_delete = true + + device_name = (known after apply) + + disk_id = (known after apply) + + mode = (known after apply) + + + initialize_params { + + block_size = (known after apply) + + description = (known after apply) + + image_id = "fd8lt661chfo5i13a40d" + + name = (known after apply) + + size = 10 + + snapshot_id = (known after apply) + + type = "network-hdd" + } + } + + + metadata_options (known after apply) + + + network_interface { + + index = (known after apply) + + ip_address = (known after apply) + + ipv4 = true + + ipv6 = (known after apply) + + ipv6_address = (known after apply) + + mac_address = (known after apply) + + nat = true + + nat_ip_address = (known after apply) + + nat_ip_version = (known after apply) + + security_group_ids = (known after apply) + + subnet_id = (known after apply) + } + + + placement_policy (known after apply) + + + resources { + + core_fraction = 20 + + cores = 2 + + memory = 1 + } + + + scheduling_policy { + + preemptible = false + } + } + + # yandex_vpc_network.lab_net will 
be created + + resource "yandex_vpc_network" "lab_net" { + + created_at = (known after apply) + + default_security_group_id = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = { + + "project" = "lab04" + } + + name = "lab04-net" + + subnet_ids = (known after apply) + } + + # yandex_vpc_security_group.vm_sg will be created + + resource "yandex_vpc_security_group" "vm_sg" { + + created_at = (known after apply) + + description = "Security Group for Lab04 VM" + + folder_id = (known after apply) + + id = (known after apply) + + labels = { + + "project" = "lab04" + } + + name = "lab04-sg" + + network_id = (known after apply) + + status = (known after apply) + + + egress { + + description = "Allow all outbound" + + from_port = 0 + + id = (known after apply) + + labels = (known after apply) + + port = -1 + + protocol = "ANY" + + to_port = 65535 + + v4_cidr_blocks = [ + + "0.0.0.0/0", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + + + ingress { + + description = "App port (Flask)" + + from_port = -1 + + id = (known after apply) + + labels = (known after apply) + + port = 5000 + + protocol = "TCP" + + to_port = -1 + + v4_cidr_blocks = [ + + "0.0.0.0/0", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + + ingress { + + description = "HTTP" + + from_port = -1 + + id = (known after apply) + + labels = (known after apply) + + port = 80 + + protocol = "TCP" + + to_port = -1 + + v4_cidr_blocks = [ + + "0.0.0.0/0", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + + ingress { + + description = "SSH from allowed CIDR" + + from_port = -1 + + id = (known after apply) + + labels = (known after apply) + + port = 22 + + protocol = "TCP" + + to_port = -1 + + v4_cidr_blocks = [ + + "95.111.204.70/32", + ] + + v6_cidr_blocks = [] + # (2 unchanged attributes hidden) + } + } + + # yandex_vpc_subnet.lab_subnet will be created + + resource "yandex_vpc_subnet" "lab_subnet" { + + 
created_at = (known after apply) + + folder_id = (known after apply) + + id = (known after apply) + + labels = { + + "project" = "lab04" + } + + name = "lab04-subnet" + + network_id = (known after apply) + + v4_cidr_blocks = [ + + "10.10.0.0/24", + ] + + v6_cidr_blocks = (known after apply) + + zone = "ru-central1-a" + } + +Plan: 4 to add, 0 to change, 0 to destroy. + +Changes to Outputs: + + app_url = (known after apply) + + http_url = (known after apply) + + internal_ip = (known after apply) + + public_ip = (known after apply) + + ssh_command = (known after apply) + + vm_id = (known after apply) +╷ +│ Warning: Cannot connect to YC tool initialization service. Network connectivity to the service is required for provider version control. +│ +│ +│ with provider["registry.terraform.io/yandex-cloud/yandex"], +│ on providers.tf line 1, in provider "yandex": +│ 1: provider "yandex" { +│ +╵ + +Do you want to perform these actions? + Terraform will perform the actions described above. + Only 'yes' will be accepted to approve. + + Enter a value: yes + +yandex_vpc_network.lab_net: Creating... +yandex_vpc_network.lab_net: Creation complete after 4s [id=enpq59lnhclspku28afm] +yandex_vpc_subnet.lab_subnet: Creating... +yandex_vpc_security_group.vm_sg: Creating... +yandex_vpc_subnet.lab_subnet: Creation complete after 1s [id=e9bedb7ig70u3rpvqeru] +yandex_vpc_security_group.vm_sg: Creation complete after 3s [id=enp5bnpeefpntqlukehb] +yandex_compute_instance.vm: Creating... +yandex_compute_instance.vm: Still creating... [00m10s elapsed] +yandex_compute_instance.vm: Still creating... [00m20s elapsed] +yandex_compute_instance.vm: Still creating... [00m30s elapsed] +yandex_compute_instance.vm: Creation complete after 35s [id=fhmdplo02ifil4mk7odj] +╷ +│ Warning: Cannot connect to YC tool initialization service. Network connectivity to the service is required for provider version control. 
+│ +│ +│ with provider["registry.terraform.io/yandex-cloud/yandex"], +│ on providers.tf line 1, in provider "yandex": +│ 1: provider "yandex" { +│ +╵ + +Apply complete! Resources: 4 added, 0 changed, 0 destroyed. + +Outputs: + +app_url = "http://89.169.147.72:5000/" +http_url = "http://89.169.147.72/" +internal_ip = "10.10.0.22" +public_ip = "89.169.147.72" +ssh_command = "ssh -i ~/.ssh/id_ed25519 ubuntu@89.169.147.72" +vm_id = "fhmdplo02ifil4mk7odj" +PS C:\DevOps\DevOps-Core-Course> ssh -i $env:USERPROFILE\.ssh\lab04_ed25519 ubuntu@89.169.147.72 +The authenticity of host '89.169.147.72 (89.169.147.72)' can't be established. +ED25519 key fingerprint is SHA256:87OV4P2mx41pGWV3CIi0gSmPp8kE6JXzmB6sfnxBg2c. +This key is not known by any other names. +Are you sure you want to continue connecting (yes/no/[fingerprint])? yes +Warning: Permanently added '89.169.147.72' (ED25519) to the list of known hosts. +Welcome to Ubuntu 24.04.4 LTS (GNU/Linux 6.8.0-100-generic x86_64) + + * Documentation: https://help.ubuntu.com + * Management: https://landscape.canonical.com + * Support: https://ubuntu.com/pro + + System information as of Thu Feb 19 19:29:02 UTC 2026 + + System load: 0.2 Processes: 100 + Usage of /: 23.1% of 9.04GB Users logged in: 0 + Memory usage: 19% IPv4 address for eth0: 10.10.0.22 + Swap usage: 0% + + +Expanded Security Maintenance for Applications is not enabled. + +0 updates can be applied immediately. + +Enable ESM Apps to receive additional future security updates. +See https://ubuntu.com/esm or run: sudo pro status + + + +The programs included with the Ubuntu system are free software; +the exact distribution terms for each program are described in the +individual files in /usr/share/doc/*/copyright. + +Ubuntu comes with ABSOLUTELY NO WARRANTY, to the extent permitted by +applicable law. + +To run a command as administrator (user "root"), use "sudo ". +See "man sudo_root" for details. 
\ No newline at end of file diff --git a/lab04_proofs/terraform_result.txt b/lab04_proofs/terraform_result.txt new file mode 100644 index 0000000000..ca88584f03 --- /dev/null +++ b/lab04_proofs/terraform_result.txt @@ -0,0 +1,488 @@ +ubuntu@fhmdplo02ifil4mk7odj:~$ sudo apt update +Hit:1 http://mirror.yandex.ru/ubuntu noble InRelease +Get:2 http://mirror.yandex.ru/ubuntu noble-updates InRelease [126 kB] +Get:3 http://mirror.yandex.ru/ubuntu noble-backports InRelease [126 kB] +Get:4 http://security.ubuntu.com/ubuntu noble-security InRelease [126 kB] +Get:5 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 Packages [1771 kB] +Get:6 http://mirror.yandex.ru/ubuntu noble-updates/main Translation-en [328 kB] +Get:7 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 Components [175 kB] +Get:8 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 c-n-f Metadata [16.5 kB] +Get:9 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 Packages [1557 kB] +Get:10 http://mirror.yandex.ru/ubuntu noble-updates/universe Translation-en [316 kB] +Get:11 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 Components [386 kB] +Get:12 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 c-n-f Metadata [32.6 kB] +Get:13 http://mirror.yandex.ru/ubuntu noble-updates/restricted amd64 Packages [2663 kB] +Get:14 http://mirror.yandex.ru/ubuntu noble-updates/restricted Translation-en [613 kB] +Get:15 http://mirror.yandex.ru/ubuntu noble-updates/restricted amd64 Components [212 B] +Get:16 http://mirror.yandex.ru/ubuntu noble-updates/multiverse Translation-en [7044 B] +Get:17 http://mirror.yandex.ru/ubuntu noble-updates/multiverse amd64 Components [940 B] +Get:18 http://mirror.yandex.ru/ubuntu noble-backports/main amd64 Components [7308 B] +Get:19 http://mirror.yandex.ru/ubuntu noble-backports/universe amd64 Components [10.5 kB] +Get:20 http://mirror.yandex.ru/ubuntu noble-backports/restricted amd64 Components [216 B] +Get:21 http://mirror.yandex.ru/ubuntu 
noble-backports/multiverse amd64 Components [212 B] +Get:22 http://security.ubuntu.com/ubuntu noble-security/main amd64 Packages [1474 kB] +Get:23 http://security.ubuntu.com/ubuntu noble-security/main Translation-en [237 kB] +Get:24 http://security.ubuntu.com/ubuntu noble-security/main amd64 Components [21.5 kB] +Get:25 http://security.ubuntu.com/ubuntu noble-security/main amd64 c-n-f Metadata [9892 B] +Get:26 http://security.ubuntu.com/ubuntu noble-security/universe amd64 Packages [935 kB] +Get:27 http://security.ubuntu.com/ubuntu noble-security/universe Translation-en [214 kB] +Get:28 http://security.ubuntu.com/ubuntu noble-security/universe amd64 Components [74.2 kB] +Get:29 http://security.ubuntu.com/ubuntu noble-security/universe amd64 c-n-f Metadata [20.0 kB] +Get:30 http://security.ubuntu.com/ubuntu noble-security/restricted amd64 Packages [2516 kB] +Get:31 http://security.ubuntu.com/ubuntu noble-security/restricted Translation-en [582 kB] +Get:32 http://security.ubuntu.com/ubuntu noble-security/restricted amd64 Components [212 B] +Get:33 http://security.ubuntu.com/ubuntu noble-security/multiverse amd64 Components [212 B] +Fetched 14.3 MB in 3s (5184 kB/s) +Reading package lists... Done +Building dependency tree... Done +Reading state information... Done +8 packages can be upgraded. Run 'apt list --upgradable' to see them. +ubuntu@fhmdplo02ifil4mk7odj:~$ sudo apt install -y python3-venv python3-pip +Reading package lists... Done +Building dependency tree... Done +Reading state information... 
Done +The following additional packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential bzip2 cpp cpp-13 cpp-13-x86-64-linux-gnu + cpp-x86-64-linux-gnu dpkg-dev fakeroot g++ g++-13 g++-13-x86-64-linux-gnu g++-x86-64-linux-gnu gcc gcc-13 + gcc-13-base gcc-13-x86-64-linux-gnu gcc-14-base gcc-x86-64-linux-gnu javascript-common libalgorithm-diff-perl + libalgorithm-diff-xs-perl libalgorithm-merge-perl libasan8 libatomic1 libbinutils libcc1-0 libctf-nobfd0 + libctf0 libdpkg-perl libexpat1 libexpat1-dev libfakeroot libfile-fcntllock-perl libgcc-13-dev libgcc-s1 + libgomp1 libgprofng0 libhwasan0 libisl23 libitm1 libjs-jquery libjs-sphinxdoc libjs-underscore liblsan0 libmpc3 + libpython3-dev libpython3.12-dev libquadmath0 libsframe1 libstdc++-13-dev libstdc++6 libtsan2 libubsan1 + lto-disabled-list make python3-dev python3-pip-whl python3-setuptools-whl python3-wheel python3.12-dev + python3.12-venv zlib1g-dev +Suggested packages: + binutils-doc gprofng-gui bzip2-doc cpp-doc gcc-13-locales cpp-13-doc debian-keyring g++-multilib + g++-13-multilib gcc-13-doc gcc-multilib autoconf automake libtool flex bison gdb gcc-doc gcc-13-multilib + gdb-x86-64-linux-gnu apache2 | lighttpd | httpd bzr libstdc++-13-doc make-doc +The following NEW packages will be installed: + binutils binutils-common binutils-x86-64-linux-gnu build-essential bzip2 cpp cpp-13 cpp-13-x86-64-linux-gnu + cpp-x86-64-linux-gnu dpkg-dev fakeroot g++ g++-13 g++-13-x86-64-linux-gnu g++-x86-64-linux-gnu gcc gcc-13 + gcc-13-base gcc-13-x86-64-linux-gnu gcc-x86-64-linux-gnu javascript-common libalgorithm-diff-perl + libalgorithm-diff-xs-perl libalgorithm-merge-perl libasan8 libatomic1 libbinutils libcc1-0 libctf-nobfd0 + libctf0 libdpkg-perl libexpat1-dev libfakeroot libfile-fcntllock-perl libgcc-13-dev libgomp1 libgprofng0 + libhwasan0 libisl23 libitm1 libjs-jquery libjs-sphinxdoc libjs-underscore liblsan0 libmpc3 libpython3-dev + libpython3.12-dev libquadmath0 libsframe1 
libstdc++-13-dev libtsan2 libubsan1 lto-disabled-list make + python3-dev python3-pip python3-pip-whl python3-setuptools-whl python3-venv python3-wheel python3.12-dev + python3.12-venv zlib1g-dev +The following packages will be upgraded: + gcc-14-base libexpat1 libgcc-s1 libstdc++6 +4 upgraded, 63 newly installed, 0 to remove and 4 not upgraded. +Need to get 79.5 MB of archives. +After this operation, 272 MB of additional disk space will be used. +Get:1 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 gcc-14-base amd64 14.2.0-4ubuntu2~24.04.1 [51.0 kB] +Get:2 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libstdc++6 amd64 14.2.0-4ubuntu2~24.04.1 [792 kB] +Get:3 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libgcc-s1 amd64 14.2.0-4ubuntu2~24.04.1 [78.4 kB] +Get:4 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libexpat1 amd64 2.6.1-2ubuntu0.4 [88.2 kB] +Get:5 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 binutils-common amd64 2.42-4ubuntu2.8 [240 kB] +Get:6 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libsframe1 amd64 2.42-4ubuntu2.8 [15.6 kB] +Get:7 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libbinutils amd64 2.42-4ubuntu2.8 [576 kB] +Get:8 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libctf-nobfd0 amd64 2.42-4ubuntu2.8 [97.9 kB] +Get:9 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libctf0 amd64 2.42-4ubuntu2.8 [94.5 kB] +Get:10 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libgprofng0 amd64 2.42-4ubuntu2.8 [849 kB] +Get:11 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 binutils-x86-64-linux-gnu amd64 2.42-4ubuntu2.8 [2463 kB] +Get:12 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 binutils amd64 2.42-4ubuntu2.8 [18.1 kB] +Get:13 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 gcc-13-base amd64 13.3.0-6ubuntu2~24.04.1 [51.6 kB] +Get:14 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libisl23 amd64 0.26-3build1.1 [680 kB] +Get:15 
http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libmpc3 amd64 1.3.1-1build1.1 [54.6 kB] +Get:16 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 cpp-13-x86-64-linux-gnu amd64 13.3.0-6ubuntu2~24.04.1 [10.7 MB] +Get:17 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 cpp-13 amd64 13.3.0-6ubuntu2~24.04.1 [1042 B] +Get:18 http://mirror.yandex.ru/ubuntu noble/main amd64 cpp-x86-64-linux-gnu amd64 4:13.2.0-7ubuntu1 [5326 B] +Get:19 http://mirror.yandex.ru/ubuntu noble/main amd64 cpp amd64 4:13.2.0-7ubuntu1 [22.4 kB] +Get:20 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libcc1-0 amd64 14.2.0-4ubuntu2~24.04.1 [48.0 kB] +Get:21 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libgomp1 amd64 14.2.0-4ubuntu2~24.04.1 [148 kB] +Get:22 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libitm1 amd64 14.2.0-4ubuntu2~24.04.1 [29.7 kB] +Get:23 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libatomic1 amd64 14.2.0-4ubuntu2~24.04.1 [10.5 kB] +Get:24 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libasan8 amd64 14.2.0-4ubuntu2~24.04.1 [3027 kB] +Get:25 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 liblsan0 amd64 14.2.0-4ubuntu2~24.04.1 [1322 kB] +Get:26 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libtsan2 amd64 14.2.0-4ubuntu2~24.04.1 [2772 kB] +Get:27 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libubsan1 amd64 14.2.0-4ubuntu2~24.04.1 [1184 kB] +Get:28 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libhwasan0 amd64 14.2.0-4ubuntu2~24.04.1 [1641 kB] +Get:29 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libquadmath0 amd64 14.2.0-4ubuntu2~24.04.1 [153 kB] +Get:30 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libgcc-13-dev amd64 13.3.0-6ubuntu2~24.04.1 [2681 kB] +Get:31 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 gcc-13-x86-64-linux-gnu amd64 13.3.0-6ubuntu2~24.04.1 [21.1 MB] +Get:32 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 gcc-13 amd64 
13.3.0-6ubuntu2~24.04.1 [494 kB] +Get:33 http://mirror.yandex.ru/ubuntu noble/main amd64 gcc-x86-64-linux-gnu amd64 4:13.2.0-7ubuntu1 [1212 B] +Get:34 http://mirror.yandex.ru/ubuntu noble/main amd64 gcc amd64 4:13.2.0-7ubuntu1 [5018 B] +Get:35 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libstdc++-13-dev amd64 13.3.0-6ubuntu2~24.04.1 [2420 kB] +Get:36 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 g++-13-x86-64-linux-gnu amd64 13.3.0-6ubuntu2~24.04.1 [12.2 MB] +Get:37 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 g++-13 amd64 13.3.0-6ubuntu2~24.04.1 [16.0 kB] +Get:38 http://mirror.yandex.ru/ubuntu noble/main amd64 g++-x86-64-linux-gnu amd64 4:13.2.0-7ubuntu1 [964 B] +Get:39 http://mirror.yandex.ru/ubuntu noble/main amd64 g++ amd64 4:13.2.0-7ubuntu1 [1100 B] +Get:40 http://mirror.yandex.ru/ubuntu noble/main amd64 make amd64 4.3-4.1build2 [180 kB] +Get:41 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libdpkg-perl all 1.22.6ubuntu6.5 [269 kB] +Get:42 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 bzip2 amd64 1.0.8-5.1build0.1 [34.5 kB] +Get:43 http://mirror.yandex.ru/ubuntu noble/main amd64 lto-disabled-list all 47 [12.4 kB] +Get:44 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 dpkg-dev all 1.22.6ubuntu6.5 [1074 kB] +Get:45 http://mirror.yandex.ru/ubuntu noble/main amd64 build-essential amd64 12.10ubuntu1 [4928 B] +Get:46 http://mirror.yandex.ru/ubuntu noble/main amd64 libfakeroot amd64 1.33-1 [32.4 kB] +Get:47 http://mirror.yandex.ru/ubuntu noble/main amd64 fakeroot amd64 1.33-1 [67.2 kB] +Get:48 http://mirror.yandex.ru/ubuntu noble/main amd64 javascript-common all 11+nmu1 [5936 B] +Get:49 http://mirror.yandex.ru/ubuntu noble/main amd64 libalgorithm-diff-perl all 1.201-1 [41.8 kB] +Get:50 http://mirror.yandex.ru/ubuntu noble/main amd64 libalgorithm-diff-xs-perl amd64 0.04-8build3 [11.2 kB] +Get:51 http://mirror.yandex.ru/ubuntu noble/main amd64 libalgorithm-merge-perl all 0.08-5 [11.4 kB] +Get:52 
http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libexpat1-dev amd64 2.6.1-2ubuntu0.4 [140 kB] +Get:53 http://mirror.yandex.ru/ubuntu noble/main amd64 libfile-fcntllock-perl amd64 0.22-4ubuntu5 [30.7 kB] +Get:54 http://mirror.yandex.ru/ubuntu noble/main amd64 libjs-jquery all 3.6.1+dfsg+~3.5.14-1 [328 kB] +Get:55 http://mirror.yandex.ru/ubuntu noble/main amd64 libjs-underscore all 1.13.4~dfsg+~1.11.4-3 [118 kB] +Get:56 http://mirror.yandex.ru/ubuntu noble/main amd64 libjs-sphinxdoc all 7.2.6-6 [149 kB] +Get:57 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 zlib1g-dev amd64 1:1.3.dfsg-3.1ubuntu2.1 [894 kB] +Get:58 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libpython3.12-dev amd64 3.12.3-1ubuntu0.11 [5683 kB] +Get:59 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 libpython3-dev amd64 3.12.3-0ubuntu2.1 [10.3 kB] +Get:60 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 python3.12-dev amd64 3.12.3-1ubuntu0.11 [498 kB] +Get:61 http://mirror.yandex.ru/ubuntu noble-updates/main amd64 python3-dev amd64 3.12.3-0ubuntu2.1 [26.7 kB] +Get:62 http://mirror.yandex.ru/ubuntu noble/universe amd64 python3-wheel all 0.42.0-2 [53.1 kB] +Get:63 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 python3-pip all 24.0+dfsg-1ubuntu1.3 [1320 kB] +Get:64 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 python3-pip-whl all 24.0+dfsg-1ubuntu1.3 [1707 kB] +Get:65 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 python3-setuptools-whl all 68.1.2-2ubuntu1.2 [716 kB] +Get:66 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 python3.12-venv amd64 3.12.3-1ubuntu0.11 [5680 B] +Get:67 http://mirror.yandex.ru/ubuntu noble-updates/universe amd64 python3-venv amd64 3.12.3-0ubuntu2.1 [1032 B] +Fetched 79.5 MB in 1s (68.1 MB/s) +Extracting templates from packages: 100% +(Reading database ... 106316 files and directories currently installed.) +Preparing to unpack .../gcc-14-base_14.2.0-4ubuntu2~24.04.1_amd64.deb ... 
+Unpacking gcc-14-base:amd64 (14.2.0-4ubuntu2~24.04.1) over (14.2.0-4ubuntu2~24.04) ... +Setting up gcc-14-base:amd64 (14.2.0-4ubuntu2~24.04.1) ... +(Reading database ... 106316 files and directories currently installed.) +Preparing to unpack .../libstdc++6_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libstdc++6:amd64 (14.2.0-4ubuntu2~24.04.1) over (14.2.0-4ubuntu2~24.04) ... +Setting up libstdc++6:amd64 (14.2.0-4ubuntu2~24.04.1) ... +(Reading database ... 106316 files and directories currently installed.) +Preparing to unpack .../libgcc-s1_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libgcc-s1:amd64 (14.2.0-4ubuntu2~24.04.1) over (14.2.0-4ubuntu2~24.04) ... +Setting up libgcc-s1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +(Reading database ... 106316 files and directories currently installed.) +Preparing to unpack .../00-libexpat1_2.6.1-2ubuntu0.4_amd64.deb ... +Unpacking libexpat1:amd64 (2.6.1-2ubuntu0.4) over (2.6.1-2ubuntu0.3) ... +Selecting previously unselected package binutils-common:amd64. +Preparing to unpack .../01-binutils-common_2.42-4ubuntu2.8_amd64.deb ... +Unpacking binutils-common:amd64 (2.42-4ubuntu2.8) ... +Selecting previously unselected package libsframe1:amd64. +Preparing to unpack .../02-libsframe1_2.42-4ubuntu2.8_amd64.deb ... +Unpacking libsframe1:amd64 (2.42-4ubuntu2.8) ... +Selecting previously unselected package libbinutils:amd64. +Preparing to unpack .../03-libbinutils_2.42-4ubuntu2.8_amd64.deb ... +Unpacking libbinutils:amd64 (2.42-4ubuntu2.8) ... +Selecting previously unselected package libctf-nobfd0:amd64. +Preparing to unpack .../04-libctf-nobfd0_2.42-4ubuntu2.8_amd64.deb ... +Unpacking libctf-nobfd0:amd64 (2.42-4ubuntu2.8) ... +Selecting previously unselected package libctf0:amd64. +Preparing to unpack .../05-libctf0_2.42-4ubuntu2.8_amd64.deb ... +Unpacking libctf0:amd64 (2.42-4ubuntu2.8) ... +Selecting previously unselected package libgprofng0:amd64. +Preparing to unpack .../06-libgprofng0_2.42-4ubuntu2.8_amd64.deb ... 
+Unpacking libgprofng0:amd64 (2.42-4ubuntu2.8) ... +Selecting previously unselected package binutils-x86-64-linux-gnu. +Preparing to unpack .../07-binutils-x86-64-linux-gnu_2.42-4ubuntu2.8_amd64.deb ... +Unpacking binutils-x86-64-linux-gnu (2.42-4ubuntu2.8) ... +Selecting previously unselected package binutils. +Preparing to unpack .../08-binutils_2.42-4ubuntu2.8_amd64.deb ... +Unpacking binutils (2.42-4ubuntu2.8) ... +Selecting previously unselected package gcc-13-base:amd64. +Preparing to unpack .../09-gcc-13-base_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking gcc-13-base:amd64 (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package libisl23:amd64. +Preparing to unpack .../10-libisl23_0.26-3build1.1_amd64.deb ... +Unpacking libisl23:amd64 (0.26-3build1.1) ... +Selecting previously unselected package libmpc3:amd64. +Preparing to unpack .../11-libmpc3_1.3.1-1build1.1_amd64.deb ... +Unpacking libmpc3:amd64 (1.3.1-1build1.1) ... +Selecting previously unselected package cpp-13-x86-64-linux-gnu. +Preparing to unpack .../12-cpp-13-x86-64-linux-gnu_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking cpp-13-x86-64-linux-gnu (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package cpp-13. +Preparing to unpack .../13-cpp-13_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking cpp-13 (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package cpp-x86-64-linux-gnu. +Preparing to unpack .../14-cpp-x86-64-linux-gnu_4%3a13.2.0-7ubuntu1_amd64.deb ... +Unpacking cpp-x86-64-linux-gnu (4:13.2.0-7ubuntu1) ... +Selecting previously unselected package cpp. +Preparing to unpack .../15-cpp_4%3a13.2.0-7ubuntu1_amd64.deb ... +Unpacking cpp (4:13.2.0-7ubuntu1) ... +Selecting previously unselected package libcc1-0:amd64. +Preparing to unpack .../16-libcc1-0_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libcc1-0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libgomp1:amd64. 
+Preparing to unpack .../17-libgomp1_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libgomp1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libitm1:amd64. +Preparing to unpack .../18-libitm1_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libitm1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libatomic1:amd64. +Preparing to unpack .../19-libatomic1_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libatomic1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libasan8:amd64. +Preparing to unpack .../20-libasan8_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libasan8:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package liblsan0:amd64. +Preparing to unpack .../21-liblsan0_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking liblsan0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libtsan2:amd64. +Preparing to unpack .../22-libtsan2_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libtsan2:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libubsan1:amd64. +Preparing to unpack .../23-libubsan1_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libubsan1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libhwasan0:amd64. +Preparing to unpack .../24-libhwasan0_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libhwasan0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libquadmath0:amd64. +Preparing to unpack .../25-libquadmath0_14.2.0-4ubuntu2~24.04.1_amd64.deb ... +Unpacking libquadmath0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Selecting previously unselected package libgcc-13-dev:amd64. +Preparing to unpack .../26-libgcc-13-dev_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking libgcc-13-dev:amd64 (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package gcc-13-x86-64-linux-gnu. 
+Preparing to unpack .../27-gcc-13-x86-64-linux-gnu_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking gcc-13-x86-64-linux-gnu (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package gcc-13. +Preparing to unpack .../28-gcc-13_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking gcc-13 (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package gcc-x86-64-linux-gnu. +Preparing to unpack .../29-gcc-x86-64-linux-gnu_4%3a13.2.0-7ubuntu1_amd64.deb ... +Unpacking gcc-x86-64-linux-gnu (4:13.2.0-7ubuntu1) ... +Selecting previously unselected package gcc. +Preparing to unpack .../30-gcc_4%3a13.2.0-7ubuntu1_amd64.deb ... +Unpacking gcc (4:13.2.0-7ubuntu1) ... +Selecting previously unselected package libstdc++-13-dev:amd64. +Preparing to unpack .../31-libstdc++-13-dev_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking libstdc++-13-dev:amd64 (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package g++-13-x86-64-linux-gnu. +Preparing to unpack .../32-g++-13-x86-64-linux-gnu_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking g++-13-x86-64-linux-gnu (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package g++-13. +Preparing to unpack .../33-g++-13_13.3.0-6ubuntu2~24.04.1_amd64.deb ... +Unpacking g++-13 (13.3.0-6ubuntu2~24.04.1) ... +Selecting previously unselected package g++-x86-64-linux-gnu. +Preparing to unpack .../34-g++-x86-64-linux-gnu_4%3a13.2.0-7ubuntu1_amd64.deb ... +Unpacking g++-x86-64-linux-gnu (4:13.2.0-7ubuntu1) ... +Selecting previously unselected package g++. +Preparing to unpack .../35-g++_4%3a13.2.0-7ubuntu1_amd64.deb ... +Unpacking g++ (4:13.2.0-7ubuntu1) ... +Selecting previously unselected package make. +Preparing to unpack .../36-make_4.3-4.1build2_amd64.deb ... +Unpacking make (4.3-4.1build2) ... +Selecting previously unselected package libdpkg-perl. +Preparing to unpack .../37-libdpkg-perl_1.22.6ubuntu6.5_all.deb ... +Unpacking libdpkg-perl (1.22.6ubuntu6.5) ... +Selecting previously unselected package bzip2. 
+Preparing to unpack .../38-bzip2_1.0.8-5.1build0.1_amd64.deb ... +Unpacking bzip2 (1.0.8-5.1build0.1) ... +Selecting previously unselected package lto-disabled-list. +Preparing to unpack .../39-lto-disabled-list_47_all.deb ... +Unpacking lto-disabled-list (47) ... +Selecting previously unselected package dpkg-dev. +Preparing to unpack .../40-dpkg-dev_1.22.6ubuntu6.5_all.deb ... +Unpacking dpkg-dev (1.22.6ubuntu6.5) ... +Selecting previously unselected package build-essential. +Preparing to unpack .../41-build-essential_12.10ubuntu1_amd64.deb ... +Unpacking build-essential (12.10ubuntu1) ... +Selecting previously unselected package libfakeroot:amd64. +Preparing to unpack .../42-libfakeroot_1.33-1_amd64.deb ... +Unpacking libfakeroot:amd64 (1.33-1) ... +Selecting previously unselected package fakeroot. +Preparing to unpack .../43-fakeroot_1.33-1_amd64.deb ... +Unpacking fakeroot (1.33-1) ... +Selecting previously unselected package javascript-common. +Preparing to unpack .../44-javascript-common_11+nmu1_all.deb ... +Unpacking javascript-common (11+nmu1) ... +Selecting previously unselected package libalgorithm-diff-perl. +Preparing to unpack .../45-libalgorithm-diff-perl_1.201-1_all.deb ... +Unpacking libalgorithm-diff-perl (1.201-1) ... +Selecting previously unselected package libalgorithm-diff-xs-perl:amd64. +Preparing to unpack .../46-libalgorithm-diff-xs-perl_0.04-8build3_amd64.deb ... +Unpacking libalgorithm-diff-xs-perl:amd64 (0.04-8build3) ... +Selecting previously unselected package libalgorithm-merge-perl. +Preparing to unpack .../47-libalgorithm-merge-perl_0.08-5_all.deb ... +Unpacking libalgorithm-merge-perl (0.08-5) ... +Selecting previously unselected package libexpat1-dev:amd64. +Preparing to unpack .../48-libexpat1-dev_2.6.1-2ubuntu0.4_amd64.deb ... +Unpacking libexpat1-dev:amd64 (2.6.1-2ubuntu0.4) ... +Selecting previously unselected package libfile-fcntllock-perl. +Preparing to unpack .../49-libfile-fcntllock-perl_0.22-4ubuntu5_amd64.deb ... 
+Unpacking libfile-fcntllock-perl (0.22-4ubuntu5) ... +Selecting previously unselected package libjs-jquery. +Preparing to unpack .../50-libjs-jquery_3.6.1+dfsg+~3.5.14-1_all.deb ... +Unpacking libjs-jquery (3.6.1+dfsg+~3.5.14-1) ... +Selecting previously unselected package libjs-underscore. +Preparing to unpack .../51-libjs-underscore_1.13.4~dfsg+~1.11.4-3_all.deb ... +Unpacking libjs-underscore (1.13.4~dfsg+~1.11.4-3) ... +Selecting previously unselected package libjs-sphinxdoc. +Preparing to unpack .../52-libjs-sphinxdoc_7.2.6-6_all.deb ... +Unpacking libjs-sphinxdoc (7.2.6-6) ... +Selecting previously unselected package zlib1g-dev:amd64. +Preparing to unpack .../53-zlib1g-dev_1%3a1.3.dfsg-3.1ubuntu2.1_amd64.deb ... +Unpacking zlib1g-dev:amd64 (1:1.3.dfsg-3.1ubuntu2.1) ... +Selecting previously unselected package libpython3.12-dev:amd64. +Preparing to unpack .../54-libpython3.12-dev_3.12.3-1ubuntu0.11_amd64.deb ... +Unpacking libpython3.12-dev:amd64 (3.12.3-1ubuntu0.11) ... +Selecting previously unselected package libpython3-dev:amd64. +Preparing to unpack .../55-libpython3-dev_3.12.3-0ubuntu2.1_amd64.deb ... +Unpacking libpython3-dev:amd64 (3.12.3-0ubuntu2.1) ... +Selecting previously unselected package python3.12-dev. +Preparing to unpack .../56-python3.12-dev_3.12.3-1ubuntu0.11_amd64.deb ... +Unpacking python3.12-dev (3.12.3-1ubuntu0.11) ... +Selecting previously unselected package python3-dev. +Preparing to unpack .../57-python3-dev_3.12.3-0ubuntu2.1_amd64.deb ... +Unpacking python3-dev (3.12.3-0ubuntu2.1) ... +Selecting previously unselected package python3-wheel. +Preparing to unpack .../58-python3-wheel_0.42.0-2_all.deb ... +Unpacking python3-wheel (0.42.0-2) ... +Selecting previously unselected package python3-pip. +Preparing to unpack .../59-python3-pip_24.0+dfsg-1ubuntu1.3_all.deb ... +Unpacking python3-pip (24.0+dfsg-1ubuntu1.3) ... +Selecting previously unselected package python3-pip-whl. 
+Preparing to unpack .../60-python3-pip-whl_24.0+dfsg-1ubuntu1.3_all.deb ... +Unpacking python3-pip-whl (24.0+dfsg-1ubuntu1.3) ... +Selecting previously unselected package python3-setuptools-whl. +Preparing to unpack .../61-python3-setuptools-whl_68.1.2-2ubuntu1.2_all.deb ... +Unpacking python3-setuptools-whl (68.1.2-2ubuntu1.2) ... +Selecting previously unselected package python3.12-venv. +Preparing to unpack .../62-python3.12-venv_3.12.3-1ubuntu0.11_amd64.deb ... +Unpacking python3.12-venv (3.12.3-1ubuntu0.11) ... +Selecting previously unselected package python3-venv. +Preparing to unpack .../63-python3-venv_3.12.3-0ubuntu2.1_amd64.deb ... +Unpacking python3-venv (3.12.3-0ubuntu2.1) ... +Setting up libexpat1:amd64 (2.6.1-2ubuntu0.4) ... +Setting up javascript-common (11+nmu1) ... +Setting up python3-setuptools-whl (68.1.2-2ubuntu1.2) ... +Setting up lto-disabled-list (47) ... +Setting up libfile-fcntllock-perl (0.22-4ubuntu5) ... +Setting up python3-pip-whl (24.0+dfsg-1ubuntu1.3) ... +Setting up libalgorithm-diff-perl (1.201-1) ... +Setting up binutils-common:amd64 (2.42-4ubuntu2.8) ... +Setting up libctf-nobfd0:amd64 (2.42-4ubuntu2.8) ... +Setting up libgomp1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up bzip2 (1.0.8-5.1build0.1) ... +Setting up python3-wheel (0.42.0-2) ... +Setting up libsframe1:amd64 (2.42-4ubuntu2.8) ... +Setting up libfakeroot:amd64 (1.33-1) ... +Setting up fakeroot (1.33-1) ... +update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode +Setting up gcc-13-base:amd64 (13.3.0-6ubuntu2~24.04.1) ... +Setting up libexpat1-dev:amd64 (2.6.1-2ubuntu0.4) ... +Setting up make (4.3-4.1build2) ... +Setting up libquadmath0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up libmpc3:amd64 (1.3.1-1build1.1) ... +Setting up libatomic1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up python3-pip (24.0+dfsg-1ubuntu1.3) ... +Setting up libdpkg-perl (1.22.6ubuntu6.5) ... 
+Setting up libubsan1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up zlib1g-dev:amd64 (1:1.3.dfsg-3.1ubuntu2.1) ... +Setting up libhwasan0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up libasan8:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up libtsan2:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up libjs-jquery (3.6.1+dfsg+~3.5.14-1) ... +Setting up libbinutils:amd64 (2.42-4ubuntu2.8) ... +Setting up libisl23:amd64 (0.26-3build1.1) ... +Setting up libalgorithm-diff-xs-perl:amd64 (0.04-8build3) ... +Setting up libcc1-0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up liblsan0:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up libitm1:amd64 (14.2.0-4ubuntu2~24.04.1) ... +Setting up libjs-underscore (1.13.4~dfsg+~1.11.4-3) ... +Setting up libalgorithm-merge-perl (0.08-5) ... +Setting up libctf0:amd64 (2.42-4ubuntu2.8) ... +Setting up python3.12-venv (3.12.3-1ubuntu0.11) ... +Setting up cpp-13-x86-64-linux-gnu (13.3.0-6ubuntu2~24.04.1) ... +Setting up libpython3.12-dev:amd64 (3.12.3-1ubuntu0.11) ... +Setting up libgprofng0:amd64 (2.42-4ubuntu2.8) ... +Setting up python3-venv (3.12.3-0ubuntu2.1) ... +Setting up python3.12-dev (3.12.3-1ubuntu0.11) ... +Setting up libjs-sphinxdoc (7.2.6-6) ... +Setting up libgcc-13-dev:amd64 (13.3.0-6ubuntu2~24.04.1) ... +Setting up libstdc++-13-dev:amd64 (13.3.0-6ubuntu2~24.04.1) ... +Setting up binutils-x86-64-linux-gnu (2.42-4ubuntu2.8) ... +Setting up cpp-x86-64-linux-gnu (4:13.2.0-7ubuntu1) ... +Setting up libpython3-dev:amd64 (3.12.3-0ubuntu2.1) ... +Setting up cpp-13 (13.3.0-6ubuntu2~24.04.1) ... +Setting up gcc-13-x86-64-linux-gnu (13.3.0-6ubuntu2~24.04.1) ... +Setting up binutils (2.42-4ubuntu2.8) ... +Setting up dpkg-dev (1.22.6ubuntu6.5) ... +Setting up python3-dev (3.12.3-0ubuntu2.1) ... +Setting up gcc-13 (13.3.0-6ubuntu2~24.04.1) ... +Setting up cpp (4:13.2.0-7ubuntu1) ... +Setting up g++-13-x86-64-linux-gnu (13.3.0-6ubuntu2~24.04.1) ... +Setting up gcc-x86-64-linux-gnu (4:13.2.0-7ubuntu1) ... 
+Setting up gcc (4:13.2.0-7ubuntu1) ... +Setting up g++-x86-64-linux-gnu (4:13.2.0-7ubuntu1) ... +Setting up g++-13 (13.3.0-6ubuntu2~24.04.1) ... +Setting up g++ (4:13.2.0-7ubuntu1) ... +update-alternatives: using /usr/bin/g++ to provide /usr/bin/c++ (c++) in auto mode +Setting up build-essential (12.10ubuntu1) ... +Processing triggers for man-db (2.12.0-4build2) ... +Processing triggers for libc-bin (2.39-0ubuntu8.7) ... +Scanning processes... +Scanning candidates... +Scanning linux images... + +Running kernel seems to be up-to-date. + +Restarting services... + systemctl restart multipathd.service packagekit.service polkit.service + +Service restarts being deferred: + /etc/needrestart/restart.d/dbus.service + systemctl restart unattended-upgrades.service + +No containers need to be restarted. + +User sessions running outdated binaries: + ubuntu @ session #1: apt[1405] + +No VM guests are running outdated hypervisor (qemu) binaries on this host. +ubuntu@fhmdplo02ifil4mk7odj:~$ sudo apt install -y python3-venv python3-pip +Reading package lists... Done +Building dependency tree... Done +Reading state information... Done +python3-venv is already the newest version (3.12.3-0ubuntu2.1). +python3-pip is already the newest version (24.0+dfsg-1ubuntu1.3). +0 upgraded, 0 newly installed, 0 to remove and 4 not upgraded. 
+ubuntu@fhmdplo02ifil4mk7odj:~$ mkdir -p ~/app && cd ~/app +ubuntu@fhmdplo02ifil4mk7odj:~/app$ cat > app.py <<'PY' +> from flask import Flask +> app = Flask(__name__) +> +> @app.get("/") +> def hello(): +> return "OK: lab04 VM is alive\n" +> PY +ubuntu@fhmdplo02ifil4mk7odj:~/app$ python3 -m venv venv +ubuntu@fhmdplo02ifil4mk7odj:~/app$ source venv/bin/activate +(venv) ubuntu@fhmdplo02ifil4mk7odj:~/app$ pip install flask +Collecting flask + Downloading flask-3.1.3-py3-none-any.whl.metadata (3.2 kB) +Collecting blinker>=1.9.0 (from flask) + Downloading blinker-1.9.0-py3-none-any.whl.metadata (1.6 kB) +Collecting click>=8.1.3 (from flask) + Downloading click-8.3.1-py3-none-any.whl.metadata (2.6 kB) +Collecting itsdangerous>=2.2.0 (from flask) + Downloading itsdangerous-2.2.0-py3-none-any.whl.metadata (1.9 kB) +Collecting jinja2>=3.1.2 (from flask) + Downloading jinja2-3.1.6-py3-none-any.whl.metadata (2.9 kB) +Collecting markupsafe>=2.1.1 (from flask) + Downloading markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl.metadata (2.7 kB) +Collecting werkzeug>=3.1.0 (from flask) + Downloading werkzeug-3.1.6-py3-none-any.whl.metadata (4.0 kB) +Downloading flask-3.1.3-py3-none-any.whl (103 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 103.4/103.4 kB 1.8 MB/s eta 0:00:00 +Downloading blinker-1.9.0-py3-none-any.whl (8.5 kB) +Downloading click-8.3.1-py3-none-any.whl (108 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 108.3/108.3 kB 7.4 MB/s eta 0:00:00 +Downloading itsdangerous-2.2.0-py3-none-any.whl (16 kB) +Downloading jinja2-3.1.6-py3-none-any.whl (134 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 134.9/134.9 kB 8.5 MB/s eta 0:00:00 +Downloading markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl (22 kB) +Downloading werkzeug-3.1.6-py3-none-any.whl (225 kB) + ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 225.2/225.2 kB 11.1 MB/s eta 0:00:00 +Installing collected packages: markupsafe, 
itsdangerous, click, blinker, werkzeug, jinja2, flask +Successfully installed blinker-1.9.0 click-8.3.1 flask-3.1.3 itsdangerous-2.2.0 jinja2-3.1.6 markupsafe-3.0.3 werkzeug-3.1.6 +(venv) ubuntu@fhmdplo02ifil4mk7odj:~/app$ FLASK_APP=app.py flask run --host=0.0.0.0 --port=5000 + * Serving Flask app 'app.py' + * Debug mode: off +WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead. + * Running on all addresses (0.0.0.0) + * Running on http://127.0.0.1:5000 + * Running on http://10.10.0.22:5000 +Press CTRL+C to quit +95.111.204.70 - - [19/Feb/2026 19:33:15] "GET / HTTP/1.1" 200 - +95.111.204.70 - - [19/Feb/2026 19:33:17] "GET /favicon.ico HTTP/1.1" 404 - \ No newline at end of file diff --git a/monitoring/.env.example b/monitoring/.env.example new file mode 100644 index 0000000000..dd8134271f --- /dev/null +++ b/monitoring/.env.example @@ -0,0 +1,11 @@ +# Copy this file to .env before starting the stack. +# PowerShell: Copy-Item .env.example .env +# Bash: cp .env.example .env + +GRAFANA_ADMIN_USER=admin +GRAFANA_ADMIN_PASSWORD=ChangeMe_Lab08! 
+GRAFANA_PORT=3000 +LOKI_PORT=3100 +PROMTAIL_PORT=9080 +PROMETHEUS_PORT=9090 +APP_PORT=8000 diff --git a/monitoring/docker-compose.yml b/monitoring/docker-compose.yml new file mode 100644 index 0000000000..c32068b172 --- /dev/null +++ b/monitoring/docker-compose.yml @@ -0,0 +1,196 @@ +name: devops-monitoring + +services: + loki: + image: grafana/loki:3.0.0 + container_name: devops-loki + command: + - -config.file=/etc/loki/config.yml + ports: + - "${LOKI_PORT:-3100}:3100" + volumes: + - ./loki/config.yml:/etc/loki/config.yml:ro + - loki-data:/loki + networks: + - logging + labels: + logging: "promtail" + app: "devops-loki" + restart: unless-stopped + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3100/ready"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "1.0" + memory: 1G + reservations: + cpus: "0.25" + memory: 256M + + promtail: + image: grafana/promtail:3.0.0 + container_name: devops-promtail + command: + - -config.file=/etc/promtail/config.yml + ports: + - "${PROMTAIL_PORT:-9080}:9080" + volumes: + - ./promtail/config.yml:/etc/promtail/config.yml:ro + - promtail-data:/var/lib/promtail + - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/run/docker.sock:/var/run/docker.sock:ro + networks: + - logging + labels: + logging: "promtail" + app: "devops-promtail" + restart: unless-stopped + depends_on: + loki: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9080/ready"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "0.5" + memory: 512M + reservations: + cpus: "0.10" + memory: 128M + + grafana: + image: grafana/grafana:12.3.1 + container_name: devops-grafana + env_file: + - .env + environment: + GF_SECURITY_ADMIN_USER: "${GRAFANA_ADMIN_USER:-admin}" + GF_SECURITY_ADMIN_PASSWORD: "${GRAFANA_ADMIN_PASSWORD}" + 
GF_AUTH_ANONYMOUS_ENABLED: "false" + GF_AUTH_ANONYMOUS_ORG_ROLE: Viewer + GF_SECURITY_ALLOW_EMBEDDING: "false" + GF_METRICS_ENABLED: "true" + ports: + - "${GRAFANA_PORT:-3000}:3000" + volumes: + - grafana-data:/var/lib/grafana + - ./grafana/provisioning:/etc/grafana/provisioning:ro + - ./grafana/dashboards:/etc/grafana/dashboards:ro + networks: + - logging + labels: + logging: "promtail" + app: "devops-grafana" + restart: unless-stopped + depends_on: + loki: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:3000/api/health"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s + deploy: + resources: + limits: + cpus: "0.5" + memory: 512M + reservations: + cpus: "0.10" + memory: 128M + + prometheus: + image: prom/prometheus:v3.9.0 + container_name: devops-prometheus + command: + - --config.file=/etc/prometheus/prometheus.yml + - --storage.tsdb.path=/prometheus + - --storage.tsdb.retention.time=15d + - --storage.tsdb.retention.size=10GB + - --web.enable-lifecycle + ports: + - "${PROMETHEUS_PORT:-9090}:9090" + volumes: + - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - prometheus-data:/prometheus + networks: + - logging + labels: + logging: "promtail" + app: "devops-prometheus" + restart: unless-stopped + depends_on: + loki: + condition: service_healthy + grafana: + condition: service_healthy + app-python: + condition: service_healthy + healthcheck: + test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://127.0.0.1:9090/-/healthy"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "1.0" + memory: 1G + reservations: + cpus: "0.25" + memory: 256M + + app-python: + build: + context: ../app_python + dockerfile: Dockerfile + image: devops-info-service:lab08 + container_name: devops-python + environment: + HOST: "0.0.0.0" + PORT: "8000" + DEBUG: "false" + ports: + - "${APP_PORT:-8000}:8000" + 
networks: + - logging + labels: + logging: "promtail" + app: "devops-python" + restart: unless-stopped + healthcheck: + test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health')"] + interval: 10s + timeout: 5s + retries: 5 + start_period: 20s + deploy: + resources: + limits: + cpus: "0.5" + memory: 256M + reservations: + cpus: "0.10" + memory: 128M + +networks: + logging: + driver: bridge + +volumes: + loki-data: + grafana-data: + promtail-data: + prometheus-data: diff --git a/monitoring/docs/LAB07.md b/monitoring/docs/LAB07.md new file mode 100644 index 0000000000..30563cd6ff --- /dev/null +++ b/monitoring/docs/LAB07.md @@ -0,0 +1,244 @@ +# Lab07 — Observability & Logging with Loki Stack + +## Summary + +This implementation follows the lab requirements and is adapted to a fully local, free workflow on Windows 11 using WSL2. The repository now contains: + +- a ready-to-run Loki + Promtail + Grafana stack in `monitoring/` +- structured JSON logging in the Python Flask app +- Grafana data source provisioning +- a prebuilt Grafana dashboard definition with four required panels +- an optional Ansible automation role (`roles/monitoring`) and playbook (`playbooks/deploy-monitoring.yml`) + +The final practical validation for this lab was completed locally with Docker Compose in WSL2. 
+ +## Architecture + +```text ++-------------------+ +-------------------+ +| Python App | | Grafana | +| JSON logs stdout | | dashboards/query | ++---------+---------+ +---------+---------+ + | | + | Docker json-file logs | HTTP API / UI + v | ++---------+---------+ | +| Promtail | push logs | +| docker_sd_configs +----------------> | ++---------+---------+ | + | v + | +-----+------+ + +--------------------> | Loki | + | TSDB + FS | + +------------+ +``` + +## Repository Structure + +```text +monitoring/ +├── .env.example +├── docker-compose.yml +├── docs/ +│ ├── LAB07.md +│ ├── LOCAL_VALIDATION_WINDOWS.md +│ └── screenshots/ +├── grafana/ +│ ├── dashboards/ +│ │ └── lab07-logging.json +│ └── provisioning/ +│ ├── dashboards/ +│ │ └── dashboard-provider.yml +│ └── datasources/ +│ └── loki.yml +├── loki/ +│ └── config.yml +└── promtail/ + └── config.yml +``` + +## Configuration Notes + +### Loki + +- single-node deployment +- `store: tsdb` +- `schema: v13` +- filesystem-backed local storage +- 7-day retention (`168h`) +- compactor enabled for retention processing + +### Promtail + +- Docker service discovery via `docker_sd_configs` +- filters only containers with label `logging=promtail` +- extracts container name into the `container` label +- keeps the application label from Docker metadata as `app` +- uses the `docker` pipeline stage so Docker JSON logs are parsed correctly + +### Grafana + +- anonymous access disabled +- admin password loaded from `.env` +- Loki data source provisioned automatically on startup +- dashboard provider loads a ready-made dashboard from disk + +## Application Logging + +The Flask app emits structured JSON logs to stdout using a custom `JSONFormatter`. 
+ +Logged events include: +- service startup +- request start +- request completion with method/path/status/client IP/duration +- 404 events +- unhandled 500 errors with exception details + +Example log line: + +```json +{"timestamp":"2026-03-13T01:11:28.923Z","level":"INFO","logger":"werkzeug","message":"127.0.0.1 - - [13/Mar/2026 01:11:28] \"GET /health HTTP/1.1\" 200 -"} +``` + +## Dashboard + +The provisioned dashboard contains four required panels: + +1. **Recent Logs (all apps)** — logs panel +2. **Request Rate by App** — time series +3. **Error Logs Only** — logs panel filtered by log level +4. **Log Level Distribution (last 5m)** — pie chart grouped by log level + +## Production Readiness Choices + +- resource constraints were added to all services +- Grafana anonymous authentication is disabled +- Grafana admin credentials are externalized through `.env` +- health checks were added for Loki, Promtail, Grafana, and the Python app +- Loki retention is set to 7 days to prevent unbounded log growth + +## Local Validation Workflow + +The stack was validated locally from WSL2 using Docker Compose. + +Commands used: + +```bash +cd monitoring +cp .env.example .env +# edit Grafana admin password in .env + +docker compose up -d --build +docker compose ps +curl http://127.0.0.1:3100/ready +curl http://127.0.0.1:3000 +curl http://127.0.0.1:8000/health +``` + +Traffic generation used for log validation: + +```bash +curl http://127.0.0.1:8000/ +curl http://127.0.0.1:8000/health +curl http://127.0.0.1:8000/ +curl http://127.0.0.1:8000/health +``` + +## Validation Results + +The monitoring stack was validated successfully in a fully local Windows 11 + WSL2 environment using Docker Compose. 
+ +Confirmed results: +- Grafana is reachable on `http://127.0.0.1:3000` +- Loki is reachable on `http://127.0.0.1:3100/ready` and returns `ready` +- the Flask application is reachable on `http://127.0.0.1:8000` +- the application health endpoint returns a successful JSON response +- container logs are ingested into Loki and visible in Grafana Explore + +The following LogQL query was confirmed to work locally: + +```logql +{job="docker", app="devops-python"} +``` + +A broader query also returned logs successfully: + +```logql +{job="docker"} +``` + +The returned logs included Flask access entries such as `GET /health` and `GET /`. + +## Recommended LogQL Queries + +Working queries confirmed locally: + +```logql +{job="docker"} +{job="docker", app="devops-python"} +{job="docker", container="devops-python"} +{job="docker", service_name="devops-python"} +``` + +Additional useful queries: + +```logql +{app="devops-python"} | json | level="INFO" +{app="devops-python"} | json | level="ERROR" +{app="devops-python"} | json | method="GET" +sum by (app) (rate({job="docker"}[1m])) +sum by (level) (count_over_time({job="docker"} | json [5m])) +``` + +## Ansible Bonus Automation + +The repository also includes bonus automation for the existing Lab06 Ansible layout: + +- role: `ansible/roles/monitoring` +- playbook: `ansible/playbooks/deploy-monitoring.yml` + +The role: +- depends on the existing `docker` role +- creates the monitoring directory structure on the VM +- copies the local Python app source to the target host +- templates Loki, Promtail, Grafana, and Docker Compose files +- builds the Python app locally on the target VM with Docker Compose v2 +- verifies Loki, Grafana, the app health endpoint, and the provisioned data source + +## Challenges and Practical Notes + +1. **Windows + WSL2 networking** + - forwarded ports from VirtualBox may work on Windows `127.0.0.1` but require the current Windows host IP from inside WSL +2. 
**Registry/network instability** + - in this environment, network timeouts to Docker infrastructure are possible + - the final successful validation was completed locally with Docker Compose in WSL2 +3. **Promtail lifecycle** + - the lab still requires Promtail, so this solution keeps it + - for long-term production use, plan a future migration path to Grafana Alloy + +## Captured Screenshots + +The following screenshots should be saved under `monitoring/docs/screenshots/`: + +- `grafana-login.png` — Grafana login page +- `grafana-datasource-loki.png` — Loki datasource with successful connection test +- `grafana-explore-all-logs.png` — Grafana Explore with the broad query `{job="docker"}` +- `grafana-explore-app-logs.png` — Grafana Explore with application-specific logs using `{job="docker", app="devops-python"}` +- `grafana-dashboard.png` — provisioned dashboard with visible log/metrics panels +- `app-health-and-stack.png` — terminal validation with `docker compose ps`, Loki ready check, and app health check + +## Evidence Captured Locally + +The following evidence was captured after the stack started successfully: + +- Grafana login page +- Loki datasource connection test +- Grafana Explore with working LogQL queries +- provisioned dashboard +- successful Loki readiness check +- successful application health check +- running Docker Compose services + +## Conclusion + +Lab07 was completed successfully in a local Windows 11 + WSL2 environment. The Grafana + Loki + Promtail stack is running, the Flask application emits structured JSON logs, and those logs are visible in Grafana through Loki. Local validation confirmed successful service startup, log ingestion, and dashboard availability. 
\ No newline at end of file diff --git a/monitoring/docs/LAB08.md b/monitoring/docs/LAB08.md new file mode 100644 index 0000000000..2a1e0dfd2a --- /dev/null +++ b/monitoring/docs/LAB08.md @@ -0,0 +1,522 @@ +# Lab08 — Metrics & Monitoring with Prometheus + +## Summary + +This implementation extends the existing Lab07 observability stack with Prometheus-based metrics collection and a Grafana metrics dashboard. + +The solution is designed for a fully local, free workflow: +- Windows host +- WSL2 for Linux tooling +- optional Vagrant VM for a 100% local deployment target +- no external cloud services required + +The repository now contains: +- Prometheus instrumentation in the Python Flask application +- a `/metrics` endpoint compatible with Prometheus scraping +- a Prometheus service added to `monitoring/docker-compose.yml` +- Grafana provisioning for both Loki and Prometheus data sources +- a prebuilt Grafana metrics dashboard with 9 panels +- updated local validation instructions for Windows + WSL2 + Vagrant +- extended Ansible automation for the full Loki + Prometheus + Grafana stack + +## Architecture + +```text + +-----------------------+ + | Grafana | + | dashboards + Explore | + +-----------+-----------+ + ^ + | + +--------------+---------------+ + | | + | | + metrics | | logs + | | + | | ++---------------------+-----+ +---------+---------+ +| Python Flask App | | Promtail | +| /, /health, /metrics | | Docker SD + labels| ++------------+--------------+ +---------+---------+ + | | + | scrape | push + v v + +------+----------------+ +-------+--------+ + | Prometheus | | Loki | + | pull-based TSDB | | log storage | + +-----------------------+ +----------------+ +``` + +## Repository Structure + +```text +monitoring/ +├── .env.example +├── docker-compose.yml +├── docs/ +│ ├── LAB07.md +│ ├── LAB08.md +│ ├── LOCAL_VALIDATION_WINDOWS.md +│ └── screenshots/ +├── grafana/ +│ ├── dashboards/ +│ │ ├── lab07-logging.json +│ │ └── lab08-metrics.json +│ └── provisioning/ +│ 
├── dashboards/ +│ │ └── dashboard-provider.yml +│ └── datasources/ +│ └── loki.yml +├── loki/ +│ └── config.yml +├── prometheus/ +│ └── prometheus.yml +└── promtail/ + └── config.yml +``` + +## Application Instrumentation + +### Metrics added to the Flask application + +#### 1. HTTP request counter + +Metric: + +```text +http_requests_total{method, endpoint, status_code} +``` + +Purpose: +- total request volume +- request rate calculations +- status code distribution +- error rate calculations + +#### 2. HTTP request duration histogram + +Metric: + +```text +http_request_duration_seconds{method, endpoint} +``` + +Purpose: +- latency measurements +- percentile calculations such as p95 +- heatmap visualizations for duration buckets + +Note: +- `status_code` is intentionally not used on the latency histogram to avoid unnecessary cardinality growth and to keep PromQL queries simpler + +#### 3. In-progress requests gauge + +Metric: + +```text +http_requests_in_progress +``` + +Purpose: +- concurrent request visibility +- simple saturation signal +- active request panel in Grafana + +#### 4. 
Application-specific business metrics + +Metrics: + +```text +devops_info_endpoint_calls_total{endpoint} +devops_info_system_collection_seconds +devops_info_uptime_seconds +``` + +Purpose: +- endpoint usage tracking +- system information collection cost tracking +- current service uptime for dashboards and troubleshooting + +### Instrumentation design choices + +- request metrics are collected in Flask hooks (`before_request`, `after_request`, `teardown_request`) +- endpoint labels are normalized to avoid high-cardinality labels +- unknown routes are grouped as `unmatched` +- `/metrics` scrape traffic is excluded from request-rate business metrics to avoid self-scrape noise in dashboards +- `/metrics` keeps the Prometheus content type and is not overwritten by the JSON response middleware +- the in-progress gauge is decremented in `teardown_request` to avoid leaks in error scenarios + +## Prometheus Configuration + +File: + +```text +monitoring/prometheus/prometheus.yml +``` + +### Scrape settings + +- scrape interval: `15s` +- evaluation interval: `15s` + +### Scrape targets + +1. `prometheus` + - target: `localhost:9090` +2. `app` + - target: `app-python:8000` + - path: `/metrics` +3. `loki` + - target: `loki:3100` +4. `grafana` + - target: `grafana:3000` + +### Retention policy + +Prometheus retention is configured via container flags in `docker-compose.yml`: + +```text +--storage.tsdb.retention.time=15d +--storage.tsdb.retention.size=10GB +``` + +Why this matters: +- avoids unbounded disk usage +- keeps local storage predictable on a laptop or VM +- is fully compatible with a free local deployment model + +## Grafana Dashboard Walkthrough + +Dashboard file: + +```text +monitoring/grafana/dashboards/lab08-metrics.json +``` + +The dashboard includes 9 panels. + +### 1. 
Request Rate by Endpoint + +Query: + +```promql +sum by (endpoint) (rate(http_requests_total{endpoint!="/metrics"}[5m])) +``` + +Purpose: +- visualizes request throughput +- supports the **R** in RED + +### 2. Error Rate (5xx) + +Query: + +```promql +sum(rate(http_requests_total{status_code=~"5.."}[5m])) +``` + +Purpose: +- shows server-side errors per second +- supports the **E** in RED + +### 3. Request Duration p95 by Endpoint + +Query: + +```promql +histogram_quantile(0.95, sum by (le, endpoint) (rate(http_request_duration_seconds_bucket{endpoint!="/metrics"}[5m]))) +``` + +Purpose: +- tracks latency percentiles +- supports the **D** in RED + +### 4. Active Requests + +Query: + +```promql +sum(http_requests_in_progress) +``` + +Purpose: +- highlights current concurrency pressure + +### 5. Application Uptime + +Query: + +```promql +max(devops_info_uptime_seconds) +``` + +Purpose: +- confirms the service is alive and progressing normally + +### 6. Request Duration Heatmap + +Query: + +```promql +sum by (le) (rate(http_request_duration_seconds_bucket{endpoint!="/metrics"}[5m])) +``` + +Purpose: +- visualizes latency bucket distribution over time + +### 7. Status Code Distribution + +Query: + +```promql +sum by (status_code) (rate(http_requests_total[5m])) +``` + +Purpose: +- shows how traffic is split across response classes and codes + +### 8. App Target Uptime + +Query: + +```promql +up{job="app"} +``` + +Purpose: +- shows whether Prometheus is successfully scraping the application + +### 9. 
System Info Collection p95 + +Query: + +```promql +histogram_quantile(0.95, sum by (le) (rate(devops_info_system_collection_seconds_bucket[5m]))) +``` + +Purpose: +- tracks the internal cost of the service-specific system information collection function + +## PromQL Examples + +### RED method queries + +```promql +sum by (endpoint) (rate(http_requests_total{endpoint!="/metrics"}[5m])) +sum(rate(http_requests_total{status_code=~"5.."}[5m])) +histogram_quantile(0.95, sum by (le, endpoint) (rate(http_request_duration_seconds_bucket{endpoint!="/metrics"}[5m]))) +``` + +### Additional useful queries + +```promql +sum by (status_code) (rate(http_requests_total[5m])) +sum(http_requests_in_progress) +up{job="app"} +max(devops_info_uptime_seconds) +sum by (job) (up) +histogram_quantile(0.95, sum by (le) (rate(devops_info_system_collection_seconds_bucket[5m]))) +``` + +## Production Setup + +### Health checks + +Health checks are configured for: +- Loki +- Promtail +- Grafana +- Prometheus +- Python application + +### Resource limits + +Configured Docker Compose limits: + +| Service | CPU | Memory | +|--------|-----|--------| +| Loki | 1.0 | 1G | +| Promtail | 0.5 | 512M | +| Grafana | 0.5 | 512M | +| Prometheus | 1.0 | 1G | +| App | 0.5 | 256M | + +### Persistence + +Named volumes: +- `loki-data` +- `promtail-data` +- `grafana-data` +- `prometheus-data` + +### Security and operational choices + +- Grafana anonymous access is disabled +- Grafana credentials are externalized via `.env` +- Grafana metrics are explicitly enabled for Prometheus scraping +- the app health check uses Python stdlib instead of `curl` to keep the slim image lightweight + +## Local Run Commands + +### Docker Compose from WSL2 + +```bash +cd monitoring +cp .env.example .env +# edit GRAFANA_ADMIN_PASSWORD in .env + +docker compose up -d --build +docker compose ps +``` + +### Generate traffic + +```bash +for i in {1..25}; do curl -s http://127.0.0.1:8000/ > /dev/null; done +for i in {1..25}; do curl -s 
http://127.0.0.1:8000/health > /dev/null; done +curl -s http://127.0.0.1:8000/does-not-exist > /dev/null +curl -s http://127.0.0.1:8000/metrics | head -40 +``` + +### Validate Prometheus + +```bash +curl http://127.0.0.1:9090/-/healthy +curl http://127.0.0.1:9090/api/v1/targets | python -m json.tool +``` + +Open in browser: +- `http://127.0.0.1:9090/targets` +- `http://127.0.0.1:3000` + +### Useful Grafana checks + +- Confirm both data sources are provisioned: Loki and Prometheus +- Open dashboard: `Lab08 - Prometheus Metrics Overview` +- Run ad hoc PromQL query: `up` + +## Testing Results + +### Static validation completed in the sandbox + +The following checks were completed while preparing this repository update: +- Python syntax validation for the Flask application +- JSON validation for the Grafana dashboard file +- YAML validation for Docker Compose, Prometheus, Loki, and Promtail configuration files +- local application runtime validation for `/`, `/health`, `/metrics`, and request counters + +Captured application-side evidence is stored in: + +```text +monitoring/docs/evidence/ +``` + +Files included there: +- `app-root.json` +- `app-health.json` +- `app-metrics-sample.txt` +- `app-metrics-headers.txt` + +### Full stack runtime validation to run locally + +Because the sandbox used to prepare this patch does not provide a Docker daemon, UI screenshots and multi-container runtime proofs must be captured locally with the provided commands. 
+ +Recommended screenshots to capture locally: +- all `lab08*.png` files in `monitoring/docs/screenshots` + + +## Metrics vs Logs + +### Use metrics when you need +- rates and trends over time +- low-cost aggregation +- alert thresholds +- latency and saturation views + +### Use logs when you need +- exact event details +- request-specific context +- raw error messages and stack traces +- forensic troubleshooting + +### Combined observability model in this repository + +- **Loki + Promtail** answer: *what happened?* +- **Prometheus** answers: *how much, how often, how fast?* +- **Grafana** provides one UI for both views + +## Challenges and Solutions + +### 1. `/metrics` vs JSON middleware + +Problem: +- the existing `after_request` hook forced `application/json` on every response + +Solution: +- the hook now preserves the Prometheus metrics content type and only rewrites JSON responses + +### 2. Label cardinality control + +Problem: +- raw request paths can create unbounded label values + +Solution: +- metrics use normalized endpoints and group unknown routes as `unmatched` + +### 3. In-progress gauge correctness + +Problem: +- a gauge can leak if it is incremented but not decremented during exceptions + +Solution: +- the decrement is handled in `teardown_request` + +### 4. Windows + WSL2 + Vagrant networking + +Problem: +- forwarded ports can behave differently in Windows and WSL2 + +Solution: +- the local validation guide includes both localhost and host-IP guidance + +### 5. Free local deployment requirement + +Problem: +- the lab should not depend on paid services or third-party cloud platforms + +Solution: +- the full stack runs locally with Docker Compose and can also run on the Vagrant VM via Ansible + +## Bonus — Ansible Automation + +The existing `ansible/roles/monitoring` role was extended to cover Lab08 as well. 
+ +Implemented bonus scope: +- Prometheus variables added to role defaults +- Prometheus config generated from a Jinja2 template +- Docker Compose template updated to include Prometheus +- Grafana provisioning now includes both Loki and Prometheus data sources +- the Lab08 dashboard JSON is rendered by Ansible automatically +- the deployment task verifies Prometheus health and both data sources + +Playbook: + +```bash +cd ansible +ansible-galaxy collection install -r requirements.yml +ansible-playbook -i inventory/hosts.ini playbooks/deploy-monitoring.yml +``` + +## Evidence Checklist + +After you run the stack locally, verify the following: + +- `/metrics` endpoint returns Prometheus-formatted metrics +- Prometheus `/targets` shows all targets as `UP` +- Grafana has both Loki and Prometheus data sources +- the Lab08 dashboard shows live data in all panels +- `docker compose ps` shows healthy containers +- dashboards survive `docker compose down` / `docker compose up -d` + +All of these checklist items can be seen in the screenshots in: +- all `lab08*.png` files in `monitoring/docs/screenshots` diff --git a/monitoring/docs/LOCAL_VALIDATION_WINDOWS.md b/monitoring/docs/LOCAL_VALIDATION_WINDOWS.md new file mode 100644 index 0000000000..f66c637525 --- /dev/null +++ b/monitoring/docs/LOCAL_VALIDATION_WINDOWS.md @@ -0,0 +1,119 @@ +# Local Validation on Windows 11 (WSL2 + Vagrant-friendly) + +This guide is tailored for a fully local and free setup: +- Windows host +- WSL2 for Linux commands +- VS Code connected to WSL2 +- optional Vagrant VM for a dedicated Linux target +- no external cloud services required + +## Ports used by the observability stack + +- App: `8000` +- Grafana: `3000` +- Loki: `3100` +- Promtail: `9080` +- Prometheus: `9090` + +## Option A — Run directly from WSL2 + +### 1. Prepare environment file + +```bash +cd monitoring +cp .env.example .env +``` + +Set a real Grafana password in `.env`. + +### 2. 
Start the stack + +```bash +docker compose up -d --build +docker compose ps +``` + +### 3. Verify endpoints + +```bash +curl http://127.0.0.1:8000/health +curl http://127.0.0.1:8000/metrics | head -40 +curl http://127.0.0.1:3100/ready +curl http://127.0.0.1:9080/targets +curl http://127.0.0.1:9090/-/healthy +``` + +### 4. Open browser pages + +- Grafana: `http://127.0.0.1:3000` +- Prometheus targets: `http://127.0.0.1:9090/targets` +- Prometheus graph: `http://127.0.0.1:9090/graph` + +### 5. Generate traffic + +```bash +for i in {1..30}; do curl -s http://127.0.0.1:8000/ > /dev/null; done +for i in {1..30}; do curl -s http://127.0.0.1:8000/health > /dev/null; done +curl -s http://127.0.0.1:8000/does-not-exist > /dev/null +``` + +### 6. Prometheus queries to test + +```promql +up +sum by (endpoint) (rate(http_requests_total{endpoint!="/metrics"}[5m])) +sum(rate(http_requests_total{status_code=~"5.."}[5m])) +histogram_quantile(0.95, sum by (le, endpoint) (rate(http_request_duration_seconds_bucket{endpoint!="/metrics"}[5m]))) +sum by (status_code) (rate(http_requests_total[5m])) +``` + +### 7. Grafana checks + +- log in with the credentials from `.env` +- confirm both data sources exist: Loki and Prometheus +- open the `Lab08 - Prometheus Metrics Overview` dashboard +- verify the panels update after traffic generation + +## Option B — Run on the Vagrant VM + +### 1. Reload Vagrant to apply port forwarding + +```powershell +vagrant reload +vagrant status +vagrant port +``` + +### 2. Run Ansible deployment from WSL2 + +```bash +cd ansible +ansible-galaxy collection install -r requirements.yml +ansible-playbook -i inventory/hosts.ini playbooks/deploy-monitoring.yml +``` + +### 3. Verify from the Windows host or WSL2 + +```bash +curl http://127.0.0.1:8000/health +curl http://127.0.0.1:9090/-/healthy +``` + +If WSL2 cannot reach forwarded ports through `127.0.0.1`, use the current Windows host IP. 
+ +## PowerShell traffic generation + +```powershell +1..30 | ForEach-Object { Invoke-WebRequest -UseBasicParsing http://127.0.0.1:8000/ | Out-Null } +1..30 | ForEach-Object { Invoke-WebRequest -UseBasicParsing http://127.0.0.1:8000/health | Out-Null } +try { Invoke-WebRequest -UseBasicParsing http://127.0.0.1:8000/does-not-exist | Out-Null } catch {} +``` + +## What to capture for proof + +- `/metrics` endpoint output in the browser or terminal +- Prometheus `/targets` page with all targets `UP` +- Prometheus query page with `up` +- Grafana Prometheus data source test +- Grafana dashboard with all panels populated +- `docker compose ps` showing healthy services diff --git a/monitoring/docs/evidence/README.md b/monitoring/docs/evidence/README.md new file mode 100644 index 0000000000..96a3a3a14e --- /dev/null +++ b/monitoring/docs/evidence/README.md @@ -0,0 +1,11 @@ +This directory contains lightweight validation artifacts captured from the sandbox for the instrumented Flask application itself. + +Included files: +- `app-root.json` — sample response from `GET /` +- `app-health.json` — sample response from `GET /health` +- `app-metrics-sample.txt` — sample Prometheus metrics output from `GET /metrics` +- `app-metrics-headers.txt` — response headers for `/metrics` confirming the Prometheus content type + +These files validate the application-side instrumentation. + +Full multi-container validation for Prometheus, Grafana, and Loki must still be executed locally because the sandbox does not provide a Docker daemon. 
diff --git a/monitoring/docs/evidence/app-health.json b/monitoring/docs/evidence/app-health.json new file mode 100644 index 0000000000..09e5ff36e3 --- /dev/null +++ b/monitoring/docs/evidence/app-health.json @@ -0,0 +1,5 @@ +{ + "status": "healthy", + "timestamp": "2026-03-19T13:05:23.900Z", + "uptime_seconds": 9 +} diff --git a/monitoring/docs/evidence/app-metrics-headers.txt b/monitoring/docs/evidence/app-metrics-headers.txt new file mode 100644 index 0000000000..a9447d71ae --- /dev/null +++ b/monitoring/docs/evidence/app-metrics-headers.txt @@ -0,0 +1,127 @@ +HTTP/1.1 200 OK +Server: Werkzeug/3.1.6 Python/3.13.5 +Date: Thu, 19 Mar 2026 13:05:16 GMT +Content-Type: text/plain; version=1.0.0; charset=utf-8 +Content-Length: 9658 +Connection: close + +# HELP python_gc_objects_collected_total Objects collected during gc +# TYPE python_gc_objects_collected_total counter +python_gc_objects_collected_total{generation="0"} 523.0 +python_gc_objects_collected_total{generation="1"} 0.0 +python_gc_objects_collected_total{generation="2"} 0.0 +# HELP python_gc_objects_uncollectable_total Uncollectable objects found during GC +# TYPE python_gc_objects_uncollectable_total counter +python_gc_objects_uncollectable_total{generation="0"} 0.0 +python_gc_objects_uncollectable_total{generation="1"} 0.0 +python_gc_objects_uncollectable_total{generation="2"} 0.0 +# HELP python_gc_collections_total Number of times this generation was collected +# TYPE python_gc_collections_total counter +python_gc_collections_total{generation="0"} 26.0 +python_gc_collections_total{generation="1"} 2.0 +python_gc_collections_total{generation="2"} 0.0 +# HELP python_info Python platform information +# TYPE python_info gauge +python_info{implementation="CPython",major="3",minor="13",patchlevel="5",version="3.13.5"} 1.0 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. 
+# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.35983104e+08 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 5.4464512e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.77392551375e+09 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 0.26 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 6.0 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP http_requests_total Total HTTP requests processed by the service. +# TYPE http_requests_total counter +http_requests_total{endpoint="/",method="GET",status_code="200"} 1.0 +http_requests_total{endpoint="/health",method="GET",status_code="200"} 1.0 +http_requests_total{endpoint="unmatched",method="GET",status_code="404"} 1.0 +# HELP http_requests_created Total HTTP requests processed by the service. +# TYPE http_requests_created gauge +http_requests_created{endpoint="/",method="GET",status_code="200"} 1.773925515830881e+09 +http_requests_created{endpoint="/health",method="GET",status_code="200"} 1.7739255158618011e+09 +http_requests_created{endpoint="unmatched",method="GET",status_code="404"} 1.773925515895349e+09 +# HELP http_request_duration_seconds HTTP request duration in seconds. 
+# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{endpoint="/",le="0.005",method="GET"} 0.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.01",method="GET"} 0.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.025",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.05",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.075",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.1",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.25",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.75",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="1.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="2.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="5.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="7.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="10.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="+Inf",method="GET"} 1.0 +http_request_duration_seconds_count{endpoint="/",method="GET"} 1.0 +http_request_duration_seconds_sum{endpoint="/",method="GET"} 0.011629433000052813 +http_request_duration_seconds_bucket{endpoint="/health",le="0.005",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.01",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.025",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.05",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.075",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.1",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.25",method="GET"} 1.0 
+http_request_duration_seconds_bucket{endpoint="/health",le="0.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.75",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="1.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="2.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="5.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="7.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="10.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="+Inf",method="GET"} 1.0 +http_request_duration_seconds_count{endpoint="/health",method="GET"} 1.0 +http_request_duration_seconds_sum{endpoint="/health",method="GET"} 0.00015099899997039756 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.005",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.01",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.025",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.05",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.075",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.1",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.25",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.75",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="1.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="2.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="5.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="7.5",method="GET"} 1.0 
+http_request_duration_seconds_bucket{endpoint="unmatched",le="10.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="+Inf",method="GET"} 1.0 +http_request_duration_seconds_count{endpoint="unmatched",method="GET"} 1.0 +http_request_duration_seconds_sum{endpoint="unmatched",method="GET"} 0.00043901000003643276 +# HELP http_request_duration_seconds_created HTTP request duration in seconds. +# TYPE http_request_duration_seconds_created gauge +http_request_duration_seconds_created{endpoint="/",method="GET"} 1.773925515830906e+09 +http_request_duration_seconds_created{endpoint="/health",method="GET"} 1.7739255158618293e+09 +http_request_duration_seconds_created{endpoint="unmatched",method="GET"} 1.773925515895371e+09 +# HELP http_requests_in_progress HTTP requests currently being processed. +# TYPE http_requests_in_progress gauge +http_requests_in_progress 0.0 +# HELP devops_info_endpoint_calls_total Total endpoint calls for the DevOps info service. +# TYPE devops_info_endpoint_calls_total counter +devops_info_endpoint_calls_total{endpoint="/"} 1.0 +devops_info_endpoint_calls_total{endpoint="/health"} 1.0 +devops_info_endpoint_calls_total{endpoint="unmatched"} 1.0 +# HELP devops_info_endpoint_calls_created Total endpoint calls for the DevOps info service. +# TYPE devops_info_endpoint_calls_created gauge +devops_info_endpoint_calls_created{endpoint="/"} 1.7739255158309686e+09 +devops_info_endpoint_calls_created{endpoint="/health"} 1.7739255158618608e+09 +devops_info_endpoint_calls_created{endpoint="unmatched"} 1.773925515895407e+09 +# HELP devops_info_system_collection_seconds Time spent collecting system information. 
+# TYPE devops_info_system_collection_seconds histogram +devops_info_system_collection_seconds_bucket{le="0.005"} 0.0 diff --git a/monitoring/docs/evidence/app-metrics-sample.txt b/monitoring/docs/evidence/app-metrics-sample.txt new file mode 100644 index 0000000000..5b6436e2cf --- /dev/null +++ b/monitoring/docs/evidence/app-metrics-sample.txt @@ -0,0 +1,142 @@ +# HELP python_gc_objects_collected_total Objects collected during gc +# TYPE python_gc_objects_collected_total counter +python_gc_objects_collected_total{generation="0"} 523.0 +python_gc_objects_collected_total{generation="1"} 0.0 +python_gc_objects_collected_total{generation="2"} 0.0 +# HELP python_gc_objects_uncollectable_total Uncollectable objects found during GC +# TYPE python_gc_objects_uncollectable_total counter +python_gc_objects_uncollectable_total{generation="0"} 0.0 +python_gc_objects_uncollectable_total{generation="1"} 0.0 +python_gc_objects_uncollectable_total{generation="2"} 0.0 +# HELP python_gc_collections_total Number of times this generation was collected +# TYPE python_gc_collections_total counter +python_gc_collections_total{generation="0"} 26.0 +python_gc_collections_total{generation="1"} 2.0 +python_gc_collections_total{generation="2"} 0.0 +# HELP python_info Python platform information +# TYPE python_info gauge +python_info{implementation="CPython",major="3",minor="13",patchlevel="5",version="3.13.5"} 1.0 +# HELP process_virtual_memory_bytes Virtual memory size in bytes. +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 1.35983104e+08 +# HELP process_resident_memory_bytes Resident memory size in bytes. +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 5.4464512e+07 +# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1.77392551375e+09 +# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. 
+# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 0.26 +# HELP process_open_fds Number of open file descriptors. +# TYPE process_open_fds gauge +process_open_fds 6.0 +# HELP process_max_fds Maximum number of open file descriptors. +# TYPE process_max_fds gauge +process_max_fds 1.048576e+06 +# HELP http_requests_total Total HTTP requests processed by the service. +# TYPE http_requests_total counter +http_requests_total{endpoint="/",method="GET",status_code="200"} 1.0 +http_requests_total{endpoint="/health",method="GET",status_code="200"} 1.0 +http_requests_total{endpoint="unmatched",method="GET",status_code="404"} 1.0 +# HELP http_requests_created Total HTTP requests processed by the service. +# TYPE http_requests_created gauge +http_requests_created{endpoint="/",method="GET",status_code="200"} 1.773925515830881e+09 +http_requests_created{endpoint="/health",method="GET",status_code="200"} 1.7739255158618011e+09 +http_requests_created{endpoint="unmatched",method="GET",status_code="404"} 1.773925515895349e+09 +# HELP http_request_duration_seconds HTTP request duration in seconds. 
+# TYPE http_request_duration_seconds histogram +http_request_duration_seconds_bucket{endpoint="/",le="0.005",method="GET"} 0.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.01",method="GET"} 0.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.025",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.05",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.075",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.1",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.25",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="0.75",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="1.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="2.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="5.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="7.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="10.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/",le="+Inf",method="GET"} 1.0 +http_request_duration_seconds_count{endpoint="/",method="GET"} 1.0 +http_request_duration_seconds_sum{endpoint="/",method="GET"} 0.011629433000052813 +http_request_duration_seconds_bucket{endpoint="/health",le="0.005",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.01",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.025",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.05",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.075",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.1",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.25",method="GET"} 1.0 
+http_request_duration_seconds_bucket{endpoint="/health",le="0.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="0.75",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="1.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="2.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="5.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="7.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="10.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="/health",le="+Inf",method="GET"} 1.0 +http_request_duration_seconds_count{endpoint="/health",method="GET"} 1.0 +http_request_duration_seconds_sum{endpoint="/health",method="GET"} 0.00015099899997039756 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.005",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.01",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.025",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.05",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.075",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.1",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.25",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="0.75",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="1.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="2.5",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="5.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="7.5",method="GET"} 1.0 
+http_request_duration_seconds_bucket{endpoint="unmatched",le="10.0",method="GET"} 1.0 +http_request_duration_seconds_bucket{endpoint="unmatched",le="+Inf",method="GET"} 1.0 +http_request_duration_seconds_count{endpoint="unmatched",method="GET"} 1.0 +http_request_duration_seconds_sum{endpoint="unmatched",method="GET"} 0.00043901000003643276 +# HELP http_request_duration_seconds_created HTTP request duration in seconds. +# TYPE http_request_duration_seconds_created gauge +http_request_duration_seconds_created{endpoint="/",method="GET"} 1.773925515830906e+09 +http_request_duration_seconds_created{endpoint="/health",method="GET"} 1.7739255158618293e+09 +http_request_duration_seconds_created{endpoint="unmatched",method="GET"} 1.773925515895371e+09 +# HELP http_requests_in_progress HTTP requests currently being processed. +# TYPE http_requests_in_progress gauge +http_requests_in_progress 0.0 +# HELP devops_info_endpoint_calls_total Total endpoint calls for the DevOps info service. +# TYPE devops_info_endpoint_calls_total counter +devops_info_endpoint_calls_total{endpoint="/"} 1.0 +devops_info_endpoint_calls_total{endpoint="/health"} 1.0 +devops_info_endpoint_calls_total{endpoint="unmatched"} 1.0 +# HELP devops_info_endpoint_calls_created Total endpoint calls for the DevOps info service. +# TYPE devops_info_endpoint_calls_created gauge +devops_info_endpoint_calls_created{endpoint="/"} 1.7739255158309686e+09 +devops_info_endpoint_calls_created{endpoint="/health"} 1.7739255158618608e+09 +devops_info_endpoint_calls_created{endpoint="unmatched"} 1.773925515895407e+09 +# HELP devops_info_system_collection_seconds Time spent collecting system information. 
+# TYPE devops_info_system_collection_seconds histogram +devops_info_system_collection_seconds_bucket{le="0.005"} 0.0 +devops_info_system_collection_seconds_bucket{le="0.01"} 0.0 +devops_info_system_collection_seconds_bucket{le="0.025"} 1.0 +devops_info_system_collection_seconds_bucket{le="0.05"} 1.0 +devops_info_system_collection_seconds_bucket{le="0.075"} 1.0 +devops_info_system_collection_seconds_bucket{le="0.1"} 1.0 +devops_info_system_collection_seconds_bucket{le="0.25"} 1.0 +devops_info_system_collection_seconds_bucket{le="0.5"} 1.0 +devops_info_system_collection_seconds_bucket{le="0.75"} 1.0 +devops_info_system_collection_seconds_bucket{le="1.0"} 1.0 +devops_info_system_collection_seconds_bucket{le="2.5"} 1.0 +devops_info_system_collection_seconds_bucket{le="5.0"} 1.0 +devops_info_system_collection_seconds_bucket{le="7.5"} 1.0 +devops_info_system_collection_seconds_bucket{le="10.0"} 1.0 +devops_info_system_collection_seconds_bucket{le="+Inf"} 1.0 +devops_info_system_collection_seconds_count 1.0 +devops_info_system_collection_seconds_sum 0.011327940000001036 +# HELP devops_info_system_collection_seconds_created Time spent collecting system information. +# TYPE devops_info_system_collection_seconds_created gauge +devops_info_system_collection_seconds_created 1.7739255140238588e+09 +# HELP devops_info_uptime_seconds Current service uptime in seconds. 
+# TYPE devops_info_uptime_seconds gauge +devops_info_uptime_seconds 1.0 diff --git a/monitoring/docs/evidence/app-root.json b/monitoring/docs/evidence/app-root.json new file mode 100644 index 0000000000..005e35267b --- /dev/null +++ b/monitoring/docs/evidence/app-root.json @@ -0,0 +1,45 @@ +{ + "endpoints": [ + { + "description": "Service information", + "method": "GET", + "path": "/" + }, + { + "description": "Health check", + "method": "GET", + "path": "/health" + }, + { + "description": "Prometheus metrics", + "method": "GET", + "path": "/metrics" + } + ], + "request": { + "client_ip": "127.0.0.1", + "method": "GET", + "path": "/", + "user_agent": "curl/8.10.1" + }, + "runtime": { + "current_time": "2026-03-19T13:05:22.568Z", + "timezone": "UTC", + "uptime_human": "0 hours, 0 minutes", + "uptime_seconds": 8 + }, + "service": { + "description": "DevOps course info service", + "framework": "Flask", + "name": "devops-info-service", + "version": "1.1.0" + }, + "system": { + "architecture": "x86_64", + "cpu_count": 56, + "hostname": "17ceccfebd47", + "platform": "Linux", + "platform_version": "Linux-4.4.0-x86_64-with-glibc2.41", + "python_version": "3.13.5" + } +} diff --git a/monitoring/docs/screenshots/README.md b/monitoring/docs/screenshots/README.md new file mode 100644 index 0000000000..8b6572b859 --- /dev/null +++ b/monitoring/docs/screenshots/README.md @@ -0,0 +1,10 @@ +Store your local proof screenshots here after you run the stack: + +- metrics-endpoint.png +- prometheus-targets.png +- prometheus-query-up.png +- grafana-prometheus-datasource.png +- grafana-lab08-dashboard.png +- docker-compose-ps-healthy.png +- grafana-explore-app-logs.png +- grafana-dashboard.png diff --git a/monitoring/docs/screenshots/app-health-and-stack.png b/monitoring/docs/screenshots/app-health-and-stack.png new file mode 100644 index 0000000000..32b9216f89 Binary files /dev/null and b/monitoring/docs/screenshots/app-health-and-stack.png differ diff --git 
a/monitoring/docs/screenshots/grafana-dashboard.png b/monitoring/docs/screenshots/grafana-dashboard.png new file mode 100644 index 0000000000..3d2537b045 Binary files /dev/null and b/monitoring/docs/screenshots/grafana-dashboard.png differ diff --git a/monitoring/docs/screenshots/grafana-datasource-loki.png b/monitoring/docs/screenshots/grafana-datasource-loki.png new file mode 100644 index 0000000000..afbb8afc4e Binary files /dev/null and b/monitoring/docs/screenshots/grafana-datasource-loki.png differ diff --git a/monitoring/docs/screenshots/grafana-explore-all-logs.png b/monitoring/docs/screenshots/grafana-explore-all-logs.png new file mode 100644 index 0000000000..1c163543fe Binary files /dev/null and b/monitoring/docs/screenshots/grafana-explore-all-logs.png differ diff --git a/monitoring/docs/screenshots/grafana-explore-app-logs2.png b/monitoring/docs/screenshots/grafana-explore-app-logs2.png new file mode 100644 index 0000000000..f6dd6243fd Binary files /dev/null and b/monitoring/docs/screenshots/grafana-explore-app-logs2.png differ diff --git a/monitoring/docs/screenshots/grafana-login.png b/monitoring/docs/screenshots/grafana-login.png new file mode 100644 index 0000000000..f277b7a146 Binary files /dev/null and b/monitoring/docs/screenshots/grafana-login.png differ diff --git a/monitoring/docs/screenshots/lab08-datasources.png b/monitoring/docs/screenshots/lab08-datasources.png new file mode 100644 index 0000000000..c607a53a92 Binary files /dev/null and b/monitoring/docs/screenshots/lab08-datasources.png differ diff --git a/monitoring/docs/screenshots/lab08-devops_info_uptime_seconds.png b/monitoring/docs/screenshots/lab08-devops_info_uptime_seconds.png new file mode 100644 index 0000000000..eaf983b8a8 Binary files /dev/null and b/monitoring/docs/screenshots/lab08-devops_info_uptime_seconds.png differ diff --git a/monitoring/docs/screenshots/lab08-docker-compose-ps.png b/monitoring/docs/screenshots/lab08-docker-compose-ps.png new file mode 100644 index 
0000000000..8e518c34cd Binary files /dev/null and b/monitoring/docs/screenshots/lab08-docker-compose-ps.png differ diff --git a/monitoring/docs/screenshots/lab08-grafana-dasshboards.png b/monitoring/docs/screenshots/lab08-grafana-dasshboards.png new file mode 100644 index 0000000000..30740e612b Binary files /dev/null and b/monitoring/docs/screenshots/lab08-grafana-dasshboards.png differ diff --git a/monitoring/docs/screenshots/lab08-grafana-logs.png b/monitoring/docs/screenshots/lab08-grafana-logs.png new file mode 100644 index 0000000000..423709452f Binary files /dev/null and b/monitoring/docs/screenshots/lab08-grafana-logs.png differ diff --git a/monitoring/docs/screenshots/lab08-http_requests_total.png b/monitoring/docs/screenshots/lab08-http_requests_total.png new file mode 100644 index 0000000000..9771656300 Binary files /dev/null and b/monitoring/docs/screenshots/lab08-http_requests_total.png differ diff --git a/monitoring/docs/screenshots/lab08-prometheus-targets-up.png b/monitoring/docs/screenshots/lab08-prometheus-targets-up.png new file mode 100644 index 0000000000..88ad4c6365 Binary files /dev/null and b/monitoring/docs/screenshots/lab08-prometheus-targets-up.png differ diff --git a/monitoring/docs/screenshots/lab08-rate(http_requests_total[5m])-query.png b/monitoring/docs/screenshots/lab08-rate(http_requests_total[5m])-query.png new file mode 100644 index 0000000000..1ecfc5ad6e Binary files /dev/null and b/monitoring/docs/screenshots/lab08-rate(http_requests_total[5m])-query.png differ diff --git a/monitoring/docs/screenshots/lab08-up.png b/monitoring/docs/screenshots/lab08-up.png new file mode 100644 index 0000000000..8e1ca8d3fb Binary files /dev/null and b/monitoring/docs/screenshots/lab08-up.png differ diff --git a/monitoring/grafana/dashboards/lab07-logging.json b/monitoring/grafana/dashboards/lab07-logging.json new file mode 100644 index 0000000000..e2b3426b9d --- /dev/null +++ b/monitoring/grafana/dashboards/lab07-logging.json @@ -0,0 +1,163 @@ +{ 
+ "id": null, + "uid": "lab07-logging-overview", + "title": "Lab07 - Loki Logging Overview", + "tags": [ + "lab07", + "loki", + "logging" + ], + "timezone": "browser", + "schemaVersion": 39, + "version": 1, + "refresh": "10s", + "time": { + "from": "now-30m", + "to": "now" + }, + "panels": [ + { + "id": 1, + "type": "logs", + "title": "Recent Logs (all apps)", + "datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 0 + }, + "targets": [ + { + "refId": "A", + "expr": "{app=~\"devops-.*\"}", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "options": { + "showTime": true, + "showLabels": true, + "sortOrder": "Descending" + } + }, + { + "id": 2, + "type": "timeseries", + "title": "Request Rate by App", + "datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (app) (rate({app=~\"devops-.*\"}[1m]))", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + } + }, + { + "id": 3, + "type": "logs", + "title": "Error Logs Only", + "datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "targets": [ + { + "refId": "A", + "expr": "{app=~\"devops-.*\"} | json | level=\"ERROR\"", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "options": { + "showTime": true, + "showLabels": true, + "sortOrder": "Descending" + } + }, + { + "id": 4, + "type": "piechart", + "title": "Log Level Distribution (last 5m)", + "datasource": { + "type": "loki", + "uid": "loki" + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 17 + }, + "targets": [ + { + 
"refId": "A", + "expr": "sum by (level) (count_over_time({app=~\"devops-.*\"} | json [5m]))", + "queryType": "range", + "datasource": { + "type": "loki", + "uid": "loki" + } + } + ], + "options": { + "legend": { + "displayMode": "list", + "placement": "right" + }, + "pieType": "pie", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + } + } + } + ] +} \ No newline at end of file diff --git a/monitoring/grafana/dashboards/lab08-metrics.json b/monitoring/grafana/dashboards/lab08-metrics.json new file mode 100644 index 0000000000..b05c0fa86a --- /dev/null +++ b/monitoring/grafana/dashboards/lab08-metrics.json @@ -0,0 +1,454 @@ +{ + "id": null, + "uid": "lab08-prometheus-metrics", + "title": "Lab08 - Prometheus Metrics Overview", + "tags": [ + "lab08", + "prometheus", + "metrics", + "observability" + ], + "timezone": "browser", + "schemaVersion": 39, + "version": 1, + "refresh": "10s", + "time": { + "from": "now-30m", + "to": "now" + }, + "panels": [ + { + "id": 1, + "type": "timeseries", + "title": "Request Rate by Endpoint", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (endpoint) (rate(http_requests_total{endpoint!=\"/metrics\"}[5m]))", + "legendFormat": "{{endpoint}}", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + } + }, + { + "id": 2, + "type": "timeseries", + "title": "Error Rate (5xx)", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "targets": [ + { + "refId": "A", + "expr": "sum(rate(http_requests_total{status_code=~\"5..\"}[5m]))", + "legendFormat": "5xx errors/sec", + 
"datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + } + }, + { + "id": 3, + "type": "timeseries", + "title": "Request Duration p95 by Endpoint", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "targets": [ + { + "refId": "A", + "expr": "histogram_quantile(0.95, sum by (le, endpoint) (rate(http_request_duration_seconds_bucket{endpoint!=\"/metrics\"}[5m])))", + "legendFormat": "{{endpoint}} p95", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "multi" + } + } + }, + { + "id": 4, + "type": "stat", + "title": "Active Requests", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 8 + }, + "targets": [ + { + "refId": "A", + "expr": "sum(http_requests_in_progress)", + "legendFormat": "in-flight", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 5 + }, + { + "color": "red", + "value": 10 + } + ] + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + }, + "textMode": "auto" + } + }, + { + "id": 5, + "type": "stat", + "title": "Application Uptime", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + 
"gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 8 + }, + "targets": [ + { + "refId": "A", + "expr": "max(devops_info_uptime_seconds)", + "legendFormat": "uptime", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + }, + "textMode": "auto" + } + }, + { + "id": 6, + "type": "heatmap", + "title": "Request Duration Heatmap", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 16 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (le) (rate(http_request_duration_seconds_bucket{endpoint!=\"/metrics\"}[5m]))", + "legendFormat": "{{le}}", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "reqps" + }, + "overrides": [] + }, + "options": { + "calculate": false, + "legend": { + "show": false + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "unit": "s" + } + } + }, + { + "id": 7, + "type": "piechart", + "title": "Status Code Distribution", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 16 + }, + "targets": [ + { + "refId": "A", + "expr": "sum by (status_code) (rate(http_requests_total[5m]))", + "legendFormat": "{{status_code}}", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "options": { + "legend": { + "displayMode": "list", + "placement": "right" + }, + "pieType": "pie", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + } + } + }, + { + "id": 8, + "type": "stat", + "title": "App Target Uptime", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": 
{ + "h": 7, + "w": 8, + "x": 0, + "y": 25 + }, + "targets": [ + { + "refId": "A", + "expr": "up{job=\"app\"}", + "legendFormat": "app", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "none", + "mappings": [ + { + "type": "value", + "options": { + "0": { + "text": "DOWN" + }, + "1": { + "text": "UP" + } + } + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "center", + "reduceOptions": { + "values": false, + "calcs": [ + "lastNotNull" + ], + "fields": "" + }, + "textMode": "auto" + } + }, + { + "id": 9, + "type": "timeseries", + "title": "System Info Collection p95", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "gridPos": { + "h": 7, + "w": 16, + "x": 8, + "y": 25 + }, + "targets": [ + { + "refId": "A", + "expr": "histogram_quantile(0.95, sum by (le) (rate(devops_info_system_collection_seconds_bucket[5m])))", + "legendFormat": "system info p95", + "datasource": { + "type": "prometheus", + "uid": "prometheus" + } + } + ], + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + } + } + ], + "templating": { + "list": [] + }, + "annotations": { + "list": [] + } +} diff --git a/monitoring/grafana/provisioning/dashboards/dashboard-provider.yml b/monitoring/grafana/provisioning/dashboards/dashboard-provider.yml new file mode 100644 index 0000000000..e85b723a5c --- /dev/null +++ b/monitoring/grafana/provisioning/dashboards/dashboard-provider.yml @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: lab07-logging + orgId: 1 + folder: Lab07 Logging + type: file + disableDeletion: false + allowUiUpdates: true + 
updateIntervalSeconds: 30 + options: + path: /etc/grafana/dashboards diff --git a/monitoring/grafana/provisioning/datasources/loki.yml b/monitoring/grafana/provisioning/datasources/loki.yml new file mode 100644 index 0000000000..abeeb463cf --- /dev/null +++ b/monitoring/grafana/provisioning/datasources/loki.yml @@ -0,0 +1,33 @@ +apiVersion: 1 + +deleteDatasources: + - name: Loki + orgId: 1 + - name: Prometheus + orgId: 1 + +prune: true + +datasources: + - name: Loki + uid: loki + type: loki + access: proxy + url: http://loki:3100 + isDefault: true + editable: false + jsonData: + maxLines: 1000 + timeout: 60 + + - name: Prometheus + uid: prometheus + type: prometheus + access: proxy + url: http://prometheus:9090 + isDefault: false + editable: false + jsonData: + httpMethod: POST + prometheusType: Prometheus + timeInterval: 15s diff --git a/monitoring/loki/config.yml b/monitoring/loki/config.yml new file mode 100644 index 0000000000..370b47e5fb --- /dev/null +++ b/monitoring/loki/config.yml @@ -0,0 +1,45 @@ +auth_enabled: false + +server: + http_listen_port: 3100 + grpc_listen_port: 9096 + +common: + path_prefix: /loki + replication_factor: 1 + ring: + kvstore: + store: inmemory + storage: + filesystem: + chunks_directory: /loki/chunks + rules_directory: /loki/rules + +schema_config: + configs: + - from: "2024-01-01" + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h + +storage_config: + tsdb_shipper: + active_index_directory: /loki/tsdb-index + cache_location: /loki/tsdb-cache + +compactor: + working_directory: /loki/compactor + compaction_interval: 10m + retention_enabled: true + retention_delete_delay: 2h + delete_request_store: filesystem + +limits_config: + retention_period: 168h + volume_enabled: true + +analytics: + reporting_enabled: false diff --git a/monitoring/prometheus/prometheus.yml b/monitoring/prometheus/prometheus.yml new file mode 100644 index 0000000000..405abc0fdf --- /dev/null +++ 
b/monitoring/prometheus/prometheus.yml @@ -0,0 +1,27 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + +scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + - job_name: app + metrics_path: /metrics + static_configs: + - targets: + - app-python:8000 + + - job_name: loki + metrics_path: /metrics + static_configs: + - targets: + - loki:3100 + + - job_name: grafana + metrics_path: /metrics + static_configs: + - targets: + - grafana:3000 diff --git a/monitoring/promtail/config.yml b/monitoring/promtail/config.yml new file mode 100644 index 0000000000..8db835ba55 --- /dev/null +++ b/monitoring/promtail/config.yml @@ -0,0 +1,30 @@ +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /var/lib/promtail/positions.yml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: + - job_name: docker + docker_sd_configs: + - host: unix:///var/run/docker.sock + refresh_interval: 5s + filters: + - name: label + values: ["logging=promtail"] + relabel_configs: + - source_labels: [__meta_docker_container_name] + regex: '/(.*)' + target_label: container + - source_labels: [__meta_docker_container_label_app] + target_label: app + - source_labels: [__meta_docker_container_log_stream] + target_label: stream + - target_label: job + replacement: docker + pipeline_stages: + - docker: {} diff --git a/provision_run1.log b/provision_run1.log new file mode 100644 index 0000000000..740f606508 --- /dev/null +++ b/provision_run1.log @@ -0,0 +1,60 @@ + +PLAY [Provision web servers] *************************************************** + +TASK [Gathering Facts] ********************************************************* +ok: [vagrant1] + +TASK [common : Update apt cache] *********************************************** +changed: [vagrant1] + +TASK [common : Install common packages] **************************************** +changed: [vagrant1] + +TASK [common : Set timezone] 
*************************************************** +skipping: [vagrant1] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [vagrant1] + +TASK [docker : Ensure /etc/apt/keyrings exists] ******************************** +ok: [vagrant1] + +TASK [docker : Download Docker GPG key (ASCII)] ******************************** +changed: [vagrant1] + +TASK [docker : Check if Docker keyring already exists] ************************* +ok: [vagrant1] + +TASK [docker : Convert (dearmor) Docker GPG key to keyring] ******************** +changed: [vagrant1] + +TASK [docker : Set correct permissions on Docker keyring] ********************** +ok: [vagrant1] + +TASK [docker : Set Docker APT architecture mapping] **************************** +ok: [vagrant1] + +TASK [docker : Add official Docker APT repository] ***************************** +changed: [vagrant1] + +TASK [docker : Install Docker Engine packages] ********************************* +changed: [vagrant1] + +TASK [docker : Ensure Docker service is enabled and running] ******************* +ok: [vagrant1] + +TASK [docker : Ensure docker group exists] ************************************* +ok: [vagrant1] + +TASK [docker : Add user to docker group] *************************************** +changed: [vagrant1] + +TASK [docker : Install Docker SDK for Python on target (for Ansible docker modules)] *** +changed: [vagrant1] + +RUNNING HANDLER [docker : restart docker] ************************************** +changed: [vagrant1] + +PLAY RECAP ********************************************************************* +vagrant1 : ok=17 changed=9 unreachable=0 failed=0 skipped=1 rescued=0 ignored=0 + diff --git a/provision_run2.log b/provision_run2.log new file mode 100644 index 0000000000..4a8768b0e9 --- /dev/null +++ b/provision_run2.log @@ -0,0 +1,57 @@ + +PLAY [Provision web servers] *************************************************** + +TASK [Gathering Facts] 
********************************************************* +ok: [vagrant1] + +TASK [common : Update apt cache] *********************************************** +ok: [vagrant1] + +TASK [common : Install common packages] **************************************** +ok: [vagrant1] + +TASK [common : Set timezone] *************************************************** +skipping: [vagrant1] + +TASK [docker : Install prerequisites for Docker repository] ******************** +ok: [vagrant1] + +TASK [docker : Ensure /etc/apt/keyrings exists] ******************************** +ok: [vagrant1] + +TASK [docker : Download Docker GPG key (ASCII)] ******************************** +ok: [vagrant1] + +TASK [docker : Check if Docker keyring already exists] ************************* +ok: [vagrant1] + +TASK [docker : Convert (dearmor) Docker GPG key to keyring] ******************** +skipping: [vagrant1] + +TASK [docker : Set correct permissions on Docker keyring] ********************** +ok: [vagrant1] + +TASK [docker : Set Docker APT architecture mapping] **************************** +ok: [vagrant1] + +TASK [docker : Add official Docker APT repository] ***************************** +ok: [vagrant1] + +TASK [docker : Install Docker Engine packages] ********************************* +ok: [vagrant1] + +TASK [docker : Ensure Docker service is enabled and running] ******************* +ok: [vagrant1] + +TASK [docker : Ensure docker group exists] ************************************* +ok: [vagrant1] + +TASK [docker : Add user to docker group] *************************************** +ok: [vagrant1] + +TASK [docker : Install Docker SDK for Python on target (for Ansible docker modules)] *** +ok: [vagrant1] + +PLAY RECAP ********************************************************************* +vagrant1 : ok=15 changed=0 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0 + diff --git a/pulumi/.gitignore b/pulumi/.gitignore new file mode 100644 index 0000000000..4aff6b34cb --- /dev/null +++ b/pulumi/.gitignore 
@@ -0,0 +1,12 @@ +# Python +__pycache__/ +*.py[cod] +.venv/ +venv/ + +# Pulumi state/config +Pulumi.*.yaml + +# IDE +.vscode/ +.idea/ diff --git a/pulumi/.pulumi-state/.pulumi/backups/lab04-pulumi-yc/dev/dev.1771534421400792500.json b/pulumi/.pulumi-state/.pulumi/backups/lab04-pulumi-yc/dev/dev.1771534421400792500.json new file mode 100644 index 0000000000..b14f996692 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/backups/lab04-pulumi-yc/dev/dev.1771534421400792500.json @@ -0,0 +1,427 @@ +{ + "version": 3, + "checkpoint": { + "stack": "organization/lab04-pulumi-yc/dev", + "latest": { + "manifest": { + "time": "2026-02-19T23:53:41.118824+03:00", + "magic": "7b54cd6e79f5cecd9ae124cb92b834b486c7e21993124e3a4e456c27c3ce48f9", + "version": "v3.222.0" + }, + "secrets_providers": { + "type": "passphrase", + "state": { + "salt": "v1:3yNf1Kmt4mA=:v1:Uk9In8z0RD2XJTza:lncnLY42u3UftJy+QfrDw6S2OT6vmQ==" + } + }, + "resources": [ + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0", + "custom": true, + "id": "7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "type": "pulumi:providers:yandex", + "inputs": { + "__internal": {}, + "version": "0.13.0" + }, + "outputs": { + "version": "0.13.0" + }, + "created": "2026-02-19T20:52:45.8287147Z", + "modified": "2026-02-19T20:52:45.8287147Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "custom": false, + "type": "pulumi:pulumi:Stack", + "outputs": { + "app_url": "http://89.169.135.154:5000/", + "http_url": "http://89.169.135.154/", + "internal_ip": "10.10.0.6", + "public_ip": "89.169.135.154", + "ssh_command": "ssh -i ~/.ssh/lab04_ed25519 ubuntu@89.169.135.154" + }, + "created": "2026-02-19T20:52:45.8630518Z", + "modified": "2026-02-19T20:52:45.8630518Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net", + "custom": true, + "id": "enpet9l87ashn294afa1", + "type": "yandex:index/vpcNetwork:VpcNetwork", + "inputs": 
{ + "__defaults": [ + "name" + ], + "name": "lab04-net-7bf49ed" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":60000000000,\"delete\":60000000000,\"update\":60000000000}}", + "createdAt": "2026-02-19T20:52:48Z", + "defaultSecurityGroupId": "enp912ofg69vrp8iegur", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpet9l87ashn294afa1", + "labels": {}, + "name": "lab04-net-7bf49ed", + "subnetIds": [] + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "created": "2026-02-19T20:52:49.994421Z", + "modified": "2026-02-19T20:52:49.994421Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet", + "custom": true, + "id": "e9b3hl3h09t6drs68pbg", + "type": "yandex:index/vpcSubnet:VpcSubnet", + "inputs": { + "__defaults": [ + "name" + ], + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:50Z", + "description": "", + "dhcpOptions": null, + "folderId": "b1g82kdcn5grlmu79ano", + "id": "e9b3hl3h09t6drs68pbg", + "labels": {}, + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "routeTableId": "", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "v6CidrBlocks": [], + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + 
"propertyDependencies": { + "networkId": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "v4CidrBlocks": [], + "zone": [] + }, + "created": "2026-02-19T20:52:50.5480973Z", + "modified": "2026-02-19T20:52:50.5480973Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "custom": true, + "id": "enpb8heveqi8pp7mvbcv", + "type": "yandex:index/vpcSecurityGroup:VpcSecurityGroup", + "inputs": { + "__defaults": [ + "name" + ], + "egresses": [ + { + "__defaults": [ + "port" + ], + "description": "Allow all outbound", + "fromPort": 0, + "port": -1, + "protocol": "ANY", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "ingresses": [ + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "SSH", + "fromPort": -1, + "port": 22, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "HTTP", + "fromPort": -1, + "port": 80, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "App 5000", + "fromPort": -1, + "port": 5000, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:52Z", + "description": "", + "egresses": [ + { + "description": "Allow all outbound", + "fromPort": 0, + "id": "enpqsfspqfq9fqiee48t", + "labels": {}, + "port": -1, + "predefinedTarget": "", + "protocol": "ANY", + "securityGroupId": "", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpb8heveqi8pp7mvbcv", + "ingresses": 
[ + { + "description": "SSH", + "fromPort": -1, + "id": "enpbhntafog7fraba6rd", + "labels": {}, + "port": 22, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ], + "v6CidrBlocks": [] + }, + { + "description": "HTTP", + "fromPort": -1, + "id": "enp80blit93rhb8rh3eb", + "labels": {}, + "port": 80, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + }, + { + "description": "App 5000", + "fromPort": -1, + "id": "enp39asi999eg5hhq4o1", + "labels": {}, + "port": 5000, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "labels": {}, + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1", + "status": "ACTIVE" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "egresses": [], + "ingresses": [], + "networkId": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ] + }, + "created": "2026-02-19T20:52:52.5246052Z", + "modified": "2026-02-19T20:52:52.5246052Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/computeInstance:ComputeInstance::lab04-vm", + "custom": true, + "id": "fhm7ed1a5b85rs8u3uop", + "type": "yandex:index/computeInstance:ComputeInstance", + "inputs": { + "__defaults": [ + "name", + "networkAccelerationType" + ], + "allowStoppingForUpdate": true, + "bootDisk": { + "__defaults": [ + "autoDelete" + ], + "autoDelete": true, + "initializeParams": { + "__defaults": [], + "imageId": "fd8lt661chfo5i13a40d", + "size": 
10, + "type": "network-hdd" + } + }, + "metadata": { + "__defaults": [], + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "__defaults": [ + "ipv4" + ], + "ipv4": true, + "nat": true, + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + "platformId": "standard-v2", + "resources": { + "__defaults": [], + "coreFraction": 20, + "cores": 2, + "memory": 1 + }, + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":300000000000,\"delete\":300000000000,\"update\":300000000000},\"schema_version\":\"1\"}", + "allowStoppingForUpdate": true, + "bootDisk": { + "autoDelete": true, + "deviceName": "fhmonvdcv96scusbdsm3", + "diskId": "fhmonvdcv96scusbdsm3", + "initializeParams": { + "blockSize": 4096, + "description": "", + "imageId": "fd8lt661chfo5i13a40d", + "name": "", + "size": 10, + "snapshotId": "", + "type": "network-hdd" + }, + "mode": "READ_WRITE" + }, + "createdAt": "2026-02-19T20:52:53Z", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "fqdn": "fhm7ed1a5b85rs8u3uop.auto.internal", + "hostname": "fhm7ed1a5b85rs8u3uop", + "id": "fhm7ed1a5b85rs8u3uop", + "labels": {}, + "metadata": { + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "dnsRecords": [], + "index": 0, + "ipAddress": "10.10.0.6", + "ipv4": true, + "ipv6": false, + "ipv6Address": "", + "ipv6DnsRecords": [], + "macAddress": "d0:0d:77:34:2a:2a", + "nat": true, + "natDnsRecords": [], + "natIpAddress": "89.169.135.154", + "natIpVersion": "IPV4", + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + 
"placementPolicy": { + "placementGroupId": "" + }, + "platformId": "standard-v2", + "resources": { + "coreFraction": 20, + "cores": 2, + "gpus": 0, + "memory": 1 + }, + "schedulingPolicy": { + "preemptible": false + }, + "secondaryDisks": [], + "serviceAccountId": "", + "status": "running", + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "allowStoppingForUpdate": [], + "bootDisk": [], + "metadata": [], + "networkInterfaces": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "platformId": [], + "resources": [], + "zone": [] + }, + "created": "2026-02-19T20:53:41.0764236Z", + "modified": "2026-02-19T20:53:41.0764236Z" + } + ], + "metadata": {} + } + } +} diff --git a/pulumi/.pulumi-state/.pulumi/backups/lab04-pulumi-yc/dev/dev.1771534421400792500.json.attrs b/pulumi/.pulumi-state/.pulumi/backups/lab04-pulumi-yc/dev/dev.1771534421400792500.json.attrs new file mode 100644 index 0000000000..4dbba8b4a3 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/backups/lab04-pulumi-yc/dev/dev.1771534421400792500.json.attrs @@ -0,0 +1 @@ +{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"text/plain; charset=utf-8","user.metadata":null,"md5":"8aVExZ8N8dRKSmZc6/Q9ng=="} diff --git a/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.checkpoint.json 
b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.checkpoint.json new file mode 100644 index 0000000000..b14f996692 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.checkpoint.json @@ -0,0 +1,427 @@ +{ + "version": 3, + "checkpoint": { + "stack": "organization/lab04-pulumi-yc/dev", + "latest": { + "manifest": { + "time": "2026-02-19T23:53:41.118824+03:00", + "magic": "7b54cd6e79f5cecd9ae124cb92b834b486c7e21993124e3a4e456c27c3ce48f9", + "version": "v3.222.0" + }, + "secrets_providers": { + "type": "passphrase", + "state": { + "salt": "v1:3yNf1Kmt4mA=:v1:Uk9In8z0RD2XJTza:lncnLY42u3UftJy+QfrDw6S2OT6vmQ==" + } + }, + "resources": [ + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0", + "custom": true, + "id": "7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "type": "pulumi:providers:yandex", + "inputs": { + "__internal": {}, + "version": "0.13.0" + }, + "outputs": { + "version": "0.13.0" + }, + "created": "2026-02-19T20:52:45.8287147Z", + "modified": "2026-02-19T20:52:45.8287147Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "custom": false, + "type": "pulumi:pulumi:Stack", + "outputs": { + "app_url": "http://89.169.135.154:5000/", + "http_url": "http://89.169.135.154/", + "internal_ip": "10.10.0.6", + "public_ip": "89.169.135.154", + "ssh_command": "ssh -i ~/.ssh/lab04_ed25519 ubuntu@89.169.135.154" + }, + "created": "2026-02-19T20:52:45.8630518Z", + "modified": "2026-02-19T20:52:45.8630518Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net", + "custom": true, + "id": "enpet9l87ashn294afa1", + "type": "yandex:index/vpcNetwork:VpcNetwork", + "inputs": { + "__defaults": [ + "name" + ], + "name": "lab04-net-7bf49ed" + }, + "outputs": { + "__meta": 
"{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":60000000000,\"delete\":60000000000,\"update\":60000000000}}", + "createdAt": "2026-02-19T20:52:48Z", + "defaultSecurityGroupId": "enp912ofg69vrp8iegur", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpet9l87ashn294afa1", + "labels": {}, + "name": "lab04-net-7bf49ed", + "subnetIds": [] + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "created": "2026-02-19T20:52:49.994421Z", + "modified": "2026-02-19T20:52:49.994421Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet", + "custom": true, + "id": "e9b3hl3h09t6drs68pbg", + "type": "yandex:index/vpcSubnet:VpcSubnet", + "inputs": { + "__defaults": [ + "name" + ], + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:50Z", + "description": "", + "dhcpOptions": null, + "folderId": "b1g82kdcn5grlmu79ano", + "id": "e9b3hl3h09t6drs68pbg", + "labels": {}, + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "routeTableId": "", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "v6CidrBlocks": [], + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "networkId": [ + 
"urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "v4CidrBlocks": [], + "zone": [] + }, + "created": "2026-02-19T20:52:50.5480973Z", + "modified": "2026-02-19T20:52:50.5480973Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "custom": true, + "id": "enpb8heveqi8pp7mvbcv", + "type": "yandex:index/vpcSecurityGroup:VpcSecurityGroup", + "inputs": { + "__defaults": [ + "name" + ], + "egresses": [ + { + "__defaults": [ + "port" + ], + "description": "Allow all outbound", + "fromPort": 0, + "port": -1, + "protocol": "ANY", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "ingresses": [ + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "SSH", + "fromPort": -1, + "port": 22, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "HTTP", + "fromPort": -1, + "port": 80, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "App 5000", + "fromPort": -1, + "port": 5000, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:52Z", + "description": "", + "egresses": [ + { + "description": "Allow all outbound", + "fromPort": 0, + "id": "enpqsfspqfq9fqiee48t", + "labels": {}, + "port": -1, + "predefinedTarget": "", + "protocol": "ANY", + "securityGroupId": "", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpb8heveqi8pp7mvbcv", + "ingresses": [ + { + "description": "SSH", + "fromPort": 
-1, + "id": "enpbhntafog7fraba6rd", + "labels": {}, + "port": 22, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ], + "v6CidrBlocks": [] + }, + { + "description": "HTTP", + "fromPort": -1, + "id": "enp80blit93rhb8rh3eb", + "labels": {}, + "port": 80, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + }, + { + "description": "App 5000", + "fromPort": -1, + "id": "enp39asi999eg5hhq4o1", + "labels": {}, + "port": 5000, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "labels": {}, + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1", + "status": "ACTIVE" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "egresses": [], + "ingresses": [], + "networkId": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ] + }, + "created": "2026-02-19T20:52:52.5246052Z", + "modified": "2026-02-19T20:52:52.5246052Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/computeInstance:ComputeInstance::lab04-vm", + "custom": true, + "id": "fhm7ed1a5b85rs8u3uop", + "type": "yandex:index/computeInstance:ComputeInstance", + "inputs": { + "__defaults": [ + "name", + "networkAccelerationType" + ], + "allowStoppingForUpdate": true, + "bootDisk": { + "__defaults": [ + "autoDelete" + ], + "autoDelete": true, + "initializeParams": { + "__defaults": [], + "imageId": "fd8lt661chfo5i13a40d", + "size": 10, + "type": "network-hdd" + } + }, + 
"metadata": { + "__defaults": [], + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "__defaults": [ + "ipv4" + ], + "ipv4": true, + "nat": true, + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + "platformId": "standard-v2", + "resources": { + "__defaults": [], + "coreFraction": 20, + "cores": 2, + "memory": 1 + }, + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":300000000000,\"delete\":300000000000,\"update\":300000000000},\"schema_version\":\"1\"}", + "allowStoppingForUpdate": true, + "bootDisk": { + "autoDelete": true, + "deviceName": "fhmonvdcv96scusbdsm3", + "diskId": "fhmonvdcv96scusbdsm3", + "initializeParams": { + "blockSize": 4096, + "description": "", + "imageId": "fd8lt661chfo5i13a40d", + "name": "", + "size": 10, + "snapshotId": "", + "type": "network-hdd" + }, + "mode": "READ_WRITE" + }, + "createdAt": "2026-02-19T20:52:53Z", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "fqdn": "fhm7ed1a5b85rs8u3uop.auto.internal", + "hostname": "fhm7ed1a5b85rs8u3uop", + "id": "fhm7ed1a5b85rs8u3uop", + "labels": {}, + "metadata": { + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "dnsRecords": [], + "index": 0, + "ipAddress": "10.10.0.6", + "ipv4": true, + "ipv6": false, + "ipv6Address": "", + "ipv6DnsRecords": [], + "macAddress": "d0:0d:77:34:2a:2a", + "nat": true, + "natDnsRecords": [], + "natIpAddress": "89.169.135.154", + "natIpVersion": "IPV4", + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + "placementPolicy": { + "placementGroupId": "" + 
}, + "platformId": "standard-v2", + "resources": { + "coreFraction": 20, + "cores": 2, + "gpus": 0, + "memory": 1 + }, + "schedulingPolicy": { + "preemptible": false + }, + "secondaryDisks": [], + "serviceAccountId": "", + "status": "running", + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "allowStoppingForUpdate": [], + "bootDisk": [], + "metadata": [], + "networkInterfaces": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "platformId": [], + "resources": [], + "zone": [] + }, + "created": "2026-02-19T20:53:41.0764236Z", + "modified": "2026-02-19T20:53:41.0764236Z" + } + ], + "metadata": {} + } + } +} diff --git a/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.checkpoint.json.attrs b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.checkpoint.json.attrs new file mode 100644 index 0000000000..4dbba8b4a3 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.checkpoint.json.attrs @@ -0,0 +1 @@ +{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"text/plain; charset=utf-8","user.metadata":null,"md5":"8aVExZ8N8dRKSmZc6/Q9ng=="} diff --git a/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.history.json 
b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.history.json new file mode 100644 index 0000000000..aeb75b84a2 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.history.json @@ -0,0 +1,44 @@ +{ + "kind": "update", + "startTime": 1771534364, + "message": "feat: added terraform lab part", + "environment": { + "exec.kind": "cli", + "git.author": "dorley174", + "git.author.email": "d.valiev@innopolis.university", + "git.committer": "dorley174", + "git.committer.email": "d.valiev@innopolis.university", + "git.dirty": "true", + "git.head": "46866051a9837ae9503ab3f8665d0317ce29e2dc", + "git.headName": "refs/heads/lab04", + "pulumi.arch": "amd64", + "pulumi.os": "windows", + "pulumi.version": "v3.222.0", + "runtime.executable": "C:\\DevOps\\DevOps-Core-Course\\pulumi\\venv\\Scripts\\python.exe", + "runtime.metadata.toolchain": "Pip", + "runtime.metadata.toolchainVersion": "26.0.1", + "runtime.metadata.typechecker": "None", + "runtime.name": "python", + "runtime.version": "3.13.1", + "stack.environments": "[]", + "updatePlan": "false", + "vcs.kind": "github.com", + "vcs.owner": "dorley174", + "vcs.repo": "DevOps-Core-Course", + "vcs.root": "pulumi" + }, + "config": { + "lab04-pulumi-yc:allowedSshCidr": "95.111.204.70/32", + "lab04-pulumi-yc:imageFamily": "ubuntu-2404-lts", + "lab04-pulumi-yc:sshPublicKeyPath": "~/.ssh/lab04_ed25519.pub", + "lab04-pulumi-yc:sshUser": "ubuntu", + "lab04-pulumi-yc:subnetCidr": "10.10.0.0/24", + "lab04-pulumi-yc:zone": "ru-central1-a" + }, + "version": 0, + "result": "succeeded", + "endTime": 1771534421, + "resourceChanges": { + "create": 5 + } +} diff --git a/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.history.json.attrs b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.history.json.attrs new file mode 100644 index 0000000000..9921a06eaa --- /dev/null +++ 
b/pulumi/.pulumi-state/.pulumi/history/lab04-pulumi-yc/dev/dev-1771534421389384600.history.json.attrs @@ -0,0 +1 @@ +{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"text/plain; charset=utf-8","user.metadata":null,"md5":"ff0qIY8bvz4izQQb8WO+fA=="} diff --git a/pulumi/.pulumi-state/.pulumi/meta.yaml b/pulumi/.pulumi-state/.pulumi/meta.yaml new file mode 100644 index 0000000000..b82551848c --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/meta.yaml @@ -0,0 +1 @@ +version: 1 diff --git a/pulumi/.pulumi-state/.pulumi/meta.yaml.attrs b/pulumi/.pulumi-state/.pulumi/meta.yaml.attrs new file mode 100644 index 0000000000..4466031b30 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/meta.yaml.attrs @@ -0,0 +1 @@ +{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"text/plain; charset=utf-8","user.metadata":null,"md5":"EaRWdV65+nlqCnYlI4a4Wg=="} diff --git a/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json new file mode 100644 index 0000000000..b14f996692 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json @@ -0,0 +1,427 @@ +{ + "version": 3, + "checkpoint": { + "stack": "organization/lab04-pulumi-yc/dev", + "latest": { + "manifest": { + "time": "2026-02-19T23:53:41.118824+03:00", + "magic": "7b54cd6e79f5cecd9ae124cb92b834b486c7e21993124e3a4e456c27c3ce48f9", + "version": "v3.222.0" + }, + "secrets_providers": { + "type": "passphrase", + "state": { + "salt": "v1:3yNf1Kmt4mA=:v1:Uk9In8z0RD2XJTza:lncnLY42u3UftJy+QfrDw6S2OT6vmQ==" + } + }, + "resources": [ + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0", + "custom": true, + "id": "7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "type": "pulumi:providers:yandex", + "inputs": { + "__internal": {}, + "version": "0.13.0" + }, + "outputs": { + 
"version": "0.13.0" + }, + "created": "2026-02-19T20:52:45.8287147Z", + "modified": "2026-02-19T20:52:45.8287147Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "custom": false, + "type": "pulumi:pulumi:Stack", + "outputs": { + "app_url": "http://89.169.135.154:5000/", + "http_url": "http://89.169.135.154/", + "internal_ip": "10.10.0.6", + "public_ip": "89.169.135.154", + "ssh_command": "ssh -i ~/.ssh/lab04_ed25519 ubuntu@89.169.135.154" + }, + "created": "2026-02-19T20:52:45.8630518Z", + "modified": "2026-02-19T20:52:45.8630518Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net", + "custom": true, + "id": "enpet9l87ashn294afa1", + "type": "yandex:index/vpcNetwork:VpcNetwork", + "inputs": { + "__defaults": [ + "name" + ], + "name": "lab04-net-7bf49ed" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":60000000000,\"delete\":60000000000,\"update\":60000000000}}", + "createdAt": "2026-02-19T20:52:48Z", + "defaultSecurityGroupId": "enp912ofg69vrp8iegur", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpet9l87ashn294afa1", + "labels": {}, + "name": "lab04-net-7bf49ed", + "subnetIds": [] + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "created": "2026-02-19T20:52:49.994421Z", + "modified": "2026-02-19T20:52:49.994421Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet", + "custom": true, + "id": "e9b3hl3h09t6drs68pbg", + "type": "yandex:index/vpcSubnet:VpcSubnet", + "inputs": { + "__defaults": [ + "name" + ], + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": 
"{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:50Z", + "description": "", + "dhcpOptions": null, + "folderId": "b1g82kdcn5grlmu79ano", + "id": "e9b3hl3h09t6drs68pbg", + "labels": {}, + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "routeTableId": "", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "v6CidrBlocks": [], + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "networkId": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "v4CidrBlocks": [], + "zone": [] + }, + "created": "2026-02-19T20:52:50.5480973Z", + "modified": "2026-02-19T20:52:50.5480973Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "custom": true, + "id": "enpb8heveqi8pp7mvbcv", + "type": "yandex:index/vpcSecurityGroup:VpcSecurityGroup", + "inputs": { + "__defaults": [ + "name" + ], + "egresses": [ + { + "__defaults": [ + "port" + ], + "description": "Allow all outbound", + "fromPort": 0, + "port": -1, + "protocol": "ANY", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "ingresses": [ + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "SSH", + "fromPort": -1, + "port": 22, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "HTTP", + "fromPort": -1, + "port": 80, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + 
"description": "App 5000", + "fromPort": -1, + "port": 5000, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:52Z", + "description": "", + "egresses": [ + { + "description": "Allow all outbound", + "fromPort": 0, + "id": "enpqsfspqfq9fqiee48t", + "labels": {}, + "port": -1, + "predefinedTarget": "", + "protocol": "ANY", + "securityGroupId": "", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpb8heveqi8pp7mvbcv", + "ingresses": [ + { + "description": "SSH", + "fromPort": -1, + "id": "enpbhntafog7fraba6rd", + "labels": {}, + "port": 22, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ], + "v6CidrBlocks": [] + }, + { + "description": "HTTP", + "fromPort": -1, + "id": "enp80blit93rhb8rh3eb", + "labels": {}, + "port": 80, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + }, + { + "description": "App 5000", + "fromPort": -1, + "id": "enp39asi999eg5hhq4o1", + "labels": {}, + "port": 5000, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "labels": {}, + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1", + "status": "ACTIVE" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": 
"urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "egresses": [], + "ingresses": [], + "networkId": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ] + }, + "created": "2026-02-19T20:52:52.5246052Z", + "modified": "2026-02-19T20:52:52.5246052Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/computeInstance:ComputeInstance::lab04-vm", + "custom": true, + "id": "fhm7ed1a5b85rs8u3uop", + "type": "yandex:index/computeInstance:ComputeInstance", + "inputs": { + "__defaults": [ + "name", + "networkAccelerationType" + ], + "allowStoppingForUpdate": true, + "bootDisk": { + "__defaults": [ + "autoDelete" + ], + "autoDelete": true, + "initializeParams": { + "__defaults": [], + "imageId": "fd8lt661chfo5i13a40d", + "size": 10, + "type": "network-hdd" + } + }, + "metadata": { + "__defaults": [], + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "__defaults": [ + "ipv4" + ], + "ipv4": true, + "nat": true, + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + "platformId": "standard-v2", + "resources": { + "__defaults": [], + "coreFraction": 20, + "cores": 2, + "memory": 1 + }, + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":300000000000,\"delete\":300000000000,\"update\":300000000000},\"schema_version\":\"1\"}", + "allowStoppingForUpdate": true, + "bootDisk": { + "autoDelete": true, + "deviceName": "fhmonvdcv96scusbdsm3", + "diskId": "fhmonvdcv96scusbdsm3", + "initializeParams": { + "blockSize": 4096, + "description": "", + "imageId": "fd8lt661chfo5i13a40d", + "name": "", + "size": 10, + "snapshotId": "", + "type": "network-hdd" + }, + "mode": 
"READ_WRITE" + }, + "createdAt": "2026-02-19T20:52:53Z", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "fqdn": "fhm7ed1a5b85rs8u3uop.auto.internal", + "hostname": "fhm7ed1a5b85rs8u3uop", + "id": "fhm7ed1a5b85rs8u3uop", + "labels": {}, + "metadata": { + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "dnsRecords": [], + "index": 0, + "ipAddress": "10.10.0.6", + "ipv4": true, + "ipv6": false, + "ipv6Address": "", + "ipv6DnsRecords": [], + "macAddress": "d0:0d:77:34:2a:2a", + "nat": true, + "natDnsRecords": [], + "natIpAddress": "89.169.135.154", + "natIpVersion": "IPV4", + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + "placementPolicy": { + "placementGroupId": "" + }, + "platformId": "standard-v2", + "resources": { + "coreFraction": 20, + "cores": 2, + "gpus": 0, + "memory": 1 + }, + "schedulingPolicy": { + "preemptible": false + }, + "secondaryDisks": [], + "serviceAccountId": "", + "status": "running", + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "allowStoppingForUpdate": [], + "bootDisk": [], + "metadata": [], + "networkInterfaces": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "platformId": [], + "resources": [], + "zone": [] + }, + "created": 
"2026-02-19T20:53:41.0764236Z", + "modified": "2026-02-19T20:53:41.0764236Z" + } + ], + "metadata": {} + } + } +} diff --git a/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.attrs b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.attrs new file mode 100644 index 0000000000..4dbba8b4a3 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.attrs @@ -0,0 +1 @@ +{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"text/plain; charset=utf-8","user.metadata":null,"md5":"8aVExZ8N8dRKSmZc6/Q9ng=="} diff --git a/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.bak b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.bak new file mode 100644 index 0000000000..5e5007b70d --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.bak @@ -0,0 +1,420 @@ +{ + "version": 3, + "checkpoint": { + "stack": "organization/lab04-pulumi-yc/dev", + "latest": { + "manifest": { + "time": "2026-02-19T23:53:41.0764236+03:00", + "magic": "7b54cd6e79f5cecd9ae124cb92b834b486c7e21993124e3a4e456c27c3ce48f9", + "version": "v3.222.0" + }, + "secrets_providers": { + "type": "passphrase", + "state": { + "salt": "v1:3yNf1Kmt4mA=:v1:Uk9In8z0RD2XJTza:lncnLY42u3UftJy+QfrDw6S2OT6vmQ==" + } + }, + "resources": [ + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0", + "custom": true, + "id": "7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "type": "pulumi:providers:yandex", + "inputs": { + "__internal": {}, + "version": "0.13.0" + }, + "outputs": { + "version": "0.13.0" + }, + "created": "2026-02-19T20:52:45.8287147Z", + "modified": "2026-02-19T20:52:45.8287147Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "custom": false, + "type": "pulumi:pulumi:Stack", + "created": "2026-02-19T20:52:45.8630518Z", + "modified": "2026-02-19T20:52:45.8630518Z" + }, + { + 
"urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net", + "custom": true, + "id": "enpet9l87ashn294afa1", + "type": "yandex:index/vpcNetwork:VpcNetwork", + "inputs": { + "__defaults": [ + "name" + ], + "name": "lab04-net-7bf49ed" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":60000000000,\"delete\":60000000000,\"update\":60000000000}}", + "createdAt": "2026-02-19T20:52:48Z", + "defaultSecurityGroupId": "enp912ofg69vrp8iegur", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpet9l87ashn294afa1", + "labels": {}, + "name": "lab04-net-7bf49ed", + "subnetIds": [] + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "created": "2026-02-19T20:52:49.994421Z", + "modified": "2026-02-19T20:52:49.994421Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet", + "custom": true, + "id": "e9b3hl3h09t6drs68pbg", + "type": "yandex:index/vpcSubnet:VpcSubnet", + "inputs": { + "__defaults": [ + "name" + ], + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:50Z", + "description": "", + "dhcpOptions": null, + "folderId": "b1g82kdcn5grlmu79ano", + "id": "e9b3hl3h09t6drs68pbg", + "labels": {}, + "name": "lab04-subnet-1fa918f", + "networkId": "enpet9l87ashn294afa1", + "routeTableId": "", + "v4CidrBlocks": [ + "10.10.0.0/24" + ], + "v6CidrBlocks": [], + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + 
"urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "networkId": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "v4CidrBlocks": [], + "zone": [] + }, + "created": "2026-02-19T20:52:50.5480973Z", + "modified": "2026-02-19T20:52:50.5480973Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "custom": true, + "id": "enpb8heveqi8pp7mvbcv", + "type": "yandex:index/vpcSecurityGroup:VpcSecurityGroup", + "inputs": { + "__defaults": [ + "name" + ], + "egresses": [ + { + "__defaults": [ + "port" + ], + "description": "Allow all outbound", + "fromPort": 0, + "port": -1, + "protocol": "ANY", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "ingresses": [ + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "SSH", + "fromPort": -1, + "port": 22, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "HTTP", + "fromPort": -1, + "port": 80, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + }, + { + "__defaults": [ + "fromPort", + "toPort" + ], + "description": "App 5000", + "fromPort": -1, + "port": 5000, + "protocol": "TCP", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ] + } + ], + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":180000000000,\"delete\":180000000000,\"update\":180000000000}}", + "createdAt": "2026-02-19T20:52:52Z", + "description": "", + "egresses": [ + { + "description": "Allow all outbound", + "fromPort": 0, + "id": "enpqsfspqfq9fqiee48t", + "labels": {}, + "port": -1, + "predefinedTarget": "", + 
"protocol": "ANY", + "securityGroupId": "", + "toPort": 65535, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "folderId": "b1g82kdcn5grlmu79ano", + "id": "enpb8heveqi8pp7mvbcv", + "ingresses": [ + { + "description": "SSH", + "fromPort": -1, + "id": "enpbhntafog7fraba6rd", + "labels": {}, + "port": 22, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "95.111.204.70/32" + ], + "v6CidrBlocks": [] + }, + { + "description": "HTTP", + "fromPort": -1, + "id": "enp80blit93rhb8rh3eb", + "labels": {}, + "port": 80, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + }, + { + "description": "App 5000", + "fromPort": -1, + "id": "enp39asi999eg5hhq4o1", + "labels": {}, + "port": 5000, + "predefinedTarget": "", + "protocol": "TCP", + "securityGroupId": "", + "toPort": -1, + "v4CidrBlocks": [ + "0.0.0.0/0" + ], + "v6CidrBlocks": [] + } + ], + "labels": {}, + "name": "lab04-sg-7fdbda7", + "networkId": "enpet9l87ashn294afa1", + "status": "ACTIVE" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "egresses": [], + "ingresses": [], + "networkId": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcNetwork:VpcNetwork::lab04-net" + ] + }, + "created": "2026-02-19T20:52:52.5246052Z", + "modified": "2026-02-19T20:52:52.5246052Z" + }, + { + "urn": "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/computeInstance:ComputeInstance::lab04-vm", + "custom": true, + "id": "fhm7ed1a5b85rs8u3uop", + "type": "yandex:index/computeInstance:ComputeInstance", + "inputs": { + "__defaults": [ + "name", + 
"networkAccelerationType" + ], + "allowStoppingForUpdate": true, + "bootDisk": { + "__defaults": [ + "autoDelete" + ], + "autoDelete": true, + "initializeParams": { + "__defaults": [], + "imageId": "fd8lt661chfo5i13a40d", + "size": 10, + "type": "network-hdd" + } + }, + "metadata": { + "__defaults": [], + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "__defaults": [ + "ipv4" + ], + "ipv4": true, + "nat": true, + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + "platformId": "standard-v2", + "resources": { + "__defaults": [], + "coreFraction": 20, + "cores": 2, + "memory": 1 + }, + "zone": "ru-central1-a" + }, + "outputs": { + "__meta": "{\"e2bfb730-ecaa-11e6-8f88-34363bc7c4c0\":{\"create\":300000000000,\"delete\":300000000000,\"update\":300000000000},\"schema_version\":\"1\"}", + "allowStoppingForUpdate": true, + "bootDisk": { + "autoDelete": true, + "deviceName": "fhmonvdcv96scusbdsm3", + "diskId": "fhmonvdcv96scusbdsm3", + "initializeParams": { + "blockSize": 4096, + "description": "", + "imageId": "fd8lt661chfo5i13a40d", + "name": "", + "size": 10, + "snapshotId": "", + "type": "network-hdd" + }, + "mode": "READ_WRITE" + }, + "createdAt": "2026-02-19T20:52:53Z", + "description": "", + "folderId": "b1g82kdcn5grlmu79ano", + "fqdn": "fhm7ed1a5b85rs8u3uop.auto.internal", + "hostname": "fhm7ed1a5b85rs8u3uop", + "id": "fhm7ed1a5b85rs8u3uop", + "labels": {}, + "metadata": { + "ssh-keys": "ubuntu:ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAII5nKglXX/E2/S3oPKXahGr77IZELnnZlqE4oGCyEhZQ Данил@Dorley" + }, + "name": "lab04-vm-6023a00", + "networkAccelerationType": "standard", + "networkInterfaces": [ + { + "dnsRecords": [], + "index": 0, + "ipAddress": "10.10.0.6", + "ipv4": true, + "ipv6": false, + "ipv6Address": "", + "ipv6DnsRecords": [], + "macAddress": 
"d0:0d:77:34:2a:2a", + "nat": true, + "natDnsRecords": [], + "natIpAddress": "89.169.135.154", + "natIpVersion": "IPV4", + "securityGroupIds": [ + "enpb8heveqi8pp7mvbcv" + ], + "subnetId": "e9b3hl3h09t6drs68pbg" + } + ], + "placementPolicy": { + "placementGroupId": "" + }, + "platformId": "standard-v2", + "resources": { + "coreFraction": 20, + "cores": 2, + "gpus": 0, + "memory": 1 + }, + "schedulingPolicy": { + "preemptible": false + }, + "secondaryDisks": [], + "serviceAccountId": "", + "status": "running", + "zone": "ru-central1-a" + }, + "parent": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:pulumi:Stack::lab04-pulumi-yc-dev", + "dependencies": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "provider": "urn:pulumi:dev::lab04-pulumi-yc::pulumi:providers:yandex::default_0_13_0::7d12287d-55f3-465d-8fa0-e63bfc770a1f", + "propertyDependencies": { + "allowStoppingForUpdate": [], + "bootDisk": [], + "metadata": [], + "networkInterfaces": [ + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSecurityGroup:VpcSecurityGroup::lab04-sg", + "urn:pulumi:dev::lab04-pulumi-yc::yandex:index/vpcSubnet:VpcSubnet::lab04-subnet" + ], + "platformId": [], + "resources": [], + "zone": [] + }, + "created": "2026-02-19T20:53:41.0764236Z", + "modified": "2026-02-19T20:53:41.0764236Z" + } + ], + "metadata": {} + } + } +} diff --git a/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.bak.attrs b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.bak.attrs new file mode 100644 index 0000000000..60a4efc8b8 --- /dev/null +++ b/pulumi/.pulumi-state/.pulumi/stacks/lab04-pulumi-yc/dev.json.bak.attrs @@ -0,0 +1 @@ +{"user.cache_control":"","user.content_disposition":"","user.content_encoding":"","user.content_language":"","user.content_type":"text/plain; charset=utf-8","user.metadata":null,"md5":"N1dWOE4/X1j2axM1iAPBng=="} diff --git 
a/pulumi/Pulumi.yaml b/pulumi/Pulumi.yaml new file mode 100644 index 0000000000..7e92a0ad45 --- /dev/null +++ b/pulumi/Pulumi.yaml @@ -0,0 +1,6 @@ +name: lab04-pulumi-yc +runtime: + name: python + options: + virtualenv: venv +description: Lab04 infrastructure in Yandex Cloud using Pulumi (Python) diff --git a/pulumi/README.md b/pulumi/README.md new file mode 100644 index 0000000000..92b74ed68e --- /dev/null +++ b/pulumi/README.md @@ -0,0 +1,62 @@ +# Lab04 — Pulumi (Yandex Cloud, Python) + +## Before + +1) Download Pulumi. +2) Download Python 3.10+. +3) Download project requirements: + +```bash +python -m venv venv +# Windows: +# .\venv\Scripts\activate +# macOS/Linux: +# source venv/bin/activate +pip install -r requirements.txt +``` + +## Auth using Yandex Cloud + +Pulumi provider Yandex uses same env/confic as Terrarform +Simplier + +- add env variables `YC_TOKEN`, `YC_CLOUD_ID`, `YC_FOLDER_ID`, `YC_ZONE` + +or (using pulumi config): + +```bash +pulumi config set yandex:cloudId +pulumi config set yandex:folderId +pulumi config set yandex:zone ru-central1-a +pulumi config set --secret yandex:token +``` + +## Project settings (my vaaariables) + +```bash +pulumi config set zone ru-central1-a +pulumi config set subnetCidr 10.10.0.0/24 +pulumi config set allowedSshCidr "<ваш_IP>/32" + +pulumi config set sshUser ubuntu +pulumi config set sshPublicKeyPath "~/.ssh/lab04_ed25519.pub" +# либо: +# pulumi config set sshPublicKey "ssh-ed25519 AAAA... lab04" + +pulumi config set imageFamily ubuntu-2404-lts +``` + +## Start + +```bash +pulumi preview +pulumi up +``` + +After `pulumi up` in output will be `public_ip` and `ssh_command`. 
+ +## Delete resourses + +```bash +pulumi destroy +``` diff --git a/pulumi/__main__.py b/pulumi/__main__.py new file mode 100644 index 0000000000..738775ea30 --- /dev/null +++ b/pulumi/__main__.py @@ -0,0 +1,146 @@ +import os + +import pulumi +import pulumi_yandex as yandex + +cfg = pulumi.Config() + +# ----------------- +# Config (project) +# ----------------- +zone = cfg.get("zone") or os.getenv("YC_ZONE") or "ru-central1-a" +subnet_cidr = cfg.get("subnetCidr") or "10.10.0.0/24" +allowed_ssh_cidr = cfg.get("allowedSshCidr") or "0.0.0.0/0" + +ssh_user = cfg.get("sshUser") or "ubuntu" +ssh_public_key = cfg.get("sshPublicKey") +ssh_public_key_path = cfg.get("sshPublicKeyPath") + +image_family = cfg.get("imageFamily") or "ubuntu-2404-lts" + +vm_cores = int(cfg.get("vmCores") or 2) +vm_memory_gb = int(cfg.get("vmMemoryGb") or 1) +vm_core_fraction = int(cfg.get("vmCoreFraction") or 20) + +disk_size_gb = int(cfg.get("diskSizeGb") or 10) +disk_type = cfg.get("diskType") or "network-hdd" + +platform_id = cfg.get("platformId") or "standard-v2" + +# SSH public key: either inline or from file +if not ssh_public_key: + if ssh_public_key_path: + path = os.path.expanduser(ssh_public_key_path) + with open(path, "r", encoding="utf-8") as f: + ssh_public_key = f.read().strip() + else: + raise Exception( + "Set sshPublicKey (inline) or sshPublicKeyPath (path to .pub) via pulumi config" + ) + +# ----------------- +# Data: image +# ----------------- +image = yandex.get_compute_image(family=image_family) + +# ----------------- +# Network +# ----------------- +net = yandex.VpcNetwork("lab04-net") + +subnet = yandex.VpcSubnet( + "lab04-subnet", + network_id=net.id, + zone=zone, + v4_cidr_blocks=[subnet_cidr], +) + +# ----------------- +# Security Group +# ----------------- +sg = yandex.VpcSecurityGroup( + "lab04-sg", + network_id=net.id, + ingresses=[ + yandex.VpcSecurityGroupIngressArgs( + protocol="TCP", + description="SSH", + v4_cidr_blocks=[allowed_ssh_cidr], + port=22, + ), + 
yandex.VpcSecurityGroupIngressArgs( + protocol="TCP", + description="HTTP", + v4_cidr_blocks=["0.0.0.0/0"], + port=80, + ), + yandex.VpcSecurityGroupIngressArgs( + protocol="TCP", + description="App 5000", + v4_cidr_blocks=["0.0.0.0/0"], + port=5000, + ), + ], + egresses=[ + yandex.VpcSecurityGroupEgressArgs( + protocol="ANY", + description="Allow all outbound", + v4_cidr_blocks=["0.0.0.0/0"], + from_port=0, + to_port=65535, + ) + ], +) + + +# ----------------- +# VM +# ----------------- +vm = yandex.ComputeInstance( + "lab04-vm", + zone=zone, + platform_id=platform_id, + resources=yandex.ComputeInstanceResourcesArgs( + cores=vm_cores, + memory=vm_memory_gb, + core_fraction=vm_core_fraction, + ), + boot_disk=yandex.ComputeInstanceBootDiskArgs( + initialize_params=yandex.ComputeInstanceBootDiskInitializeParamsArgs( + image_id=image.id, + size=disk_size_gb, + type=disk_type, + ) + ), + network_interfaces=[ + yandex.ComputeInstanceNetworkInterfaceArgs( + subnet_id=subnet.id, + nat=True, + security_group_ids=[sg.id], + ) + ], + metadata={ + "ssh-keys": f"{ssh_user}:{ssh_public_key}", + }, + allow_stopping_for_update=True, +) + + +def _nat_ip(network_interfaces): + """Handle both dict-style and typed outputs.""" + ni0 = network_interfaces[0] + if isinstance(ni0, dict): + return ni0.get("nat_ip_address") + return getattr(ni0, "nat_ip_address", None) + + +public_ip = vm.network_interfaces.apply(_nat_ip) +internal_ip = vm.network_interfaces.apply( + lambda nis: nis[0].get("ip_address") if isinstance(nis[0], dict) else getattr(nis[0], "ip_address", None) +) + +pulumi.export("public_ip", public_ip) +pulumi.export("internal_ip", internal_ip) +pulumi.export("ssh_command", public_ip.apply(lambda ip: f"ssh -i ~/.ssh/lab04_ed25519 {ssh_user}@{ip}")) +pulumi.export("http_url", public_ip.apply(lambda ip: f"http://{ip}/")) +pulumi.export("app_url", public_ip.apply(lambda ip: f"http://{ip}:5000/")) diff --git a/pulumi/requirements.txt b/pulumi/requirements.txt new file mode 100644 
index 0000000000..4fcd3c0981 --- /dev/null +++ b/pulumi/requirements.txt @@ -0,0 +1,2 @@ +pulumi>=3.0.0,<4.0.0 +pulumi-yandex diff --git a/scripts/load-env.ps1 b/scripts/load-env.ps1 new file mode 100644 index 0000000000..498d804eea --- /dev/null +++ b/scripts/load-env.ps1 @@ -0,0 +1,33 @@ +$envFile = Join-Path (Get-Location) ".env" +if (!(Test-Path $envFile)) { + throw "Не найден .env в корне: $envFile" +} + +Get-Content $envFile | ForEach-Object { + $line = $_.Trim() + if ($line.Length -eq 0) { return } + if ($line.StartsWith("#")) { return } + + $parts = $line -split "=", 2 + if ($parts.Count -ne 2) { return } + + $name = $parts[0].Trim() + $value = $parts[1].Trim().Trim('"').Trim("'") + + # экспорт в текущее окружение + Set-Item -Path "Env:$name" -Value $value +} + +# привести YC_SERVICE_ACCOUNT_KEY_FILE к абсолютному пути, если он относительный +if ($env:YC_SERVICE_ACCOUNT_KEY_FILE -and !(Split-Path $env:YC_SERVICE_ACCOUNT_KEY_FILE -IsAbsolute)) { + $candidate = Join-Path (Get-Location) $env:YC_SERVICE_ACCOUNT_KEY_FILE + if (Test-Path $candidate) { + $env:YC_SERVICE_ACCOUNT_KEY_FILE = (Resolve-Path $candidate).Path + } +} + +Write-Host "Loaded .env OK" +Write-Host "YC_CLOUD_ID=$env:YC_CLOUD_ID" +Write-Host "YC_FOLDER_ID=$env:YC_FOLDER_ID" +Write-Host "YC_ZONE=$env:YC_ZONE" +Write-Host "YC_SERVICE_ACCOUNT_KEY_FILE=$env:YC_SERVICE_ACCOUNT_KEY_FILE" diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000000..2d2b98b02a --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,21 @@ +# Terraform local state / cache +.terraform/ +*.tfstate +*.tfstate.* + +# Variables (не коммитим) +*.tfvars +terraform.tfvars + +# Crash logs +crash.log +crash.*.log + +# Override files +override.tf +override.tf.json +*_override.tf +*_override.tf.json + +# Lock file (в реальных проектах часто коммитят, но в рамках лабы можно игнорировать) +.terraform.lock.hcl diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl new file mode 100644 index 
0000000000..6eb5862287 --- /dev/null +++ b/terraform/.tflint.hcl @@ -0,0 +1,6 @@ +plugin "terraform" { + enabled = true +} + +# Базовые правила tflint включаются по умолчанию. +# Провайдер-специфичные ruleset'ы можно подключать отдельно, но для лабы достаточно базового. diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 0000000000..def80b4b55 --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,56 @@ +# Lab04 — Terraform (Yandex Cloud) + +Target: up the VM or netwwork and security group using ports **22/80/5000**. + +## Before + +1) Download Terraform. +2) Download YC CLI и залогиньтесь. +3) Prepare SSH-ключ (ed25519 рекомендуется): + +```bash +ssh-keygen -t ed25519 -C "lab04" -f ~/.ssh/lab04_ed25519 +``` + +## Variables settings + +Copy exaample andd fill: + +```bash +cp terraform.tfvars.example terraform.tfvars +``` + +Check that: +- `allowed_ssh_cidr` is better to take in outter IP `/32`. +- SSH-key may transfer using `ssh_public_key` (as a string), лor `ssh_public_key_path`. + +### How to transfer creds of Yandex Cloud + +There are 2 ways: + +**A) Using env:** +- `YC_TOKEN` или `YC_SERVICE_ACCOUNT_KEY_FILE` +- `YC_CLOUD_ID` +- `YC_FOLDER_ID` + +**B) Using terraform.tfvars:** +- `cloud_id`, `folder_id` +- `service_account_key_file` or `yc_token` + +## Start + +```bash +terraform init +terraform fmt +terraform validate +terraform plan +terraform apply +``` + +After `apply` Terraform outputs `public_ip` и `ssh_command`. 
+ +## Delete resourses + +```bash +terraform destroy +``` diff --git a/terraform/github/.gitignore b/terraform/github/.gitignore new file mode 100644 index 0000000000..6c721539e4 --- /dev/null +++ b/terraform/github/.gitignore @@ -0,0 +1,7 @@ +.terraform/ +*.tfstate +*.tfstate.* +*.tfvars +terraform.tfvars +crash.log +.terraform.lock.hcl diff --git a/terraform/github/.tflint.hcl b/terraform/github/.tflint.hcl new file mode 100644 index 0000000000..75d15f14aa --- /dev/null +++ b/terraform/github/.tflint.hcl @@ -0,0 +1,3 @@ +plugin "terraform" { + enabled = true +} diff --git a/terraform/github/main.tf b/terraform/github/main.tf new file mode 100644 index 0000000000..2709d7849d --- /dev/null +++ b/terraform/github/main.tf @@ -0,0 +1,11 @@ +resource "github_repository" "course_repo" { + name = var.repo_name + description = var.repo_description + visibility = var.visibility + + has_issues = true + has_projects = false + has_wiki = false + + # auto_init = true # включите, если хотите создать repo с пустым коммитом +} diff --git a/terraform/github/outputs.tf b/terraform/github/outputs.tf new file mode 100644 index 0000000000..81dffb5fb3 --- /dev/null +++ b/terraform/github/outputs.tf @@ -0,0 +1,9 @@ +output "repo_full_name" { + value = github_repository.course_repo.full_name + description = "owner/name" +} + +output "repo_url" { + value = github_repository.course_repo.html_url + description = "URL репозитория" +} diff --git a/terraform/github/providers.tf b/terraform/github/providers.tf new file mode 100644 index 0000000000..fa406ffc5e --- /dev/null +++ b/terraform/github/providers.tf @@ -0,0 +1,8 @@ +provider "github" { + # Можно передать token/owner через переменные, либо через env: + # GITHUB_TOKEN + # GITHUB_OWNER (не всегда поддерживается, обычно owner задают в provider block) + + token = var.github_token != "" ? var.github_token : null + owner = var.github_owner != "" ? 
var.github_owner : null +} diff --git a/terraform/github/terraform.tfvars.example b/terraform/github/terraform.tfvars.example new file mode 100644 index 0000000000..3557c3db2d --- /dev/null +++ b/terraform/github/terraform.tfvars.example @@ -0,0 +1,14 @@ +# Лучше хранить токен в env: +# export GITHUB_TOKEN=... +# или в PowerShell: +# $env:GITHUB_TOKEN="..." + +# Но если надо: +# github_token = "..." # НЕ КОММИТИТЬ + +# Если owner не задан, GitHub provider обычно берёт owner из токена. +# github_owner = "your-username-or-org" + +repo_name = "DevOps-Core-Course" +repo_description = "DevOps course repository managed by Terraform" +visibility = "public" diff --git a/terraform/github/variables.tf b/terraform/github/variables.tf new file mode 100644 index 0000000000..87b10b3062 --- /dev/null +++ b/terraform/github/variables.tf @@ -0,0 +1,30 @@ +variable "github_token" { + type = string + description = "GitHub Personal Access Token (PAT). Можно оставить пустым и задать через env GITHUB_TOKEN" + default = "" + sensitive = true +} + +variable "github_owner" { + type = string + description = "Владелец (username или org). 
Можно оставить пустым — owner будет взят из токена (если провайдер сможет)" + default = "" +} + +variable "repo_name" { + type = string + description = "Название репозитория (например DevOps-Core-Course)" + default = "DevOps-Core-Course" +} + +variable "repo_description" { + type = string + description = "Описание репозитория" + default = "DevOps course repository managed by Terraform" +} + +variable "visibility" { + type = string + description = "public/private/internal" + default = "public" +} diff --git a/terraform/github/versions.tf b/terraform/github/versions.tf new file mode 100644 index 0000000000..dd2ecc138e --- /dev/null +++ b/terraform/github/versions.tf @@ -0,0 +1,10 @@ +terraform { + required_version = ">= 1.6.0" + + required_providers { + github = { + source = "integrations/github" + version = ">= 5.0.0" + } + } +} diff --git a/terraform/locals.tf b/terraform/locals.tf new file mode 100644 index 0000000000..3704986e2d --- /dev/null +++ b/terraform/locals.tf @@ -0,0 +1,15 @@ +locals { + # Можно задать ключ прямо строкой (ssh_public_key), а можно указать путь к файлу (ssh_public_key_path). + # try(...) нужен, чтобы terraform validate проходил даже без файла. + ssh_public_key_resolved = trimspace(coalesce( + (var.ssh_public_key != "" ? var.ssh_public_key : null), + try(file(var.ssh_public_key_path), null), + "" + )) + + instance_metadata = local.ssh_public_key_resolved != "" ? { + # Формат, который ожидает cloud-init в образах YC: "user:ssh_public_key" + # (см. офиц. 
примеры/доки) + "ssh-keys" = "${var.ssh_user}:${local.ssh_public_key_resolved}" + } : {} +} diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000000..b933dbe32d --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,88 @@ +data "yandex_compute_image" "os" { + family = var.image_family +} + +resource "yandex_vpc_network" "lab_net" { + name = "${var.project_name}-net" + labels = var.labels +} + +resource "yandex_vpc_subnet" "lab_subnet" { + name = "${var.project_name}-subnet" + zone = var.zone + network_id = yandex_vpc_network.lab_net.id + v4_cidr_blocks = [var.subnet_cidr] + labels = var.labels +} + +resource "yandex_vpc_security_group" "vm_sg" { + name = "${var.project_name}-sg" + description = "Security Group for Lab04 VM" + network_id = yandex_vpc_network.lab_net.id + + ingress { + description = "SSH from allowed CIDR" + protocol = "TCP" + v4_cidr_blocks = [var.allowed_ssh_cidr] + port = 22 + } + + ingress { + description = "HTTP" + protocol = "TCP" + v4_cidr_blocks = ["0.0.0.0/0"] + port = 80 + } + + ingress { + description = "App port (Flask)" + protocol = "TCP" + v4_cidr_blocks = ["0.0.0.0/0"] + port = 5000 + } + + egress { + description = "Allow all outbound" + protocol = "ANY" + v4_cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + to_port = 65535 + } + + labels = var.labels +} + +resource "yandex_compute_instance" "vm" { + name = "${var.project_name}-vm" + platform_id = var.platform_id + zone = var.zone + + resources { + cores = var.vm_cores + memory = var.vm_memory_gb + core_fraction = var.vm_core_fraction + } + + scheduling_policy { + preemptible = var.preemptible + } + + boot_disk { + initialize_params { + image_id = data.yandex_compute_image.os.id + size = var.disk_size_gb + type = var.disk_type + } + } + + network_interface { + subnet_id = yandex_vpc_subnet.lab_subnet.id + nat = true + security_group_ids = [yandex_vpc_security_group.vm_sg.id] + } + + metadata = local.instance_metadata + + allow_stopping_for_update = true + labels = 
var.labels +} diff --git a/terraform/outputs.tf b/terraform/outputs.tf new file mode 100644 index 0000000000..1be16233f7 --- /dev/null +++ b/terraform/outputs.tf @@ -0,0 +1,29 @@ +output "vm_id" { + value = yandex_compute_instance.vm.id + description = "ID виртуальной машины" +} + +output "internal_ip" { + value = yandex_compute_instance.vm.network_interface[0].ip_address + description = "Внутренний IP (в подсети)" +} + +output "public_ip" { + value = yandex_compute_instance.vm.network_interface[0].nat_ip_address + description = "Публичный IP (NAT)" +} + +output "ssh_command" { + description = "Команда для подключения по SSH (подставьте путь к приватному ключу)" + value = "ssh -i ~/.ssh/id_ed25519 ${var.ssh_user}@${yandex_compute_instance.vm.network_interface[0].nat_ip_address}" +} + +output "http_url" { + description = "URL для HTTP (если вы поставите веб-сервер на VM)" + value = "http://${yandex_compute_instance.vm.network_interface[0].nat_ip_address}/" +} + +output "app_url" { + description = "URL для приложения на 5000 порту (если вы его запустите)" + value = "http://${yandex_compute_instance.vm.network_interface[0].nat_ip_address}:5000/" +} diff --git a/terraform/providers.tf b/terraform/providers.tf new file mode 100644 index 0000000000..365effc630 --- /dev/null +++ b/terraform/providers.tf @@ -0,0 +1,12 @@ +provider "yandex" { + # Можно передавать через переменные (terraform.tfvars), а можно через env: + # YC_CLOUD_ID, YC_FOLDER_ID, YC_TOKEN / YC_SERVICE_ACCOUNT_KEY_FILE + # Если значения пустые, Terraform передаст null и провайдер попробует взять их из env. + + cloud_id = var.cloud_id != "" ? var.cloud_id : null + folder_id = var.folder_id != "" ? var.folder_id : null + zone = var.zone + + token = var.yc_token != "" ? var.yc_token : null + service_account_key_file = var.service_account_key_file != "" ? 
var.service_account_key_file : null +} diff --git a/terraform/terraform.tfvars.example b/terraform/terraform.tfvars.example new file mode 100644 index 0000000000..e766e30f61 --- /dev/null +++ b/terraform/terraform.tfvars.example @@ -0,0 +1,40 @@ +# --- YC identifiers --- +# Можно оставить пустым и задать через env YC_CLOUD_ID / YC_FOLDER_ID +cloud_id = "" +folder_id = "" + +# --- Zone / Network --- +zone = "ru-central1-a" +subnet_cidr = "10.10.0.0/24" + +# Разрешить SSH только с вашего IP (пример): +# allowed_ssh_cidr = "93.184.216.34/32" +allowed_ssh_cidr = "0.0.0.0/0" + +# --- Auth --- +# Рекомендуется: env YC_TOKEN / YC_SERVICE_ACCOUNT_KEY_FILE. +# yc_token = "..." +# service_account_key_file = "C:/Users//key.json" + +# --- SSH --- +ssh_user = "ubuntu" + +# ВАРИАНТ 1: вставить ключ строкой +# ssh_public_key = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI... lab04" + +# ВАРИАНТ 2: путь до .pub +ssh_public_key_path = "~/.ssh/lab04_ed25519.pub" + +# --- VM sizing --- +vm_cores = 2 +vm_memory_gb = 1 +vm_core_fraction = 20 + +preemptible = false +platform_id = "standard-v2" +disk_size_gb = 10 +disk_type = "network-hdd" + +# image_family может отличаться. Если data source не найдёт образ — +# уточните доступные семьи образов в YC. 
+image_family = "ubuntu-2404-lts" diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000000..49f75e7e93 --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,133 @@ +variable "project_name" { + type = string + description = "Префикс/название для ресурсов Lab04" + default = "lab04" +} + +variable "labels" { + type = map(string) + description = "Лейблы Yandex Cloud (опционально)" + default = { + project = "lab04" + } +} + +# --- Yandex Cloud provider config --- + +variable "cloud_id" { + type = string + description = "YC cloud_id (можно оставить пустым и задать через env YC_CLOUD_ID)" + default = "" +} + +variable "folder_id" { + type = string + description = "YC folder_id (можно оставить пустым и задать через env YC_FOLDER_ID)" + default = "b1g82kdcn5grlmu79ano" +} + +variable "zone" { + type = string + description = "Зона доступности (например, ru-central1-a)" + default = "ru-central1-a" +} + +variable "yc_token" { + type = string + description = "OAuth/IAM токен (можно оставить пустым и задать через env YC_TOKEN)" + default = "" + sensitive = true +} + +variable "service_account_key_file" { + type = string + description = "Путь к JSON-ключу сервисного аккаунта (можно оставить пустым и задать через env YC_SERVICE_ACCOUNT_KEY_FILE)" + default = "" + sensitive = true +} + +# --- Network / Security --- + +variable "subnet_cidr" { + type = string + description = "CIDR подсети" + default = "10.10.0.0/24" +} + +variable "allowed_ssh_cidr" { + type = string + description = "CIDR, откуда разрешён SSH (желательно ваш белый IP /32)" + # Чтобы не заблокировать себя на первом запуске, оставляем дефолт открытым. + # В отчёте лучше указать ваш IP/32. 
+ default = "0.0.0.0/0" +} + +# --- VM --- + +variable "image_family" { + type = string + description = "Семейство образа (data.yandex_compute_image.family)" + default = "ubuntu-2404-lts" +} + +variable "platform_id" { + type = string + description = "Платформа VM" + default = "standard-v2" +} + +variable "vm_cores" { + type = number + description = "vCPU" + default = 2 +} + +variable "vm_memory_gb" { + type = number + description = "RAM (GB)" + default = 1 +} + +variable "vm_core_fraction" { + type = number + description = "Доля CPU (5/20/50/100). 20 часто дешевле" + default = 20 +} + +variable "disk_size_gb" { + type = number + description = "Размер диска (GB)" + default = 10 +} + +variable "disk_type" { + type = string + description = "Тип диска (например network-hdd / network-ssd)" + default = "network-hdd" +} + +variable "preemptible" { + type = bool + description = "Преемптивная VM (дешевле, но может выключаться)" + default = false +} + +# --- SSH keys --- + +variable "ssh_user" { + type = string + description = "Пользователь в образе (для Ubuntu обычно ubuntu)" + default = "ubuntu" +} + +variable "ssh_public_key" { + type = string + description = "Содержимое публичного ключа (ssh-ed25519 AAAA... comment). Можно не задавать, если задаёте ssh_public_key_path" + default = "" +} + +variable "ssh_public_key_path" { + type = string + description = "Путь к публичному ключу (например ~/.ssh/id_ed25519.pub). Используется, если ssh_public_key пустой" + default = "" +} diff --git a/terraform/versions.tf b/terraform/versions.tf new file mode 100644 index 0000000000..b0f4ecb816 --- /dev/null +++ b/terraform/versions.tf @@ -0,0 +1,12 @@ +terraform { + required_version = ">= 1.6.0" + + required_providers { + yandex = { + source = "yandex-cloud/yandex" + # Не фиксируем точную версию, чтобы не ломаться при обновлениях. + # При желании можно зафиксировать (например, ">= 0.170.0"). + version = ">= 0.100.0" + } + } +}