diff --git a/README.rst b/README.rst
index aacf55d767..1ef3018d9a 100644
--- a/README.rst
+++ b/README.rst
@@ -53,7 +53,6 @@ Kolla Ansible deploys containers for the following OpenStack projects:
- `Horizon `__
- `Ironic `__
- `Keystone `__
-- `Kuryr `__
- `Magnum `__
- `Manila `__
- `Masakari `__
@@ -65,7 +64,6 @@ Kolla Ansible deploys containers for the following OpenStack projects:
- `Tacker `__
- `Trove `__
- `Watcher `__
-- `Zun `__
Infrastructure components
-------------------------
diff --git a/ansible/group_vars/all/horizon.yml b/ansible/group_vars/all/horizon.yml
index d906f04085..71e1cc5e1b 100644
--- a/ansible/group_vars/all/horizon.yml
+++ b/ansible/group_vars/all/horizon.yml
@@ -15,7 +15,6 @@ enable_horizon_octavia: "{{ enable_octavia | bool }}"
enable_horizon_tacker: "{{ enable_tacker | bool }}"
enable_horizon_trove: "{{ enable_trove | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
-enable_horizon_zun: "{{ enable_zun | bool }}"
#######################
# Horizon options
diff --git a/ansible/group_vars/all/kuryr.yml b/ansible/group_vars/all/kuryr.yml
deleted file mode 100644
index 8d6fada178..0000000000
--- a/ansible/group_vars/all/kuryr.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-enable_kuryr: false
-
-kuryr_port: "23750"
diff --git a/ansible/group_vars/all/placement.yml b/ansible/group_vars/all/placement.yml
index 4949393eaa..7afa7dcb8d 100644
--- a/ansible/group_vars/all/placement.yml
+++ b/ansible/group_vars/all/placement.yml
@@ -1,5 +1,5 @@
---
-enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
+enable_placement: "{{ enable_nova | bool }}"
placement_keystone_user: "placement"
diff --git a/ansible/group_vars/all/zun.yml b/ansible/group_vars/all/zun.yml
deleted file mode 100644
index 1d767401ce..0000000000
--- a/ansible/group_vars/all/zun.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-enable_zun: false
-
-# Extra docker options for Zun
-docker_configure_for_zun: false
-docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
-docker_zun_config: {}
-# Extra containerd options for Zun
-containerd_configure_for_zun: false
-
-# Enable Ceph backed Cinder Volumes for zun
-zun_configure_for_cinder_ceph: false
-
-# 42463 is the static group id of the zun user in the Zun image.
-# If users customize this value on building the Zun images,
-# they need to change this config accordingly.
-containerd_grpc_gid: 42463
-
-zun_api_port: "9517"
-zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}"
-zun_api_listen_port: "{{ zun_api_port }}"
-zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}"
-zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}"
-zun_wsproxy_port: "6784"
-zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
-zun_cni_daemon_port: "9036"
-zun_internal_fqdn: "{{ kolla_internal_fqdn }}"
-zun_external_fqdn: "{{ kolla_external_fqdn }}"
-zun_internal_base_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port) }}"
-zun_public_base_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port) }}"
diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one
index a878e29a6b..2f5f1cf38c 100644
--- a/ansible/inventory/all-in-one
+++ b/ansible/inventory/all-in-one
@@ -142,9 +142,6 @@ control
[bifrost:children]
deployment
-[zun:children]
-control
-
[skyline:children]
control
@@ -491,19 +488,6 @@ designate
[placement-api:children]
placement
-# Zun
-[zun-api:children]
-zun
-
-[zun-wsproxy:children]
-zun
-
-[zun-compute:children]
-compute
-
-[zun-cni-daemon:children]
-compute
-
# Skyline
[skyline-apiserver:children]
skyline
diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode
index 2b3157b9c9..7bfa71ed09 100644
--- a/ansible/inventory/multinode
+++ b/ansible/inventory/multinode
@@ -160,9 +160,6 @@ control
[bifrost:children]
deployment
-[zun:children]
-control
-
[skyline:children]
control
@@ -508,19 +505,6 @@ designate
[placement-api:children]
placement
-# Zun
-[zun-api:children]
-zun
-
-[zun-wsproxy:children]
-zun
-
-[zun-compute:children]
-compute
-
-[zun-cni-daemon:children]
-compute
-
# Skyline
[skyline-apiserver:children]
skyline
diff --git a/ansible/roles/cron/tasks/config.yml b/ansible/roles/cron/tasks/config.yml
index 2bc2018f41..04cb91ba58 100644
--- a/ansible/roles/cron/tasks/config.yml
+++ b/ansible/roles/cron/tasks/config.yml
@@ -54,7 +54,6 @@
- { name: "horizon", enabled: "{{ enable_horizon | bool }}" }
- { name: "ironic", enabled: "{{ enable_ironic | bool }}" }
- { name: "keystone", enabled: "{{ enable_keystone | bool }}" }
- - { name: "kuryr", enabled: "{{ enable_kuryr | bool }}" }
- { name: "magnum", enabled: "{{ enable_magnum | bool }}" }
- { name: "manila", enabled: "{{ enable_manila | bool }}" }
- { name: "mariadb", enabled: "{{ enable_mariadb | bool }}" }
@@ -76,7 +75,6 @@
- { name: "tacker", enabled: "{{ enable_tacker | bool }}" }
- { name: "trove", enabled: "{{ enable_trove | bool }}" }
- { name: "watcher", enabled: "{{ enable_watcher | bool }}" }
- - { name: "zun", enabled: "{{ enable_zun | bool }}" }
ansible.builtin.template:
src: "{{ item }}"
dest: "{{ node_config_directory }}/cron/logrotate.conf"
diff --git a/ansible/roles/fluentd/defaults/main.yml b/ansible/roles/fluentd/defaults/main.yml
index cdb6a8f47f..e21b24458f 100644
--- a/ansible/roles/fluentd/defaults/main.yml
+++ b/ansible/roles/fluentd/defaults/main.yml
@@ -101,8 +101,6 @@ fluentd_input_openstack_services:
enabled: "{{ enable_ironic | bool }}"
- name: keystone
enabled: "{{ enable_keystone | bool }}"
- - name: kuryr
- enabled: "{{ enable_kuryr | bool }}"
- name: magnum
enabled: "{{ enable_magnum | bool }}"
- name: manila
diff --git a/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2 b/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2
index d6bb684903..02f9607005 100644
--- a/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2
+++ b/ansible/roles/fluentd/templates/conf/filter/01-rewrite.conf.j2
@@ -91,16 +91,6 @@
pattern ^(octavia-api|octavia-health-manager|octavia-housekeeping|octavia-worker)$
tag openstack_python
-
- key programname
- pattern ^(zun-api|zun-compute|zun-cni-daemon)$
- tag openstack_python
-
-
- key programname
- pattern ^(kuryr-server)$
- tag openstack_python
-
key programname
pattern ^(gnocchi-api|gnocchi-statsd|gnocchi-metricd|gnocchi-upgrade)$
diff --git a/ansible/roles/horizon/defaults/main.yml b/ansible/roles/horizon/defaults/main.yml
index de4f6129a3..04cd147611 100644
--- a/ansible/roles/horizon/defaults/main.yml
+++ b/ansible/roles/horizon/defaults/main.yml
@@ -21,7 +21,6 @@ horizon_services:
ENABLE_TACKER: "{{ 'yes' if enable_horizon_tacker | bool else 'no' }}"
ENABLE_TROVE: "{{ 'yes' if enable_horizon_trove | bool else 'no' }}"
ENABLE_WATCHER: "{{ 'yes' if enable_horizon_watcher | bool else 'no' }}"
- ENABLE_ZUN: "{{ 'yes' if enable_horizon_zun | bool else 'no' }}"
FORCE_GENERATE: "{{ 'yes' if horizon_dev_mode | bool else 'no' }}"
volumes: "{{ horizon_default_volumes + horizon_extra_volumes }}"
dimensions: "{{ horizon_dimensions }}"
diff --git a/ansible/roles/kuryr/defaults/main.yml b/ansible/roles/kuryr/defaults/main.yml
deleted file mode 100644
index 1b90c9d371..0000000000
--- a/ansible/roles/kuryr/defaults/main.yml
+++ /dev/null
@@ -1,98 +0,0 @@
----
-libnetwork_project_name: "kuryr-libnetwork"
-
-# NOTE(huikang, apuimedo): when you request a driver in a docker operation, such
-# as docker network create, docker searches /usr/lib/docker or /etc/docker
-# subdirs for network/storage plugin specs or json definitions. so it's either
-# have ansible place the file there, or volume mount it and let the container
-# place the file there
-
-kuryr_services:
- kuryr:
- container_name: kuryr
- group: compute
- enabled: true
- image: "{{ kuryr_image_full }}"
- privileged: true
- cap_add:
- - NET_ADMIN
- volumes: "{{ kuryr_default_volumes + kuryr_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
- dimensions: "{{ kuryr_dimensions }}"
- healthcheck: "{{ kuryr_healthcheck }}"
-
-####################
-# Config Validate
-####################
-kuryr_config_validation:
- - generator: "/kuryr/etc/kuryr-config-generator.conf"
- config: "/etc/kuryr/kuryr.conf"
-
-####################
-# Docker
-####################
-kuryr_image: "{{ docker_image_url }}kuryr-libnetwork"
-kuryr_tag: "{{ openstack_tag }}"
-kuryr_image_full: "{{ kuryr_image }}:{{ kuryr_tag }}"
-
-kuryr_enable_healthchecks: "{{ enable_container_healthchecks }}"
-kuryr_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-kuryr_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-kuryr_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-kuryr_healthcheck_test: ["CMD-SHELL", "healthcheck_listen kuryr-server {{ kuryr_port }}"]
-kuryr_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-kuryr_healthcheck:
- interval: "{{ kuryr_healthcheck_interval }}"
- retries: "{{ kuryr_healthcheck_retries }}"
- start_period: "{{ kuryr_healthcheck_start_period }}"
- test: "{% if kuryr_enable_healthchecks | bool %}{{ kuryr_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ kuryr_healthcheck_timeout }}"
-
-kuryr_default_volumes:
- - "{{ node_config_directory }}/kuryr/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "/lib/modules:/lib/modules:ro"
- - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- - "/usr/lib/docker:/usr/lib/docker"
- - "{{ kolla_dev_repos_directory ~ '/kuryr:/dev-mode/kuryr' if kuryr_dev_mode | bool else '' }}"
- - "{{ kolla_dev_repos_directory ~ '/kuryr-libnetwork:/dev-mode/kuryr-libnetwork' if kuryr_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
-
-kuryr_extra_volumes: "{{ default_extra_volumes }}"
-
-####################
-# OpenStack
-####################
-kuryr_logging_debug: "{{ openstack_logging_debug }}"
-
-kuryr_keystone_user: "kuryr"
-
-openstack_kuryr_auth: "{{ openstack_auth }}"
-
-####################
-# Kolla
-####################
-kuryr_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-kuryr_libnetwork_git_repository: "{{ kolla_dev_repos_git }}/{{ libnetwork_project_name }}"
-kuryr_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-kuryr_dev_mode: "{{ kolla_dev_mode }}"
-kuryr_dimensions: "{{ default_container_dimensions }}"
-kuryr_source_version: "{{ kolla_source_version }}"
-
-####################
-# Keystone
-####################
-kuryr_ks_users:
- - project: "service"
- user: "{{ kuryr_keystone_user }}"
- password: "{{ kuryr_keystone_password }}"
- role: "admin"
-
-###########
-# Endpoints
-##########
-kuryr_internal_endpoint: "{{ internal_protocol }}://{{ api_interface_address | put_address_in_context('url') }}:{{ kuryr_port }}"
-
-###################
-# Copy certificates
-###################
-kuryr_copy_certs: "{{ kolla_copy_ca_into_containers | bool }}"
diff --git a/ansible/roles/kuryr/handlers/main.yml b/ansible/roles/kuryr/handlers/main.yml
deleted file mode 100644
index aa4cad094e..0000000000
--- a/ansible/roles/kuryr/handlers/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Restart kuryr container
- vars:
- service_name: "kuryr"
- service: "{{ kuryr_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- privileged: "{{ service.privileged | default(False) }}"
- cap_add: "{{ service.cap_add }}"
- volumes: "{{ service.volumes }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
diff --git a/ansible/roles/kuryr/tasks/check-containers.yml b/ansible/roles/kuryr/tasks/check-containers.yml
deleted file mode 100644
index 8de17dc336..0000000000
--- a/ansible/roles/kuryr/tasks/check-containers.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- name: Check kuryr containers
- ansible.builtin.import_role:
- name: service-check-containers
diff --git a/ansible/roles/kuryr/tasks/check.yml b/ansible/roles/kuryr/tasks/check.yml
deleted file mode 100644
index bea4512cc0..0000000000
--- a/ansible/roles/kuryr/tasks/check.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- name: Checking Kuryr containers
- ansible.builtin.import_role:
- role: service-check
diff --git a/ansible/roles/kuryr/tasks/clone.yml b/ansible/roles/kuryr/tasks/clone.yml
deleted file mode 100644
index 8eaead5d06..0000000000
--- a/ansible/roles/kuryr/tasks/clone.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: Cloning kuryr source repository for development
- become: true
- ansible.builtin.git:
- repo: "{{ kuryr_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ kuryr_dev_repos_pull }}"
- version: "{{ kuryr_source_version }}"
-
-- name: Cloning kuryr-libnetwork source repository for development
- become: true
- ansible.builtin.git:
- repo: "{{ kuryr_libnetwork_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ libnetwork_project_name }}"
- update: "{{ kuryr_dev_repos_pull }}"
- version: "{{ kuryr_source_version }}"
diff --git a/ansible/roles/kuryr/tasks/config.yml b/ansible/roles/kuryr/tasks/config.yml
deleted file mode 100644
index f1f8ad1909..0000000000
--- a/ansible/roles/kuryr/tasks/config.yml
+++ /dev/null
@@ -1,79 +0,0 @@
----
-- name: Ensuring config directories exist
- ansible.builtin.file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Check if policies shall be overwritten
- ansible.builtin.stat:
- path: "{{ item }}"
- delegate_to: localhost
- run_once: true
- register: kuryr_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/kuryr/"
- skip: true
-
-- name: Set kuryr policy file
- ansible.builtin.set_fact:
- kuryr_policy_file: "{{ kuryr_policy.results.0.stat.path | basename }}"
- kuryr_policy_file_path: "{{ kuryr_policy.results.0.stat.path }}"
- when:
- - kuryr_policy.results | length > 0
-
-- name: Copying over TLS certificates
- ansible.builtin.include_tasks: copy-certs.yml
- when:
- - kuryr_copy_certs | bool
-
-- name: Copying over config.json files for services
- ansible.builtin.template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Copying over kuryr.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/kuryr.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/kuryr.conf"
- - "{{ node_custom_config }}/kuryr/{{ item.key }}.conf"
- - "{{ node_custom_config }}/kuryr/{{ inventory_hostname }}/{{ item.key }}.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/kuryr.conf"
- mode: "0660"
- become: true
- with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Copying over kuryr.spec
- vars:
- service: "{{ kuryr_services['kuryr'] }}"
- ansible.builtin.template:
- src: "kuryr.spec.j2"
- dest: "{{ node_config_directory }}/{{ item }}/kuryr.spec"
- mode: "0660"
- become: true
- when: service | service_enabled_and_mapped_to_host
- with_items:
- - "kuryr"
-
-- name: Copying over existing policy file
- ansible.builtin.template:
- src: "{{ kuryr_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ kuryr_policy_file }}"
- mode: "0660"
- become: true
- when:
- - kuryr_policy_file is defined
- with_dict: "{{ kuryr_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/kuryr/tasks/config_validate.yml b/ansible/roles/kuryr/tasks/config_validate.yml
deleted file mode 100644
index 47d6a66219..0000000000
--- a/ansible/roles/kuryr/tasks/config_validate.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Validate kuryr configuration
- ansible.builtin.import_role:
- name: service-config-validate
- vars:
- service_config_validate_services: "{{ kuryr_services }}"
- service_name: "{{ project_name }}"
- service_config_validation: "{{ kuryr_config_validation }}"
diff --git a/ansible/roles/kuryr/tasks/copy-certs.yml b/ansible/roles/kuryr/tasks/copy-certs.yml
deleted file mode 100644
index 90781daed1..0000000000
--- a/ansible/roles/kuryr/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- ansible.builtin.import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ kuryr_services }}"
diff --git a/ansible/roles/kuryr/tasks/deploy-containers.yml b/ansible/roles/kuryr/tasks/deploy-containers.yml
deleted file mode 100644
index 764417502b..0000000000
--- a/ansible/roles/kuryr/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: Check kuryr containers
- ansible.builtin.import_tasks: check-containers.yml
diff --git a/ansible/roles/kuryr/tasks/deploy.yml b/ansible/roles/kuryr/tasks/deploy.yml
deleted file mode 100644
index 6a4dfc7dc0..0000000000
--- a/ansible/roles/kuryr/tasks/deploy.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Register kuryr in Keystone
- ansible.builtin.import_tasks: register.yml
-
-- name: Configure kuryr
- ansible.builtin.import_tasks: config.yml
-
-- name: Check kuryr containers
- ansible.builtin.import_tasks: check-containers.yml
-
-- name: Clone kuryr repository
- ansible.builtin.include_tasks: clone.yml
- when:
- - kuryr_dev_mode | bool
-
-- name: Flush handlers
- ansible.builtin.meta: flush_handlers
diff --git a/ansible/roles/kuryr/tasks/main.yml b/ansible/roles/kuryr/tasks/main.yml
deleted file mode 100644
index 594ad5d851..0000000000
--- a/ansible/roles/kuryr/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: "Include tasks for action {{ kolla_action }}"
- ansible.builtin.include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/kuryr/tasks/precheck.yml b/ansible/roles/kuryr/tasks/precheck.yml
deleted file mode 100644
index 4741081358..0000000000
--- a/ansible/roles/kuryr/tasks/precheck.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- name: Precheck kuryr configuration
- ansible.builtin.import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ kuryr_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- action: get_containers
- container_engine: "{{ kolla_container_engine }}"
- name:
- - kuryr
- check_mode: false
- register: container_facts
-
-- name: Checking free port for Kuryr
- ansible.builtin.wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ kuryr_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts.containers['kuryr'] is not defined
- - inventory_hostname in groups['compute']
diff --git a/ansible/roles/kuryr/tasks/pull.yml b/ansible/roles/kuryr/tasks/pull.yml
deleted file mode 100644
index 7b81e14746..0000000000
--- a/ansible/roles/kuryr/tasks/pull.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- name: Pull kuryr images
- ansible.builtin.import_role:
- role: service-images-pull
diff --git a/ansible/roles/kuryr/tasks/reconfigure.yml b/ansible/roles/kuryr/tasks/reconfigure.yml
deleted file mode 100644
index 5d261b6686..0000000000
--- a/ansible/roles/kuryr/tasks/reconfigure.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: Deploy kuryr
- ansible.builtin.import_tasks: deploy.yml
diff --git a/ansible/roles/kuryr/tasks/register.yml b/ansible/roles/kuryr/tasks/register.yml
deleted file mode 100644
index 483792787e..0000000000
--- a/ansible/roles/kuryr/tasks/register.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Register kuryr service, endpoints, and users in Keystone
- ansible.builtin.import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_kuryr_auth }}"
- service_ks_register_users: "{{ kuryr_ks_users }}"
diff --git a/ansible/roles/kuryr/tasks/stop.yml b/ansible/roles/kuryr/tasks/stop.yml
deleted file mode 100644
index 1cd93b148c..0000000000
--- a/ansible/roles/kuryr/tasks/stop.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Stop kuryr containers
- ansible.builtin.import_role:
- name: service-stop
- vars:
- project_services: "{{ kuryr_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/kuryr/tasks/upgrade.yml b/ansible/roles/kuryr/tasks/upgrade.yml
deleted file mode 100644
index 57b441eb2b..0000000000
--- a/ansible/roles/kuryr/tasks/upgrade.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Configure kuryr
- ansible.builtin.import_tasks: config.yml
-
-- name: Check kuryr containers
- ansible.builtin.import_tasks: check-containers.yml
-
-- name: Flush handlers
- ansible.builtin.meta: flush_handlers
diff --git a/ansible/roles/kuryr/templates/kuryr.conf.j2 b/ansible/roles/kuryr/templates/kuryr.conf.j2
deleted file mode 100644
index 00c8784bbb..0000000000
--- a/ansible/roles/kuryr/templates/kuryr.conf.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-[DEFAULT]
-kuryr_uri = {{ kuryr_internal_endpoint }}
-debug = {{ kuryr_logging_debug }}
-log_dir = /var/log/kolla/kuryr
-
-capability_scope = local
-bindir = /var/lib/kolla/venv/libexec/kuryr
-
-[binding]
-default_driver = kuryr.lib.binding.drivers.veth
-
-[neutron]
-auth_uri = {{ keystone_internal_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-endpoint_type = internal
-project_domain_name = {{ default_project_domain_name }}
-project_name = service
-user_domain_name = {{ default_user_domain_name }}
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-password = {{ kuryr_keystone_password }}
-username = {{ kuryr_keystone_user }}
-cafile = {{ openstack_cacert }}
-
-{% if kuryr_policy_file is defined %}
-[oslo_policy]
-policy_file = {{ kuryr_policy_file }}
-{% endif %}
-
-[oslo_concurrency]
-lock_path = /var/lib/kuryr/tmp
diff --git a/ansible/roles/kuryr/templates/kuryr.json.j2 b/ansible/roles/kuryr/templates/kuryr.json.j2
deleted file mode 100644
index 4ba23100ff..0000000000
--- a/ansible/roles/kuryr/templates/kuryr.json.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "command": "kuryr-server --config-file /etc/kuryr/kuryr.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/kuryr.conf",
- "dest": "/etc/kuryr/kuryr.conf",
- "owner": "kuryr",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/kuryr.spec",
- "dest": "/usr/lib/docker/plugins/kuryr/kuryr.spec",
- "owner": "root",
- "perm": "0600"
- }{% if kuryr_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ kuryr_policy_file }}",
- "dest": "/etc/kuryr/{{ kuryr_policy_file }}",
- "owner": "kuryr",
- "perm": "0600"
- }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/kuryr",
- "owner": "kuryr:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/kuryr/templates/kuryr.spec.j2 b/ansible/roles/kuryr/templates/kuryr.spec.j2
deleted file mode 100644
index 06c8be3d00..0000000000
--- a/ansible/roles/kuryr/templates/kuryr.spec.j2
+++ /dev/null
@@ -1 +0,0 @@
-{{ kuryr_internal_endpoint }}
diff --git a/ansible/roles/kuryr/vars/main.yml b/ansible/roles/kuryr/vars/main.yml
deleted file mode 100644
index dfe30f4a92..0000000000
--- a/ansible/roles/kuryr/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "kuryr"
diff --git a/ansible/roles/loadbalancer/tasks/precheck.yml b/ansible/roles/loadbalancer/tasks/precheck.yml
index 73e3a15661..54ab859031 100644
--- a/ansible/roles/loadbalancer/tasks/precheck.yml
+++ b/ansible/roles/loadbalancer/tasks/precheck.yml
@@ -717,19 +717,6 @@
- haproxy_stat.find('watcher_api') == -1
- haproxy_vip_prechecks
-- name: Checking free port for Zun API HAProxy
- ansible.builtin.wait_for:
- host: "{{ kolla_internal_vip_address }}"
- port: "{{ zun_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - enable_zun | bool
- - inventory_hostname in groups['loadbalancer']
- - haproxy_stat.find('zun_api') == -1
- - haproxy_vip_prechecks
-
- name: Firewalld checks
when:
- enable_external_api_firewalld | bool
diff --git a/ansible/roles/manila/templates/manila.conf.j2 b/ansible/roles/manila/templates/manila.conf.j2
index cadde4c0a1..148989d9aa 100644
--- a/ansible/roles/manila/templates/manila.conf.j2
+++ b/ansible/roles/manila/templates/manila.conf.j2
@@ -2,6 +2,9 @@
debug = {{ manila_logging_debug }}
log_dir = /var/log/kolla/manila
+{% if service_name == "manila-api" %}
+log_file = manila-api.log
+{% endif %}
use_forwarded_for = true
my_ip = {{ api_interface_address }}
diff --git a/ansible/roles/nova-cell/tasks/config.yml b/ansible/roles/nova-cell/tasks/config.yml
index 7f0cfca22e..f9e9c10eec 100644
--- a/ansible/roles/nova-cell/tasks/config.yml
+++ b/ansible/roles/nova-cell/tasks/config.yml
@@ -228,3 +228,23 @@
with_items:
- nova-compute
- nova-compute-ironic
+
+- name: Copying over multipath.conf
+ become: true
+ vars:
+ service: "{{ nova_cell_services['nova-compute'] }}"
+ ansible.builtin.template:
+ src: "{{ item }}"
+ dest: "{{ node_config_directory }}/nova-compute/multipath.conf"
+ mode: "0660"
+ with_first_found:
+ - files:
+ - "{{ node_custom_config }}/nova/{{ inventory_hostname }}/multipath.conf"
+ - "{{ node_custom_config }}/multipath/{{ inventory_hostname }}/multipath.conf"
+ - "{{ node_custom_config }}/nova/multipath.conf"
+ - "{{ node_custom_config }}/multipath.conf"
+ - "multipath.conf.j2"
+ skip: true
+ when:
+ - service | service_enabled_and_mapped_to_host
+ - enable_multipathd | bool
diff --git a/ansible/roles/nova-cell/templates/multipath.conf.j2 b/ansible/roles/nova-cell/templates/multipath.conf.j2
new file mode 100644
index 0000000000..478eef0230
--- /dev/null
+++ b/ansible/roles/nova-cell/templates/multipath.conf.j2
@@ -0,0 +1,8 @@
+defaults {
+ user_friendly_names no
+ find_multipaths yes
+ skip_kpartx yes
+}
+
+blacklist {
+}
diff --git a/ansible/roles/nova-cell/templates/nova-compute.json.j2 b/ansible/roles/nova-cell/templates/nova-compute.json.j2
index 655ee03794..97bd5235ef 100644
--- a/ansible/roles/nova-cell/templates/nova-compute.json.j2
+++ b/ansible/roles/nova-cell/templates/nova-compute.json.j2
@@ -79,6 +79,12 @@
"dest": "/var/lib/kolla/share/ca-certificates",
"owner": "root",
"perm": "0600"
+ }{% endif %}{% if enable_multipathd | bool %},
+ {
+ "source": "{{ container_config_directory }}/multipath.conf",
+ "dest": "/etc/multipath.conf",
+ "owner": "root",
+ "perm": "0644"
}{% endif %}
],
"permissions": [
diff --git a/ansible/roles/prometheus/defaults/main.yml b/ansible/roles/prometheus/defaults/main.yml
index 1145286c8c..3416f72d9b 100644
--- a/ansible/roles/prometheus/defaults/main.yml
+++ b/ansible/roles/prometheus/defaults/main.yml
@@ -313,10 +313,6 @@ prometheus_blackbox_exporter_endpoints_default:
- "watcher:os_endpoint:{{ watcher_public_endpoint }}"
- "{{ ('watcher_internal:os_endpoint:' + watcher_internal_endpoint) if not kolla_same_external_internal_vip | bool }}"
enabled: "{{ enable_watcher | bool }}"
- - endpoints:
- - "zun:os_endpoint:{{ zun_public_base_endpoint }}"
- - "{{ ('zun_internal:os_endpoint:' + zun_internal_base_endpoint) if not kolla_same_external_internal_vip | bool }}"
- enabled: "{{ enable_zun | bool }}"
# Additional service endpoints
- endpoints: >-
{%- set ns = namespace(etcd_endpoints=[]) -%}
diff --git a/ansible/roles/prometheus/templates/prometheus-openstack-network-exporter.json.j2 b/ansible/roles/prometheus/templates/prometheus-openstack-network-exporter.json.j2
index ea74a9b7ae..fe27ee57c8 100644
--- a/ansible/roles/prometheus/templates/prometheus-openstack-network-exporter.json.j2
+++ b/ansible/roles/prometheus/templates/prometheus-openstack-network-exporter.json.j2
@@ -1,9 +1,9 @@
{
- "command": "/opt/openstack-network-exporter/dataplane-node-exporter",
+ "command": "/opt/openstack-network-exporter/openstack-network-exporter",
"config_files": [
{
"source": "{{ container_config_directory }}/openstack-network-exporter.yaml",
- "dest": "/etc/dataplane-node-exporter.yaml",
+ "dest": "/etc/openstack-network-exporter.yaml",
"owner": "prometheus",
"perm": "0600"
}{% if kolla_copy_ca_into_containers | bool %},
diff --git a/ansible/roles/rabbitmq/defaults/main.yml b/ansible/roles/rabbitmq/defaults/main.yml
index 72cb1e657f..0a1c7b1cec 100644
--- a/ansible/roles/rabbitmq/defaults/main.yml
+++ b/ansible/roles/rabbitmq/defaults/main.yml
@@ -89,6 +89,19 @@ rabbitmq_message_ttl_ms: 600000
rabbitmq_queue_expiry_ms: 3600000
rabbitmq_extra_config: {}
+### RabbitMQ stream queue retention policy ###
+# These policies will only be applied when om_enable_rabbitmq_stream_fanout is true
+
+# Maximum size of stream segments. This variable needs to be a positive integer.
+# Default value follows RabbitMQ default size of a segment in bytes: 500000000 (500MB)
+# https://www.rabbitmq.com/docs/streams#declaring
+rabbitmq_stream_max_segment_size_bytes: 500000000
+# Retention time of segments that reached the maximum size.
+# This variable needs to be a string ending with one of the units: Y, M, D, h, m, s.
+# Default value follows oslo.messaging default: 1800s
+# https://docs.openstack.org/oslo.messaging/latest/configuration/opts.html#oslo_messaging_rabbit.rabbit_transient_queues_ttl
+rabbitmq_stream_segment_max_age: "1800s"
+
####################
# Plugins
####################
diff --git a/ansible/roles/rabbitmq/tasks/precheck.yml b/ansible/roles/rabbitmq/tasks/precheck.yml
index 19f192dfc1..a5c8f6340f 100644
--- a/ansible/roles/rabbitmq/tasks/precheck.yml
+++ b/ansible/roles/rabbitmq/tasks/precheck.yml
@@ -161,3 +161,24 @@
when:
- item.name is search('_fanout')
- om_enable_rabbitmq_stream_fanout | bool
+
+- name: Check if RabbitMQ stream queue policy variables are valid
+ when: om_enable_rabbitmq_stream_fanout | bool
+ block:
+ - name: Check if the value of rabbitmq_stream_max_segment_size_bytes is valid
+ ansible.builtin.assert:
+ that:
+ - rabbitmq_stream_max_segment_size_bytes is integer
+ - rabbitmq_stream_max_segment_size_bytes > 0
+ fail_msg: >
+ rabbitmq_stream_max_segment_size_bytes needs to be a positive integer.
+ Please check its value again.
+
+ - name: Check if the value of rabbitmq_stream_segment_max_age is valid
+ ansible.builtin.assert:
+ that:
+ - rabbitmq_stream_segment_max_age is string
+ - 'rabbitmq_stream_segment_max_age is regex("^[0-9]+[YMDhms]$")'
+ fail_msg: >
+    rabbitmq_stream_segment_max_age needs to be a numeric string that ends with one of the units in [Y, M, D, h, m, s].
+ Please check its value again.
diff --git a/ansible/roles/rabbitmq/templates/definitions.json.j2 b/ansible/roles/rabbitmq/templates/definitions.json.j2
index 2987066336..319e95f13b 100644
--- a/ansible/roles/rabbitmq/templates/definitions.json.j2
+++ b/ansible/roles/rabbitmq/templates/definitions.json.j2
@@ -1,3 +1,4 @@
+#jinja2: trim_blocks: False
{
"vhosts": [
{"name": "/"}
@@ -10,5 +11,21 @@
{"user": "{{ role_rabbitmq_user }}", "vhost": "/", "configure": ".*", "write": ".*", "read": ".*"}{% if role_rabbitmq_monitoring_user is defined and role_rabbitmq_monitoring_user %},
{"user": "{{ role_rabbitmq_monitoring_user }}", "vhost": "/", "configure": "^$", "write": "^$", "read": ".*"}{% endif %}
],
- "policies":[]
+{% if om_enable_rabbitmq_stream_fanout | bool %}
+ "policies": [
+ {
+ "vhost": "/",
+ "name": "stream-retention-policy",
+ "apply-to": "streams",
+ "pattern": ".+",
+ "definition": {
+ "stream-max-segment-size-bytes": {{ rabbitmq_stream_max_segment_size_bytes }},
+ "max-age": "{{ rabbitmq_stream_segment_max_age }}"
+ },
+ "priority": 1
+ }
+ ]
+{% else %}
+ "policies": []
+{% endif %}
}
diff --git a/ansible/roles/skyline/templates/nginx.conf.j2 b/ansible/roles/skyline/templates/nginx.conf.j2
index 4a647fbe58..221323ed21 100644
--- a/ansible/roles/skyline/templates/nginx.conf.j2
+++ b/ansible/roles/skyline/templates/nginx.conf.j2
@@ -222,18 +222,6 @@ http {
}
{% endif %}
- {% if enable_zun | bool %}# Region: {{ openstack_region_name }}, Service: zun
- location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/zun {
- proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/;
- proxy_redirect {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ zun_api_port }}/ {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/zun/;
- proxy_buffering off;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- proxy_set_header X-Forwarded-Proto $scheme;
- proxy_set_header X-Forwarded-Host $host;
- proxy_set_header Host $http_host;
- }
- {% endif %}
-
{% if enable_magnum | bool %}# Region: {{ openstack_region_name }}, Service: magnum
location {{ skyline_nginx_prefix }}/{{ openstack_region_name | lower }}/magnum {
proxy_pass {{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ magnum_api_port }}/;
diff --git a/ansible/roles/skyline/templates/skyline.yaml.j2 b/ansible/roles/skyline/templates/skyline.yaml.j2
index 9d0ae07410..64c57b329f 100644
--- a/ansible/roles/skyline/templates/skyline.yaml.j2
+++ b/ansible/roles/skyline/templates/skyline.yaml.j2
@@ -44,9 +44,6 @@ openstack:
{% if enable_nova | bool %}
compute: nova
{% endif %}
-{% if enable_zun | bool %}
- container: zun
-{% endif %}
{% if enable_magnum | bool %}
container-infra: magnum
{% endif %}
diff --git a/ansible/roles/zun/defaults/main.yml b/ansible/roles/zun/defaults/main.yml
deleted file mode 100644
index 69fce72c29..0000000000
--- a/ansible/roles/zun/defaults/main.yml
+++ /dev/null
@@ -1,265 +0,0 @@
----
-zun_services:
- zun-api:
- container_name: zun_api
- group: zun-api
- enabled: true
- image: "{{ zun_api_image_full }}"
- volumes: "{{ zun_api_default_volumes + zun_api_extra_volumes }}"
- dimensions: "{{ zun_api_dimensions }}"
- healthcheck: "{{ zun_api_healthcheck }}"
- wsgi: "zun.wsgi.api:application"
- haproxy:
- zun_api:
- enabled: "{{ enable_zun }}"
- mode: "http"
- external: false
- port: "{{ zun_api_port }}"
- listen_port: "{{ zun_api_listen_port }}"
- backend_http_extra:
- - "option httpchk GET /healthcheck"
- - "http-check expect status 200"
- zun_api_external:
- enabled: "{{ enable_zun }}"
- mode: "http"
- external: true
- external_fqdn: "{{ zun_external_fqdn }}"
- port: "{{ zun_api_public_port }}"
- listen_port: "{{ zun_api_listen_port }}"
- backend_http_extra:
- - "option httpchk GET /healthcheck"
- - "http-check expect status 200"
- zun-wsproxy:
- container_name: zun_wsproxy
- group: zun-wsproxy
- enabled: true
- image: "{{ zun_wsproxy_image_full }}"
- volumes: "{{ zun_wsproxy_default_volumes + zun_wsproxy_extra_volumes }}"
- dimensions: "{{ zun_wsproxy_dimensions }}"
- healthcheck: "{{ zun_wsproxy_healthcheck }}"
- haproxy:
- zun_wsproxy:
- enabled: "{{ enable_zun }}"
- mode: "http"
- external: false
- port: "{{ zun_wsproxy_port }}"
- zun_wsproxy_external:
- enabled: "{{ enable_zun }}"
- mode: "http"
- external: true
- port: "{{ zun_wsproxy_port }}"
- zun-compute:
- container_name: zun_compute
- group: zun-compute
- enabled: true
- image: "{{ zun_compute_image_full }}"
- privileged: true
- volumes: "{{ zun_compute_default_volumes + zun_compute_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
- dimensions: "{{ zun_compute_dimensions }}"
- healthcheck: "{{ zun_compute_healthcheck }}"
- zun-cni-daemon:
- container_name: zun_cni_daemon
- group: zun-cni-daemon
- enabled: true
- image: "{{ zun_cni_daemon_image_full }}"
- privileged: true
- volumes: "{{ zun_cni_daemon_default_volumes + zun_cni_daemon_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}"
- dimensions: "{{ zun_cni_daemon_dimensions }}"
- healthcheck: "{{ zun_cni_daemon_healthcheck }}"
-
-####################
-# Config Validate
-####################
-zun_config_validation:
- - generator: "/zun/etc/zun/zun-config-generator.conf"
- config: "/etc/zun/zun.conf"
-
-####################
-## Database
-####################
-zun_database_name: "zun"
-zun_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}zun{% endif %}"
-zun_database_address: "{{ database_address | put_address_in_context('url') }}:{{ database_port }}"
-
-####################
-# Database sharding
-####################
-zun_database_shard_root_user: "{% if enable_proxysql | bool %}root_shard_{{ zun_database_shard_id }}{% else %}{{ database_user }}{% endif %}"
-zun_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
-zun_database_shard:
- users:
- - user: "{{ zun_database_user }}"
- password: "{{ zun_database_password }}"
- shard_id: "{{ zun_database_shard_id }}"
- rules:
- - schema: "{{ zun_database_name }}"
- shard_id: "{{ zun_database_shard_id }}"
- - user: "{{ zun_database_user }}"
- shard_id: "{{ zun_database_shard_id }}"
-
-
-####################
-## Docker
-####################
-zun_tag: "{{ openstack_tag }}"
-
-zun_api_image: "{{ docker_image_url }}zun-api"
-zun_api_tag: "{{ zun_tag }}"
-zun_api_image_full: "{{ zun_api_image }}:{{ zun_api_tag }}"
-
-zun_wsproxy_image: "{{ docker_image_url }}zun-wsproxy"
-zun_wsproxy_tag: "{{ zun_tag }}"
-zun_wsproxy_image_full: "{{ zun_wsproxy_image }}:{{ zun_wsproxy_tag }}"
-
-zun_compute_image: "{{ docker_image_url }}zun-compute"
-zun_compute_tag: "{{ zun_tag }}"
-zun_compute_image_full: "{{ zun_compute_image }}:{{ zun_compute_tag }}"
-
-zun_cni_daemon_image: "{{ docker_image_url }}zun-cni-daemon"
-zun_cni_daemon_tag: "{{ zun_tag }}"
-zun_cni_daemon_image_full: "{{ zun_cni_daemon_image }}:{{ zun_cni_daemon_tag }}"
-
-
-zun_api_dimensions: "{{ default_container_dimensions }}"
-zun_wsproxy_dimensions: "{{ default_container_dimensions }}"
-zun_compute_dimensions: "{{ default_container_dimensions }}"
-zun_cni_daemon_dimensions: "{{ default_container_dimensions }}"
-
-zun_api_enable_healthchecks: "{{ enable_container_healthchecks }}"
-zun_api_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-zun_api_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-zun_api_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-zun_api_healthcheck_test: ["CMD-SHELL", "healthcheck_curl http://{{ api_interface_address | put_address_in_context('url') }}:{{ zun_api_port }}/healthcheck"]
-zun_api_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-zun_api_healthcheck:
- interval: "{{ zun_api_healthcheck_interval }}"
- retries: "{{ zun_api_healthcheck_retries }}"
- start_period: "{{ zun_api_healthcheck_start_period }}"
- test: "{% if zun_api_enable_healthchecks | bool %}{{ zun_api_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ zun_api_healthcheck_timeout }}"
-
-zun_wsproxy_enable_healthchecks: "{{ enable_container_healthchecks }}"
-zun_wsproxy_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-zun_wsproxy_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-zun_wsproxy_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-zun_wsproxy_healthcheck_test: ["CMD-SHELL", "healthcheck_listen zun-wsproxy {{ zun_wsproxy_port }}"]
-zun_wsproxy_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-zun_wsproxy_healthcheck:
- interval: "{{ zun_wsproxy_healthcheck_interval }}"
- retries: "{{ zun_wsproxy_healthcheck_retries }}"
- start_period: "{{ zun_wsproxy_healthcheck_start_period }}"
- test: "{% if zun_wsproxy_enable_healthchecks | bool %}{{ zun_wsproxy_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ zun_wsproxy_healthcheck_timeout }}"
-
-zun_compute_enable_healthchecks: "{{ enable_container_healthchecks }}"
-zun_compute_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-zun_compute_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-zun_compute_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-zun_compute_healthcheck_test: ["CMD-SHELL", "healthcheck_port zun-compute {{ om_rpc_port }}"]
-zun_compute_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-zun_compute_healthcheck:
- interval: "{{ zun_compute_healthcheck_interval }}"
- retries: "{{ zun_compute_healthcheck_retries }}"
- start_period: "{{ zun_compute_healthcheck_start_period }}"
- test: "{% if zun_compute_enable_healthchecks | bool %}{{ zun_compute_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ zun_compute_healthcheck_timeout }}"
-
-zun_cni_daemon_enable_healthchecks: "{{ enable_container_healthchecks }}"
-zun_cni_daemon_healthcheck_interval: "{{ default_container_healthcheck_interval }}"
-zun_cni_daemon_healthcheck_retries: "{{ default_container_healthcheck_retries }}"
-zun_cni_daemon_healthcheck_start_period: "{{ default_container_healthcheck_start_period }}"
-zun_cni_daemon_healthcheck_test: ["CMD-SHELL", "healthcheck_listen zun-cni-daemon {{ zun_cni_daemon_port }}"]
-zun_cni_daemon_healthcheck_timeout: "{{ default_container_healthcheck_timeout }}"
-zun_cni_daemon_healthcheck:
- interval: "{{ zun_cni_daemon_healthcheck_interval }}"
- retries: "{{ zun_cni_daemon_healthcheck_retries }}"
- start_period: "{{ zun_cni_daemon_healthcheck_start_period }}"
- test: "{% if zun_cni_daemon_enable_healthchecks | bool %}{{ zun_cni_daemon_healthcheck_test }}{% else %}NONE{% endif %}"
- timeout: "{{ zun_cni_daemon_healthcheck_timeout }}"
-
-zun_api_default_volumes:
- - "{{ node_config_directory }}/zun-api/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ '/dev/shm:/dev/shm' }}"
-zun_wsproxy_default_volumes:
- - "{{ node_config_directory }}/zun-wsproxy/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
- - "kolla_logs:/var/log/kolla/"
- - "{{ '/dev/shm:/dev/shm' }}"
-zun_compute_default_volumes:
- - "{{ node_config_directory }}/zun-compute/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
- - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
- - "/usr/lib/docker:/usr/lib/docker"
- - "/var/lib/docker:/var/lib/docker"
- - "/lib/modules:/lib/modules:ro"
- - "/dev:/dev"
- - "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- - "{% if zun_configure_for_cinder_ceph | bool %}/var/lib/zun:/var/lib/zun:shared{% endif %}"
-zun_cni_daemon_default_volumes:
- - "{{ node_config_directory }}/zun-cni-daemon/:{{ container_config_directory }}/:ro"
- - "/etc/localtime:/etc/localtime:ro"
- - "kolla_logs:/var/log/kolla/"
- - "{{ '/dev/shm:/dev/shm' }}"
- - "{{ kolla_dev_repos_directory ~ '/zun:/dev-mode/zun' if zun_dev_mode | bool else '' }}"
- - "/run:/run{{ ':shared' if kolla_container_engine == 'docker' else '' }}"
-
-zun_extra_volumes: "{{ default_extra_volumes }}"
-zun_api_extra_volumes: "{{ zun_extra_volumes }}"
-zun_wsproxy_extra_volumes: "{{ zun_extra_volumes }}"
-zun_compute_extra_volumes: "{{ zun_extra_volumes }}"
-zun_cni_daemon_extra_volumes: "{{ zun_extra_volumes }}"
-
-####################
-## OpenStack
-####################
-zun_internal_endpoint: "{{ zun_internal_base_endpoint }}/v1/"
-zun_public_endpoint: "{{ zun_public_base_endpoint }}/v1/"
-
-zun_logging_debug: "{{ openstack_logging_debug }}"
-
-zun_keystone_user: "zun"
-
-openstack_zun_auth: "{{ openstack_auth }}"
-
-zun_api_workers: "{{ openstack_service_workers }}"
-
-####################
-# Kolla
-####################
-zun_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
-zun_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
-zun_dev_mode: "{{ kolla_dev_mode }}"
-zun_source_version: "{{ kolla_source_version }}"
-
-####################
-# Keystone
-####################
-zun_ks_services:
- - name: "zun"
- type: "container"
- description: "Container Service"
- endpoints:
- - {'interface': 'internal', 'url': '{{ zun_internal_endpoint }}'}
- - {'interface': 'public', 'url': '{{ zun_public_endpoint }}'}
-
-zun_ks_users:
- - project: "service"
- user: "{{ zun_keystone_user }}"
- password: "{{ zun_keystone_password }}"
- role: "admin"
-
-zun_ceph_cluster: "ceph"
-
-# Database
-zun_database_enable_tls_internal: "{{ database_enable_tls_internal | bool }}"
-
-###################
-# Copy certificates
-###################
-zun_copy_certs: "{{ kolla_copy_ca_into_containers | bool or zun_database_enable_tls_internal | bool }}"
diff --git a/ansible/roles/zun/handlers/main.yml b/ansible/roles/zun/handlers/main.yml
deleted file mode 100644
index 9185a70cb7..0000000000
--- a/ansible/roles/zun/handlers/main.yml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-- name: Restart zun-api container
- vars:
- service_name: "zun-api"
- service: "{{ zun_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- privileged: "{{ service.privileged | default(False) }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
-
-- name: Restart zun-wsproxy container
- vars:
- service_name: "zun-wsproxy"
- service: "{{ zun_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- privileged: "{{ service.privileged | default(False) }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
-
-- name: Restart zun-compute container
- vars:
- service_name: "zun-compute"
- service: "{{ zun_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- privileged: "{{ service.privileged | default(False) }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
-
-- name: Restart zun-cni-daemon container
- vars:
- service_name: "zun-cni-daemon"
- service: "{{ zun_services[service_name] }}"
- become: true
- kolla_container:
- action: "recreate_or_restart_container"
- common_options: "{{ docker_common_options }}"
- name: "{{ service.container_name }}"
- image: "{{ service.image }}"
- privileged: "{{ service.privileged | default(False) }}"
- volumes: "{{ service.volumes | reject('equalto', '') | list }}"
- dimensions: "{{ service.dimensions }}"
- healthcheck: "{{ service.healthcheck | default(omit) }}"
-
-- name: Copy loopback binary from zun-cni-daemon container to host
- vars:
- service_name: "zun-cni-daemon"
- service: "{{ zun_services[service_name] }}"
- become: true
- changed_when: true
- ansible.builtin.command: "{{ kolla_container_engine }} cp {{ service.container_name }}:/opt/loopback /opt/cni/bin/"
- # NOTE(yoctozepto): it would be cleaner to listen only on image change
- # but there is no such mechanism (yet)
- listen:
- - Restart zun-cni-daemon container
diff --git a/ansible/roles/zun/tasks/bootstrap.yml b/ansible/roles/zun/tasks/bootstrap.yml
deleted file mode 100644
index dca9cd4890..0000000000
--- a/ansible/roles/zun/tasks/bootstrap.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- name: Creating Zun database
- become: true
- kolla_toolbox:
- container_engine: "{{ kolla_container_engine }}"
- module_name: mysql_db
- module_args:
- ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}"
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ zun_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ zun_database_name }}"
- run_once: true
- delegate_to: "{{ groups['zun-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Creating Zun database user and setting permissions
- become: true
- kolla_toolbox:
- container_engine: "{{ kolla_container_engine }}"
- module_name: mysql_user
- module_args:
- ca_cert: "{{ openstack_cacert if database_enable_tls_internal | bool else omit }}"
- login_host: "{{ database_address }}"
- login_port: "{{ database_port }}"
- login_user: "{{ zun_database_shard_root_user }}"
- login_password: "{{ database_password }}"
- name: "{{ zun_database_user }}"
- password: "{{ zun_database_password }}"
- host: "%"
- priv: "{{ zun_database_name }}.*:ALL"
- append_privs: "yes"
- run_once: true
- delegate_to: "{{ groups['zun-api'][0] }}"
- when:
- - not use_preconfigured_databases | bool
-
-- name: Run zun bootstrap container
- ansible.builtin.import_tasks: bootstrap_service.yml
diff --git a/ansible/roles/zun/tasks/bootstrap_service.yml b/ansible/roles/zun/tasks/bootstrap_service.yml
deleted file mode 100644
index b68dc002a4..0000000000
--- a/ansible/roles/zun/tasks/bootstrap_service.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: Running Zun bootstrap container
- vars:
- zun_api: "{{ zun_services['zun-api'] }}"
- become: true
- kolla_container:
- action: "start_container"
- common_options: "{{ docker_common_options }}"
- detach: false
- environment:
- KOLLA_BOOTSTRAP:
- KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
- image: "{{ zun_api.image }}"
- labels:
- BOOTSTRAP:
- name: "bootstrap_zun"
- restart_policy: oneshot
- volumes: "{{ zun_api.volumes | reject('equalto', '') | list }}"
- run_once: true
- delegate_to: "{{ groups[zun_api.group][0] }}"
diff --git a/ansible/roles/zun/tasks/check-containers.yml b/ansible/roles/zun/tasks/check-containers.yml
deleted file mode 100644
index 59ab0b4914..0000000000
--- a/ansible/roles/zun/tasks/check-containers.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- name: Check zun containers
- ansible.builtin.import_role:
- name: service-check-containers
diff --git a/ansible/roles/zun/tasks/check.yml b/ansible/roles/zun/tasks/check.yml
deleted file mode 100644
index b2bde3bf90..0000000000
--- a/ansible/roles/zun/tasks/check.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- name: Checking Zun containers
- ansible.builtin.import_role:
- role: service-check
diff --git a/ansible/roles/zun/tasks/clone.yml b/ansible/roles/zun/tasks/clone.yml
deleted file mode 100644
index 629f06e7e7..0000000000
--- a/ansible/roles/zun/tasks/clone.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Cloning zun source repository for development
- become: true
- ansible.builtin.git:
- repo: "{{ zun_git_repository }}"
- dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
- update: "{{ zun_dev_repos_pull }}"
- version: "{{ zun_source_version }}"
diff --git a/ansible/roles/zun/tasks/config.yml b/ansible/roles/zun/tasks/config.yml
deleted file mode 100644
index 1f50148125..0000000000
--- a/ansible/roles/zun/tasks/config.yml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-- name: Ensuring config directories exist
- ansible.builtin.file:
- path: "{{ node_config_directory }}/{{ item.key }}"
- state: "directory"
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
- mode: "0770"
- become: true
- with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Include tasks from external_ceph.yml
- ansible.builtin.include_tasks: external_ceph.yml
- when:
- - zun_configure_for_cinder_ceph | bool
- - inventory_hostname in groups['zun-compute']
-
-- name: Check if policies shall be overwritten
- ansible.builtin.stat:
- path: "{{ item }}"
- run_once: true
- delegate_to: localhost
- register: zun_policy
- with_first_found:
- - files: "{{ supported_policy_format_list }}"
- paths:
- - "{{ node_custom_config }}/zun/"
- skip: true
-
-- name: Set zun policy file
- ansible.builtin.set_fact:
- zun_policy_file: "{{ zun_policy.results.0.stat.path | basename }}"
- zun_policy_file_path: "{{ zun_policy.results.0.stat.path }}"
- when:
- - zun_policy.results | length > 0
-
-- name: Copying over TLS certificates
- ansible.builtin.include_tasks: copy-certs.yml
- when:
- - zun_copy_certs | bool
-
-- name: Copying over config.json files for services
- ansible.builtin.template:
- src: "{{ item.key }}.json.j2"
- dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
- mode: "0660"
- become: true
- with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: Copying over zun.conf
- vars:
- service_name: "{{ item.key }}"
- merge_configs:
- sources:
- - "{{ role_path }}/templates/zun.conf.j2"
- - "{{ node_custom_config }}/global.conf"
- - "{{ node_custom_config }}/zun.conf"
- - "{{ node_custom_config }}/zun/{{ item.key }}.conf"
- - "{{ node_custom_config }}/zun/{{ inventory_hostname }}/zun.conf"
- dest: "{{ node_config_directory }}/{{ item.key }}/zun.conf"
- mode: "0660"
- become: true
- with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
-
-- name: "Configure uWSGI for zun-api"
- ansible.builtin.include_role:
- name: service-uwsgi-config
- vars:
- project_services: "{{ zun_services }}"
- service: "{{ zun_services['zun-api'] }}"
- service_name: "zun-api"
- service_uwsgi_config_http_port: "{{ zun_api_listen_port }}"
- service_uwsgi_config_log_file_chmod: "644"
- service_uwsgi_config_module: "{{ service.wsgi }}"
- service_uwsgi_config_uid: "zun"
- service_uwsgi_config_workers: "{{ zun_api_workers }}"
- when:
- - service | service_enabled_and_mapped_to_host
-
-- name: Copying over existing policy file
- ansible.builtin.template:
- src: "{{ zun_policy_file_path }}"
- dest: "{{ node_config_directory }}/{{ item.key }}/{{ zun_policy_file }}"
- mode: "0660"
- become: true
- when:
- - zun_policy_file is defined
- with_dict: "{{ zun_services | select_services_enabled_and_mapped_to_host }}"
diff --git a/ansible/roles/zun/tasks/config_validate.yml b/ansible/roles/zun/tasks/config_validate.yml
deleted file mode 100644
index e9352fb368..0000000000
--- a/ansible/roles/zun/tasks/config_validate.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Validate zun configuration
- ansible.builtin.import_role:
- name: service-config-validate
- vars:
- service_config_validate_services: "{{ zun_services }}"
- service_name: "{{ project_name }}"
- service_config_validation: "{{ zun_config_validation }}"
diff --git a/ansible/roles/zun/tasks/copy-certs.yml b/ansible/roles/zun/tasks/copy-certs.yml
deleted file mode 100644
index 080eb08c41..0000000000
--- a/ansible/roles/zun/tasks/copy-certs.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: "Copy certificates and keys for {{ project_name }}"
- ansible.builtin.import_role:
- role: service-cert-copy
- vars:
- project_services: "{{ zun_services }}"
diff --git a/ansible/roles/zun/tasks/deploy-containers.yml b/ansible/roles/zun/tasks/deploy-containers.yml
deleted file mode 100644
index 628fb97301..0000000000
--- a/ansible/roles/zun/tasks/deploy-containers.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: Check zun containers
- ansible.builtin.import_tasks: check-containers.yml
diff --git a/ansible/roles/zun/tasks/deploy.yml b/ansible/roles/zun/tasks/deploy.yml
deleted file mode 100644
index 06db29b410..0000000000
--- a/ansible/roles/zun/tasks/deploy.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: Register zun in Keystone
- ansible.builtin.import_tasks: register.yml
-
-- name: Configure zun
- ansible.builtin.import_tasks: config.yml
-
-- name: Check zun containers
- ansible.builtin.import_tasks: check-containers.yml
-
-- name: Clone zun repository
- ansible.builtin.include_tasks: clone.yml
- when: zun_dev_mode | bool
-
-- name: Bootstrap zun service
- ansible.builtin.import_tasks: bootstrap.yml
-
-- name: Flush handlers
- ansible.builtin.meta: flush_handlers
diff --git a/ansible/roles/zun/tasks/external_ceph.yml b/ansible/roles/zun/tasks/external_ceph.yml
deleted file mode 100644
index ec0c205437..0000000000
--- a/ansible/roles/zun/tasks/external_ceph.yml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Copying over ceph.conf for Zun
- ansible.builtin.copy:
- src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cluster }}.conf"
- dest: "{{ node_config_directory }}/zun-compute/"
- mode: "0660"
- become: true
-
-- name: Copy over Ceph keyring files for zun-compute
- ansible.builtin.copy:
- src: "{{ node_custom_config }}/zun/zun-compute/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring"
- dest: "{{ node_config_directory }}/zun-compute/"
- mode: "0660"
- become: true
- when: external_ceph_cephx_enabled | bool
-
-- name: Ensuring config directory has correct owner and permission
- become: true
- ansible.builtin.file:
- path: "{{ node_config_directory }}/zun-compute"
- recurse: true
- owner: "{{ config_owner_user }}"
- group: "{{ config_owner_group }}"
diff --git a/ansible/roles/zun/tasks/loadbalancer.yml b/ansible/roles/zun/tasks/loadbalancer.yml
deleted file mode 100644
index 1f46646e0e..0000000000
--- a/ansible/roles/zun/tasks/loadbalancer.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: "Configure loadbalancer for {{ project_name }}"
- ansible.builtin.import_role:
- name: loadbalancer-config
- vars:
- project_services: "{{ zun_services }}"
- tags: always
diff --git a/ansible/roles/zun/tasks/main.yml b/ansible/roles/zun/tasks/main.yml
deleted file mode 100644
index 594ad5d851..0000000000
--- a/ansible/roles/zun/tasks/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: "Include tasks for action {{ kolla_action }}"
- ansible.builtin.include_tasks: "{{ kolla_action }}.yml"
diff --git a/ansible/roles/zun/tasks/precheck.yml b/ansible/roles/zun/tasks/precheck.yml
deleted file mode 100644
index 7227fb0566..0000000000
--- a/ansible/roles/zun/tasks/precheck.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Precheck zun configuration
- ansible.builtin.import_role:
- name: service-precheck
- vars:
- service_precheck_services: "{{ zun_services }}"
- service_name: "{{ project_name }}"
-
-- name: Get container facts
- become: true
- kolla_container_facts:
- action: get_containers
- container_engine: "{{ kolla_container_engine }}"
- name:
- - zun_api
- - zun_wsproxy
- - zun_cni_daemon
- check_mode: false
- register: container_facts
-
-- name: Checking free port for Zun API
- ansible.builtin.wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ zun_api_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts.containers['zun_api'] is not defined
- - inventory_hostname in groups['zun-api']
-
-- name: Checking free port for Zun WSproxy
- ansible.builtin.wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ zun_wsproxy_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts.containers['zun_wsproxy'] is not defined
- - inventory_hostname in groups['zun-wsproxy']
-
-- name: Checking free port for zun-cni-daemon
- ansible.builtin.wait_for:
- host: "{{ api_interface_address }}"
- port: "{{ zun_cni_daemon_port }}"
- connect_timeout: 1
- timeout: 1
- state: stopped
- when:
- - container_facts.containers['zun_cni_daemon'] is not defined
- - inventory_hostname in groups['zun-cni-daemon']
-
-- name: Ensure kuryr enabled for zun
- ansible.builtin.assert:
- that: enable_kuryr | bool
- fail_msg: "kuryr is required but not enabled"
- run_once: true
- when:
- - enable_zun | bool
diff --git a/ansible/roles/zun/tasks/pull.yml b/ansible/roles/zun/tasks/pull.yml
deleted file mode 100644
index 9ce4db5dc5..0000000000
--- a/ansible/roles/zun/tasks/pull.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- name: Pull zun images
- ansible.builtin.import_role:
- role: service-images-pull
diff --git a/ansible/roles/zun/tasks/reconfigure.yml b/ansible/roles/zun/tasks/reconfigure.yml
deleted file mode 100644
index c324f9ff67..0000000000
--- a/ansible/roles/zun/tasks/reconfigure.yml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- name: Deploy zun
- ansible.builtin.import_tasks: deploy.yml
diff --git a/ansible/roles/zun/tasks/register.yml b/ansible/roles/zun/tasks/register.yml
deleted file mode 100644
index 8be6a98bf3..0000000000
--- a/ansible/roles/zun/tasks/register.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Register zun service, endpoints, and users in Keystone
- ansible.builtin.import_role:
- name: service-ks-register
- vars:
- service_ks_register_auth: "{{ openstack_zun_auth }}"
- service_ks_register_services: "{{ zun_ks_services }}"
- service_ks_register_users: "{{ zun_ks_users }}"
diff --git a/ansible/roles/zun/tasks/stop.yml b/ansible/roles/zun/tasks/stop.yml
deleted file mode 100644
index cd749cc0f8..0000000000
--- a/ansible/roles/zun/tasks/stop.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Stop zun containers
- ansible.builtin.import_role:
- name: service-stop
- vars:
- project_services: "{{ zun_services }}"
- service_name: "{{ project_name }}"
diff --git a/ansible/roles/zun/tasks/upgrade.yml b/ansible/roles/zun/tasks/upgrade.yml
deleted file mode 100644
index afb48e1c3d..0000000000
--- a/ansible/roles/zun/tasks/upgrade.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Configure zun
- ansible.builtin.import_tasks: config.yml
-
-- name: Check zun containers
- ansible.builtin.import_tasks: check-containers.yml
-
-- name: Run zun bootstrap container
- ansible.builtin.import_tasks: bootstrap_service.yml
-
-- name: Flush handlers
- ansible.builtin.meta: flush_handlers
diff --git a/ansible/roles/zun/templates/zun-api.json.j2 b/ansible/roles/zun/templates/zun-api.json.j2
deleted file mode 100644
index 27198a32b0..0000000000
--- a/ansible/roles/zun/templates/zun-api.json.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "command": "uwsgi /etc/zun/zun-api-uwsgi.ini",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/zun.conf",
- "dest": "/etc/zun/zun.conf",
- "owner": "zun",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/zun-api-uwsgi.ini",
- "dest": "/etc/zun/zun-api-uwsgi.ini",
- "owner": "zun",
- "perm": "0600"
- }{% if zun_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ zun_policy_file }}",
- "dest": "/etc/zun/{{ zun_policy_file }}",
- "owner": "zun",
- "perm": "0600"
- }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/zun",
- "owner": "zun:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/zun/templates/zun-cni-daemon.json.j2 b/ansible/roles/zun/templates/zun-cni-daemon.json.j2
deleted file mode 100644
index 3e165e06ba..0000000000
--- a/ansible/roles/zun/templates/zun-cni-daemon.json.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "command": "zun-cni-daemon --config-file /etc/zun/zun.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/zun.conf",
- "dest": "/etc/zun/zun.conf",
- "owner": "zun",
- "perm": "0600"
- }{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/zun",
- "owner": "zun:kolla",
- "recurse": true
- },
- {
- "path": "/opt/cni/",
- "owner": "zun:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/zun/templates/zun-compute.json.j2 b/ansible/roles/zun/templates/zun-compute.json.j2
deleted file mode 100644
index c9f9a7ccdf..0000000000
--- a/ansible/roles/zun/templates/zun-compute.json.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-{
- "command": "zun-compute --config-file /etc/zun/zun.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/zun.conf",
- "dest": "/etc/zun/zun.conf",
- "owner": "zun",
- "perm": "0600"
- },
- {
- "source": "{{ container_config_directory }}/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring",
- "dest": "/etc/ceph/{{ ceph_cluster }}.client.{{ ceph_cinder_user }}.keyring",
- "owner": "zun",
- "perm": "0600",
- "optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}
- },
- {
- "source": "{{ container_config_directory }}/{{ ceph_cluster }}.conf",
- "dest": "/etc/ceph/{{ ceph_cluster }}.conf",
- "owner": "zun",
- "perm": "0600",
- "optional": {{ (not zun_configure_for_cinder_ceph | bool) | string | lower }}
- }{% if zun_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ zun_policy_file }}",
- "dest": "/etc/zun/{{ zun_policy_file }}",
- "owner": "zun",
- "perm": "0600"
- }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/zun",
- "owner": "zun:kolla",
- "recurse": true
- }{% if zun_configure_for_cinder_ceph | bool %},
- {
- "path": "/var/lib/zun",
- "owner": "zun:kolla"
- }{% endif %}
- ]
-}
diff --git a/ansible/roles/zun/templates/zun-wsproxy.json.j2 b/ansible/roles/zun/templates/zun-wsproxy.json.j2
deleted file mode 100644
index 89dd398b60..0000000000
--- a/ansible/roles/zun/templates/zun-wsproxy.json.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "command": "zun-wsproxy --config-file /etc/zun/zun.conf",
- "config_files": [
- {
- "source": "{{ container_config_directory }}/zun.conf",
- "dest": "/etc/zun/zun.conf",
- "owner": "zun",
- "perm": "0600"
- }{% if zun_policy_file is defined %},
- {
- "source": "{{ container_config_directory }}/{{ zun_policy_file }}",
- "dest": "/etc/zun/{{ zun_policy_file }}",
- "owner": "zun",
- "perm": "0600"
- }{% endif %}{% if kolla_copy_ca_into_containers | bool %},
- {
- "source": "{{ container_config_directory }}/ca-certificates",
- "dest": "/var/lib/kolla/share/ca-certificates",
- "owner": "root",
- "perm": "0600"
- }{% endif %}
- ],
- "permissions": [
- {
- "path": "/var/log/kolla/zun",
- "owner": "zun:kolla",
- "recurse": true
- }
- ]
-}
diff --git a/ansible/roles/zun/templates/zun.conf.j2 b/ansible/roles/zun/templates/zun.conf.j2
deleted file mode 100644
index 2322dac7ab..0000000000
--- a/ansible/roles/zun/templates/zun.conf.j2
+++ /dev/null
@@ -1,144 +0,0 @@
-[DEFAULT]
-debug = {{ zun_logging_debug }}
-
-{% if service_name == 'zun-api' %}
-# Force zun-api.log or will use app.wsgi
-log_file = /var/log/kolla/zun/zun-api.log
-{% endif %}
-
-log_dir = /var/log/kolla/zun
-transport_url = {{ rpc_transport_url }}
-
-state_path = /var/lib/zun
-container_driver = docker
-capsule_driver = cri
-
-sandbox_image = k8s.gcr.io/pause:3.6
-
-[network]
-driver = kuryr
-
-[database]
-connection = mysql+pymysql://{{ zun_database_user }}:{{ zun_database_password }}@{{ zun_database_address }}/{{ zun_database_name }}{{ '?ssl_ca=' ~ openstack_cacert if zun_database_enable_tls_internal | bool }}
-connection_recycle_time = {{ database_connection_recycle_time }}
-max_pool_size = {{ database_max_pool_size }}
-max_retries = -1
-
-# NOTE(yoctozepto): despite what the docs say, both keystone_auth and
-# keystone_authtoken sections are used and Zun internals may use either -
-# - best keep them both in sync
-[keystone_auth]
-www_authenticate_uri = {{ keystone_public_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ zun_keystone_user }}
-password = {{ zun_keystone_password }}
-service_token_roles_required = true
-region_name = {{ openstack_region_name }}
-cafile = {{ openstack_cacert }}
-
-{% if enable_memcached | bool %}
-memcache_security_strategy = {{ memcache_security_strategy }}
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
-
-# NOTE(yoctozepto): despite what the docs say, both keystone_auth and
-# keystone_authtoken sections are used and Zun internals may use either -
-# - best keep them both in sync
-[keystone_authtoken]
-service_type = container
-www_authenticate_uri = {{ keystone_public_url }}
-auth_url = {{ keystone_internal_url }}
-auth_type = password
-project_domain_id = {{ default_project_domain_id }}
-user_domain_id = {{ default_user_domain_id }}
-project_name = service
-username = {{ zun_keystone_user }}
-password = {{ zun_keystone_password }}
-service_token_roles_required = true
-region_name = {{ openstack_region_name }}
-cafile = {{ openstack_cacert }}
-
-{% if enable_memcached | bool %}
-memcache_security_strategy = {{ memcache_security_strategy }}
-memcache_secret_key = {{ memcache_secret_key }}
-memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
-{% endif %}
-
-[zun_client]
-region_name = {{ openstack_region_name }}
-endpoint_type = internalURL
-ca_file = {{ openstack_cacert }}
-
-[glance_client]
-region_name = {{ openstack_region_name }}
-endpoint_type = internalURL
-ca_file = {{ openstack_cacert }}
-
-[neutron_client]
-region_name = {{ openstack_region_name }}
-endpoint_type = internalURL
-ca_file = {{ openstack_cacert }}
-
-[cinder_client]
-region_name = {{ openstack_region_name }}
-endpoint_type = internalURL
-ca_file = {{ openstack_cacert }}
-
-[placement_client]
-region_name = {{ openstack_region_name }}
-endpoint_type = internalURL
-ca_file = {{ openstack_cacert }}
-
-{% if enable_osprofiler | bool %}
-[profiler]
-enabled = true
-trace_sqlalchemy = true
-hmac_keys = {{ osprofiler_secret }}
-connection_string = {{ osprofiler_backend_connection_string }}
-{% endif %}
-
-[oslo_concurrency]
-lock_path = /var/lib/zun/tmp
-
-{% if zun_policy_file is defined %}
-[oslo_policy]
-policy_file = {{ zun_policy_file }}
-{% endif %}
-
-[compute]
-host_shared_with_nova = {{ inventory_hostname in groups['compute'] and enable_nova | bool and not enable_nova_fake | bool }}
-
-[websocket_proxy]
-wsproxy_host = {{ api_interface_address }}
-wsproxy_port = {{ zun_wsproxy_port }}
-base_url = {{ zun_wsproxy_protocol }}://{{ zun_external_fqdn | put_address_in_context('url') }}:{{ zun_wsproxy_port }}
-
-[docker]
-api_url = tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
-docker_remote_api_host = {{ api_interface_address }}
-docker_remote_api_port = 2375
-
-[cni_daemon]
-cni_daemon_port = {{ zun_cni_daemon_port }}
-
-[oslo_messaging_rabbit]
-use_queue_manager = true
-{% if service_name == 'zun-api' %}
-processname = {{ service_name }}
-{% endif %}
-heartbeat_in_pthread = {{ service_name == 'zun-api' }}
-{% if om_enable_rabbitmq_tls | bool %}
-ssl = true
-ssl_ca_file = {{ om_rabbitmq_cacert }}
-{% endif %}
-rabbit_quorum_queue = true
-{% if om_enable_rabbitmq_stream_fanout | bool %}
-rabbit_stream_fanout = true
-rabbit_qos_prefetch_count = {{ om_rabbitmq_qos_prefetch_count }}
-{% endif %}
-rabbit_transient_quorum_queue = true
diff --git a/ansible/roles/zun/vars/main.yml b/ansible/roles/zun/vars/main.yml
deleted file mode 100644
index 6be04f59f8..0000000000
--- a/ansible/roles/zun/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-project_name: "zun"
diff --git a/ansible/site.yml b/ansible/site.yml
index 911596dc1c..baf4754fac 100644
--- a/ansible/site.yml
+++ b/ansible/site.yml
@@ -44,7 +44,6 @@
- enable_ironic_{{ enable_ironic | bool }}
- enable_iscsid_{{ enable_iscsid | bool }}
- enable_keystone_{{ enable_keystone | bool }}
- - enable_kuryr_{{ enable_kuryr | bool }}
- enable_letsencrypt_{{ enable_letsencrypt | bool }}
- enable_loadbalancer_{{ enable_loadbalancer | bool }}
- enable_magnum_{{ enable_magnum | bool }}
@@ -69,7 +68,6 @@
- enable_tacker_{{ enable_tacker | bool }}
- enable_trove_{{ enable_trove | bool }}
- enable_watcher_{{ enable_watcher | bool }}
- - enable_zun_{{ enable_zun | bool }}
tags: always
- name: Apply role prechecks
@@ -379,12 +377,6 @@
tasks_from: loadbalancer
tags: watcher
when: enable_watcher | bool
- - name: Configure loadbalancer for zun
- ansible.builtin.include_role:
- name: zun
- tasks_from: loadbalancer
- tags: zun
- when: enable_zun | bool
# NOTE(r-krcek): This last step will notify handlers if any
# changes to the configuration have been made.
- name: Check loadbalancer containers
@@ -713,19 +705,6 @@
roles:
- { role: neutron, tags: neutron }
-- name: Apply role kuryr
- gather_facts: false
- hosts:
- - compute
- - "&enable_kuryr_True"
- serial: '{{ kolla_serial|default("0") }}'
- max_fail_percentage: >-
- {{ kuryr_max_fail_percentage |
- default(kolla_max_fail_percentage) |
- default(100) }}
- roles:
- - { role: kuryr, tags: kuryr }
-
- name: Apply role hacluster
gather_facts: false
hosts:
@@ -997,22 +976,6 @@
roles:
- { role: octavia, tags: octavia }
-- name: Apply role zun
- gather_facts: false
- hosts:
- - zun-api
- - zun-wsproxy
- - zun-compute
- - zun-cni-daemon
- - "&enable_zun_True"
- serial: '{{ kolla_serial|default("0") }}'
- max_fail_percentage: >-
- {{ zun_max_fail_percentage |
- default(kolla_max_fail_percentage) |
- default(100) }}
- roles:
- - { role: zun, tags: zun }
-
- name: Apply role blazar
gather_facts: false
hosts:
diff --git a/doc/source/admin/password-rotation.rst b/doc/source/admin/password-rotation.rst
index e5cfd02619..40f4ac9a6a 100644
--- a/doc/source/admin/password-rotation.rst
+++ b/doc/source/admin/password-rotation.rst
@@ -11,13 +11,6 @@ variable, only the most common ones.
Always back up your ``passwords.yml`` file before making any changes.
Otherwise, it is easy to make unrecoverable mistakes.
-.. warning::
-
- This guide relies on recent changes to Kolla and Kolla-Ansible. You may
- encounter errors if applying this guide to older deployments. It is
- recommended that you update your containers and kolla-ansible to the latest
- available versions before proceeding.
-
Regenerating secrets
--------------------
diff --git a/doc/source/contributor/ci-matrix.csv b/doc/source/contributor/ci-matrix.csv
index c49b728680..c9ba67f009 100644
--- a/doc/source/contributor/ci-matrix.csv
+++ b/doc/source/contributor/ci-matrix.csv
@@ -21,4 +21,3 @@ OVN;–;✅;✅;✅;✅;–;✅;✅;–;✅;–;–;Neutron ML2/OVN + OVN Octavi
Prometheus + OpenSearch;–;–;–;✅;–;✅;✅;–;–;–;–;–;Fluentd, OpenSearch, Grafana, Prometheus, RabbitMQ/DB core.
Skyline;❌;–;✅;–;–;✅;–;–;–;–;–;–;"Skyline dashboard; currently disabled (SSO issues)."
Telemetry;–;–;✅;–;–;✅;–;–;–;–;✅;✅;Aodh, Ceilometer, Gnocchi.
-Zun;–;✅;✅;–;✅;✅;✅;✅;–;–;–;–;"Zun + Kuryr + Cinder LVM; etcd; dashboard."
diff --git a/doc/source/reference/compute/index.rst b/doc/source/reference/compute/index.rst
index d155ffa694..b13bd2ec93 100644
--- a/doc/source/reference/compute/index.rst
+++ b/doc/source/reference/compute/index.rst
@@ -13,4 +13,3 @@ compute services.
nova-cells-guide
nova-fake-driver
nova-guide
- zun-guide
diff --git a/doc/source/reference/compute/zun-guide.rst b/doc/source/reference/compute/zun-guide.rst
deleted file mode 100644
index 8df209a735..0000000000
--- a/doc/source/reference/compute/zun-guide.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-=======================
-Zun - Container service
-=======================
-
-"Zun is an OpenStack Container service. It aims to provide an
-OpenStack API for provisioning and managing containerized
-workload on OpenStack."
-For more details about Zun, see `OpenStack Zun Documentation
-`__.
-
-Preparation and Deployment
---------------------------
-
-By default Zun and its dependencies are disabled.
-In order to enable Zun, you need to edit globals.yml and set the
-following variables:
-
-.. code-block:: yaml
-
- enable_zun: true
- enable_kuryr: true
- enable_etcd: true
- docker_configure_for_zun: true
- containerd_configure_for_zun: true
-
-Docker reconfiguration requires rebootstrapping before deploy.
-Make sure you understand the consequences of restarting Docker.
-Please see :ref:`rebootstrapping` for details.
-If it's initial deploy, then there is nothing to worry about
-because it's initial bootstrapping as well and there are no
-running services to affect.
-
-.. code-block:: console
-
- $ kolla-ansible bootstrap-servers
-
-Finally deploy:
-
-.. code-block:: console
-
- $ kolla-ansible deploy
-
-Verification
-------------
-
-#. Generate the credentials file:
-
- .. code-block:: console
-
- $ kolla-ansible post-deploy
-
-#. Source credentials file:
-
- .. code-block:: console
-
- $ . /etc/kolla/admin-openrc.sh
-
-#. Download and create a glance container image:
-
- .. code-block:: console
-
- $ docker pull cirros
- $ docker save cirros | openstack image create cirros --public \
- --container-format docker --disk-format raw
-
-#. Create zun container:
-
- .. code-block:: console
-
- $ zun create --name test --net network=demo-net cirros ping -c4 8.8.8.8
-
- .. note::
-
- Kuryr does not support networks with DHCP enabled, disable DHCP in the
- subnet used for zun containers.
-
- .. code-block:: console
-
- $ openstack subnet set --no-dhcp
-
-#. Verify container is created:
-
- .. code-block:: console
-
- $ zun list
-
- +--------------------------------------+------+---------------+---------+------------+------------+-------+
- | uuid | name | image | status | task_state | addresses | ports |
- +--------------------------------------+------+---------------+---------+------------+------------+-------+
- | 3719a73e-5f86-47e1-bc5f-f4074fc749f2 | test | cirros | Created | None | 172.17.0.3 | [] |
- +--------------------------------------+------+---------------+---------+------------+------------+-------+
-
-#. Start container:
-
- .. code-block:: console
-
- $ zun start test
- Request to start container test has been accepted.
-
-#. Verify container:
-
- .. code-block:: console
-
- $ zun logs test
- PING 8.8.8.8 (8.8.8.8): 56 data bytes
- 64 bytes from 8.8.8.8: seq=0 ttl=45 time=96.396 ms
- 64 bytes from 8.8.8.8: seq=1 ttl=45 time=96.504 ms
- 64 bytes from 8.8.8.8: seq=2 ttl=45 time=96.721 ms
- 64 bytes from 8.8.8.8: seq=3 ttl=45 time=95.884 ms
-
- --- 8.8.8.8 ping statistics ---
- 4 packets transmitted, 4 packets received, 0% packet loss
- round-trip min/avg/max = 95.884/96.376/96.721 ms
-
-For more information about how zun works, see
-`zun, OpenStack Container service `__.
diff --git a/doc/source/reference/containers/index.rst b/doc/source/reference/containers/index.rst
index c1bcc84d4d..76afa71f8a 100644
--- a/doc/source/reference/containers/index.rst
+++ b/doc/source/reference/containers/index.rst
@@ -2,11 +2,9 @@
Containers
==========
-This section describes configuring and running container based services
-including kuryr.
+This section describes configuring and running container based services.
.. toctree::
:maxdepth: 1
- kuryr-guide
magnum-guide
diff --git a/doc/source/reference/containers/kuryr-guide.rst b/doc/source/reference/containers/kuryr-guide.rst
deleted file mode 100644
index f85451d57e..0000000000
--- a/doc/source/reference/containers/kuryr-guide.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-============================
-Kuryr - Container networking
-============================
-
-"Kuryr is a Docker network plugin that uses Neutron to provide networking
-services to Docker containers. It provides containerized images for the common
-Neutron plugins. Kuryr requires at least Keystone and neutron. Kolla makes
-kuryr deployment faster and accessible.
-
-Requirements
-~~~~~~~~~~~~
-
-* A minimum of 3 hosts for a vanilla deploy
-
-Preparation and Deployment
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To allow Docker daemon connect to the etcd, add the following in the
-``docker.service`` file.
-
-.. code-block:: ini
-
- ExecStart= -H tcp://172.16.1.13:2375 -H unix:///var/run/docker.sock --cluster-advertise=172.16.1.13:2375
-
-The IP address is host running the etcd service. ```2375``` is port that
-allows Docker daemon to be accessed remotely. ```2379``` is the etcd listening
-port.
-
-By default etcd and kuryr are disabled in the ``group_vars/all/etcd.yml`` and
-``group_vars/all/kuryr.yml`` files.
-In order to enable them, you need to edit the file globals.yml and set the
-following variables
-
-.. code-block:: yaml
-
- enable_etcd: true
- enable_kuryr: true
-
-Deploy the OpenStack cloud and kuryr network plugin
-
-.. code-block:: console
-
- kolla-ansible deploy
-
-Create a Virtual Network
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. code-block:: console
-
- docker network create -d kuryr --ipam-driver=kuryr --subnet=10.1.0.0/24 --gateway=10.1.0.1 docker-net1
-
-To list the created network:
-
-.. code-block:: console
-
- docker network ls
-
-The created network is also available from OpenStack CLI:
-
-.. code-block:: console
-
- openstack network list
-
-For more information about how kuryr works, see
-`kuryr (OpenStack Containers Networking)
-`__.
diff --git a/doc/source/reference/message-queues/rabbitmq.rst b/doc/source/reference/message-queues/rabbitmq.rst
index 1eed7ef146..1396976337 100644
--- a/doc/source/reference/message-queues/rabbitmq.rst
+++ b/doc/source/reference/message-queues/rabbitmq.rst
@@ -209,3 +209,34 @@ Currently, membership changes for streams `is not entirely safe
`__, so this
script should only be used when the RabbitMQ cluster is in a known healthy
state.
+
+Streams Retention Period
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using RabbitMQ streams for fanout queues by setting
+``om_enable_rabbitmq_stream_fanout: true``, users can set a retention policy
+for them with the two variables ``rabbitmq_stream_max_segment_size_bytes``
+and ``rabbitmq_stream_segment_max_age`` to avoid running out of disk space
+eventually.
+
+The default configuration sets stream queue segments to a maximum size of
+500 MB (`RabbitMQ default `__)
+and a retention time of 1800 seconds once a segment reaches the maximum size
+(`oslo.messaging default
+`__).
+These default values can leave a large number of ready messages in stream
+queues even though old ones are removed by the retention policy.
+So it is recommended to tune them based on how busy the cloud is.
+
+``rabbitmq_stream_max_segment_size_bytes`` sets the maximum size of stream
+segments. This variable must be a positive integer.
+
+``rabbitmq_stream_segment_max_age`` sets the retention time of segments that
+reached the maximum size. This variable must be a string with valid units
+of Y, M, D, h, m, s (e.g. 24h for 24 hours).
+
+.. code-block:: yaml
+
+ # Example custom retention policy configuration
+ rabbitmq_stream_max_segment_size_bytes: 5000 # 5 KB
+ rabbitmq_stream_segment_max_age: "60s" # 60 seconds
diff --git a/doc/source/reference/storage/external-ceph-guide.rst b/doc/source/reference/storage/external-ceph-guide.rst
index 49e64d6c32..a831015ba7 100644
--- a/doc/source/reference/storage/external-ceph-guide.rst
+++ b/doc/source/reference/storage/external-ceph-guide.rst
@@ -312,24 +312,6 @@ for use with availability zones:
* ``/etc/kolla/config/nova//ceph2.conf``
* ``/etc/kolla/config/nova//ceph2.client.cinder.keyring``
-If ``zun`` is enabled, and you wish to use cinder volumes with zun,
-it must also be configured to allow access to Cinder volumes:
-
-* Enable Cinder Ceph backend for Zun in ``globals.yml``:
-
- .. code-block:: yaml
-
- zun_configure_for_cinder_ceph: "yes"
-
-* Copy Ceph configuration file to:
-
- * ``/etc/kolla/config/zun/zun-compute/ceph.conf``
-
-* Copy Ceph keyring file(s) to:
-
- * ``/etc/kolla/config/zun/zun-compute/ceph.client.cinder.keyring``
-
-
Nova
----
@@ -583,13 +565,3 @@ HTTPS (443) port will be used. For example:
The HAProxy frontend port is defined via ``ceph_rgw_port``, and defaults to
6780.
-
-Cephadm and Ceph Client Version
-===============================
-When configuring Zun with Cinder volumes, kolla-ansible installs some
-Ceph client packages on zun-compute hosts. You can set the version
-of the Ceph packages installed by,
-
-* Configuring Ceph version details in ``/etc/kolla/globals.yml``:
-
- * ``ceph_version`` (default: ``pacific``)
diff --git a/etc/kolla/globals.yml b/etc/kolla/globals.yml
index b4ade81cae..44904c9d7d 100644
--- a/etc/kolla/globals.yml
+++ b/etc/kolla/globals.yml
@@ -110,10 +110,6 @@ workaround_ansible_issue_8743: true
# Docker client timeout in seconds.
#docker_client_timeout: 120
-#docker_configure_for_zun: false
-#containerd_configure_for_zun: false
-#containerd_grpc_gid: 42463
-
###################
# Messaging options
###################
@@ -381,13 +377,11 @@ workaround_ansible_issue_8743: true
#enable_horizon_tacker: "{{ enable_tacker | bool }}"
#enable_horizon_trove: "{{ enable_trove | bool }}"
#enable_horizon_watcher: "{{ enable_watcher | bool }}"
-#enable_horizon_zun: "{{ enable_zun | bool }}"
#enable_ironic: false
#enable_ironic_neutron_agent: false
#enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}"
#enable_ironic_pxe_filter: false
#enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
-#enable_kuryr: false
#enable_magnum: false
#enable_manila: false
#enable_manila_backend_generic: false
@@ -432,7 +426,7 @@ workaround_ansible_issue_8743: true
#enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
#enable_ovs_dpdk: false
#enable_osprofiler: false
-#enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
+#enable_placement: "{{ enable_nova | bool }}"
#enable_prometheus: false
#enable_proxysql: true
#enable_valkey: false
@@ -441,7 +435,6 @@ workaround_ansible_issue_8743: true
#enable_trove: false
#enable_trove_singletenant: false
#enable_watcher: false
-#enable_zun: false
#############
# S3 options
diff --git a/etc/kolla/passwords.yml b/etc/kolla/passwords.yml
index 1120754a82..ac45a61ae4 100644
--- a/etc/kolla/passwords.yml
+++ b/etc/kolla/passwords.yml
@@ -60,8 +60,6 @@ glance_keystone_password:
gnocchi_database_password:
gnocchi_keystone_password:
-kuryr_keystone_password:
-
nova_database_password:
nova_api_database_password:
nova_keystone_password:
@@ -128,9 +126,6 @@ octavia_client_ca_password:
tacker_database_password:
tacker_keystone_password:
-zun_database_password:
-zun_keystone_password:
-
masakari_database_password:
masakari_keystone_password:
diff --git a/releasenotes/notes/add_neutron_external_interface_length_check-e7c4456b92cae281.yaml b/releasenotes/notes/add_neutron_external_interface_length_check-e7c4456b92cae281.yaml
index c66c808a9c..76531ed032 100644
--- a/releasenotes/notes/add_neutron_external_interface_length_check-e7c4456b92cae281.yaml
+++ b/releasenotes/notes/add_neutron_external_interface_length_check-e7c4456b92cae281.yaml
@@ -1,10 +1,9 @@
---
features:
- |
- Add neutron interface<->bridge parity validation.
- Validates that the number of neutron_external_interface
- entries matches the number of neutron_bridge_name entries
- in the configuration. This prevents misconfigurations
- that could cause deployment failures or incorrect
- networking behavior.
+ Added a validation check to ensure the number of
+ ``neutron_external_interface`` entries matches the number of
+ ``neutron_bridge_name`` entries in the configuration.
+ This prevents misconfigurations that could cause deployment
+ failures or incorrect networking behavior.
`LP#1864832 `__
diff --git a/releasenotes/notes/add_rabbitmq_mgm_tls-86706572b96aacfa.yaml b/releasenotes/notes/add_rabbitmq_mgm_tls-86706572b96aacfa.yaml
index 83a03b83b8..864e56ea63 100644
--- a/releasenotes/notes/add_rabbitmq_mgm_tls-86706572b96aacfa.yaml
+++ b/releasenotes/notes/add_rabbitmq_mgm_tls-86706572b96aacfa.yaml
@@ -1,4 +1,6 @@
---
features:
- |
- Added support for RabbitMQ management interface SSL configuration.
+ Added TLS support for the RabbitMQ management API and UI. When
+ ``rabbitmq_enable_tls`` is set to ``true``, TLS is now also enabled
+ for the RabbitMQ management interface.
diff --git a/releasenotes/notes/barbican-uwsgi-845123de69b4a30a.yaml b/releasenotes/notes/barbican-uwsgi-845123de69b4a30a.yaml
index 81b6580298..91e4ca7b4d 100644
--- a/releasenotes/notes/barbican-uwsgi-845123de69b4a30a.yaml
+++ b/releasenotes/notes/barbican-uwsgi-845123de69b4a30a.yaml
@@ -1,5 +1,5 @@
---
upgrade:
- |
- ``barbican`` uWSGI configuration has been reworked to use the same service
- role as other Ansible roles.
+ ``barbican-api`` uWSGI configuration has been migrated to use the
+ ``service-uwsgi-config`` role, consistent with other Kolla-Ansible roles.
diff --git a/releasenotes/notes/bug-2073159-67532593585a1e10.yaml b/releasenotes/notes/bug-2106219-67532593585a1e10.yaml
similarity index 100%
rename from releasenotes/notes/bug-2073159-67532593585a1e10.yaml
rename to releasenotes/notes/bug-2106219-67532593585a1e10.yaml
diff --git a/releasenotes/notes/bug-2131993-d67c68845ddf5556.yaml b/releasenotes/notes/bug-2131993-d67c68845ddf5556.yaml
index 8c06ba28b4..ad4ad91d95 100644
--- a/releasenotes/notes/bug-2131993-d67c68845ddf5556.yaml
+++ b/releasenotes/notes/bug-2131993-d67c68845ddf5556.yaml
@@ -3,4 +3,4 @@ fixes:
- |
Fixes issue where ProxySQL certificates were copied over even
with ``kolla_externally_managed_cert`` set to True.
- `LP#2073159 `__
+ `LP#2131993 `__
diff --git a/releasenotes/notes/bug-2138498-564e7475f1d4cc27.yaml b/releasenotes/notes/bug-2138498-564e7475f1d4cc27.yaml
index dcbd2c54da..41c353134f 100644
--- a/releasenotes/notes/bug-2138498-564e7475f1d4cc27.yaml
+++ b/releasenotes/notes/bug-2138498-564e7475f1d4cc27.yaml
@@ -4,5 +4,4 @@ fixes:
Fixed an issue where Neutron sub-services (RPC server, maintenance and
periodic workers) would crash when ``enable_neutron_vpnaas`` was set to
``yes`` due to missing ``neutron_vpnaas.conf`` file injection.
- neutron: inject neutron_vpnaas.conf into auxiliary services
`LP#2138498 `__
diff --git a/releasenotes/notes/bug-2143979-7c81b4560bba6efd.yaml b/releasenotes/notes/bug-2143979-7c81b4560bba6efd.yaml
index 5a9f4729b2..8d85754164 100644
--- a/releasenotes/notes/bug-2143979-7c81b4560bba6efd.yaml
+++ b/releasenotes/notes/bug-2143979-7c81b4560bba6efd.yaml
@@ -5,6 +5,6 @@ fixes:
unnecessarily restarted during every ``reconfigure``
operation. The container handler now correctly includes
the ``healthcheck`` configuration, ensuring idempotency
- by matching the container's runtime definition with
+ by matching the container's runtime definition with
the service defaults.
`LP#2143979 `__
diff --git a/releasenotes/notes/bugfix-bump-mariadb-innodb-log-file-size-6d280b738baa3897.yaml b/releasenotes/notes/bugfix-bump-mariadb-innodb-log-file-size-6d280b738baa3897.yaml
index 3dafce0c8f..038557bb72 100644
--- a/releasenotes/notes/bugfix-bump-mariadb-innodb-log-file-size-6d280b738baa3897.yaml
+++ b/releasenotes/notes/bugfix-bump-mariadb-innodb-log-file-size-6d280b738baa3897.yaml
@@ -2,22 +2,17 @@
features:
- |
Increased the default value of ``innodb_log_file_size`` from 96MB to 2GB.
- This change improves overall performance of MariaDB.
- However, the recovery of MariaDB may take longer time as a tradeoff.
- Users can adjust the value by overriding K-A variable
- ``mariadb_innodb_log_file_size_mb``.
- The allowed minimum is 4MB and maximum is 524288MB (512GB)
+ This change improves overall MariaDB performance, though crash recovery
+ may take longer as a tradeoff. Users can adjust the value by overriding
+ the ``mariadb_innodb_log_file_size_mb`` variable. The allowed range is
+ 4MB to 524288MB (512GB).
upgrade:
- |
The default value of ``innodb_log_file_size`` has increased from 96MB to
- 2GB.
- This improves MariaDB performance but recovery time from crash may take
- longer time as a tradeoff.
- Users are recommended to consider the recovery time with new default
- before upgrade.
- Users are recommended to check if disk space is enough with larger InnoDB
- log file.
+ 2GB. This improves MariaDB performance but crash recovery may take longer.
+ Before upgrading, ensure sufficient disk space is available for the larger
+ InnoDB log file.
fixes:
- |
- Fixes bug `LP#2129930 `__
- which made Zuul CI to fail MariaDB backup test sometimes.
+ Fixed an intermittent Zuul CI failure in the MariaDB backup test.
+ `LP#2129930 `__
diff --git a/releasenotes/notes/drop-influxdb-telegraf-53da82866d55c3e5.yaml b/releasenotes/notes/drop-influxdb-telegraf-53da82866d55c3e5.yaml
index 92deca5d73..4f8a2159e5 100644
--- a/releasenotes/notes/drop-influxdb-telegraf-53da82866d55c3e5.yaml
+++ b/releasenotes/notes/drop-influxdb-telegraf-53da82866d55c3e5.yaml
@@ -1,12 +1,10 @@
---
upgrade:
- |
- Support for deploying ``influxdb`` has been dropped, because Kolla
- delivers a community end of life version v1, and there are no plans
- to upgrade influxdb to v2 - there are better open source alternatives
- out there.
- Existing users need to remove influxdb containers and their
- configuration manually.
+ Support for deploying ``influxdb`` has been dropped. Kolla delivered
+ the community end-of-life version v1, and there are no plans to upgrade
+ to v2. Users should migrate to an alternative monitoring solution and
+ remove influxdb containers and their configuration manually.
- |
Support for deploying ``telegraf`` has been dropped after earlier
deprecation.
diff --git a/releasenotes/notes/drop-kuryr-and-zun-0c88940bb0878090.yaml b/releasenotes/notes/drop-kuryr-and-zun-0c88940bb0878090.yaml
new file mode 100644
index 0000000000..de730ae80e
--- /dev/null
+++ b/releasenotes/notes/drop-kuryr-and-zun-0c88940bb0878090.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ Support for deploying ``zun`` has been dropped due to the service being
+    broken in 2026.1. Support for ``kuryr`` has been dropped as well, as it
+    was used only by ``zun``.
diff --git a/releasenotes/notes/harden_haproxy_tls_config-edeea2ed54316bad.yaml b/releasenotes/notes/harden_haproxy_tls_config-edeea2ed54316bad.yaml
index 1adcdcec1d..c5c2a235b2 100644
--- a/releasenotes/notes/harden_haproxy_tls_config-edeea2ed54316bad.yaml
+++ b/releasenotes/notes/harden_haproxy_tls_config-edeea2ed54316bad.yaml
@@ -1,7 +1,7 @@
---
features:
- |
- Harden the HAProxy TLS configuration according to Mozilla SSL
- recommendations for HAProxy 2.8.15 and OpenSSL 3.x
+ The HAProxy TLS configuration has been hardened following Mozilla SSL
+ Modern recommendations for HAProxy 2.8.15 and OpenSSL 3.x
(Rocky Linux and Ubuntu):
``__
diff --git a/releasenotes/notes/ironic-drop-legacy-6ab1fa7c20a80ff7.yaml b/releasenotes/notes/ironic-drop-legacy-6ab1fa7c20a80ff7.yaml
index 0fe4e9821a..07308c265c 100644
--- a/releasenotes/notes/ironic-drop-legacy-6ab1fa7c20a80ff7.yaml
+++ b/releasenotes/notes/ironic-drop-legacy-6ab1fa7c20a80ff7.yaml
@@ -1,4 +1,5 @@
---
upgrade:
- |
- ``Ironic`` legacy upgrade mechanism has been dropped.
+ The legacy (non-rolling) ``ironic`` upgrade mechanism has been dropped.
+ Ironic upgrades now exclusively use the rolling upgrade mechanism.
diff --git a/releasenotes/notes/make-rabbitmq-stream-retention-policy-configurable-99388b8113d5fcac.yaml b/releasenotes/notes/make-rabbitmq-stream-retention-policy-configurable-99388b8113d5fcac.yaml
new file mode 100644
index 0000000000..7f14e29b7b
--- /dev/null
+++ b/releasenotes/notes/make-rabbitmq-stream-retention-policy-configurable-99388b8113d5fcac.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ New variables ``rabbitmq_stream_max_segment_size_bytes`` and
+ ``rabbitmq_stream_segment_max_age`` are added to make RabbitMQ stream
+ retention policy configurable.
+ These two new variables are only applied when
+ ``om_enable_rabbitmq_stream_fanout`` is set to true.
+    It is recommended to tune these values based on the user's environment to
+ prevent leaving too many ready messages in stream queues.
diff --git a/releasenotes/notes/new-datasource-alertmanager-d724981bd971da92.yaml b/releasenotes/notes/new-datasource-alertmanager-d724981bd971da92.yaml
index 0a63da14b2..53591f2034 100644
--- a/releasenotes/notes/new-datasource-alertmanager-d724981bd971da92.yaml
+++ b/releasenotes/notes/new-datasource-alertmanager-d724981bd971da92.yaml
@@ -1,4 +1,5 @@
---
features:
- |
- Added new Alertmanager datasource in Grafana.
+ An Alertmanager datasource is now automatically configured in Grafana
+ when ``enable_prometheus_alertmanager`` is set to ``true``.
diff --git a/releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml b/releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml
index 6f19718c39..c3629c1235 100644
--- a/releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml
+++ b/releasenotes/notes/remove-distro-python-version-09adac8895cb87d1.yaml
@@ -1,4 +1,5 @@
---
upgrade:
- |
- The global variable ``distro_python_version`` has been removed
+ The global variable ``distro_python_version`` has been removed as it is
+ no longer needed. Python path handling is now done at the image level.
diff --git a/releasenotes/notes/stop-grafana-plugin-install-via-ui-6c437ae59b68ef53.yaml b/releasenotes/notes/stop-grafana-plugin-install-via-ui-6c437ae59b68ef53.yaml
index bfba78baa2..c496552718 100644
--- a/releasenotes/notes/stop-grafana-plugin-install-via-ui-6c437ae59b68ef53.yaml
+++ b/releasenotes/notes/stop-grafana-plugin-install-via-ui-6c437ae59b68ef53.yaml
@@ -3,4 +3,4 @@ fixes:
- |
Prevents users installing plugins via the Grafana UI which will cause
Grafana instances to become out-of-sync in multinode deployments.
- See `LP#2122587 `_.
+ `LP#2122587 `__
diff --git a/releasenotes/notes/uwsgi-designate-649599b4b9a1bdfd.yaml b/releasenotes/notes/uwsgi-designate-649599b4b9a1bdfd.yaml
index 65097770c2..64280ae730 100644
--- a/releasenotes/notes/uwsgi-designate-649599b4b9a1bdfd.yaml
+++ b/releasenotes/notes/uwsgi-designate-649599b4b9a1bdfd.yaml
@@ -1,4 +1,4 @@
---
upgrade:
- |
- ``designate-api`` is now running under uWSGI and now supports backend TLS.
+ ``designate-api`` is now running under uWSGI and supports backend TLS.
diff --git a/releasenotes/notes/uwsgi-magnum-6702c72f30286978.yaml b/releasenotes/notes/uwsgi-magnum-6702c72f30286978.yaml
index d8794b4c85..47e5bf7a9e 100644
--- a/releasenotes/notes/uwsgi-magnum-6702c72f30286978.yaml
+++ b/releasenotes/notes/uwsgi-magnum-6702c72f30286978.yaml
@@ -1,4 +1,4 @@
---
upgrade:
- |
- ``magnum-api`` is now running under uWSGI and now supports backend TLS.
+ ``magnum-api`` is now running under uWSGI and supports backend TLS.
diff --git a/roles/openstack-clients/defaults/main.yml b/roles/openstack-clients/defaults/main.yml
index 034618843a..c0049c63aa 100644
--- a/roles/openstack-clients/defaults/main.yml
+++ b/roles/openstack-clients/defaults/main.yml
@@ -28,8 +28,6 @@ openstack_clients_pip_packages:
enabled: "{{ scenario == 'nfv' }}"
- package: python-troveclient
enabled: "{{ scenario == 'magnum' }}"
- - package: python-zunclient
- enabled: "{{ scenario == 'zun' }}"
openstack_clients_venv_base: "{{ ansible_user_dir }}"
openstack_clients_venv_name: "openstackclient-venv"
diff --git a/tests/check-logs.sh b/tests/check-logs.sh
index 2bc3ccee13..bb2420cf4f 100755
--- a/tests/check-logs.sh
+++ b/tests/check-logs.sh
@@ -103,9 +103,6 @@ function check_fluentd_missing_logs {
/var/log/kolla/tenks/*)
continue
;;
- /var/log/kolla/zun/*)
- continue
- ;;
*)
if ! sudo grep -q "following tail of $file" $fluentd_log_file; then
echo "no match for $file"
diff --git a/tests/pre.yml b/tests/pre.yml
index 1480609b36..59d1052108 100644
--- a/tests/pre.yml
+++ b/tests/pre.yml
@@ -79,7 +79,7 @@
become: true
package:
name: lvm2
- when: scenario in ['cephadm', 'zun']
+ when: scenario in ['cephadm']
- name: Ensure /tmp/logs/ dir
file:
diff --git a/tests/run.yml b/tests/run.yml
index efa817e24d..f6b7819717 100644
--- a/tests/run.yml
+++ b/tests/run.yml
@@ -22,7 +22,7 @@
need_build_image: "{{ kolla_build_images | default(false) }}"
build_image_tag: "change_{{ zuul.change | default('none') }}"
openstack_core_enabled: "{{ openstack_core_enabled }}"
- openstack_core_tested: "{{ scenario in ['core', 'cephadm', 'zun', 'cells', 'ovn', 'lets-encrypt', 'container-engine-migration'] }}"
+ openstack_core_tested: "{{ scenario in ['core', 'cephadm', 'cells', 'ovn', 'lets-encrypt', 'container-engine-migration'] }}"
dashboard_enabled: "{{ openstack_core_enabled }}"
upper_constraints_file: "{{ ansible_env.HOME }}/src/opendev.org/openstack/requirements/upper-constraints.txt"
docker_image_tag_suffix: "{{ '-aarch64' if ansible_architecture == 'aarch64' else '' }}"
@@ -38,7 +38,7 @@
- import_role:
name: kolla-ansible-setup-disks
- when: scenario in ['cephadm', 'zun']
+ when: scenario in ['cephadm']
- name: Update /etc/hosts with internal API FQDN
blockinfile:
@@ -427,7 +427,7 @@
EXT_NET_GATEWAY: "{{ neutron_external_network_prefix }}1"
EXT_NET_DEMO_ROUTER_ADDR: "{{ neutron_external_network_prefix }}10"
SCENARIO: "{{ scenario }}"
- when: openstack_core_tested or scenario in ['ironic', 'multi-compute-ironic' ,'magnum', 'nfv', 'zun', 'octavia']
+ when: openstack_core_tested or scenario in ['ironic', 'multi-compute-ironic', 'magnum', 'nfv', 'octavia']
- name: Test OVN VPNaaS
import_role:
@@ -456,16 +456,6 @@
- "{{ tempest_src_dir if (zuul.branch == 'master' and not is_upgrade) else 'tempest' }}"
when: openstack_core_tested
- - name: Run test-zun.sh script
- script:
- cmd: test-zun.sh
- executable: /bin/bash
- chdir: "{{ kolla_ansible_src_dir }}"
- when: scenario == 'zun'
- environment:
- BASE_DISTRO: "{{ base_distro }}"
- CONTAINER_ENGINE: "{{ container_engine }}"
-
- name: Run test-scenario-nfv.sh script
script:
cmd: test-scenario-nfv.sh
diff --git a/tests/templates/globals-default.j2 b/tests/templates/globals-default.j2
index ca2ad562e0..0f6da316af 100644
--- a/tests/templates/globals-default.j2
+++ b/tests/templates/globals-default.j2
@@ -88,20 +88,6 @@ openstack_tag_suffix: "{{ docker_image_tag_suffix }}"
{% endif %}
{% endif %}
-{% if scenario == "zun" %}
-enable_zun: "yes"
-enable_kuryr: "yes"
-enable_etcd: "yes"
-etcd_remove_deleted_members: "yes"
-docker_configure_for_zun: "yes"
-containerd_configure_for_zun: "yes"
-enable_cinder: "yes"
-cinder_cluster_skip_precheck: "yes"
-# lvm backup driver for cinder-backup does not exist
-enable_cinder_backup: "no"
-enable_cinder_backend_lvm: "yes"
-{% endif %}
-
{% if scenario == "ipv6" %}
enable_prometheus: "yes"
enable_prometheus_openstack_exporter: "no"
diff --git a/tests/templates/inventory.j2 b/tests/templates/inventory.j2
index a3bea72130..e2c5ff3388 100644
--- a/tests/templates/inventory.j2
+++ b/tests/templates/inventory.j2
@@ -218,9 +218,6 @@ control
[bifrost:children]
deployment
-[zun:children]
-control
-
# TODO(gkoper): Remove redis group after G/2026.1 release
[redis:children]
control
@@ -601,19 +598,6 @@ designate
[placement-api:children]
placement
-# Zun
-[zun-api:children]
-zun
-
-[zun-wsproxy:children]
-zun
-
-[zun-compute:children]
-compute
-
-[zun-cni-daemon:children]
-compute
-
# Tacker
[tacker-server:children]
tacker
diff --git a/tests/test-zun.sh b/tests/test-zun.sh
deleted file mode 100755
index 984bb03f6b..0000000000
--- a/tests/test-zun.sh
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/bin/bash
-
-set -o xtrace
-set -o errexit
-
-export PYTHONUNBUFFERED=1
-
-function test_zun_logged {
- . /etc/kolla/admin-openrc.sh
- . ~/openstackclient-venv/bin/activate
-
- container_engine="${1:-docker}"
-
- echo "TESTING: Zun"
- openstack appcontainer service list
- openstack appcontainer host list
- openstack subnet set --no-dhcp demo-subnet
- sudo ${container_engine} pull quay.io/openstack.kolla/alpine
- sudo ${container_engine} save quay.io/openstack.kolla/alpine | openstack image create alpine --public --container-format docker --disk-format raw
- openstack appcontainer run --net network=demo-net --name test alpine sleep 1000
- attempt=1
- while [[ $(openstack appcontainer show test -f value -c status) != "Running" ]]; do
- echo "Container not running yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 10 ]]; then
- echo "Container failed to start"
- openstack appcontainer show test
- return 1
- fi
- sleep 10
- done
- openstack appcontainer list
- openstack appcontainer show test
- openstack appcontainer delete --force --stop test
-
- # NOTE(yoctozepto): We have to wait for the container to be deleted due to
- # check-failure.sh checking stopped containers and failing.
- # It is also nice to test that deleting actually works.
- attempt=1
- while openstack appcontainer show test; do
- echo "Container not deleted yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 10 ]]; then
- echo "Zun failed to delete the container"
- openstack appcontainer show test
- return 1
- fi
- sleep 10
- done
-
- echo "SUCCESS: Zun"
-
- echo "TESTING: Zun Cinder volume attachment"
- openstack volume create --size 2 zun_test_volume
- attempt=1
- while [[ $(openstack volume show zun_test_volume -f value -c status) != "available" ]]; do
- echo "Volume not available yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 10 ]]; then
- echo "Volume failed to become available"
- openstack volume show zun_test_volume
- return 1
- fi
- sleep 10
- done
- openstack appcontainer run --net network=demo-net --name test2 --mount source=zun_test_volume,destination=/data alpine sleep 1000
- attempt=1
- while [[ $(openstack volume show zun_test_volume -f value -c status) != "in-use" ]]; do
- echo "Volume not attached yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 10 ]]; then
- echo "Volume failed to attach"
- openstack volume show zun_test_volume
- return 1
- fi
- sleep 10
- done
- attempt=1
- while [[ $(openstack appcontainer show test2 -f value -c status) != "Running" ]]; do
- echo "Container not running yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 10 ]]; then
- echo "Container failed to start"
- openstack appcontainer show test2
- return 1
- fi
- sleep 10
- done
- openstack appcontainer delete --stop test2
- attempt=1
- while [[ $(openstack volume show zun_test_volume -f value -c status) != "available" ]]; do
- echo "Volume not detached yet"
- attempt=$((attempt+1))
- if [[ $attempt -eq 10 ]]; then
- echo "Volume failed to detach"
- openstack volume show zun_test_volume
- return 1
- fi
- sleep 10
- done
- openstack volume delete zun_test_volume
- echo "SUCCESS: Zun Cinder volume attachment"
-
- echo "TESTING: Zun capsule"
- cat >/tmp/capsule.yaml < $log_file 2>&1
- result=$?
- if [[ $result != 0 ]]; then
- echo "Testing Zun failed. See ansible/test-zun for details"
- else
- echo "Successfully tested Zun. See ansible/test-zun for details"
- fi
- return $result
-}
-
-test_zun $1
diff --git a/zuul.d/scenarios/zun.yaml b/zuul.d/scenarios/zun.yaml
deleted file mode 100644
index ea5e96a52c..0000000000
--- a/zuul.d/scenarios/zun.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- job:
- name: kolla-ansible-zun-base
- parent: kolla-ansible-base
- voting: false
- files: !inherit
- - ^ansible/group_vars/all/(zun|kuryr|etcd|cinder|iscsi).yml
- - ^ansible/roles/(zun|kuryr|etcd|cinder|iscsi)/
- - ^tests/setup_disks.sh
- - ^tests/test-core-openstack.sh
- - ^tests/test-zun.sh
- - ^tests/test-dashboard.sh
- vars:
- kolla_ansible_setup_disks_file_path: "/var/lib/cinder_data.img"
- kolla_ansible_setup_disks_vg_name: "cinder-volumes"
- scenario: zun
- scenario_images_extra:
- - ^zun
- - ^kuryr
- - ^etcd
- - ^cinder
- - ^iscsid
- - ^tgtd
-
-- job:
- name: kolla-ansible-debian-trixie-zun
- parent: kolla-ansible-zun-base
- nodeset: kolla-ansible-debian-trixie-multi-8GB
-
-- job:
- name: kolla-ansible-ubuntu-noble-zun
- parent: kolla-ansible-zun-base
- nodeset: kolla-ansible-ubuntu-noble-multi-8GB
-
-- project-template:
- name: kolla-ansible-scenario-zun
- description: |
- Runs Kolla-Ansible Zun scenario jobs.
- check:
- jobs:
- - kolla-ansible-debian-trixie-zun
- - kolla-ansible-ubuntu-noble-zun