diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index f09e5240db..c71a338125 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -678,6 +678,10 @@ prometheus_blackbox_exporter_port: "9115" prometheus_instance_label: proxysql_admin_port: "6032" +# Integer variable to set ProxySQL version. Valid options are 2 and 3 +# When it's set to 2 (Default), ProxySQL 2.7.x is deployed. +# When it's set to 3, ProxySQL 3.0.x is used. +proxysql_version: 2 rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}" rabbitmq_management_port: "15672" diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one index 2b813d7e25..7824ffb54e 100644 --- a/ansible/inventory/all-in-one +++ b/ansible/inventory/all-in-one @@ -130,7 +130,6 @@ control [cyborg:children] control -compute [tacker:children] control diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode index 7d74054466..5eb7557879 100644 --- a/ansible/inventory/multinode +++ b/ansible/inventory/multinode @@ -145,7 +145,6 @@ control [cyborg:children] control -compute [gnocchi:children] control diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index 2f323eff24..5ed32988e1 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ b/ansible/module_utils/kolla_podman_worker.py @@ -192,6 +192,7 @@ def parse_volumes(self, volumes, mounts, filtered_volumes): ) if src == 'devpts': mount_item = dict( + source=src, target=dest, type='devpts' ) @@ -306,6 +307,42 @@ def compare_container(self): self.changed = True return self.changed + def compare_cap_add(self, container_info): + new_cap_add = self.params.get('cap_add', list()).copy() + + new_cap_add = [ + 'CAP_' + cap.upper() + if not cap.upper().startswith('CAP_') + else cap.upper() + for cap in new_cap_add + ] + + try: + current_cap_add = ( + container_info['HostConfig'].get('CapAdd', None) or [] + ) + except (KeyError, TypeError): + current_cap_add = [] + + current_cap_add = [cap.upper() for cap in current_cap_add] + + privileged = container_info['HostConfig'].get('Privileged', False) + if not privileged: + # NOTE(blanson): prepare_container_args() always adds AUDIT_WRITE + # for non-privileged containers. Also works around Podman <4.4 bug + # where AUDIT_WRITE doesn't appear in inspect. Since capabilities + # can't be modified post-creation, this won't mask real drift. + if 'CAP_AUDIT_WRITE' not in new_cap_add: + new_cap_add.append('CAP_AUDIT_WRITE') + + if 'CAP_AUDIT_WRITE' not in current_cap_add: + current_cap_add.append('CAP_AUDIT_WRITE') + + if set(new_cap_add).symmetric_difference(set(current_cap_add)): + return True + + return False + def compare_pid_mode(self, container_info): new_pid_mode = self.params.get('pid_mode') or self.params.get('pid') current_pid_mode = container_info['HostConfig'].get('PidMode') @@ -349,50 +386,65 @@ def check_slash(string): else: return string - raw_volumes, binds = self.generate_volumes() - raw_vols, current_binds = self.generate_volumes( - container_info['HostConfig'].get('Binds')) - - current_vols = [check_slash(vol) for vol in raw_vols if vol] - volumes = [check_slash(vol) for vol in raw_volumes if vol] + # NOTE(blanson): Podman automatically appends default flags + # such as rprivate, nosuid, nodev, rbind to all mounts. + # For special paths like /proc, /run, /sys, and /var/run, + # noexec is also added by default. 
We remove these defaults + # because they do not reflect a meaningful difference + # between the requested and current container configuration. + # Additionally, if neither 'ro' nor 'rw' is specified, + # we implicitly assume 'rw' (Podman's default behavior). + def normalize_mode(path, mode): + default_flags = {'rprivate', 'nosuid', 'nodev', 'rbind'} + special_paths_noexec = {'/proc', '/run', '/sys', '/var/run'} + + flags = set(mode.split(',')) if mode else set() + flags -= default_flags + + if any(path.startswith(p) for p in special_paths_noexec): + flags.discard('noexec') + if not (flags & {'ro', 'rw'}): + flags.add('rw') + return flags + + # NOTE(blanson): Convert a binds dict into a list of + # (src, dst, normalized_flags) tuples. Normalization ignores + # default Podman flags and noexec for special paths to allow + # consistent comparison. + def build_bind_list(binds_dict): + lst = [] + for src, info in (binds_dict or {}).items(): + src_path = check_slash(src) + dst_path = check_slash(info['bind']) + mode_flags = normalize_mode( + dst_path, + info['mode'], + ) + lst.append((src_path, dst_path, mode_flags)) + return lst - if not volumes: - volumes = list() - if not current_vols: - current_vols = list() - if not current_binds: - current_binds = list() + binds_input = container_info['HostConfig'].get('Binds') + raw_volumes, binds = self.generate_volumes() + raw_vols, current_binds = ( + [], {} + ) if not binds_input else self.generate_volumes(binds_input) - volumes.sort() - current_vols.sort() + volumes = [check_slash(v) for v in raw_volumes or [] if v] + current_vols = [check_slash(v) for v in raw_vols or [] if v] - if set(volumes).symmetric_difference(set(current_vols)): + if set(volumes) != set(current_vols): return True - new_binds = list() - new_current_binds = list() - if binds: - for k, v in binds.items(): - k = check_slash(k) - v['bind'] = check_slash(v['bind']) - new_binds.append( - "{}:{}:{}".format(k, v['bind'], v['mode'])) - - if current_binds: - for k, v in current_binds.items(): - k = check_slash(k) - v['bind'] = check_slash(v['bind']) - if 'ro' in v['mode']: - v['mode'] = 'ro' - else: - v['mode'] = 'rw' - new_current_binds.append( - "{}:{}:{}".format(k, v['bind'], v['mode'][0:2])) - - new_binds.sort() - new_current_binds.sort() + req_bind_list = [ + (src, dst, frozenset(flags)) + for src, dst, flags in build_bind_list(binds) + ] + cur_bind_list = [ + (src, dst, frozenset(flags)) + for src, dst, flags in build_bind_list(current_binds) + ] - if set(new_binds).symmetric_difference(set(new_current_binds)): + if set(req_bind_list) != set(cur_bind_list): return True def compare_dimensions(self, container_info): @@ -416,15 +468,53 @@ def compare_dimensions(self, container_info): failed=True, msg=repr("Unsupported dimensions"), unsupported_dimensions=unsupported) current_dimensions = container_info['HostConfig'] + + # NOTE(blanson): We normalize ulimits names because the podman api + # returns them as RLIMIT_ + def normalize_ulimit_name(name): + name = name.upper() + if not name.startswith('RLIMIT_'): + return 'RLIMIT_' + name + return name + for key1, key2 in dimension_map.items(): - # NOTE(mgoddard): If a resource has been explicitly requested, - # check for a match. Otherwise, ensure it is set to the default. 
- if key1 in new_dimensions: - if key1 == 'ulimits': - if self.compare_ulimits(new_dimensions[key1], - current_dimensions[key2]): - return True - elif new_dimensions[key1] != current_dimensions[key2]: + if key1 == 'ulimits': + current_ulimits = current_dimensions.get(key2, []) + + # NOTE(blanson): We strip podman default ulimits + # because they are not settable by users anyways + # and break idempotency. + filtered_current_ulimits = [ + u for u in current_ulimits + if u.get('Name') not in ('RLIMIT_NOFILE', 'RLIMIT_NPROC') + ] + + desired_ulimits = new_dimensions.get('ulimits', {}) + + desired_ulimits = { + normalize_ulimit_name(name): limits + for name, limits in desired_ulimits.items() + if normalize_ulimit_name(name) not in ( + 'RLIMIT_NOFILE', 'RLIMIT_NPROC') + } + + normalized_current = [ + { + 'Name': normalize_ulimit_name(u['Name']), + 'Soft': u.get('Soft'), + 'Hard': u.get('Hard') + } + for u in filtered_current_ulimits + ] + + if self.compare_ulimits( + desired_ulimits, + normalized_current + ): + return True + + elif key1 in new_dimensions: + if new_dimensions[key1] != current_dimensions.get(key2): return True elif current_dimensions[key2]: # The default values of all (except ulimits) currently diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml index a1b0f321f4..018cde7c6c 100644 --- a/ansible/roles/cinder/defaults/main.yml +++ b/ansible/roles/cinder/defaults/main.yml @@ -54,6 +54,7 @@ cinder_services: enabled: "{{ enable_cinder_backup | bool }}" image: "{{ cinder_backup_image_full }}" privileged: True + ipc_mode: "host" volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ cinder_backup_dimensions }}" healthcheck: "{{ cinder_backup_healthcheck }}" diff --git a/ansible/roles/cinder/handlers/main.yml b/ansible/roles/cinder/handlers/main.yml index 51583bbcc4..f85cabd0ef 100644 --- a/ansible/roles/cinder/handlers/main.yml +++ b/ansible/roles/cinder/handlers/main.yml @@ -38,7 +38,7 @@ name: "{{ service.container_name }}" image: "{{ service.image }}" privileged: "{{ service.privileged | default(False) }}" - ipc_mode: "{{ service.ipc_mode | default('') }}" + ipc_mode: "{{ service.ipc_mode | default(omit) }}" tmpfs: "{{ service.tmpfs | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" @@ -55,6 +55,7 @@ name: "{{ service.container_name }}" image: "{{ service.image }}" privileged: "{{ service.privileged | default(False) }}" + ipc_mode: "{{ service.ipc_mode | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" healthcheck: "{{ service.healthcheck | default(omit) }}" diff --git a/ansible/roles/loadbalancer/defaults/main.yml b/ansible/roles/loadbalancer/defaults/main.yml index 714ea8439c..4150eb6193 100644 --- a/ansible/roles/loadbalancer/defaults/main.yml +++ b/ansible/roles/loadbalancer/defaults/main.yml @@ -47,7 +47,7 @@ haproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_ haproxy_tag: "{{ openstack_tag }}" haproxy_image_full: "{{ haproxy_image }}:{{ haproxy_tag }}" -proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}proxysql" +proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}proxysql{{ proxysql_version_suffix }}" proxysql_tag: "{{ 
openstack_tag }}" proxysql_image_full: "{{ proxysql_image }}:{{ proxysql_tag }}" @@ -233,3 +233,5 @@ mariadb_singlenode: "{{ mariadb_shards_info.shards.values() | map(attribute='hos mariadb_shun_on_failures: "{{ '10' if mariadb_singlenode else '' }}" mariadb_connect_retries_delay: "{{ '1000' if mariadb_singlenode else '' }}" mariadb_connect_retries_on_failure: "{{ '20' if mariadb_singlenode else '' }}" + +proxysql_version_suffix: "{{ '-3' if proxysql_version | int == 3 else '' }}" diff --git a/ansible/roles/nova-cell/tasks/version-check.yml b/ansible/roles/nova-cell/tasks/version-check.yml index 5a339cc998..213427ace4 100644 --- a/ansible/roles/nova-cell/tasks/version-check.yml +++ b/ansible/roles/nova-cell/tasks/version-check.yml @@ -40,32 +40,25 @@ name: - "{{ service.container_name }}" register: container_facts_per_host - run_once: true - delegate_to: "{{ item }}" - loop: "{{ groups[service.group] }}" - loop_control: - label: "{{ item }}" + when: inventory_hostname in groups[service.group] - name: Get current Libvirt version + any_errors_fatal: true become: true command: "{{ kolla_container_engine }} exec {{ service.container_name }} libvirtd --version" register: libvirt_version_current_results changed_when: false - run_once: true - delegate_to: "{{ item.item }}" - loop: "{{ container_facts_per_host.results }}" - loop_control: - label: "{{ item.item }}" when: - - item.containers[service.container_name] is defined - - item.containers[service.container_name].State.Running + - container_facts_per_host is not skipped + - container_facts_per_host.containers[service.container_name] is defined - (hostvars[groups[service.group] | first].service_image_info.images | default([]) | length) > 0 - - item.containers[service.container_name].Image + - container_facts_per_host.containers[service.container_name].Image != hostvars[groups[service.group] | first].service_image_info.images[0].Id - name: Check that the new Libvirt version is >= current + any_errors_fatal: true vars: - current_version: "{{ item.stdout | regex_search('[0-9]+\\.[0-9]+\\.[0-9]+') }}" + current_version: "{{ libvirt_version_current_results.stdout | regex_search('[0-9]+\\.[0-9]+\\.[0-9]+') }}" new_version: "{{ hostvars[groups[service.group] | first].libvirt_new_version }}" assert: that: "{{ new_version is version(current_version, '>=', strict=true) }}" @@ -75,11 +68,7 @@ that you want to do this, please skip the tag `nova-libvirt-version-check`. success_msg: > Libvirt version check successful: target {{ new_version }} >= current {{ current_version }}. 
- run_once: true - loop: "{{ libvirt_version_current_results.results }}" - loop_control: - label: "{{ item.item }}" - when: item.stdout is defined + when: libvirt_version_current_results is not skipped tags: nova-libvirt-version-check when: enable_nova_libvirt_container | bool and (groups[service.group] | length) > 0 diff --git a/ansible/roles/ovn-db/defaults/main.yml b/ansible/roles/ovn-db/defaults/main.yml index ddeccf5644..823b847a30 100644 --- a/ansible/roles/ovn-db/defaults/main.yml +++ b/ansible/roles/ovn-db/defaults/main.yml @@ -33,7 +33,7 @@ ovn_db_services: group: ovn-sb-db-relay enabled: "{{ enable_ovn_sb_db_relay | bool }}" environment: - RELAY_ID: "{{ ovn_sb_db_relay_group_id | default('1') }}" + RELAY_ID: "{{ item | default(ovn_sb_db_relay_group_id | default('1')) | string }}" image: "{{ ovn_sb_db_relay_image_full }}" iterate: true iterate_var: "{{ ovn_sb_db_relay_count | int }}" diff --git a/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 b/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 index aa88944631..5955d3417c 100644 --- a/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 +++ b/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 @@ -4,7 +4,7 @@ { "source": "{{ container_config_directory }}/ovsdb-relay.json", "dest": "/etc/ovn/ovsdb-relay.json", - "owner": "openvswitch", + "owner": "root", "perm": "0600" } ], diff --git a/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 b/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 index 21463e4335..7b00cfa5a3 100644 --- a/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 +++ b/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 @@ -1,5 +1,5 @@ { - "command": "/opt/cadvisor --port={{ prometheus_cadvisor_port }} --log_dir=/var/log/kolla/prometheus{% if prometheus_cadvisor_cmdline_extras %} {{ prometheus_cadvisor_cmdline_extras }}{% endif %}", + "command": "/opt/cadvisor --port={{ prometheus_cadvisor_port }} --log_dir=/var/log/kolla/prometheus --listen_ip {{ 'api' | kolla_address(inventory_hostname) }}{% if prometheus_cadvisor_cmdline_extras %} {{ prometheus_cadvisor_cmdline_extras }}{% endif %}", "config_files": [ {% if kolla_copy_ca_into_containers | bool %} { diff --git a/ansible/roles/service-check-containers/tasks/iterated.yml b/ansible/roles/service-check-containers/tasks/iterated.yml index a08614de6c..3e2837a87c 100644 --- a/ansible/roles/service-check-containers/tasks/iterated.yml +++ b/ansible/roles/service-check-containers/tasks/iterated.yml @@ -2,11 +2,11 @@ - name: "{{ kolla_role_name | default(project_name) }} | Check containers with iteration" become: true vars: - service: "{{ outer_item.value }}" + service: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_services')[service_name] }}" kolla_container: action: "compare_container" common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" + name: "{{ service.container_name }}_{{ item }}" image: "{{ service.image | default(omit) }}" volumes: "{{ service.volumes | default(omit) }}" dimensions: "{{ service.dimensions | default(omit) }}" @@ -22,8 +22,7 @@ labels: "{{ service.labels | default(omit) }}" command: "{{ service.command | default(omit) }}" cgroupns_mode: "{{ service.cgroupns_mode | default(omit) }}" - loop: - - "{{ range(1,(service.iterate_var | int) + 1) | list }}" + loop: "{{ range(1, (iterate_count | int) + 1) | list }}" register: container_check # NOTE(yoctozepto): Must be a separate task because one cannot see the whole diff --git 
a/ansible/roles/service-check-containers/tasks/main.yml b/ansible/roles/service-check-containers/tasks/main.yml
index da6fce15bb..f502ee62bc 100644
--- a/ansible/roles/service-check-containers/tasks/main.yml
+++ b/ansible/roles/service-check-containers/tasks/main.yml
@@ -42,9 +42,10 @@
 - name: Include tasks
   vars:
-    service: "{{ outer_item.value }}"
+    service_name: "{{ outer_item.key }}"
+    iterate_count: "{{ outer_item.value.iterate_var | int }}"
   include_tasks: iterated.yml
   loop: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_services') | select_services_enabled_and_mapped_to_host | dict2items }}"
   loop_control:
     loop_var: outer_item
-  when: (service.iterate | default(False)) | bool
+  when: (outer_item.value.iterate | default(False)) | bool
diff --git a/ansible/roles/service-stop/tasks/iterated.yml b/ansible/roles/service-stop/tasks/iterated.yml
new file mode 100644
index 0000000000..f25bf0c26a
--- /dev/null
+++ b/ansible/roles/service-stop/tasks/iterated.yml
@@ -0,0 +1,14 @@
+---
+- name: "Stopping containers (iterated) for {{ service_name }}"
+  become: true
+  vars:
+    service: "{{ outer_item.value }}"
+    service_name: "{{ outer_item.key }}"
+  kolla_container:
+    action: "stop_container"
+    common_options: "{{ docker_common_options }}"
+    name: "{{ service.container_name }}_{{ item }}"
+    ignore_missing: "{{ kolla_action_stop_ignore_missing | bool }}"
+  when:
+    - service.container_name not in skip_stop_containers
+  loop: "{{ range(1, (service.iterate_var | int) + 1) | list }}"
diff --git a/ansible/roles/service-stop/tasks/main.yml b/ansible/roles/service-stop/tasks/main.yml
index ea502f7144..b3d775ce48 100644
--- a/ansible/roles/service-stop/tasks/main.yml
+++ b/ansible/roles/service-stop/tasks/main.yml
@@ -10,4 +10,16 @@
     ignore_missing: "{{ kolla_action_stop_ignore_missing | bool }}"
   when:
     - service.container_name not in skip_stop_containers
+    - not (service.iterate | default(False)) | bool
   with_dict: "{{ project_services | select_services_enabled_and_mapped_to_host }}"
+
+- name: Include tasks for iterated containers
+  vars:
+    service: "{{ outer_item.value }}"
+  include_tasks: iterated.yml
+  loop: "{{ project_services | select_services_enabled_and_mapped_to_host | dict2items }}"
+  loop_control:
+    loop_var: outer_item
+  when:
+    - (service.iterate | default(False)) | bool
+    - service.iterate_var is defined
diff --git a/ansible/roles/skyline/templates/nginx.conf.j2 b/ansible/roles/skyline/templates/nginx.conf.j2
index 944480464b..bba1082f62 100644
--- a/ansible/roles/skyline/templates/nginx.conf.j2
+++ b/ansible/roles/skyline/templates/nginx.conf.j2
@@ -38,6 +38,12 @@ http {
     ssl_certificate {{ skyline_ssl_certfile }};
     ssl_certificate_key {{ skyline_ssl_keyfile }};
 {% endif %}
+
+{% if internal_protocol == 'https' %}
+    proxy_ssl_protocols TLSv1.2 TLSv1.3;
+    proxy_ssl_server_name on;
+{% endif %}
+
     ##
     # Logging Settings
     ##
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index eb259951c1..e24a757ee2 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -13,3 +13,4 @@ Admin Guides
    production-architecture-guide
    deployment-philosophy
    password-rotation
+   proxysql
diff --git a/doc/source/admin/proxysql.rst b/doc/source/admin/proxysql.rst
new file mode 100644
index 0000000000..ee68b4fef9
--- /dev/null
+++ b/doc/source/admin/proxysql.rst
@@ -0,0 +1,54 @@
+========
+ProxySQL
+========
+
+ProxySQL provides load balancing for MariaDB. Prior to the 2025.1
+release, HAProxy was the default load balancer for the database, as
+it is for other services. From 2025.1, ProxySQL is the default, and
+support for HAProxy as a database load balancer will be removed in 2025.2.
+
+.. note::
+
+   If your MariaDB cluster is not managed by Kolla-Ansible, this does
+   not apply.
+
+Migrating from HAProxy
+~~~~~~~~~~~~~~~~~~~~~~
+
+The migration is handled automatically by Kolla-Ansible. From the
+2025.1 release, ProxySQL is enabled by default when MariaDB is enabled.
+Users coming from the 2024.1 or 2024.2 release can therefore simply
+run the service upgrade command.
+
+Upgrading ProxySQL from 2.7.x to 3.0.x
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The default version of ProxySQL for the 2025.1 release is 2.7.x.
+However, this version has a bug where it does not send the full
+certificate chain during the SSL handshake. See `ProxySQL issue 4877
+`__ for more detail.
+This bug is fixed in the ProxySQL 3.0.x release but not in 2.7.x.
+
+This bug does not affect your system unless you use a chain of
+certificates for database TLS (e.g. an intermediate certificate).
+If, however, this does affect your system, you can upgrade
+ProxySQL as follows.
+
+1. Set ``proxysql_version`` to 3:
+
+   .. code-block:: yaml
+
+      proxysql_version: 3
+
+2. Run the service deployment for the load balancers:
+
+   .. code-block:: bash
+
+      $ kolla-ansible deploy -i -t loadbalancer
+
+3. Verify the ProxySQL version after deployment:
+
+   .. code-block:: bash
+
+      $ docker exec proxysql proxysql --version
+      ProxySQL version 3.0.5
diff --git a/releasenotes/notes/add-support-for-deploying-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml b/releasenotes/notes/add-support-for-deploying-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml
new file mode 100644
index 0000000000..a892e404e2
--- /dev/null
+++ b/releasenotes/notes/add-support-for-deploying-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml
@@ -0,0 +1,12 @@
+---
+features:
+  - |
+    Add support for deploying ProxySQL 3.0.x on OpenStack 2025.1.
+    ProxySQL 2.7.x, the default version for 2025.1, has a bug where
+    the `SSL handshake does not send the full certificate
+    chain `__.
+    This causes database TLS verification failures when an intermediate
+    certificate is used.
+    The bug is only fixed in the ProxySQL 3.0.x release.
+    Users can deploy or upgrade ProxySQL to 3.0.x by setting
+    ``proxysql_version`` to 3.
diff --git a/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml b/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml
new file mode 100644
index 0000000000..0b6a77bdcf
--- /dev/null
+++ b/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixes a placement problem for the Cyborg API and conductor services,
+    which were also being scheduled on compute nodes rather than
+    exclusively on the control plane.
+    `LP#2087552 `__
diff --git a/releasenotes/notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml b/releasenotes/notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml
new file mode 100644
index 0000000000..390bde577f
--- /dev/null
+++ b/releasenotes/notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed TLS errors in Skyline's nginx configuration when upstream
+    endpoints use HTTPS.
+ `LP#2091935 `__ + `LP#1951437 `__ diff --git a/tests/kolla_container_tests/test_podman_worker.py b/tests/kolla_container_tests/test_podman_worker.py index 6cc2b18d59..55ab5c1657 100644 --- a/tests/kolla_container_tests/test_podman_worker.py +++ b/tests/kolla_container_tests/test_podman_worker.py @@ -1110,16 +1110,72 @@ def setUp(self): super(TestAttrComp, self).setUp() self.fake_data = copy.deepcopy(FAKE_DATA) - def test_compare_cap_add_neg(self): - container_info = {'HostConfig': dict(CapAdd=['data'])} - self.pw = get_PodmanWorker({'cap_add': ['data']}) + def test_compare_cap_add_unprivileged_no_user_caps(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': []}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_unprivileged_with_user_caps(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_NET_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': ['net_admin']}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_privileged_no_audit_write(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_NET_ADMIN'], + Privileged=True + )} + self.pw = get_PodmanWorker( + {'cap_add': ['net_admin'], 'privileged': True}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_format_normalization(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_SYS_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': ['sys_admin']}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_podman_bug_workaround(self): + container_info = {'HostConfig': dict( + CapAdd=[], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': []}) self.assertFalse(self.pw.compare_cap_add(container_info)) - def test_compare_cap_add_pos(self): - container_info = {'HostConfig': dict(CapAdd=['data1'])} - self.pw = get_PodmanWorker({'cap_add': ['data2']}) + def test_compare_cap_add_difference_detected(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_NET_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': ['sys_admin']}) self.assertTrue(self.pw.compare_cap_add(container_info)) + def test_compare_cap_add_mixed_case_formats(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_SYS_ADMIN', 'CAP_NET_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker( + {'cap_add': ['SYS_ADMIN', 'cap_net_admin']}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_empty_current(self): + container_info = {'HostConfig': dict( + CapAdd=None, + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': []}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + def test_compare_ipc_mode_neg(self): container_info = {'HostConfig': dict(IpcMode='data')} self.pw = get_PodmanWorker({'ipc_mode': 'data'}) @@ -1249,19 +1305,149 @@ def test_compare_volumes_from_post(self): def test_compare_volumes_neg(self): container_info = { 'Config': dict(Volumes=['/var/log/kolla/']), - 'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])} - self.pw = get_PodmanWorker( - {'volumes': ['kolla_logs:/var/log/kolla/:rw']}) - + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:rw,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['kolla_logs:/var/log/kolla/'] + }) 
self.assertFalse(self.pw.compare_volumes(container_info)) def test_compare_volumes_pos(self): container_info = { 'Config': dict(Volumes=['/var/log/kolla/']), - 'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])} - self.pw = get_PodmanWorker( - {'volumes': ['/dev/:/dev/:rw']}) + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:ro,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['kolla_logs:/var/log/kolla/'] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_empty_add(self): + container_info = { + 'Config': dict(Volumes=[]), + 'HostConfig': dict(Binds=[]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['kolla_logs:/var/log/kolla/'] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_empty_del(self): + container_info = { + 'Config': dict(Volumes=['/var/log/kolla/']), + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:ro,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + def test_compare_volumes_noexec_default(self): + container_info = { + 'Config': dict(Volumes=['/proc/', '/run/libvirt', '/sys/']), + 'HostConfig': dict(Binds=[ + '/proc/:/proc/:rw,shared,rprivate,nosuid,nodev,noexec,rbind', + '/run/libvirt:/run/libvirt:rw,nosuid,nodev,noexec,rbind', + '/sys/:/sys/:rw,rprivate,nosuid,nodev,noexec,rbind', + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [ + '/proc/:/proc/:shared', + '/run/libvirt:/run/libvirt:rprivate', + '/sys/:/sys/:rprivate' + ] + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_shared_vs_rw(self): + container_info = { + 'Config': dict(Volumes=['/run/libvirt/']), + 'HostConfig': dict(Binds=[ + '/run/libvirt:/run/libvirt:rw,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/run/libvirt:/run/libvirt:shared'] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_implicit_rw_both_sides(self): + container_info = { + 'Config': dict(Volumes=['/dev/shm/']), # nosec + 'HostConfig': dict(Binds=[ + '/dev/shm:/dev/shm:rprivate,nosuid,nodev,rbind' # nosec + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/dev/shm:/dev/shm'] # nosec + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_explicit_rw_vs_implicit(self): + container_info = { + 'Config': dict(Volumes=['/data/']), + 'HostConfig': dict(Binds=[ + '/host/data:/data:rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/host/data:/data:rw'] + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_var_run_noexec(self): + container_info = { + 'Config': dict(Volumes=['/var/run/libvirt/']), + 'HostConfig': dict(Binds=[ + '/var/run/libvirt:/var/run/libvirt:' + 'rw,rprivate,nosuid,nodev,noexec,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/var/run/libvirt:/var/run/libvirt'] + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_multiple_binds(self): + container_info = { + 'Config': dict(Volumes=[ # nosec + '/var/log/kolla/', '/etc/kolla/', '/dev/shm/']), + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:rw,rprivate,nosuid,nodev,rbind', + '/etc/kolla:/etc/kolla:ro,rprivate,nosuid,nodev,rbind', + '/dev/shm:/dev/shm:rprivate,nosuid,nodev,rbind' # nosec + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [ + 
'kolla_logs:/var/log/kolla/', + '/etc/kolla:/etc/kolla:ro', + '/dev/shm:/dev/shm' # nosec + ] + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_multiple_binds_one_diff(self): + container_info = { + 'Config': dict(Volumes=['/var/log/kolla/', '/etc/kolla/']), + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:rw,rprivate,nosuid,nodev,rbind', + '/etc/kolla:/etc/kolla:ro,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [ + 'kolla_logs:/var/log/kolla/', + '/etc/kolla:/etc/kolla:rw' + ] + }) self.assertTrue(self.pw.compare_volumes(container_info)) def test_compare_environment_neg(self): @@ -1384,27 +1570,93 @@ def test_compare_dimensions_explicit_default(self): def test_compare_ulimits_pos(self): self.fake_data['params']['dimensions'] = { - 'ulimits': {'nofile': {'soft': 131072, 'hard': 131072}}} + 'ulimits': { + 'memlock': {'soft': 67108864, 'hard': 67108864}} + } container_info = dict() container_info['HostConfig'] = { 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, - 'Ulimits': []} + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1024, 'Hard': 4096}, + {'Name': 'RLIMIT_NPROC', 'Soft': 4096, 'Hard': 4096} + ]} self.pw = get_PodmanWorker(self.fake_data['params']) self.assertTrue(self.pw.compare_dimensions(container_info)) def test_compare_ulimits_neg(self): self.fake_data['params']['dimensions'] = { - 'ulimits': {'nofile': {'soft': 131072, 'hard': 131072}}} - ulimits_nofile = {'Name': 'nofile', - 'Soft': 131072, 'Hard': 131072} + 'ulimits': { + 'memlock': {'soft': 67108864, 'hard': 67108864}} + } + container_info = dict() + container_info['HostConfig'] = { + 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, + 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, + 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1024, 'Hard': 4096}, + {'Name': 'RLIMIT_NPROC', 'Soft': 4096, 'Hard': 4096}, + {'Name': 'RLIMIT_MEMLOCK', 'Soft': 67108864, 'Hard': 67108864} + ]} + self.pw = get_PodmanWorker(self.fake_data['params']) + self.assertFalse(self.pw.compare_dimensions(container_info)) + + def test_compare_ulimits_ignore_podman_defaults(self): + self.fake_data['params']['dimensions'] = {'ulimits': {}} + container_info = dict() + container_info['HostConfig'] = { + 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, + 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, + 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, + 'Ulimits': [ + # These ulimits are not settable by the user and + # are set by default on every podman container. + # We should ignore them on dimensions check. 
+ {'Name': 'RLIMIT_NOFILE', 'Soft': 1024, 'Hard': 4096}, + {'Name': 'RLIMIT_NPROC', 'Soft': 4096, 'Hard': 4096} + ]} + self.pw = get_PodmanWorker(self.fake_data['params']) + self.assertFalse(self.pw.compare_dimensions(container_info)) + + def test_compare_ulimits_filter_defaults_both_sides(self): + self.fake_data['params']['dimensions'] = { + 'ulimits': { + 'RLIMIT_NOFILE': {'soft': 1048576, 'hard': 1048576}, + 'RLIMIT_NPROC': {'soft': 1048576, 'hard': 1048576} + } + } + container_info = dict() + container_info['HostConfig'] = { + 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, + 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, + 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1048576, 'Hard': 1048576}, + {'Name': 'RLIMIT_NPROC', 'Soft': 1048576, 'Hard': 1048576} + ]} + self.pw = get_PodmanWorker(self.fake_data['params']) + self.assertFalse(self.pw.compare_dimensions(container_info)) + + def test_compare_ulimits_with_other_limits_and_defaults(self): + self.fake_data['params']['dimensions'] = { + 'ulimits': { + 'RLIMIT_NOFILE': {'soft': 1048576, 'hard': 1048576}, + 'RLIMIT_NPROC': {'soft': 1048576, 'hard': 1048576}, + 'memlock': {'soft': 67108864, 'hard': 67108864} + } + } container_info = dict() container_info['HostConfig'] = { 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, - 'Ulimits': [ulimits_nofile]} + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1048576, 'Hard': 1048576}, + {'Name': 'RLIMIT_NPROC', 'Soft': 1048576, 'Hard': 1048576}, + {'Name': 'RLIMIT_MEMLOCK', 'Soft': 67108864, 'Hard': 67108864} + ]} self.pw = get_PodmanWorker(self.fake_data['params']) self.assertFalse(self.pw.compare_dimensions(container_info)) diff --git a/tests/run.yml b/tests/run.yml index 336ab36642..2dc2c1b9ad 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -540,6 +540,16 @@ VIP: "{{ kolla_internal_vip_address }}" TLS_ENABLED: "{{ tls_enabled }}" + - name: Run test-proxysql-upgrade.sh script + script: + cmd: test-proxysql-upgrade.sh + executable: /bin/bash + chdir: "{{ kolla_ansible_src_dir }}" + when: scenario == "mariadb" + environment: + KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" + CONTAINER_ENGINE: "{{ container_engine }}" + - name: Run test-prometheus-opensearch.sh script script: cmd: test-prometheus-opensearch.sh diff --git a/tests/test-proxysql-upgrade.sh b/tests/test-proxysql-upgrade.sh new file mode 100644 index 0000000000..f1b8f50d1c --- /dev/null +++ b/tests/test-proxysql-upgrade.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -o xtrace +set -o pipefail + +function test_proxysql_upgrade { + echo "Testing upgrading ProxySQL" + test_proxysql_upgrade_logged > /tmp/logs/ansible/test-proxysql-upgrade 2>&1 + result=$? + if [[ $result != 0 ]]; then + echo "Testing ProxySQL failed. See ansible/test-proxysql-upgrade for details" + else + echo "Successfully tested ProxySQL. See ansible/test-proxysql-upgrade for details" + fi + return $result +} + +function test_proxysql_upgrade_logged { + RAW_INVENTORY=/etc/kolla/inventory + source $KOLLA_ANSIBLE_VENV_PATH/bin/activate + kolla-ansible deploy -i ${RAW_INVENTORY} -t loadbalancer -e proxysql_version=3 || return $? + version=$(sudo $container_engine exec proxysql proxysql --version) + echo "ProxySQL version: $version" + echo $version | grep -q "3.0" + return $? 
+} + +container_engine="${1:-${CONTAINER_ENGINE:-docker}}" +test_proxysql_upgrade diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 8f2f47393d..02d0b1301a 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -191,6 +191,7 @@ files: !inherit - ^ansible/roles/(loadbalancer|mariadb|proxysql-config)/ - ^tests/test-mariadb.sh + - ^tests/test-proxysql-upgrade.sh vars: scenario: mariadb
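
Reviewer note: the mount-flag normalization introduced in the
kolla_podman_worker.py hunk above is easiest to sanity-check in
isolation. Below is a minimal standalone sketch (not part of the
patch) whose helper mirrors the nested normalize_mode() from
compare_volumes(); the example bind spec is illustrative only.

.. code-block:: python

   # Podman-default flags that are stripped before comparison, and the
   # paths for which Podman also adds noexec by default.
   DEFAULT_FLAGS = {'rprivate', 'nosuid', 'nodev', 'rbind'}
   SPECIAL_PATHS_NOEXEC = ('/proc', '/run', '/sys', '/var/run')


   def normalize_mode(path, mode):
       # Strip Podman-default flags, ignore noexec on special paths,
       # and assume 'rw' when neither 'ro' nor 'rw' was requested.
       flags = set(mode.split(',')) if mode else set()
       flags -= DEFAULT_FLAGS
       if path.startswith(SPECIAL_PATHS_NOEXEC):
           flags.discard('noexec')
       if not flags & {'ro', 'rw'}:
           flags.add('rw')
       return flags


   # A bind requested without an explicit mode and the mode Podman
   # reports back after container creation normalize to the same flag
   # set, so compare_volumes() sees no drift and the container is not
   # needlessly recreated.
   requested = normalize_mode('/run/libvirt', None)
   current = normalize_mode('/run/libvirt', 'rw,nosuid,nodev,noexec,rbind')
   assert requested == current == {'rw'}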