From 6e3b68702338fef936a2d27ff8bf401805faea85 Mon Sep 17 00:00:00 2001 From: Bertrand Lanson Date: Sat, 29 Nov 2025 11:31:35 +0100 Subject: [PATCH 01/12] Fix idempotence on comparing capabilities for podman Currently, we add the AUDIT_WRITE capability for all unprivileged podman containers. This causes a diff because the cap is popped from params before comparing with the running container's actual values. This patch fixes it by overriding compare_cap_add() to normalize capability formats and account for auto-added AUDIT_WRITE, while working around a bug in older Podman versions where AUDIT_WRITE doesn't appear in the inspect API response. Closes-bug: #2133434 Change-Id: I61fc50654fb06e041776fd394f6b1cab2f9903ba Signed-off-by: Bertrand Lanson (cherry picked from commit c7472c0ff61ccbbc6843286269d627b7b1dee6e2) --- ansible/module_utils/kolla_podman_worker.py | 36 ++++++++++ .../test_podman_worker.py | 68 +++++++++++++++++-- 2 files changed, 98 insertions(+), 6 deletions(-) diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index 2f323eff24..7e04e4dced 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ b/ansible/module_utils/kolla_podman_worker.py @@ -306,6 +306,42 @@ def compare_container(self): self.changed = True return self.changed + def compare_cap_add(self, container_info): + new_cap_add = self.params.get('cap_add', list()).copy() + + new_cap_add = [ + 'CAP_' + cap.upper() + if not cap.upper().startswith('CAP_') + else cap.upper() + for cap in new_cap_add + ] + + try: + current_cap_add = ( + container_info['HostConfig'].get('CapAdd', None) or [] + ) + except (KeyError, TypeError): + current_cap_add = [] + + current_cap_add = [cap.upper() for cap in current_cap_add] + + privileged = container_info['HostConfig'].get('Privileged', False) + if not privileged: + # NOTE(blanson): prepare_container_args() always adds AUDIT_WRITE + # for non-privileged containers. 
Also works around Podman <4.4 bug + # where AUDIT_WRITE doesn't appear in inspect. Since capabilities + # can't be modified post-creation, this won't mask real drift. + if 'CAP_AUDIT_WRITE' not in new_cap_add: + new_cap_add.append('CAP_AUDIT_WRITE') + + if 'CAP_AUDIT_WRITE' not in current_cap_add: + current_cap_add.append('CAP_AUDIT_WRITE') + + if set(new_cap_add).symmetric_difference(set(current_cap_add)): + return True + + return False + def compare_pid_mode(self, container_info): new_pid_mode = self.params.get('pid_mode') or self.params.get('pid') current_pid_mode = container_info['HostConfig'].get('PidMode') diff --git a/tests/kolla_container_tests/test_podman_worker.py b/tests/kolla_container_tests/test_podman_worker.py index 6cc2b18d59..8442fac863 100644 --- a/tests/kolla_container_tests/test_podman_worker.py +++ b/tests/kolla_container_tests/test_podman_worker.py @@ -1110,16 +1110,72 @@ def setUp(self): super(TestAttrComp, self).setUp() self.fake_data = copy.deepcopy(FAKE_DATA) - def test_compare_cap_add_neg(self): - container_info = {'HostConfig': dict(CapAdd=['data'])} - self.pw = get_PodmanWorker({'cap_add': ['data']}) + def test_compare_cap_add_unprivileged_no_user_caps(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': []}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_unprivileged_with_user_caps(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_NET_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': ['net_admin']}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_privileged_no_audit_write(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_NET_ADMIN'], + Privileged=True + )} + self.pw = get_PodmanWorker( + {'cap_add': ['net_admin'], 'privileged': True}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def 
test_compare_cap_add_format_normalization(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_SYS_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': ['sys_admin']}) self.assertFalse(self.pw.compare_cap_add(container_info)) - def test_compare_cap_add_pos(self): - container_info = {'HostConfig': dict(CapAdd=['data1'])} - self.pw = get_PodmanWorker({'cap_add': ['data2']}) + def test_compare_cap_add_podman_bug_workaround(self): + container_info = {'HostConfig': dict( + CapAdd=[], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': []}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_difference_detected(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_NET_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': ['sys_admin']}) self.assertTrue(self.pw.compare_cap_add(container_info)) + def test_compare_cap_add_mixed_case_formats(self): + container_info = {'HostConfig': dict( + CapAdd=['CAP_SYS_ADMIN', 'CAP_NET_ADMIN', 'CAP_AUDIT_WRITE'], + Privileged=False + )} + self.pw = get_PodmanWorker( + {'cap_add': ['SYS_ADMIN', 'cap_net_admin']}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + + def test_compare_cap_add_empty_current(self): + container_info = {'HostConfig': dict( + CapAdd=None, + Privileged=False + )} + self.pw = get_PodmanWorker({'cap_add': []}) + self.assertFalse(self.pw.compare_cap_add(container_info)) + def test_compare_ipc_mode_neg(self): container_info = {'HostConfig': dict(IpcMode='data')} self.pw = get_PodmanWorker({'ipc_mode': 'data'}) From a47fd7761175143c84208f5c0368496b5690f80f Mon Sep 17 00:00:00 2001 From: Bertrand Lanson Date: Wed, 29 Oct 2025 18:27:12 +0100 Subject: [PATCH 02/12] Fix podman idempotence on comparing container dimensions Podman containers are created with default ulimites for RLIMIT_NOFILE and RLIMIT_NPROC that are breaking idempotence for kolla_container in 
check_container mode. We forbid users from setting them, so we should also ignore them when checking dimensions to make the module idempotent. Closes-bug: #2131038 Change-Id: If71589a666c4a3a8003a3419518fd7e4182c5e2b Signed-off-by: Bertrand Lanson (cherry picked from commit f92973d0b8c9d1030fb57d479614990cdba46ee9) --- ansible/module_utils/kolla_podman_worker.py | 54 +++++++++++-- .../test_podman_worker.py | 78 +++++++++++++++++-- 2 files changed, 118 insertions(+), 14 deletions(-) diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index 7e04e4dced..fae0d32947 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ b/ansible/module_utils/kolla_podman_worker.py @@ -452,15 +452,53 @@ def compare_dimensions(self, container_info): failed=True, msg=repr("Unsupported dimensions"), unsupported_dimensions=unsupported) current_dimensions = container_info['HostConfig'] + + # NOTE(blanson): We normalize ulimits names because the podman api + # returns them as RLIMIT_ + def normalize_ulimit_name(name): + name = name.upper() + if not name.startswith('RLIMIT_'): + return 'RLIMIT_' + name + return name + for key1, key2 in dimension_map.items(): - # NOTE(mgoddard): If a resource has been explicitly requested, - # check for a match. Otherwise, ensure it is set to the default. - if key1 in new_dimensions: - if key1 == 'ulimits': - if self.compare_ulimits(new_dimensions[key1], - current_dimensions[key2]): - return True - elif new_dimensions[key1] != current_dimensions[key2]: + if key1 == 'ulimits': + current_ulimits = current_dimensions.get(key2, []) + + # NOTE(blanson): We strip podman default ulimits + # because they are not settable by users anyways + # and break idempotency. 
+ filtered_current_ulimits = [ + u for u in current_ulimits + if u.get('Name') not in ('RLIMIT_NOFILE', 'RLIMIT_NPROC') + ] + + desired_ulimits = new_dimensions.get('ulimits', {}) + + desired_ulimits = { + normalize_ulimit_name(name): limits + for name, limits in desired_ulimits.items() + if normalize_ulimit_name(name) not in ( + 'RLIMIT_NOFILE', 'RLIMIT_NPROC') + } + + normalized_current = [ + { + 'Name': normalize_ulimit_name(u['Name']), + 'Soft': u.get('Soft'), + 'Hard': u.get('Hard') + } + for u in filtered_current_ulimits + ] + + if self.compare_ulimits( + desired_ulimits, + normalized_current + ): + return True + + elif key1 in new_dimensions: + if new_dimensions[key1] != current_dimensions.get(key2): return True elif current_dimensions[key2]: # The default values of all (except ulimits) currently diff --git a/tests/kolla_container_tests/test_podman_worker.py b/tests/kolla_container_tests/test_podman_worker.py index 8442fac863..93454eb216 100644 --- a/tests/kolla_container_tests/test_podman_worker.py +++ b/tests/kolla_container_tests/test_podman_worker.py @@ -1440,27 +1440,93 @@ def test_compare_dimensions_explicit_default(self): def test_compare_ulimits_pos(self): self.fake_data['params']['dimensions'] = { - 'ulimits': {'nofile': {'soft': 131072, 'hard': 131072}}} + 'ulimits': { + 'memlock': {'soft': 67108864, 'hard': 67108864}} + } container_info = dict() container_info['HostConfig'] = { 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, - 'Ulimits': []} + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1024, 'Hard': 4096}, + {'Name': 'RLIMIT_NPROC', 'Soft': 4096, 'Hard': 4096} + ]} self.pw = get_PodmanWorker(self.fake_data['params']) self.assertTrue(self.pw.compare_dimensions(container_info)) def test_compare_ulimits_neg(self): self.fake_data['params']['dimensions'] = { - 'ulimits': {'nofile': {'soft': 131072, 'hard': 131072}}} - 
ulimits_nofile = {'Name': 'nofile', - 'Soft': 131072, 'Hard': 131072} + 'ulimits': { + 'memlock': {'soft': 67108864, 'hard': 67108864}} + } + container_info = dict() + container_info['HostConfig'] = { + 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, + 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, + 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1024, 'Hard': 4096}, + {'Name': 'RLIMIT_NPROC', 'Soft': 4096, 'Hard': 4096}, + {'Name': 'RLIMIT_MEMLOCK', 'Soft': 67108864, 'Hard': 67108864} + ]} + self.pw = get_PodmanWorker(self.fake_data['params']) + self.assertFalse(self.pw.compare_dimensions(container_info)) + + def test_compare_ulimits_ignore_podman_defaults(self): + self.fake_data['params']['dimensions'] = {'ulimits': {}} + container_info = dict() + container_info['HostConfig'] = { + 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, + 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, + 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, + 'Ulimits': [ + # These ulimits are not settable by the user and + # are set by default on every podman container. + # We should ignore them on dimensions check. 
+ {'Name': 'RLIMIT_NOFILE', 'Soft': 1024, 'Hard': 4096}, + {'Name': 'RLIMIT_NPROC', 'Soft': 4096, 'Hard': 4096} + ]} + self.pw = get_PodmanWorker(self.fake_data['params']) + self.assertFalse(self.pw.compare_dimensions(container_info)) + + def test_compare_ulimits_filter_defaults_both_sides(self): + self.fake_data['params']['dimensions'] = { + 'ulimits': { + 'RLIMIT_NOFILE': {'soft': 1048576, 'hard': 1048576}, + 'RLIMIT_NPROC': {'soft': 1048576, 'hard': 1048576} + } + } + container_info = dict() + container_info['HostConfig'] = { + 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, + 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, + 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1048576, 'Hard': 1048576}, + {'Name': 'RLIMIT_NPROC', 'Soft': 1048576, 'Hard': 1048576} + ]} + self.pw = get_PodmanWorker(self.fake_data['params']) + self.assertFalse(self.pw.compare_dimensions(container_info)) + + def test_compare_ulimits_with_other_limits_and_defaults(self): + self.fake_data['params']['dimensions'] = { + 'ulimits': { + 'RLIMIT_NOFILE': {'soft': 1048576, 'hard': 1048576}, + 'RLIMIT_NPROC': {'soft': 1048576, 'hard': 1048576}, + 'memlock': {'soft': 67108864, 'hard': 67108864} + } + } container_info = dict() container_info['HostConfig'] = { 'CpuPeriod': 0, 'KernelMemory': 0, 'Memory': 0, 'CpuQuota': 0, 'CpusetCpus': '', 'CpuShares': 0, 'BlkioWeight': 0, 'CpusetMems': '', 'MemorySwap': 0, 'MemoryReservation': 0, - 'Ulimits': [ulimits_nofile]} + 'Ulimits': [ + {'Name': 'RLIMIT_NOFILE', 'Soft': 1048576, 'Hard': 1048576}, + {'Name': 'RLIMIT_NPROC', 'Soft': 1048576, 'Hard': 1048576}, + {'Name': 'RLIMIT_MEMLOCK', 'Soft': 67108864, 'Hard': 67108864} + ]} self.pw = get_PodmanWorker(self.fake_data['params']) self.assertFalse(self.pw.compare_dimensions(container_info)) From 7a6cd241be773d65630f7fb90b50bb554debca13 Mon Sep 17 00:00:00 2001 From: Bertrand Lanson Date: Fri, 31 Oct 2025 00:53:36 +0100 Subject: 
[PATCH 03/12] Fix idempotence on podman volume comparison Due to podman returning all mount option flags in Binds list, The compare_volumes function could not be idempotent because some flags would be skipped everytime, or analyzed when they shouldn't. This new version fixes it by filtering out all default flags from podman for both the requested and current volumes, making comparison accurate. It also takes into account special privileged paths that have the noexec flag added. Closes-bug: #2131039 Change-Id: I173bcb2b1f8c5b81f8395924dfccf73b060100b9 Signed-off-by: Bertrand Lanson (cherry picked from commit df5d6dd094ec1a6368e980c0d4d34ea7e4eced29) --- ansible/module_utils/kolla_podman_worker.py | 92 ++++++----- .../test_podman_worker.py | 144 +++++++++++++++++- 2 files changed, 191 insertions(+), 45 deletions(-) diff --git a/ansible/module_utils/kolla_podman_worker.py b/ansible/module_utils/kolla_podman_worker.py index fae0d32947..5ed32988e1 100644 --- a/ansible/module_utils/kolla_podman_worker.py +++ b/ansible/module_utils/kolla_podman_worker.py @@ -192,6 +192,7 @@ def parse_volumes(self, volumes, mounts, filtered_volumes): ) if src == 'devpts': mount_item = dict( + source=src, target=dest, type='devpts' ) @@ -385,50 +386,65 @@ def check_slash(string): else: return string - raw_volumes, binds = self.generate_volumes() - raw_vols, current_binds = self.generate_volumes( - container_info['HostConfig'].get('Binds')) - - current_vols = [check_slash(vol) for vol in raw_vols if vol] - volumes = [check_slash(vol) for vol in raw_volumes if vol] + # NOTE(blanson): Podman automatically appends default flags + # such as rprivate, nosuid, nodev, rbind to all mounts. + # For special paths like /proc, /run, /sys, and /var/run, + # noexec is also added by default. We remove these defaults + # because they do not reflect a meaningful difference + # between the requested and current container configuration. 
+ # Additionally, if neither 'ro' nor 'rw' is specified, + # we implicitly assume 'rw' (Podman's default behavior). + def normalize_mode(path, mode): + default_flags = {'rprivate', 'nosuid', 'nodev', 'rbind'} + special_paths_noexec = {'/proc', '/run', '/sys', '/var/run'} + + flags = set(mode.split(',')) if mode else set() + flags -= default_flags + + if any(path.startswith(p) for p in special_paths_noexec): + flags.discard('noexec') + if not (flags & {'ro', 'rw'}): + flags.add('rw') + return flags + + # NOTE(blanson): Convert a binds dict into a list of + # (src, dst, normalized_flags) tuples. Normalization ignores + # default Podman flags and noexec for special paths to allow + # consistent comparison. + def build_bind_list(binds_dict): + lst = [] + for src, info in (binds_dict or {}).items(): + src_path = check_slash(src) + dst_path = check_slash(info['bind']) + mode_flags = normalize_mode( + dst_path, + info['mode'], + ) + lst.append((src_path, dst_path, mode_flags)) + return lst - if not volumes: - volumes = list() - if not current_vols: - current_vols = list() - if not current_binds: - current_binds = list() + binds_input = container_info['HostConfig'].get('Binds') + raw_volumes, binds = self.generate_volumes() + raw_vols, current_binds = ( + [], {} + ) if not binds_input else self.generate_volumes(binds_input) - volumes.sort() - current_vols.sort() + volumes = [check_slash(v) for v in raw_volumes or [] if v] + current_vols = [check_slash(v) for v in raw_vols or [] if v] - if set(volumes).symmetric_difference(set(current_vols)): + if set(volumes) != set(current_vols): return True - new_binds = list() - new_current_binds = list() - if binds: - for k, v in binds.items(): - k = check_slash(k) - v['bind'] = check_slash(v['bind']) - new_binds.append( - "{}:{}:{}".format(k, v['bind'], v['mode'])) - - if current_binds: - for k, v in current_binds.items(): - k = check_slash(k) - v['bind'] = check_slash(v['bind']) - if 'ro' in v['mode']: - v['mode'] = 'ro' - else: - 
v['mode'] = 'rw' - new_current_binds.append( - "{}:{}:{}".format(k, v['bind'], v['mode'][0:2])) - - new_binds.sort() - new_current_binds.sort() + req_bind_list = [ + (src, dst, frozenset(flags)) + for src, dst, flags in build_bind_list(binds) + ] + cur_bind_list = [ + (src, dst, frozenset(flags)) + for src, dst, flags in build_bind_list(current_binds) + ] - if set(new_binds).symmetric_difference(set(new_current_binds)): + if set(req_bind_list) != set(cur_bind_list): return True def compare_dimensions(self, container_info): diff --git a/tests/kolla_container_tests/test_podman_worker.py b/tests/kolla_container_tests/test_podman_worker.py index 93454eb216..55ab5c1657 100644 --- a/tests/kolla_container_tests/test_podman_worker.py +++ b/tests/kolla_container_tests/test_podman_worker.py @@ -1305,19 +1305,149 @@ def test_compare_volumes_from_post(self): def test_compare_volumes_neg(self): container_info = { 'Config': dict(Volumes=['/var/log/kolla/']), - 'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])} - self.pw = get_PodmanWorker( - {'volumes': ['kolla_logs:/var/log/kolla/:rw']}) - + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:rw,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['kolla_logs:/var/log/kolla/'] + }) self.assertFalse(self.pw.compare_volumes(container_info)) def test_compare_volumes_pos(self): container_info = { 'Config': dict(Volumes=['/var/log/kolla/']), - 'HostConfig': dict(Binds=['kolla_logs:/var/log/kolla/:rw'])} - self.pw = get_PodmanWorker( - {'volumes': ['/dev/:/dev/:rw']}) + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:ro,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['kolla_logs:/var/log/kolla/'] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_empty_add(self): + container_info = { + 'Config': dict(Volumes=[]), + 'HostConfig': dict(Binds=[]) + } + self.pw = get_PodmanWorker({ + 'volumes': 
['kolla_logs:/var/log/kolla/'] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_empty_del(self): + container_info = { + 'Config': dict(Volumes=['/var/log/kolla/']), + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:ro,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_noexec_default(self): + container_info = { + 'Config': dict(Volumes=['/proc/', '/run/libvirt', '/sys/']), + 'HostConfig': dict(Binds=[ + '/proc/:/proc/:rw,shared,rprivate,nosuid,nodev,noexec,rbind', + '/run/libvirt:/run/libvirt:rw,nosuid,nodev,noexec,rbind', + '/sys/:/sys/:rw,rprivate,nosuid,nodev,noexec,rbind', + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [ + '/proc/:/proc/:shared', + '/run/libvirt:/run/libvirt:rprivate', + '/sys/:/sys/:rprivate' + ] + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_shared_vs_rw(self): + container_info = { + 'Config': dict(Volumes=['/run/libvirt/']), + 'HostConfig': dict(Binds=[ + '/run/libvirt:/run/libvirt:rw,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/run/libvirt:/run/libvirt:shared'] + }) + self.assertTrue(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_implicit_rw_both_sides(self): + container_info = { + 'Config': dict(Volumes=['/dev/shm/']), # nosec + 'HostConfig': dict(Binds=[ + '/dev/shm:/dev/shm:rprivate,nosuid,nodev,rbind' # nosec + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/dev/shm:/dev/shm'] # nosec + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_explicit_rw_vs_implicit(self): + container_info = { + 'Config': dict(Volumes=['/data/']), + 'HostConfig': dict(Binds=[ + '/host/data:/data:rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/host/data:/data:rw'] + }) + 
self.assertFalse(self.pw.compare_volumes(container_info)) + def test_compare_volumes_var_run_noexec(self): + container_info = { + 'Config': dict(Volumes=['/var/run/libvirt/']), + 'HostConfig': dict(Binds=[ + '/var/run/libvirt:/var/run/libvirt:' + 'rw,rprivate,nosuid,nodev,noexec,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': ['/var/run/libvirt:/var/run/libvirt'] + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_multiple_binds(self): + container_info = { + 'Config': dict(Volumes=[ # nosec + '/var/log/kolla/', '/etc/kolla/', '/dev/shm/']), + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:rw,rprivate,nosuid,nodev,rbind', + '/etc/kolla:/etc/kolla:ro,rprivate,nosuid,nodev,rbind', + '/dev/shm:/dev/shm:rprivate,nosuid,nodev,rbind' # nosec + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [ + 'kolla_logs:/var/log/kolla/', + '/etc/kolla:/etc/kolla:ro', + '/dev/shm:/dev/shm' # nosec + ] + }) + self.assertFalse(self.pw.compare_volumes(container_info)) + + def test_compare_volumes_multiple_binds_one_diff(self): + container_info = { + 'Config': dict(Volumes=['/var/log/kolla/', '/etc/kolla/']), + 'HostConfig': dict(Binds=[ + 'kolla_logs:/var/log/kolla/:rw,rprivate,nosuid,nodev,rbind', + '/etc/kolla:/etc/kolla:ro,rprivate,nosuid,nodev,rbind' + ]) + } + self.pw = get_PodmanWorker({ + 'volumes': [ + 'kolla_logs:/var/log/kolla/', + '/etc/kolla:/etc/kolla:rw' + ] + }) self.assertTrue(self.pw.compare_volumes(container_info)) def test_compare_environment_neg(self): From 645d204cf22b206d89e0c943efead8510c4e5198 Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Mon, 16 Feb 2026 11:36:42 +0100 Subject: [PATCH 04/12] ovn-db: set RELAY_ID per iterated relay container Ensure each relay pre-creates its own log file, fixing fluentd permission denied errors on ovn-sb-relay-.log. 
Closes-Bug: #2141909 Change-Id: Icd7bbda54c0112d1aafe636df0e4219ab06d914b Signed-off-by: Bartosz Bezak (cherry picked from commit 142584dcff6e59517a46c106ec2ef1e9a13fc498) --- ansible/roles/ovn-db/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/ovn-db/defaults/main.yml b/ansible/roles/ovn-db/defaults/main.yml index ddeccf5644..823b847a30 100644 --- a/ansible/roles/ovn-db/defaults/main.yml +++ b/ansible/roles/ovn-db/defaults/main.yml @@ -33,7 +33,7 @@ ovn_db_services: group: ovn-sb-db-relay enabled: "{{ enable_ovn_sb_db_relay | bool }}" environment: - RELAY_ID: "{{ ovn_sb_db_relay_group_id | default('1') }}" + RELAY_ID: "{{ item | default(ovn_sb_db_relay_group_id | default('1')) | string }}" image: "{{ ovn_sb_db_relay_image_full }}" iterate: true iterate_var: "{{ ovn_sb_db_relay_count | int }}" From c2d125521562052e33a5d7c35b3ae32f9e4e5750 Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Tue, 10 Feb 2026 14:18:41 +0100 Subject: [PATCH 05/12] ovn: make iterated relay checks idempotent Compare ovn_sb_db_relay_ containers, not base ovn_sb_db_relay. Fix relay config ownership mismatch (root vs openvswitch). 
Co-Authored-By: Bertrand Lanson Co-Authored-By: Doug Szumski Closes-Bug: #2141573 Change-Id: I85420a7b2213d9a72ae3b2ef5de75bbaef04308c Signed-off-by: Bartosz Bezak (cherry picked from commit be29193b5e413d061b6f8b8307f244657134ac48) --- ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 | 2 +- ansible/roles/service-check-containers/tasks/iterated.yml | 7 +++---- ansible/roles/service-check-containers/tasks/main.yml | 5 +++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 b/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 index aa88944631..5955d3417c 100644 --- a/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 +++ b/ansible/roles/ovn-db/templates/ovn-sb-db-relay.json.j2 @@ -4,7 +4,7 @@ { "source": "{{ container_config_directory }}/ovsdb-relay.json", "dest": "/etc/ovn/ovsdb-relay.json", - "owner": "openvswitch", + "owner": "root", "perm": "0600" } ], diff --git a/ansible/roles/service-check-containers/tasks/iterated.yml b/ansible/roles/service-check-containers/tasks/iterated.yml index a08614de6c..3e2837a87c 100644 --- a/ansible/roles/service-check-containers/tasks/iterated.yml +++ b/ansible/roles/service-check-containers/tasks/iterated.yml @@ -2,11 +2,11 @@ - name: "{{ kolla_role_name | default(project_name) }} | Check containers with iteration" become: true vars: - service: "{{ outer_item.value }}" + service: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_services')[service_name] }}" kolla_container: action: "compare_container" common_options: "{{ docker_common_options }}" - name: "{{ service.container_name }}" + name: "{{ service.container_name }}_{{ item }}" image: "{{ service.image | default(omit) }}" volumes: "{{ service.volumes | default(omit) }}" dimensions: "{{ service.dimensions | default(omit) }}" @@ -22,8 +22,7 @@ labels: "{{ service.labels | default(omit) }}" command: "{{ service.command | default(omit) }}" cgroupns_mode: "{{ service.cgroupns_mode | 
default(omit) }}" - loop: - - "{{ range(1,(service.iterate_var | int) + 1) | list }}" + loop: "{{ range(1, (iterate_count | int) + 1) | list }}" register: container_check # NOTE(yoctozepto): Must be a separate task because one cannot see the whole diff --git a/ansible/roles/service-check-containers/tasks/main.yml b/ansible/roles/service-check-containers/tasks/main.yml index da6fce15bb..f502ee62bc 100644 --- a/ansible/roles/service-check-containers/tasks/main.yml +++ b/ansible/roles/service-check-containers/tasks/main.yml @@ -42,9 +42,10 @@ - name: Include tasks vars: - service: "{{ outer_item.value }}" + service_name: "{{ outer_item.key }}" + iterate_count: "{{ outer_item.value.iterate_var | int }}" include_tasks: iterated.yml loop: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_services') | select_services_enabled_and_mapped_to_host | dict2items }}" loop_control: loop_var: outer_item - when: (service.iterate | default(False)) | bool + when: (outer_item.value.iterate | default(False)) | bool From b7821bee632e0591aad8ce1a0fbb8d6d13e8b980 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Mon, 30 Jun 2025 15:01:12 +0200 Subject: [PATCH 06/12] skyline: add TLSv1.2 and TLSv1.3 support for HTTPS upstream endpoints When the upstream endpoint uses HTTPS, TLS errors were observed. This fix checks if the upstream endpoint is HTTPS and ensures TLSv1.2 and TLSv1.3 are enabled in the nginx configuration. 
References: * https://review.opendev.org/c/openstack/skyline-apiserver/+/941715 Closes-Bug: #2091935 Related-Bug: #1951437 Change-Id: I597c8f1f609580cfc8c29efbc79ada312e667441 Signed-off-by: fprzewozn (cherry picked from commit edfe2817603bb423ce644807a35e1f232fa95aa9) --- ansible/roles/skyline/templates/nginx.conf.j2 | 6 ++++++ .../notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml | 7 +++++++ 2 files changed, 13 insertions(+) create mode 100644 releasenotes/notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml diff --git a/ansible/roles/skyline/templates/nginx.conf.j2 b/ansible/roles/skyline/templates/nginx.conf.j2 index 944480464b..bba1082f62 100644 --- a/ansible/roles/skyline/templates/nginx.conf.j2 +++ b/ansible/roles/skyline/templates/nginx.conf.j2 @@ -38,6 +38,12 @@ http { ssl_certificate {{ skyline_ssl_certfile }}; ssl_certificate_key {{ skyline_ssl_keyfile }}; {% endif %} + + {% if internal_protocol == 'https' %} + proxy_ssl_protocols TLSv1.2 TLSv1.3; + proxy_ssl_server_name on; + {% endif %} + ## # Logging Settings ## diff --git a/releasenotes/notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml b/releasenotes/notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml new file mode 100644 index 0000000000..390bde577f --- /dev/null +++ b/releasenotes/notes/skyline-tls-upstream-nginx-c93b39fb69b8d438.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed TLS errors in Skyline's nginx configuration when upstream + endpoints use HTTPS. + `LP#2091935 `__ + `LP#1951437 `__ From f39083baf97586f45ccbf588c066aeb2ed94b092 Mon Sep 17 00:00:00 2001 From: Bertrand Lanson Date: Fri, 8 Nov 2024 15:03:52 +0100 Subject: [PATCH 07/12] Fix inventory file for cyborg control services Fix a scheduling issue in the multinode and all-in-one inventory files, that would cause cyborg api and conductor service to also be scheduled on compute nodes rather then exclusively staying on the control plane. 
Closes-Bug: #2087552 Change-Id: I69d9a44db037fce42cb5a25b5688313eece15484 (cherry picked from commit 568e186a2a94df318037c0413957f6d844a9a26e) Signed-off-by: Pierre Riteau --- ansible/inventory/all-in-one | 1 - ansible/inventory/multinode | 1 - ...ix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml | 7 +++++++ 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml diff --git a/ansible/inventory/all-in-one b/ansible/inventory/all-in-one index 2b813d7e25..7824ffb54e 100644 --- a/ansible/inventory/all-in-one +++ b/ansible/inventory/all-in-one @@ -130,7 +130,6 @@ control [cyborg:children] control -compute [tacker:children] control diff --git a/ansible/inventory/multinode b/ansible/inventory/multinode index 7d74054466..5eb7557879 100644 --- a/ansible/inventory/multinode +++ b/ansible/inventory/multinode @@ -145,7 +145,6 @@ control [cyborg:children] control -compute [gnocchi:children] control diff --git a/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml b/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml new file mode 100644 index 0000000000..0b6a77bdcf --- /dev/null +++ b/releasenotes/notes/fix-cyborg-api-conductor-placement-f0cdf7274d31ffc0.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixes a placement problem for cyborg api and conductor services, + that would be also be scheduled on compute nodes, rather than + being exclusively on control plane. + `LP#2087552 `__ From db6da6ab25d12f7ffd55ccbb0abc10b16105cae0 Mon Sep 17 00:00:00 2001 From: Stig Telfer Date: Tue, 17 Mar 2026 13:05:46 +0000 Subject: [PATCH 08/12] Bind cAdvisor exporter to listen only internally cAdvisor was listening on all interfaces, which would potentially expose information about running containers on public networks. Change the cAdvisor startup invocation to bind only to the internal API interface. 
Closes-Bug: #2144659 Change-Id: Ica0d5e727467988fab3d4eb532caa7226556e714 Signed-off-by: Stig Telfer (cherry picked from commit 735f126d1188deb56616ec84d3f92fef14edf661) --- ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 b/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 index 21463e4335..7b00cfa5a3 100644 --- a/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 +++ b/ansible/roles/prometheus/templates/prometheus-cadvisor.json.j2 @@ -1,5 +1,5 @@ { - "command": "/opt/cadvisor --port={{ prometheus_cadvisor_port }} --log_dir=/var/log/kolla/prometheus{% if prometheus_cadvisor_cmdline_extras %} {{ prometheus_cadvisor_cmdline_extras }}{% endif %}", + "command": "/opt/cadvisor --port={{ prometheus_cadvisor_port }} --log_dir=/var/log/kolla/prometheus --listen_ip {{ 'api' | kolla_address(inventory_hostname) }}{% if prometheus_cadvisor_cmdline_extras %} {{ prometheus_cadvisor_cmdline_extras }}{% endif %}", "config_files": [ {% if kolla_copy_ca_into_containers | bool %} { From f5b5e562b88a511fa157f870800f0a6a3635f0d7 Mon Sep 17 00:00:00 2001 From: Bartosz Bezak Date: Tue, 17 Mar 2026 15:23:17 +0100 Subject: [PATCH 09/12] nova-cell: parallelize libvirt version check The previous code used `run_once`, `delegate_to`, and `loop`, so one host ran `kolla_container_facts` and `libvirtd --version` for every compute host in sequence. This is slow on large deployments. Run these checks on the compute hosts directly so Ansible can use normal per-host concurrency. Set `any_errors_fatal: true` on the libvirt version-check tasks so a failure on one host fails the play. 
Closes-bug: #2144664 Change-Id: Ic9564c84c0a6c1cef1f77ed115d860f819eec67b Signed-off-by: Bartosz Bezak (cherry picked from commit cd7e86503020f653369258c7b450ae9f1b7c0074) --- .../roles/nova-cell/tasks/version-check.yml | 27 ++++++------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/ansible/roles/nova-cell/tasks/version-check.yml b/ansible/roles/nova-cell/tasks/version-check.yml index 5a339cc998..213427ace4 100644 --- a/ansible/roles/nova-cell/tasks/version-check.yml +++ b/ansible/roles/nova-cell/tasks/version-check.yml @@ -40,32 +40,25 @@ name: - "{{ service.container_name }}" register: container_facts_per_host - run_once: true - delegate_to: "{{ item }}" - loop: "{{ groups[service.group] }}" - loop_control: - label: "{{ item }}" + when: inventory_hostname in groups[service.group] - name: Get current Libvirt version + any_errors_fatal: true become: true command: "{{ kolla_container_engine }} exec {{ service.container_name }} libvirtd --version" register: libvirt_version_current_results changed_when: false - run_once: true - delegate_to: "{{ item.item }}" - loop: "{{ container_facts_per_host.results }}" - loop_control: - label: "{{ item.item }}" when: - - item.containers[service.container_name] is defined - - item.containers[service.container_name].State.Running + - container_facts_per_host is not skipped + - container_facts_per_host.containers[service.container_name] is defined - (hostvars[groups[service.group] | first].service_image_info.images | default([]) | length) > 0 - - item.containers[service.container_name].Image + - container_facts_per_host.containers[service.container_name].Image != hostvars[groups[service.group] | first].service_image_info.images[0].Id - name: Check that the new Libvirt version is >= current + any_errors_fatal: true vars: - current_version: "{{ item.stdout | regex_search('[0-9]+\\.[0-9]+\\.[0-9]+') }}" + current_version: "{{ libvirt_version_current_results.stdout | regex_search('[0-9]+\\.[0-9]+\\.[0-9]+') }}" 
new_version: "{{ hostvars[groups[service.group] | first].libvirt_new_version }}" assert: that: "{{ new_version is version(current_version, '>=', strict=true) }}" @@ -75,11 +68,7 @@ that you want to do this, please skip the tag `nova-libvirt-version-check`. success_msg: > Libvirt version check successful: target {{ new_version }} >= current {{ current_version }}. - run_once: true - loop: "{{ libvirt_version_current_results.results }}" - loop_control: - label: "{{ item.item }}" - when: item.stdout is defined + when: libvirt_version_current_results is not skipped tags: nova-libvirt-version-check when: enable_nova_libvirt_container | bool and (groups[service.group] | length) > 0 From d113cda734db968de736f555b3c0cb13fab70333 Mon Sep 17 00:00:00 2001 From: Jay Jahns Date: Mon, 29 Sep 2025 09:00:28 -0500 Subject: [PATCH 10/12] Properly stop iterated services Adds a check similar to the handlers that will stop iterated services correctly. This is necessary in the case of ovn sb db relay, where the container name is ovn_sb_db_relay_1 vs ovn_sb_db_relay. Without this, any stop action will fail. 
Closes-Bug: 2125630 Change-Id: Ide1bba57998f1298400239a3df6a12db7c674192 Signed-off-by: Jay Jahns Signed-off-by: Michal Nasiadka (cherry picked from commit 3e444dd6bdc53578d110b5aedd0b260c360f59e3) --- ansible/roles/service-stop/tasks/iterated.yml | 14 ++++++++++++++ ansible/roles/service-stop/tasks/main.yml | 12 ++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 ansible/roles/service-stop/tasks/iterated.yml diff --git a/ansible/roles/service-stop/tasks/iterated.yml b/ansible/roles/service-stop/tasks/iterated.yml new file mode 100644 index 0000000000..f25bf0c26a --- /dev/null +++ b/ansible/roles/service-stop/tasks/iterated.yml @@ -0,0 +1,14 @@ +--- +- name: "Stopping containers (iterated) for {{ service_name }}" + become: true + vars: + service: "{{ outer_item.value }}" + service_name: "{{ outer_item.key }}" + kolla_container: + action: "stop_container" + common_options: "{{ docker_common_options }}" + name: "{{ service.container_name }}_{{ item }}" + ignore_missing: "{{ kolla_action_stop_ignore_missing | bool }}" + when: + - service.container_name not in skip_stop_containers + loop: "{{ range(1, (service.iterate_var | int) + 1) | list }}" diff --git a/ansible/roles/service-stop/tasks/main.yml b/ansible/roles/service-stop/tasks/main.yml index ea502f7144..b3d775ce48 100644 --- a/ansible/roles/service-stop/tasks/main.yml +++ b/ansible/roles/service-stop/tasks/main.yml @@ -10,4 +10,16 @@ ignore_missing: "{{ kolla_action_stop_ignore_missing | bool }}" when: - service.container_name not in skip_stop_containers + - not (service.iterate | default(False)) | bool with_dict: "{{ project_services | select_services_enabled_and_mapped_to_host }}" + +- name: Include tasks for iterated containers + vars: + service: "{{ outer_item.value }}" + include_tasks: iterated.yml + loop: "{{ project_services | select_services_enabled_and_mapped_to_host | dict2items }}" + loop_control: + loop_var: outer_item + when: + - (service.iterate | default(False)) | bool + - 
service.iterate_var is defined From c84ae0f7a3eaccfeaa35b64b01565cfe43970b18 Mon Sep 17 00:00:00 2001 From: Seunghun Lee Date: Mon, 26 Jan 2026 11:55:29 +0000 Subject: [PATCH 11/12] [2025.1 only] Support ProxySQL 3.0.x on 2025.1 ProxySQL bug [1] on certificate chain send is decided not to be fixed for 2.7.x. As ProxySQL is the default loadbalancer for MariaDB from 2025.1, add support for ProxySQL 3.0.x to get the fix. [1] https://github.com/sysown/proxysql/issues/4877 Change-Id: I0e164b5f2a56f9dbc495046f0b3053bb9edf7fd3 Signed-off-by: Seunghun Lee Depends-on: https://review.opendev.org/c/openstack/kolla/+/974429 --- ansible/group_vars/all.yml | 4 ++ ansible/roles/loadbalancer/defaults/main.yml | 4 +- doc/source/admin/index.rst | 1 + doc/source/admin/proxysql.rst | 54 +++++++++++++++++++ ...-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml | 12 +++++ tests/run.yml | 10 ++++ tests/test-proxysql-upgrade.sh | 28 ++++++++++ zuul.d/base.yaml | 1 + 8 files changed, 113 insertions(+), 1 deletion(-) create mode 100644 doc/source/admin/proxysql.rst create mode 100644 releasenotes/notes/add-support-for-deploying-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml create mode 100644 tests/test-proxysql-upgrade.sh diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml index f09e5240db..c71a338125 100644 --- a/ansible/group_vars/all.yml +++ b/ansible/group_vars/all.yml @@ -678,6 +678,10 @@ prometheus_blackbox_exporter_port: "9115" prometheus_instance_label: proxysql_admin_port: "6032" +# Integer variable to set ProxySQL version. Valid options are 2 and 3 +# When it's set to 2 (Default), ProxySQL 2.7.x is deployed. +# When it's set to 3, ProxySQL 3.0.x is used. 
+proxysql_version: 2 rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}" rabbitmq_management_port: "15672" diff --git a/ansible/roles/loadbalancer/defaults/main.yml b/ansible/roles/loadbalancer/defaults/main.yml index 13cf3a17d8..e817a68e9d 100644 --- a/ansible/roles/loadbalancer/defaults/main.yml +++ b/ansible/roles/loadbalancer/defaults/main.yml @@ -47,7 +47,7 @@ haproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_ haproxy_tag: "{{ openstack_tag }}" haproxy_image_full: "{{ haproxy_image }}:{{ haproxy_tag }}" -proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}proxysql" +proxysql_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ docker_image_name_prefix }}proxysql{{ proxysql_version_suffix }}" proxysql_tag: "{{ openstack_tag }}" proxysql_image_full: "{{ proxysql_image }}:{{ proxysql_tag }}" @@ -233,3 +233,5 @@ mariadb_singlenode: "{{ mariadb_shards_info.shards.values() | map(attribute='hos mariadb_shun_on_failures: "{{ '10' if mariadb_singlenode else '' }}" mariadb_connect_retries_delay: "{{ '1000' if mariadb_singlenode else '' }}" mariadb_connect_retries_on_failure: "{{ '20' if mariadb_singlenode else '' }}" + +proxysql_version_suffix: "{{ '-3' if proxysql_version | int == 3 else '' }}" diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst index eb259951c1..e24a757ee2 100644 --- a/doc/source/admin/index.rst +++ b/doc/source/admin/index.rst @@ -13,3 +13,4 @@ Admin Guides production-architecture-guide deployment-philosophy password-rotation + proxysql diff --git a/doc/source/admin/proxysql.rst b/doc/source/admin/proxysql.rst new file mode 100644 index 0000000000..ee68b4fef9 --- /dev/null +++ b/doc/source/admin/proxysql.rst @@ -0,0 +1,54 @@ +======== +ProxySQL +======== + +ProxySQL provides loadbalancing to MariaDB. 
Prior to the 2025.1 release, +HAProxy was the default loadbalancer for the database, like other services. +But from 2025.1, ProxySQL became the default and the support for +HAProxy as a database loadbalancer will be discontinued from 2025.2. + +.. note:: + + If your MariaDB cluster is not managed by Kolla-Ansible, this does + not apply. + +Migrating from HAProxy +~~~~~~~~~~~~~~~~~~~~~~ + +The migration is automatically handled by Kolla-Ansible. By default, +ProxySQL gets enabled when MariaDB is enabled from the 2025.1 release. +So, if users are coming from the 2024.1 or 2024.2 release, they can +simply run the service upgrade command. + +Upgrading ProxySQL from 2.7.x to 3.0.x +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The default version of ProxySQL for the 2025.1 release is 2.7.x, however +this version of ProxySQL has a bug where it does not send the full +certificate chain during the SSL handshake. See `ProxySQL issue 4877 +`__ for more detail. +This bug was fixed in ProxySQL 3.0.x but not in the 2.7.x release. + +This bug does not affect users' systems unless they use a chain of +certificates for database TLS (e.g. use of an intermediate certificate). +If, however, this does affect your system, you can upgrade your ProxySQL +by following the steps below. + +1. Set ``proxysql_version`` to 3 + + .. code-block:: yaml + + proxysql_version: 3 + +2. Run service deployment for loadbalancers + + .. code-block:: bash + + $ kolla-ansible deploy -i -t loadbalancer + +3. Verify the version of ProxySQL after deployment + + ..
code-block:: bash + + $ docker exec proxysql proxysql --version + $ ProxySQL version 3.0.5 diff --git a/releasenotes/notes/add-support-for-deploying-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml b/releasenotes/notes/add-support-for-deploying-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml new file mode 100644 index 0000000000..a892e404e2 --- /dev/null +++ b/releasenotes/notes/add-support-for-deploying-proxysql-3-on-epoxy-34b83ff3edc280b7.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Add support for deploying ProxySQL 3.0.x on OpenStack 2025.1 systems. + ProxySQL 2.7.x, which is the default version of ProxySQL for 2025.1, has a + bug that `SSL handshake does not send full certificate + chain `__. + This causes database TLS verification failure when users use an intermediate + certificate. + This bug was only fixed in the ProxySQL 3.0.x release. + Users can deploy/upgrade ProxySQL to 3.0.x by setting + ``proxysql_version`` to 3. diff --git a/tests/run.yml b/tests/run.yml index 336ab36642..2dc2c1b9ad 100644 --- a/tests/run.yml +++ b/tests/run.yml @@ -540,6 +540,16 @@ VIP: "{{ kolla_internal_vip_address }}" TLS_ENABLED: "{{ tls_enabled }}" + - name: Run test-proxysql-upgrade.sh script + script: + cmd: test-proxysql-upgrade.sh + executable: /bin/bash + chdir: "{{ kolla_ansible_src_dir }}" + when: scenario == "mariadb" + environment: + KOLLA_ANSIBLE_VENV_PATH: "{{ kolla_ansible_venv_path }}" + CONTAINER_ENGINE: "{{ container_engine }}" + - name: Run test-prometheus-opensearch.sh script script: cmd: test-prometheus-opensearch.sh diff --git a/tests/test-proxysql-upgrade.sh b/tests/test-proxysql-upgrade.sh new file mode 100644 index 0000000000..f1b8f50d1c --- /dev/null +++ b/tests/test-proxysql-upgrade.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -o xtrace +set -o pipefail + +function test_proxysql_upgrade { + echo "Testing upgrading ProxySQL" + test_proxysql_upgrade_logged > /tmp/logs/ansible/test-proxysql-upgrade 2>&1 + result=$? + if [[ $result != 0 ]]; then + echo "Testing ProxySQL failed. 
See ansible/test-proxysql-upgrade for details" + else + echo "Successfully tested ProxySQL. See ansible/test-proxysql-upgrade for details" + fi + return $result +} + +function test_proxysql_upgrade_logged { + RAW_INVENTORY=/etc/kolla/inventory + source $KOLLA_ANSIBLE_VENV_PATH/bin/activate + kolla-ansible deploy -i ${RAW_INVENTORY} -t loadbalancer -e proxysql_version=3 || return $? + version=$(sudo $container_engine exec proxysql proxysql --version) + echo "ProxySQL version: $version" + echo $version | grep -q "3.0" + return $? +} + +container_engine="${1:-${CONTAINER_ENGINE:-docker}}" +test_proxysql_upgrade diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml index 8f2f47393d..02d0b1301a 100644 --- a/zuul.d/base.yaml +++ b/zuul.d/base.yaml @@ -191,6 +191,7 @@ files: !inherit - ^ansible/roles/(loadbalancer|mariadb|proxysql-config)/ - ^tests/test-mariadb.sh + - ^tests/test-proxysql-upgrade.sh vars: scenario: mariadb From 440ba05b69d80b724cc5bcba47df47c6d4cc1ace Mon Sep 17 00:00:00 2001 From: Victor Chembaev Date: Thu, 26 Mar 2026 17:22:58 +0200 Subject: [PATCH 12/12] Run cinder-backup with ipc_mode: host Set ipc_mode: host to ensure the service uses host multipath semaphores, preventing the process from hanging during multipath flush operations in environments using SCSI-attached storage with multipath. 
Change-Id: I6cf8db49bef3cecaf3ded26e3e6ab61c4166e511 Closes-bug: #2091104 Signed-off-by: Victor Chembaev (cherry picked from commit 34ab43b3ed3041c0c6f069e545a8ebf10f647bb7) --- ansible/roles/cinder/defaults/main.yml | 1 + ansible/roles/cinder/handlers/main.yml | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ansible/roles/cinder/defaults/main.yml b/ansible/roles/cinder/defaults/main.yml index a1b0f321f4..018cde7c6c 100644 --- a/ansible/roles/cinder/defaults/main.yml +++ b/ansible/roles/cinder/defaults/main.yml @@ -54,6 +54,7 @@ cinder_services: enabled: "{{ enable_cinder_backup | bool }}" image: "{{ cinder_backup_image_full }}" privileged: True + ipc_mode: "host" volumes: "{{ cinder_backup_default_volumes + cinder_backup_extra_volumes + lookup('vars', 'run_default_volumes_' + kolla_container_engine) }}" dimensions: "{{ cinder_backup_dimensions }}" healthcheck: "{{ cinder_backup_healthcheck }}" diff --git a/ansible/roles/cinder/handlers/main.yml b/ansible/roles/cinder/handlers/main.yml index 51583bbcc4..f85cabd0ef 100644 --- a/ansible/roles/cinder/handlers/main.yml +++ b/ansible/roles/cinder/handlers/main.yml @@ -38,7 +38,7 @@ name: "{{ service.container_name }}" image: "{{ service.image }}" privileged: "{{ service.privileged | default(False) }}" - ipc_mode: "{{ service.ipc_mode | default('') }}" + ipc_mode: "{{ service.ipc_mode | default(omit) }}" tmpfs: "{{ service.tmpfs | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" @@ -55,6 +55,7 @@ name: "{{ service.container_name }}" image: "{{ service.image }}" privileged: "{{ service.privileged | default(False) }}" + ipc_mode: "{{ service.ipc_mode | default(omit) }}" volumes: "{{ service.volumes | reject('equalto', '') | list }}" dimensions: "{{ service.dimensions }}" healthcheck: "{{ service.healthcheck | default(omit) }}"