From b23ef3886d92a7793905015ccd288a7b6a39ef02 Mon Sep 17 00:00:00 2001 From: Rich Megginson Date: Thu, 19 Mar 2026 11:58:50 -0600 Subject: [PATCH] test: ensure role gathers the facts it uses by having test clear_facts before include_role The role gathers the facts it uses. For example, if the user uses `ANSIBLE_GATHERING=explicit`, the role uses the `setup` module to gather the facts and subsets it requires. This change allows us to test this. Before every role invocation, the test will use `meta: clear_facts` so that the role starts with no facts. Create a task file tests/tasks/run_role_with_clear_facts.yml that clears the facts and then runs the role. Note that this means we don't need to use `gather_facts` for the tests. Some vars defined using `ansible_facts` have been changed to be defined with `set_fact` instead. This is because `vars` are lazily evaluated - the var might be referenced when the facts have been cleared, and will issue an error like `ansible_facts["distribution"] is undefined`. This is typically done for blocks that have a `when` condition that uses `ansible_facts` and the block has a role invocation using run_role_with_clear_facts.yml. These have been rewritten to define the `when` condition using `set_fact`. This is because the `when` condition is evaluated every time a task is invoked in the block, and if the facts are cleared, this will raise an undefined variable error. 
Signed-off-by: Rich Megginson --- tests/tasks/run_role_with_clear_facts.yml | 37 +++++++++++++++++++++++ tests/tests_default.yml | 4 +-- tests/tests_default_reboot.yml | 9 +++--- tests/tests_ssh.yml | 17 +++++++---- tests/tests_ssh_reboot.yml | 14 ++++----- 5 files changed, 59 insertions(+), 22 deletions(-) create mode 100644 tests/tasks/run_role_with_clear_facts.yml diff --git a/tests/tasks/run_role_with_clear_facts.yml b/tests/tasks/run_role_with_clear_facts.yml new file mode 100644 index 00000000..b792e4f2 --- /dev/null +++ b/tests/tasks/run_role_with_clear_facts.yml @@ -0,0 +1,37 @@ +--- +# Task file: clear_facts, run linux-system-roles.kdump. +# Include this with include_tasks or import_tasks +# Input: +# - __sr_tasks_from: tasks_from to run - same as tasks_from in include_role +# - __sr_public: export private vars from role - same as public in include_role +# - __sr_failed_when: set to false to ignore role errors - same as failed_when in include_role +- name: Clear facts + meta: clear_facts + +# note that you can use failed_when with import_role but not with include_role +# so this simulates the __sr_failed_when false case +# Q: Why do we need a separate task to run the role normally? Why not just +# run the role in the block and rethrow the error in the rescue block? +# A: Because you cannot rethrow the error in exactly the same way as the role does. +# It might be possible to exactly reconstruct ansible_failed_result but it's not worth the effort. 
+- name: Run the role with __sr_failed_when false + when: + - __sr_failed_when is defined + - not __sr_failed_when + block: + - name: Run the role + include_role: + name: linux-system-roles.kdump + tasks_from: "{{ __sr_tasks_from | default('main') }}" + public: "{{ __sr_public | default(false) }}" + rescue: + - name: Ignore the failure when __sr_failed_when is false + debug: + msg: Ignoring failure when __sr_failed_when is false + +- name: Run the role normally + include_role: + name: linux-system-roles.kdump + tasks_from: "{{ __sr_tasks_from | default('main') }}" + public: "{{ __sr_public | default(false) }}" + when: __sr_failed_when | d(true) diff --git a/tests/tests_default.yml b/tests/tests_default.yml index 646635a0..f675b3e6 100644 --- a/tests/tests_default.yml +++ b/tests/tests_default.yml @@ -1,15 +1,13 @@ --- - name: Ensure that the rule runs with default parameters hosts: all - gather_facts: false tasks: - name: >- The role requires reboot only on specific systems. Hence running the role in a rescue block to catch when it fails with reboot. block: - name: Run the role. If reboot is not required - the play succeeds. 
- include_role: - name: linux-system-roles.kdump + include_tasks: tasks/run_role_with_clear_facts.yml rescue: - name: If reboot is required - assert the expected fail message assert: diff --git a/tests/tests_default_reboot.yml b/tests/tests_default_reboot.yml index 0c90f4f3..6e1eff6f 100644 --- a/tests/tests_default_reboot.yml +++ b/tests/tests_default_reboot.yml @@ -9,9 +9,9 @@ - tests::reboot tasks: - name: Run the role and reboot if necessary - include_role: - name: linux-system-roles.kdump - public: true + include_tasks: tasks/run_role_with_clear_facts.yml + vars: + __sr_public: true - name: Notify and run handlers meta: flush_handlers @@ -80,8 +80,7 @@ kdump_dracut_args: --print-cmdline - name: Run the role again with new parameters - include_role: - name: linux-system-roles.kdump + include_tasks: tasks/run_role_with_clear_facts.yml - name: Notify and run handlers again meta: flush_handlers diff --git a/tests/tests_ssh.yml b/tests/tests_ssh.yml index 6faa74fb..6f3b5919 100644 --- a/tests/tests_ssh.yml +++ b/tests/tests_ssh.yml @@ -14,10 +14,6 @@ # but is less realistic, as in practice a host can not dump core # to itself. kdump_test_ssh_server_outside: "{{ inventory_hostname }}" - kdump_test_ssh_source: "{{ - ansible_facts['env']['SSH_CONNECTION'].split()[0] - if 'SSH_CONNECTION' in ansible_facts['env'] - else '127.0.0.1' }}" # this is the address at which the ssh dump server can be reached # from the managed host. Dumps will be uploaded there. @@ -78,13 +74,22 @@ - ansible_facts['distribution_major_version'] == '6' - kdump_test_ssh_server_outside == inventory_hostname + # because this relies on ansible_facts['env'], it must be set before + # the role is run because ansible_facts['env'] is cleared by the test + # and the role does not gather that fact. 
+ - name: Set kdump_test_ssh_source + set_fact: + kdump_test_ssh_source: "{{ + ansible_facts['env']['SSH_CONNECTION'].split()[0] + if 'SSH_CONNECTION' in ansible_facts['env'] + else '127.0.0.1' }}" + - name: >- The role requires reboot only on specific systems. Hence running the role in a rescue block to catch when it fails with reboot. block: - name: Run the role. If reboot is not required - the play succeeds. - include_role: - name: linux-system-roles.kdump + include_tasks: tasks/run_role_with_clear_facts.yml rescue: - name: If reboot is required - assert the expected fail message assert: diff --git a/tests/tests_ssh_reboot.yml b/tests/tests_ssh_reboot.yml index 5e890969..0cea6c95 100644 --- a/tests/tests_ssh_reboot.yml +++ b/tests/tests_ssh_reboot.yml @@ -14,10 +14,6 @@ # but is less realistic, as in practice a host can not dump core # to itself. kdump_test_ssh_server_outside: "{{ inventory_hostname }}" - kdump_test_ssh_source: "{{ - ansible_facts['env']['SSH_CONNECTION'].split()[0] - if 'SSH_CONNECTION' in ansible_facts['env'] - else '127.0.0.1' }}" # this is the address at which the ssh dump server can be reached # from the managed host. Dumps will be uploaded there. 
@@ -77,6 +73,10 @@ __user_info.home ~ '/.ssh' }}" __authorized_keys_path: "{{ __user_info.home ~ '/.ssh/authorized_keys' }}" + kdump_test_ssh_source: "{{ + ansible_facts['env']['SSH_CONNECTION'].split()[0] + if 'SSH_CONNECTION' in ansible_facts['env'] + else '127.0.0.1' }}" - name: Print message that this test is skipped on EL 6 debug: @@ -112,8 +112,7 @@ when: __kdump_is_ostree | bool - name: Run the role and reboot if necessary - include_role: - name: linux-system-roles.kdump + include_tasks: tasks/run_role_with_clear_facts.yml - name: Flush handlers meta: flush_handlers @@ -137,8 +136,7 @@ delegate_to: "{{ kdump_ssh_server }}" - name: Run the role again - include_role: - name: linux-system-roles.kdump + include_tasks: tasks/run_role_with_clear_facts.yml - name: Notify and run handlers again meta: flush_handlers