Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions doc/ref/grains/all/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -19,4 +19,5 @@ grains modules
opts
package
pending_reboot
resources
rest_sample
5 changes: 5 additions & 0 deletions doc/ref/grains/all/salt.grains.resources.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
salt.grains.resources
=====================

.. automodule:: salt.grains.resources
:members:
5 changes: 5 additions & 0 deletions doc/ref/modules/all/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ execution modules
dpkg_lowpkg
dummyproxy_pkg
dummyproxy_service
dummyresource_test
environ
etcd_mod
ethtool
Expand Down Expand Up @@ -215,6 +216,10 @@ execution modules
ssh_pkg
ssh_pki
ssh_service
sshresource_cmd
sshresource_pkg
sshresource_state
sshresource_test
state
status
supervisord
Expand Down
6 changes: 6 additions & 0 deletions doc/ref/modules/all/salt.modules.dummyresource_test.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
salt.modules.dummyresource_test
===============================

.. automodule:: salt.modules.dummyresource_test
:members:
:undoc-members:
6 changes: 6 additions & 0 deletions doc/ref/modules/all/salt.modules.sshresource_cmd.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
salt.modules.sshresource_cmd
============================

.. automodule:: salt.modules.sshresource_cmd
:members:
:undoc-members:
6 changes: 6 additions & 0 deletions doc/ref/modules/all/salt.modules.sshresource_pkg.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
salt.modules.sshresource_pkg
============================

.. automodule:: salt.modules.sshresource_pkg
:members:
:undoc-members:
6 changes: 6 additions & 0 deletions doc/ref/modules/all/salt.modules.sshresource_state.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
salt.modules.sshresource_state
==============================

.. automodule:: salt.modules.sshresource_state
:members:
:undoc-members:
6 changes: 6 additions & 0 deletions doc/ref/modules/all/salt.modules.sshresource_test.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
salt.modules.sshresource_test
=============================

.. automodule:: salt.modules.sshresource_test
:members:
:undoc-members:
15 changes: 13 additions & 2 deletions salt/client/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1259,8 +1259,19 @@ def get_iter_returns(
# re-do the ping
if time.time() > timeout_at and minions_running:
# since this is a new ping, no one has responded yet
jinfo = self.gather_job_info(
jid, list(minions - found), "list", **kwargs
# Only send gather_job_info to IDs that are accepted minions.
# Resource IDs (e.g. "dummy-01") are not PKI keys; sending
# saltutil.find_job to them as a list target would fail and
# print a misleading "No minions matched" message.
pending = minions - found
accepted_minions = set(
salt.utils.minions.CkMinions(self.opts)._pki_minions()
)
minion_pending = list(pending & accepted_minions)
jinfo = (
self.gather_job_info(jid, minion_pending, "list", **kwargs)
if minion_pending
else {}
)
minions_running = False
# if we weren't assigned any jid that means the master thinks
Expand Down
3 changes: 2 additions & 1 deletion salt/client/netapi.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
The main entry point for salt-api
"""

import asyncio
import logging
import signal

Expand Down Expand Up @@ -63,7 +64,7 @@ def run(self):
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)

self.process_manager.run()
asyncio.run(self.process_manager.run())

def _handle_signals(self, signum, sigframe):
# escalate the signals to the process manager
Expand Down
79 changes: 29 additions & 50 deletions salt/client/ssh/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -229,6 +229,18 @@
tar --strip-components=1 -xf "$RELENV_TAR" -C "{THIN_DIR}"
fi

# BUG-WORKAROUND: salt-ssh relenv path never writes the minion config that
# Single.__init__ builds in self.minion_config. The non-relenv (salt-thin)
# path embeds it in SSH_PY_SHIM via OPTIONS.config, which the Python shim
# writes to thin_dir/minion. The relenv shim has no equivalent, so salt-call
# falls back to system defaults (/var/cache/salt, /var/log/salt) and fails for
# any unprivileged user. Writing it here replicates the salt-thin behaviour.
# See: https://github.com/saltstack/salt (file as issue against salt-ssh relenv)
mkdir -p "{THIN_DIR}/running_data/pki"
cat > "{THIN_DIR}/minion" << 'SALT_MINION_CONF_EOF'
__SALT_MINION_CONFIG__
SALT_MINION_CONF_EOF

# Check if Python binary is executable
if [ ! -x "$SALT_CALL_BIN" ]; then
echo "ERROR: salt-call binary not found or not executable at $SALT_CALL_BIN" >&2
Expand Down Expand Up @@ -265,9 +277,6 @@
echo "{RSTR}"
echo "{RSTR}" >&2

# Debug: Show the actual command being executed
echo "SALT_CALL_CMD: $SALT_CALL_BIN --retcode-passthrough --local --metadata --out=json -lquiet -c {THIN_DIR} -- {ARGS}" >&2

exec $SUDO "$SALT_CALL_BIN" --retcode-passthrough --local --metadata --out=json -lquiet -c "{THIN_DIR}" -- {ARGS}
EOF
""".split(
Expand Down Expand Up @@ -1191,59 +1200,34 @@ def __init__(
self.arch = arch.strip()

if self.opts.get("relenv"):
# Check if OS/arch already detected and cached in opts
if "relenv_kernel" in opts and "relenv_os_arch" in opts:
kernel = opts["relenv_kernel"]
os_arch = opts["relenv_os_arch"]
log.warning(f"RELENV: Reusing cached OS/arch: {kernel}/{os_arch}")
if thin:
# Caller pre-resolved the relenv tarball path — skip the SSH
# round-trip that detect_os_arch() would otherwise make during
# __init__. This is important when Single is created inside a
# minion job worker where every extra SSH connection adds latency
# and can cause hangs.
self.thin = thin
else:
# First Single instance - detect and cache OS/arch in opts before assigning to self.opts
kernel, os_arch = self.detect_os_arch()
opts["relenv_kernel"] = kernel
opts["relenv_os_arch"] = os_arch
log.warning(f"RELENV: Detected and cached OS/arch: {kernel}/{os_arch}")

log.info(
"RELENV: About to call gen_relenv() to download/generate tarball..."
)
self.thin = salt.utils.relenv.gen_relenv(
self.opts["cachedir"], kernel=kernel, os_arch=os_arch
)
log.info(
"RELENV: gen_relenv() completed successfully, tarball path: %s",
self.thin,
)
self.thin = salt.utils.relenv.gen_relenv(
opts["cachedir"], kernel=kernel, os_arch=os_arch
)

# Add file_roots and related config to minion config
# (required for slsutil functions and other fileserver operations)
# Thin does this in _run_wfunc_thin() at lines 1498-1507
# NOTE: Now that we transfer config via SCP instead of embedding in command line,
# we CAN add __master_opts__ without hitting ARG_MAX limits
self.minion_opts["file_roots"] = self.opts["file_roots"]
self.minion_opts["pillar_roots"] = self.opts["pillar_roots"]
self.minion_opts["ext_pillar"] = self.opts["ext_pillar"]
# For relenv, we need to override extension_modules to point to where the shim
# extracts the tarball on the remote system. The wrapper system will copy this
# to opts_pkg["extension_modules"] which is used by salt-call.
self.minion_opts["ext_pillar"] = self.opts.get("ext_pillar", [])
# For relenv, override extension_modules to point to where the shim
# extracts the tarball on the remote system.
self.minion_opts["extension_modules"] = (
f"{self.thin_dir}/running_data/var/cache/salt/minion/extmods"
)
self.minion_opts["module_dirs"] = self.opts["module_dirs"]
self.minion_opts["__master_opts__"] = self.context["master_opts"]

# Re-serialize the minion config after updating relenv-specific paths
# This ensures the config file sent to the remote system has the correct extension_modules path
self.minion_config = salt.serializers.yaml.serialize(self.minion_opts)
log.debug(
"RELENV: Re-serialized minion config with extension_modules=%s",
self.minion_opts["extension_modules"],
)

# NOTE: We no longer pre-compile pillar for relenv here.
# Both thin and relenv now use the wrapper system (_run_wfunc_thin())
# which compiles pillar dynamically, ensuring correct behavior with pillar overrides:
# - 1x compilation without pillar overrides
# - 2x compilation with pillar overrides (re-compiled in wrapper modules)
else:
self.thin = thin if thin else salt.utils.thin.thin_path(opts["cachedir"])

Expand Down Expand Up @@ -1867,22 +1851,16 @@ def _cmd_str(self):
and isinstance(self.argv[0], str)
and " " in self.argv[0]
):
# Split the string into shell words
argv_to_use = shlex.split(self.argv[0])
else:
argv_to_use = self.argv

quoted_args = " ".join(shlex.quote(str(arg)) for arg in argv_to_use)
log.debug(
"RELENV: Building shim with argv=%s, argv_to_use=%s, quoted_args=%s",
self.argv,
argv_to_use,
quoted_args,
)

# Note: Config is sent separately via SCP in cmd_block() to avoid ARG_MAX issues
# The shim expects the config file to already exist at {THIN_DIR}/minion
return SSH_SH_SHIM_RELENV.format(
# Use .replace() for minion_config — it is YAML flow-style and
# may contain literal { } which would break .format().
shim = SSH_SH_SHIM_RELENV.format(
DEBUG=debug,
SUDO=sudo,
SUDO_USER=sudo_user or "",
Expand All @@ -1892,6 +1870,7 @@ def _cmd_str(self):
ARGS=quoted_args,
EXT_MODS_VERSION=self.mods.get("version", ""),
)
return shim.replace("__SALT_MINION_CONFIG__", self.minion_config)

thin_code_digest, thin_sum = salt.utils.thin.thin_sum(cachedir, "sha1")
arg_str = '''
Expand Down
3 changes: 3 additions & 0 deletions salt/config/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -471,6 +471,8 @@ def _gather_buffer_space():
"return_retry_tries": int,
# Configures amount of retries for Syndic to Master of Masters
"syndic_retries": int,
# Top-level pillar key for per-type resource configuration (default: resources)
"resource_pillar_key": str,
# Specify one or more returners in which all events will be sent to. Requires that the returners
# in question have an event_return(event) function!
"event_return": (list, str),
Expand Down Expand Up @@ -1285,6 +1287,7 @@ def _gather_buffer_space():
"return_retry_timer": 5,
"return_retry_timer_max": 10,
"return_retry_tries": 3,
"resource_pillar_key": "resources",
"syndic_retries": 3,
"random_reauth_delay": 10,
"winrepo_source_dir": "salt://win/repo-ng/",
Expand Down
28 changes: 28 additions & 0 deletions salt/grains/resources.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
"""
Expose the resource IDs managed by this minion as a grain.

The grain ``salt_resources`` mirrors the ``resources:`` section of the minion
configuration so that the master's grains cache records which resources each
minion manages. This enables grain-based targeting (``G@salt_resources``) and
gives operators a human-readable view of resource topology via ``grains.items``.

Example output::

salt_resources:
dummy:
- dummy-01
- dummy-02
- dummy-03
"""

import logging

log = logging.getLogger(__name__)


def resources():
    """
    Return the resource IDs managed by this minion, keyed by resource type.

    Reads the ``resources:`` section of the minion configuration (via the
    loader-injected ``__opts__``) and exposes it under the ``salt_resources``
    grain key. An empty or absent section yields no grains at all.
    """
    configured = __opts__.get("resources", {})
    # Emit nothing (rather than an empty grain) when no resources are configured.
    return {"salt_resources": configured} if configured else {}
87 changes: 87 additions & 0 deletions salt/loader/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@
str(SALT_BASE_PATH / "output"),
str(SALT_BASE_PATH / "pillar"),
str(SALT_BASE_PATH / "proxy"),
str(SALT_BASE_PATH / "resource"),
str(SALT_BASE_PATH / "queues"),
str(SALT_BASE_PATH / "renderers"),
str(SALT_BASE_PATH / "returners"),
Expand Down Expand Up @@ -495,6 +496,92 @@ def proxy(
)


def resource(
    opts,
    functions=None,
    utils=None,
    context=None,
    loaded_base_name=None,
):
    """
    Load the resource connection modules (``salt/resource/*.py``).

    Returns a LazyLoader whose functions are accessible via the
    ``__resource_funcs__`` dunder injected into resource execution modules.
    Analogous to :func:`proxy` for proxy minions.

    :param dict opts: The Salt options dictionary.
    :param LazyLoader functions: A LazyLoader returned from :func:`minion_mods`.
    :param LazyLoader utils: A LazyLoader returned from :func:`utils`.
    :param dict context: Shared loader context dictionary.
    :param str loaded_base_name: Module namespace prefix for this loader.
    """
    # Dunders made available to every loaded resource connection module.
    packed_dunders = {
        "__salt__": functions,
        "__utils__": utils,
        "__context__": context,
        "__resource__": {},
    }
    extra_dirs = utils.module_dirs if utils else None
    return LazyLoader(
        _module_dirs(opts, "resource"),
        opts,
        tag="resource",
        pack=packed_dunders,
        extra_module_dirs=extra_dirs,
        pack_self="__resource_funcs__",
        loaded_base_name=loaded_base_name,
    )


def resource_modules(
    opts,
    resource_type,
    resource_funcs=None,
    utils=None,
    context=None,
    loaded_base_name=None,
):
    """
    Load execution modules for a specific resource type.

    Creates an isolated :class:`LazyLoader` whose opts contain
    ``resource_type``, allowing execution modules to gate their
    ``__virtual__`` on that value — the same mechanism proxy modules use
    with ``proxytype``. A minion managing N resource types holds N of
    these loaders simultaneously (one per type, not one per device).

    :param dict opts: The Salt options dictionary. A copy is made and
        ``resource_type`` is injected before passing to the loader.
    :param str resource_type: The resource type string (e.g. ``"dummy"``).
    :param LazyLoader resource_funcs: The resource connection loader returned
        by :func:`resource`, injected as ``__resource_funcs__``.
    :param LazyLoader utils: A LazyLoader returned from :func:`utils`.
    :param dict context: Shared loader context dictionary.
    :param str loaded_base_name: Module namespace prefix for this loader.
    """
    # Shallow-copy opts so the injected type marker never leaks back into
    # the caller's options dictionary.
    typed_opts = dict(opts)
    typed_opts["resource_type"] = resource_type

    packed_dunders = {
        "__context__": context,
        "__utils__": utils,
        "__resource_funcs__": resource_funcs,
        "__opts__": typed_opts,
        # Empty sentinel so LazyLoader creates a NamedLoaderContext for
        # __resource__ on every loaded module. The NamedLoaderContext
        # reads from resource_ctxvar, which _thread_return sets per-call
        # before dispatching — giving each resource job its own identity.
        "__resource__": {},
    }
    return LazyLoader(
        _module_dirs(typed_opts, "modules", "module"),
        typed_opts,
        tag="module",
        pack=packed_dunders,
        extra_module_dirs=utils.module_dirs if utils else None,
        loaded_base_name=loaded_base_name,
        pack_self="__salt__",
    )


def returners(
opts, functions, whitelist=None, context=None, proxy=None, loaded_base_name=None
):
Expand Down
Loading
Loading