From 80919b0ee8b26bbab441eef1c953e57c9430960e Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 20 Feb 2025 14:00:36 -0600 Subject: [PATCH 01/39] first round of adding new extensions to flake8, then checking and modernizing type hints --- .../alloc_funcs/give_sim_work_first.py | 3 +- libensemble/ensemble.py | 19 ++++--- libensemble/executors/balsam_executor.py | 27 +++++----- libensemble/executors/executor.py | 51 +++++++++---------- libensemble/executors/mpi_executor.py | 41 ++++++++------- .../gen_funcs/persistent_ax_multitask.py | 31 +++++------ pyproject.toml | 3 ++ 7 files changed, 84 insertions(+), 91 deletions(-) diff --git a/libensemble/alloc_funcs/give_sim_work_first.py b/libensemble/alloc_funcs/give_sim_work_first.py index 1e528917b6..4f587ef0e0 100644 --- a/libensemble/alloc_funcs/give_sim_work_first.py +++ b/libensemble/alloc_funcs/give_sim_work_first.py @@ -1,5 +1,4 @@ import time -from typing import Tuple import numpy as np import numpy.typing as npt @@ -15,7 +14,7 @@ def give_sim_work_first( alloc_specs: dict, persis_info: dict, libE_info: dict, -) -> Tuple[dict]: +) -> tuple[dict]: """ Decide what should be given to workers. 
This allocation function gives any available simulation work first, and only when all simulations are diff --git a/libensemble/ensemble.py b/libensemble/ensemble.py index 31549d5b53..afd46000a3 100644 --- a/libensemble/ensemble.py +++ b/libensemble/ensemble.py @@ -1,7 +1,6 @@ import importlib import json import logging -from typing import Optional import numpy.typing as npt import tomli @@ -270,15 +269,15 @@ class Ensemble: def __init__( self, - sim_specs: Optional[SimSpecs] = SimSpecs(), - gen_specs: Optional[GenSpecs] = GenSpecs(), - exit_criteria: Optional[ExitCriteria] = {}, - libE_specs: Optional[LibeSpecs] = LibeSpecs(), - alloc_specs: Optional[AllocSpecs] = AllocSpecs(), - persis_info: Optional[dict] = {}, - executor: Optional[Executor] = None, - H0: Optional[npt.NDArray] = None, - parse_args: Optional[bool] = False, + sim_specs: SimSpecs | None = SimSpecs(), + gen_specs: GenSpecs | None = GenSpecs(), + exit_criteria: ExitCriteria | None = {}, + libE_specs: LibeSpecs | None = LibeSpecs(), + alloc_specs: AllocSpecs | None = AllocSpecs(), + persis_info: dict | None = {}, + executor: Executor | None = None, + H0: npt.NDArray | None = None, + parse_args: bool | None = False, ): self.sim_specs = sim_specs self.gen_specs = gen_specs diff --git a/libensemble/executors/balsam_executor.py b/libensemble/executors/balsam_executor.py index 6c97da4ccf..af1f88e88d 100644 --- a/libensemble/executors/balsam_executor.py +++ b/libensemble/executors/balsam_executor.py @@ -78,7 +78,6 @@ class HelloApp(ApplicationDefinition): import logging import os import time -from typing import Any, Dict, List, Optional, Union from balsam import util @@ -106,9 +105,9 @@ class BalsamTask(Task): def __init__( self, - app: Optional[Application] = None, + app: Application | None = None, app_args: dict = None, - workdir: Optional[str] = None, + workdir: str | None = None, stdout: str = None, stderr: str = None, workerid: int = None, @@ -122,7 +121,7 @@ def __init__( # May want to override workdir 
with Balsam value when it exists Task.__init__(self, app, app_args, workdir, stdout, stderr, workerid) - def _get_time_since_balsam_submit(self) -> Union[int, float]: + def _get_time_since_balsam_submit(self) -> int: """Return time since balsam task entered ``RUNNING`` state""" event_query = EventLog.objects.filter(job_id=self.process.id, to_state="RUNNING") if not len(event_query): @@ -203,7 +202,7 @@ def poll(self) -> None: self.state = "FAILED" self._set_complete() - def wait(self, timeout: Optional[int] = None) -> None: + def wait(self, timeout: int | None = None) -> None: """Waits on completion of the task or raises ``TimeoutExpired``. Status attributes of task are updated on completion. @@ -280,10 +279,10 @@ def add_app(self, *args) -> None: def register_app( self, BalsamApp: ApplicationDefinition, - app_name: Optional[str] = None, - calc_type: Optional[str] = None, + app_name: str | None = None, + calc_type: str | None = None, desc: str = None, - precedent: Optional[str] = None, + precedent: str | None = None, ) -> None: """Registers a Balsam ``ApplicationDefinition`` to libEnsemble. This class instance *must* have a ``site`` and ``command_template`` specified. See @@ -331,9 +330,9 @@ def submit_allocation( job_mode: str = "mpi", queue: str = "local", project: str = "local", - optional_params: Dict[Any, Any] = {}, - filter_tags: Dict[Any, Any] = {}, - partitions: List[Any] = [], + optional_params: dict = {}, + filter_tags: dict = {}, + partitions: list = [], ) -> BatchJob: """ Submits a Balsam ``BatchJob`` machine allocation request to Balsam. 
@@ -435,14 +434,14 @@ def set_resources(self, resources: str) -> None: def submit( self, - calc_type: Optional[str] = None, - app_name: Optional[str] = None, + calc_type: str | None = None, + app_name: str | None = None, app_args: dict = None, num_procs: int = None, num_nodes: int = None, procs_per_node: int = None, max_tasks_per_node: int = None, - machinefile: Optional[str] = None, + machinefile: str | None = None, gpus_per_rank: int = 0, transfers: dict = {}, workdir: str = "", diff --git a/libensemble/executors/executor.py b/libensemble/executors/executor.py index f90d07aca5..d9cf6f428d 100644 --- a/libensemble/executors/executor.py +++ b/libensemble/executors/executor.py @@ -12,7 +12,7 @@ import sys import time from pathlib import Path -from typing import Any, Optional, Union +from typing import Any import libensemble.utils.launcher as launcher from libensemble.message_numbers import ( @@ -24,7 +24,6 @@ WORKER_DONE, WORKER_KILL_ON_TIMEOUT, ) -from libensemble.resources.resources import Resources from libensemble.utils.timer import TaskTimer logger = logging.getLogger(__name__) @@ -78,10 +77,10 @@ class Application: def __init__( self, full_path: str, - name: Optional[str] = None, - calc_type: Optional[str] = "sim", - desc: Optional[str] = None, - pyobj: Optional[Any] = None, # used by balsam_executor to store ApplicationDefinition + name: str | None = None, + calc_type: str | None = "sim", + desc: str | None = None, + pyobj: Any | None = None, # used by balsam_executor to store ApplicationDefinition precedent: str = "", ) -> None: """Instantiates a new Application instance.""" @@ -101,7 +100,7 @@ def __init__( self.app_cmd = " ".join(filter(None, [self.precedent, self.full_path])) -def jassert(test: Optional[Union[Application, bool]], *args) -> None: +def jassert(test: Application | bool | None, *args) -> None: "Version of assert that raises a ExecutorException" if not test: raise ExecutorException(*args) @@ -170,7 +169,7 @@ def _add_to_env(self, key, value): 
"""Add to task environment - overwrites if already set""" self.env[key] = value - def workdir_exists(self) -> Optional[bool]: + def workdir_exists(self) -> bool | None: """Returns true if the task's workdir exists""" return self.workdir and os.path.exists(self.workdir) @@ -260,7 +259,7 @@ def poll(self) -> None: self._set_complete() - def wait(self, timeout: Optional[float] = None) -> None: + def wait(self, timeout: float | None = None) -> None: """Waits on completion of the task or raises TimeoutExpired exception Status attributes of task are updated on completion. @@ -288,7 +287,7 @@ def wait(self, timeout: Optional[float] = None) -> None: self._set_complete() - def result(self, timeout: Optional[Union[int, float]] = None) -> str: + def result(self, timeout: int | float | None = None) -> str: """Wrapper for task.wait() that also returns the task's status on completion. Parameters @@ -303,7 +302,7 @@ def result(self, timeout: Optional[Union[int, float]] = None) -> str: self.wait(timeout=timeout) return self.state - def exception(self, timeout: Optional[Union[int, float]] = None): + def exception(self, timeout: int | float | None = None): """Wrapper for task.wait() that instead returns the task's error code on completion. Parameters @@ -386,7 +385,7 @@ class Executor: executor = None - def _wait_on_start(self, task: Task, fail_time: Optional[int] = None) -> None: + def _wait_on_start(self, task: Task, fail_time: int | None = None) -> None: """Called by submit when wait_on_start is True. Blocks until task polls as having started. 
@@ -472,7 +471,7 @@ def default_app(self, calc_type: str) -> Application: jassert(app, f"Default {calc_type} app is not set") return app - def set_resources(self, resources: Resources): + def set_resources(self, resources): # Does not use resources pass @@ -493,9 +492,9 @@ def set_gen_procs_gpus(self, libE_info): def register_app( self, full_path: str, - app_name: Optional[str] = None, - calc_type: Optional[str] = None, - desc: Optional[str] = None, + app_name: str | None = None, + calc_type: str | None = None, + desc: str | None = None, precedent: str = "", ) -> None: """Registers a user application to libEnsemble. @@ -571,7 +570,7 @@ def manager_kill_received(self) -> bool: return False def polling_loop( - self, task: Task, timeout: Optional[int] = None, delay: float = 0.1, poll_manager: bool = False + self, task: Task, timeout: int | None = None, delay: float = 0.1, poll_manager: bool = False ) -> int: """Optional, blocking, generic task status polling loop. Operates until the task finishes, times out, or is optionally killed via a manager signal. 
On completion, returns a @@ -637,7 +636,7 @@ def polling_loop( return calc_status - def get_task(self, taskid: Union[str, int]) -> Optional[Task]: + def get_task(self, taskid: str | int) -> Task | None: """Returns the task object for the supplied task ID""" task = next((j for j in self.list_of_tasks if j.id == taskid), None) if task is None: @@ -681,14 +680,14 @@ def _check_app_exists(self, full_path: str) -> None: def submit( self, - calc_type: Optional[str] = None, - app_name: Optional[str] = None, - app_args: Optional[str] = None, - stdout: Optional[str] = None, - stderr: Optional[str] = None, - dry_run: Optional[bool] = False, - wait_on_start: Optional[bool] = False, - env_script: Optional[str] = None, + calc_type: str | None = None, + app_name: str | None = None, + app_args: str | None = None, + stdout: str | None = None, + stderr: str | None = None, + dry_run: bool | None = False, + wait_on_start: bool | None = False, + env_script: str | None = None, ) -> Task: """Create a new task and run as a local serial subprocess. 
diff --git a/libensemble/executors/mpi_executor.py b/libensemble/executors/mpi_executor.py index 28d1fb6f97..fd67865b9a 100644 --- a/libensemble/executors/mpi_executor.py +++ b/libensemble/executors/mpi_executor.py @@ -15,7 +15,6 @@ import logging import os import time -from typing import List, Optional, Union import libensemble.utils.launcher as launcher from libensemble.executors.executor import Executor, ExecutorException, Task @@ -141,7 +140,7 @@ def set_resources(self, resources: Resources) -> None: self.resources = resources def _launch_with_retries( - self, task: Task, subgroup_launch: bool, wait_on_start: bool, run_cmd: List[str], use_shell: bool + self, task: Task, subgroup_launch: bool, wait_on_start: bool, run_cmd: list[str], use_shell: bool ) -> None: """Launch task with retry mechanism""" retry_count = 0 @@ -189,25 +188,25 @@ def _launch_with_retries( def submit( self, - calc_type: Optional[str] = None, - app_name: Optional[str] = None, - num_procs: Optional[int] = None, - num_nodes: Optional[int] = None, - procs_per_node: Optional[int] = None, - num_gpus: Optional[int] = None, - machinefile: Optional[str] = None, - app_args: Optional[str] = None, - stdout: Optional[str] = None, - stderr: Optional[str] = None, - stage_inout: Optional[str] = None, - hyperthreads: Optional[bool] = False, - dry_run: Optional[bool] = False, - wait_on_start: Optional[bool] = False, - extra_args: Optional[str] = None, - auto_assign_gpus: Optional[bool] = False, - match_procs_to_gpus: Optional[bool] = False, - env_script: Optional[str] = None, - mpi_runner_type: Optional[Union[str, dict]] = None, + calc_type: str | None = None, + app_name: str | None = None, + num_procs: int | None = None, + num_nodes: int | None = None, + procs_per_node: int | None = None, + num_gpus: int | None = None, + machinefile: str | None = None, + app_args: str | None = None, + stdout: str | None = None, + stderr: str | None = None, + stage_inout: str | None = None, + hyperthreads: bool | None = 
False, + dry_run: bool | None = False, + wait_on_start: bool | None = False, + extra_args: str | None = None, + auto_assign_gpus: bool | None = False, + match_procs_to_gpus: bool | None = False, + env_script: str | None = None, + mpi_runner_type: str | dict | None = None, ) -> Task: """Creates a new task, and either executes or schedules execution. diff --git a/libensemble/gen_funcs/persistent_ax_multitask.py b/libensemble/gen_funcs/persistent_ax_multitask.py index 0f5df7e303..3c6a427751 100644 --- a/libensemble/gen_funcs/persistent_ax_multitask.py +++ b/libensemble/gen_funcs/persistent_ax_multitask.py @@ -17,18 +17,15 @@ """ import os -from copy import deepcopy -from typing import Optional -from pyre_extensions import assert_is_instance import warnings +from copy import deepcopy +from typing import Optional # type: ignore import numpy as np import pandas as pd import torch - from ax import Metric, Runner from ax.core.data import Data -from ax.core.experiment import Experiment from ax.core.generator_run import GeneratorRun from ax.core.multi_type_experiment import MultiTypeExperiment from ax.core.objective import Objective @@ -41,20 +38,22 @@ from ax.modelbridge.registry import Models, ST_MTGP_trans from ax.modelbridge.torch import TorchModelBridge from ax.modelbridge.transforms.convert_metric_names import tconfig_from_mt_experiment -from ax.storage.metric_registry import register_metrics from ax.runners import SyntheticRunner from ax.storage.json_store.save import save_experiment +from ax.storage.metric_registry import register_metrics from ax.storage.runner_registry import register_runner from ax.utils.common.result import Ok +from pyre_extensions import assert_is_instance try: # For Ax >= 0.5.0 - from ax.modelbridge.transforms.derelativize import Derelativize + from ax.modelbridge.registry import MBM_X_trans from ax.modelbridge.transforms.convert_metric_names import ConvertMetricNames - from ax.modelbridge.transforms.trial_as_task import TrialAsTask + from 
ax.modelbridge.transforms.derelativize import Derelativize from ax.modelbridge.transforms.stratified_standardize_y import StratifiedStandardizeY from ax.modelbridge.transforms.task_encode import TaskChoiceToIntTaskChoice - from ax.modelbridge.registry import MBM_X_trans + from ax.modelbridge.transforms.trial_as_task import TrialAsTask + MT_MTGP_trans = MBM_X_trans + [ Derelativize, ConvertMetricNames, @@ -87,10 +86,10 @@ # get_MTGP based on https://ax.dev/docs/tutorials/multi_task/ def get_MTGP( - experiment: Experiment, + experiment, data: Data, - search_space: Optional[SearchSpace] = None, - trial_index: Optional[int] = None, + search_space: Optional[SearchSpace] = None, # noqa: MDA501 + trial_index: Optional[int] = None, # noqa: MDA501 device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.double, ) -> TorchModelBridge: @@ -103,9 +102,7 @@ def get_MTGP( """ if isinstance(experiment, MultiTypeExperiment): - trial_index_to_type = { - t.index: t.trial_type for t in experiment.trials.values() - } + trial_index_to_type = {t.index: t.trial_type for t in experiment.trials.values()} transforms = MT_MTGP_trans transform_configs = { "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}}, @@ -276,9 +273,7 @@ def persistent_gp_mt_ax_gen_f(H, persis_info, gen_specs, libE_info): if not os.path.exists("model_history"): os.mkdir("model_history") # Register metric and runner in order to be able to save to json. 
- _, encoder_registry, decoder_registry = register_metrics( - {AxMetric: None} - ) + _, encoder_registry, decoder_registry = register_metrics({AxMetric: None}) _, encoder_registry, decoder_registry = register_runner( AxRunner, encoder_registry=encoder_registry, diff --git a/pyproject.toml b/pyproject.toml index 8b5be4bf64..cf6d5a03bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,3 +128,6 @@ noy = "noy" [tool.typos.files] extend-exclude = ["*.bib", "*.xml", "docs/nitpicky"] + +[dependency-groups] +dev = [ "flake8-modern-annotations>=1.6.0,<2", "flake8-type-checking>=3.0.0,<4"] From e3f1751ae3e5a009167009c0febce84ef94af401 Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 20 Feb 2025 14:49:03 -0600 Subject: [PATCH 02/39] additional type hint refactors --- docs/data_structures/libE_specs.rst | 6 +- libensemble/executors/mpi_executor.py | 3 +- .../gen_funcs/persistent_ax_multitask.py | 4 +- libensemble/libE.py | 9 +- libensemble/manager.py | 4 +- libensemble/resources/env_resources.py | 34 +-- libensemble/resources/mpi_resources.py | 15 +- libensemble/resources/node_resources.py | 7 +- libensemble/resources/platforms.py | 21 +- libensemble/resources/worker_resources.py | 6 +- libensemble/specs.py | 200 +++++++++--------- libensemble/utils/launcher.py | 11 +- libensemble/utils/output_directory.py | 3 +- libensemble/utils/runners.py | 9 +- 14 files changed, 161 insertions(+), 171 deletions(-) diff --git a/docs/data_structures/libE_specs.rst b/docs/data_structures/libE_specs.rst index 2a9195cea5..d82b0fb1a7 100644 --- a/docs/data_structures/libE_specs.rst +++ b/docs/data_structures/libE_specs.rst @@ -206,14 +206,14 @@ libEnsemble is primarily customized by setting options within a ``LibeSpecs`` cl **save_H_and_persis_on_abort** [bool] = ``True``: Save states of ``H`` and ``persis_info`` to file on aborting after an exception. 
- **save_H_on_completion** Optional[bool] = ``False`` + **save_H_on_completion** bool | None = ``False`` Save state of ``H`` to file upon completing a workflow. Also enabled when either ``save_every_k_sims`` or ``save_every_k_gens`` is set. - **save_H_with_date** Optional[bool] = ``False`` + **save_H_with_date** bool | None = ``False`` Save ``H`` filename contains date and timestamp. - **H_file_prefix** Optional[str] = ``"libE_history"`` + **H_file_prefix** str | None = ``"libE_history"`` Prefix for ``H`` filename. **use_persis_return_gen** [bool] = ``False``: diff --git a/libensemble/executors/mpi_executor.py b/libensemble/executors/mpi_executor.py index fd67865b9a..9b167ddaa1 100644 --- a/libensemble/executors/mpi_executor.py +++ b/libensemble/executors/mpi_executor.py @@ -20,7 +20,6 @@ from libensemble.executors.executor import Executor, ExecutorException, Task from libensemble.executors.mpi_runner import MPIRunner from libensemble.resources.mpi_resources import get_MPI_variant -from libensemble.resources.resources import Resources logger = logging.getLogger(__name__) # To change logging level for just this module @@ -136,7 +135,7 @@ def set_gen_procs_gpus(self, libE_info): self.gen_nprocs = libE_info.get("num_procs") self.gen_ngpus = libE_info.get("num_gpus") - def set_resources(self, resources: Resources) -> None: + def set_resources(self, resources) -> None: self.resources = resources def _launch_with_retries( diff --git a/libensemble/gen_funcs/persistent_ax_multitask.py b/libensemble/gen_funcs/persistent_ax_multitask.py index 3c6a427751..d585ce51dc 100644 --- a/libensemble/gen_funcs/persistent_ax_multitask.py +++ b/libensemble/gen_funcs/persistent_ax_multitask.py @@ -19,7 +19,7 @@ import os import warnings from copy import deepcopy -from typing import Optional # type: ignore +from typing import Optional # noqa: MDA400 import numpy as np import pandas as pd @@ -89,7 +89,7 @@ def get_MTGP( experiment, data: Data, search_space: Optional[SearchSpace] = None, # 
noqa: MDA501 - trial_index: Optional[int] = None, # noqa: MDA501 + trial_index: int | None = None, # noqa: MDA501 device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.double, ) -> TorchModelBridge: diff --git a/libensemble/libE.py b/libensemble/libE.py index 2762890bc3..af302d13c8 100644 --- a/libensemble/libE.py +++ b/libensemble/libE.py @@ -121,7 +121,6 @@ import sys import traceback from pathlib import Path -from typing import Callable, Dict import numpy as np @@ -154,11 +153,11 @@ def libE( sim_specs: SimSpecs, gen_specs: GenSpecs, exit_criteria: ExitCriteria, - persis_info: Dict = {}, + persis_info: dict = {}, alloc_specs: AllocSpecs = AllocSpecs(), libE_specs: LibeSpecs = {}, H0=None, -) -> (np.ndarray, Dict, int): +) -> (np.ndarray, dict, int): """ Parameters ---------- @@ -273,8 +272,8 @@ def manager( alloc_specs, libE_specs, hist: History, - on_abort: Callable = None, - on_cleanup: Callable = None, + on_abort=None, + on_cleanup=None, ): """Generic manager routine run.""" logger.info("Logger initializing: [workerID] precedes each line. 
[0] = Manager") diff --git a/libensemble/manager.py b/libensemble/manager.py index 99321ce6a3..559bb40e70 100644 --- a/libensemble/manager.py +++ b/libensemble/manager.py @@ -12,7 +12,7 @@ import sys import time import traceback -from typing import Any, Union +from typing import Any import numpy as np import numpy.typing as npt @@ -291,7 +291,7 @@ def term_test_stop_val(self, stop_val: Any) -> bool: H = self.hist.H return np.any(filter_nans(H[key][H["sim_ended"]]) <= val) - def term_test(self, logged: bool = True) -> Union[bool, int]: + def term_test(self, logged: bool = True) -> bool | int: """Checks termination criteria""" for retval, key, testf in self.term_tests: if key in self.exit_criteria: diff --git a/libensemble/resources/env_resources.py b/libensemble/resources/env_resources.py index 5086c5793e..47b3d78624 100644 --- a/libensemble/resources/env_resources.py +++ b/libensemble/resources/env_resources.py @@ -6,7 +6,7 @@ import os import re from collections import OrderedDict -from typing import Any, List, Optional, Tuple, Union +from typing import Any logger = logging.getLogger(__name__) @@ -38,11 +38,11 @@ class EnvResources: def __init__( self, - nodelist_env_slurm: Optional[str] = None, - nodelist_env_cobalt: Optional[str] = None, - nodelist_env_pbs: Optional[str] = None, - nodelist_env_lsf: Optional[str] = None, - nodelist_env_lsf_shortform: Optional[str] = None, + nodelist_env_slurm: str | None = None, + nodelist_env_cobalt: str | None = None, + nodelist_env_pbs: str | None = None, + nodelist_env_lsf: str | None = None, + nodelist_env_lsf_shortform: str | None = None, ) -> None: """Initializes a new EnvResources instance @@ -93,7 +93,7 @@ def __init__( self.scheduler = env break - def get_nodelist(self) -> List[Union[str, Any]]: + def get_nodelist(self) -> list[str | Any]: """Returns nodelist from environment or an empty list""" if self.scheduler: env = self.scheduler @@ -105,19 +105,19 @@ def get_nodelist(self) -> List[Union[str, Any]]: return [] 
@staticmethod - def abbrev_nodenames(node_list: List[str], prefix: str = None) -> List[str]: + def abbrev_nodenames(node_list: list[str], prefix: str = None) -> list[str]: """Returns nodelist with only string up to first dot""" newlist = [s.split(".", 1)[0] for s in node_list] return newlist @staticmethod - def cobalt_abbrev_nodenames(node_list: List[str], prefix: str = "nid") -> List[str]: + def cobalt_abbrev_nodenames(node_list: list[str], prefix: str = "nid") -> list[str]: """Returns nodelist with prefix and leading zeros stripped""" newlist = [s.lstrip(prefix) for s in node_list] newlist = [s.lstrip("0") for s in newlist] return newlist - def shortnames(self, node_list: List[str]) -> List[str]: + def shortnames(self, node_list: list[str]) -> list[str]: """Returns nodelist with entries in abbreviated form""" if self.scheduler == "Cobalt": return EnvResources.cobalt_abbrev_nodenames(node_list) @@ -126,7 +126,7 @@ def shortnames(self, node_list: List[str]) -> List[str]: return node_list @staticmethod - def _range_split(s: str) -> Tuple[int, int, int]: + def _range_split(s: str) -> tuple[int, int, int]: """Splits ID range string""" ab = s.split("-", 1) nnum_len = len(ab[0]) @@ -138,7 +138,7 @@ def _range_split(s: str) -> Tuple[int, int, int]: return a, b, nnum_len @staticmethod - def _noderange_append(prefix: str, nidstr: str, suffix: str) -> List[str]: + def _noderange_append(prefix: str, nidstr: str, suffix: str) -> list[str]: """Formats and appends nodes to overall nodelist""" nidlst = [] for nidgroup in nidstr.split(","): @@ -148,7 +148,7 @@ def _noderange_append(prefix: str, nidstr: str, suffix: str) -> List[str]: return nidlst @staticmethod - def get_slurm_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_slurm_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the Slurm environment""" fullstr = os.environ[node_list_env] if not fullstr: @@ -172,7 +172,7 @@ def get_slurm_nodelist(node_list_env: str) -> 
List[Union[str, Any]]: return sorted(nidlst) @staticmethod - def get_cobalt_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_cobalt_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the Cobalt environment""" nidlst = [] nidstr = os.environ[node_list_env] @@ -185,7 +185,7 @@ def get_cobalt_nodelist(node_list_env: str) -> List[Union[str, Any]]: return sorted(nidlst, key=int) @staticmethod - def get_pbs_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_pbs_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist path from PBS environment""" nidstr_path = os.environ[node_list_env] if not nidstr_path: @@ -201,7 +201,7 @@ def get_pbs_nodelist(node_list_env: str) -> List[Union[str, Any]]: return unique_nodelist_shortnames @staticmethod - def get_lsf_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_lsf_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the LSF environment""" full_list = os.environ[node_list_env] entries = full_list.split() @@ -211,7 +211,7 @@ def get_lsf_nodelist(node_list_env: str) -> List[Union[str, Any]]: return nodes @staticmethod - def get_lsf_nodelist_frm_shortform(node_list_env: str) -> List[Union[str, Any]]: + def get_lsf_nodelist_frm_shortform(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the LSF environment from short-form version""" full_list = os.environ[node_list_env] entries = full_list.split() diff --git a/libensemble/resources/mpi_resources.py b/libensemble/resources/mpi_resources.py index 33a759ab59..c978dc247e 100644 --- a/libensemble/resources/mpi_resources.py +++ b/libensemble/resources/mpi_resources.py @@ -6,14 +6,13 @@ import os import platform import subprocess -from typing import Optional, Tuple, Union class MPIResourcesException(Exception): """Resources module exception""" -def rassert(test: Optional[Union[int, bool]], *args) -> None: +def 
rassert(test: int | bool | None, *args) -> None: if not test: raise MPIResourcesException(*args) @@ -89,8 +88,8 @@ def get_MPI_runner(mpi_runner=None) -> str: def task_partition( - num_procs: Optional[int], num_nodes: Optional[int], procs_per_node: Optional[int], machinefile: Optional[str] = None -) -> Union[Tuple[None, None, None], Tuple[int, int, int]]: + num_procs: int | None, num_nodes: int | None, procs_per_node: int | None, machinefile: str | None = None +) -> tuple[None, None, None] | tuple[int, int, int]: """Takes provided nprocs/nodes/ranks and outputs working configuration of procs/nodes/ranks or error """ @@ -223,12 +222,12 @@ def get_resources(resources, num_procs=None, num_nodes=None, procs_per_node=None def create_machinefile( resources: "resources.Resources", # noqa: F821 - machinefile: Optional[str] = None, + machinefile: str | None = None, num_procs: int = None, - num_nodes: Optional[int] = None, - procs_per_node: Optional[int] = None, + num_nodes: int | None = None, + procs_per_node: int | None = None, hyperthreads: bool = False, -) -> Tuple[bool, None, int, int]: +) -> tuple[bool, None, int, int]: """Creates a machinefile based on user-supplied config options, completed by detected machine resources """ diff --git a/libensemble/resources/node_resources.py b/libensemble/resources/node_resources.py index 55d3ef5d94..67eedea856 100644 --- a/libensemble/resources/node_resources.py +++ b/libensemble/resources/node_resources.py @@ -6,11 +6,10 @@ import collections import logging import os -from typing import Optional, Tuple +from typing import Tuple import psutil -from libensemble.resources.env_resources import EnvResources from libensemble.resources.gpu_detect import get_gpus_from_env, get_num_gpus logger = logging.getLogger(__name__) @@ -52,7 +51,7 @@ def _get_remote_resources(launcher): return output.decode() -def _get_cpu_resources_from_env(env_resources: Optional[EnvResources] = None) -> Optional[Tuple[int, int]]: +def 
_get_cpu_resources_from_env(env_resources=None) -> tuple[int, int] | None: """Returns logical and physical cores per node by querying environment or None""" if not env_resources: return None @@ -146,7 +145,7 @@ def _update_from_str(cores_info, cores_info_str): def get_sub_node_resources( - launcher: Optional[str] = None, remote_mode: bool = False, env_resources: Optional[EnvResources] = None + launcher: str | None = None, remote_mode: bool = False, env_resources=None ) -> Tuple[int, int, int]: """Returns logical and physical cores and GPUs per node as a tuple diff --git a/libensemble/resources/platforms.py b/libensemble/resources/platforms.py index 34f6cae1fc..6b60dab526 100644 --- a/libensemble/resources/platforms.py +++ b/libensemble/resources/platforms.py @@ -11,7 +11,6 @@ import logging import os import subprocess -from typing import Optional from pydantic import BaseModel @@ -33,28 +32,28 @@ class Platform(BaseModel): All are optional, and any not defined will be determined by libEnsemble's auto-detection. """ - mpi_runner: Optional[str] = None + mpi_runner: str | None = None """MPI runner: One of ``"mpich"``, ``"openmpi"``, ``"aprun"``, ``"srun"``, ``"jsrun"``, ``"msmpi"``, ``"custom"`` """ - runner_name: Optional[str] = None + runner_name: str | None = None """Literal string of MPI runner command. 
Only needed if different to the default Note that ``"mpich"`` and ``"openmpi"`` runners have the default command ``"mpirun"`` """ - cores_per_node: Optional[int] = None + cores_per_node: int | None = None """Number of physical CPU cores on a compute node of the platform""" - logical_cores_per_node: Optional[int] = None + logical_cores_per_node: int | None = None """Number of logical CPU cores on a compute node of the platform""" - gpus_per_node: Optional[int] = None + gpus_per_node: int | None = None """Number of GPU devices on a compute node of the platform""" - tiles_per_gpu: Optional[int] = None + tiles_per_gpu: int | None = None """Number of tiles on a GPU""" - gpu_setting_type: Optional[str] = None + gpu_setting_type: str | None = None """ How GPUs will be assigned. @@ -91,14 +90,14 @@ class Platform(BaseModel): """ - gpu_setting_name: Optional[str] = None + gpu_setting_name: str | None = None """Name of GPU setting See :attr:`gpu_setting_type` for more details. """ - gpu_env_fallback: Optional[str] = None + gpu_env_fallback: str | None = None """GPU fallback environment setting if not using an MPI runner. For example: @@ -115,7 +114,7 @@ class Platform(BaseModel): """ - scheduler_match_slots: Optional[bool] = True + scheduler_match_slots: bool | None = True """ Whether the libEnsemble resource scheduler should only assign matching slots when there are multiple (partial) nodes assigned to a sim function. 
diff --git a/libensemble/resources/worker_resources.py b/libensemble/resources/worker_resources.py index 8ec3d8f4bb..fbd270fce9 100644 --- a/libensemble/resources/worker_resources.py +++ b/libensemble/resources/worker_resources.py @@ -1,7 +1,7 @@ import logging import os from collections import Counter, OrderedDict -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import Any, Dict, List, Tuple, Union import numpy as np @@ -97,9 +97,7 @@ def free_rsets(self, worker=None): self.nongpu_rsets_free += np.count_nonzero(~self.rsets["gpus"][rsets_to_free]) @staticmethod - def get_index_list( - num_workers: int, num_rsets: int, zero_resource_list: List[Union[int, Any]] - ) -> List[Optional[int]]: + def get_index_list(num_workers: int, num_rsets: int, zero_resource_list: List[Union[int, Any]]) -> List[int | None]: """Map WorkerID to index into a nodelist""" index = 0 index_list = [] diff --git a/libensemble/specs.py b/libensemble/specs.py index 494dd23f45..3ae06d8b9b 100644 --- a/libensemble/specs.py +++ b/libensemble/specs.py @@ -1,10 +1,10 @@ import random +import warnings from pathlib import Path -from typing import Any, Callable, List, Optional, Tuple, Union +from typing import Any, Callable import pydantic from pydantic import BaseModel, Field -import warnings from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first from libensemble.resources.platforms import Platform @@ -32,41 +32,41 @@ class SimSpecs(BaseModel): produced by a generator function. """ - inputs: Optional[List[str]] = Field(default=[], alias="in") + inputs: list[str] | None = Field(default=[], alias="in") """ - List of **field names** out of the complete history to pass + list of **field names** out of the complete history to pass into the simulation function upon calling. 
""" - persis_in: Optional[List[str]] = [] + persis_in: list[str] | None = [] """ - List of **field names** to send to a persistent simulation function + list of **field names** to send to a persistent simulation function throughout the run, following initialization. """ # list of tuples for dtype construction - outputs: Optional[List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]]] = Field([], alias="out") + outputs: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]] = Field([], alias="out") """ - List of 2- or 3-tuples corresponding to NumPy dtypes. + list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Typically used to initialize an output array within the simulation function: ``out = np.zeros(100, dtype=sim_specs["out"])``. Also necessary to construct libEnsemble's history array. """ - globus_compute_endpoint: Optional[str] = "" + globus_compute_endpoint: str | None = "" """ A Globus Compute (https://www.globus.org/compute) ID corresponding to an active endpoint on a remote system. libEnsemble's workers will submit simulator function instances to this endpoint instead of calling them locally. """ - threaded: Optional[bool] = False + threaded: bool | None = False """ Instruct Worker process to launch user function to a thread. """ - user: Optional[dict] = {} + user: dict | None = {} """ A user-data dictionary to place bounds, constants, settings, or other parameters for customizing the simulator function. @@ -78,45 +78,45 @@ class GenSpecs(BaseModel): Specifications for configuring a Generator Function. """ - gen_f: Optional[Callable] = None + gen_f: Callable | None = None """ Python function matching the ``gen_f`` interface. Produces parameters for evaluation by a simulator function, and makes decisions based on simulator function output. 
""" - inputs: Optional[List[str]] = Field(default=[], alias="in") + inputs: list[str] | None = Field(default=[], alias="in") """ - List of **field names** out of the complete history to pass + list of **field names** out of the complete history to pass into the generator function upon calling. """ - persis_in: Optional[List[str]] = [] + persis_in: list[str] | None = [] """ - List of **field names** to send to a persistent generator function + list of **field names** to send to a persistent generator function throughout the run, following initialization. """ - outputs: Optional[List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]]] = Field([], alias="out") + outputs: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]] = Field([], alias="out") """ - List of 2- or 3-tuples corresponding to NumPy dtypes. + list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Typically used to initialize an output array within the generator: ``out = np.zeros(100, dtype=gen_specs["out"])``. Also used to construct libEnsemble's history array. """ - globus_compute_endpoint: Optional[str] = "" + globus_compute_endpoint: str | None = "" """ A Globus Compute (https://www.globus.org/compute) ID corresponding to an active endpoint on a remote system. libEnsemble's workers will submit generator function instances to this endpoint instead of calling them locally. """ - threaded: Optional[bool] = False + threaded: bool | None = False """ Instruct Worker process to launch user function to a thread. """ - user: Optional[dict] = {} + user: dict | None = {} """ A user-data dictionary to place bounds, constants, settings, or other parameters for customizing the generator function @@ -134,15 +134,15 @@ class AllocSpecs(BaseModel): should be called, and with what resources and parameters. 
""" - user: Optional[dict] = {"num_active_gens": 1} + user: dict | None = {"num_active_gens": 1} """ A user-data dictionary to place bounds, constants, settings, or other parameters for customizing the allocation function. """ - outputs: List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]] = Field([], alias="out") + outputs: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]] = Field([], alias="out") """ - List of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. + list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Allocation functions that modify libEnsemble's History array with additional fields should list those fields here. Also used to construct libEnsemble's history array. """ @@ -154,16 +154,16 @@ class ExitCriteria(BaseModel): Specifications for configuring when libEnsemble should stop a given run. """ - sim_max: Optional[int] = None + sim_max: int | None = None """Stop when this many new points have been evaluated by simulation functions.""" - gen_max: Optional[int] = None + gen_max: int | None = None """Stop when this many new points have been generated by generator functions.""" - wallclock_max: Optional[float] = None + wallclock_max: float | None = None """Stop when this many seconds has elapsed since the manager initialized.""" - stop_val: Optional[Tuple[str, float]] = None + stop_val: tuple[str, float] | None = None """Stop when ``H[str] < float`` for the given ``(str, float)`` pair.""" @@ -172,152 +172,152 @@ class LibeSpecs(BaseModel): Specifications for configuring libEnsemble's runtime behavior. """ - comms: Optional[str] = "mpi" + comms: str | None = "mpi" """ Manager/Worker communications mode. ``'mpi'``, ``'local'``, ``'threads'``, or ``'tcp'`` If ``nworkers`` is specified, then ``local`` comms will be used unless a parallel MPI environment is detected. 
""" - nworkers: Optional[int] = 0 + nworkers: int | None = 0 """ Number of worker processes in ``"local"``, ``"threads"``, or ``"tcp"``.""" - gen_on_manager: Optional[bool] = False + gen_on_manager: bool | None = False """ Instructs Manager process to run generator functions. This generator function can access/modify user objects by reference. """ - mpi_comm: Optional[Any] = None + mpi_comm: Any | None = None """ libEnsemble MPI communicator. Default: ``MPI.COMM_WORLD``""" - dry_run: Optional[bool] = False + dry_run: bool | None = False """ Whether libEnsemble should immediately exit after validating all inputs. """ - abort_on_exception: Optional[bool] = True + abort_on_exception: bool | None = True """ In MPI mode, whether to call ``MPI_ABORT`` on an exception. If ``False``, an exception will be raised by the manager. """ - save_every_k_sims: Optional[int] = 0 + save_every_k_sims: int | None = 0 """ Save history array to file after every k evaluated points. """ - save_every_k_gens: Optional[int] = 0 + save_every_k_gens: int | None = 0 """ Save history array to file after every k generated points. """ - save_H_and_persis_on_abort: Optional[bool] = True + save_H_and_persis_on_abort: bool | None = True """ Save states of ``H`` and ``persis_info`` to file on aborting after an exception.""" - save_H_on_completion: Optional[bool] = False + save_H_on_completion: bool | None = False """ Save state of ``H`` to file upon completing a workflow. Also enabled when either ``save_every_k_sims`` or ``save_every_k_gens`` is set. """ - save_H_with_date: Optional[bool] = False + save_H_with_date: bool | None = False """ ``H`` filename contains date and timestamp.""" - H_file_prefix: Optional[str] = "libE_history" + H_file_prefix: str | None = "libE_history" """ Prefix for ``H`` filename.""" - worker_timeout: Optional[int] = 1 + worker_timeout: int | None = 1 """ On libEnsemble shutdown, number of seconds after which workers considered timed out, then terminated. 
""" - kill_canceled_sims: Optional[bool] = False + kill_canceled_sims: bool | None = False """ Try to kill sims with ``"cancel_requested"`` set ``True``. If ``False``, the manager avoids this moderate overhead. """ - use_workflow_dir: Optional[bool] = False + use_workflow_dir: bool | None = False """ Whether to place *all* log files, dumped arrays, and default output directories in a separate `workflow` directory. Each run will be suffixed with a hash. If copying back an ensemble directory from a scratch space, the copy is placed here. """ - reuse_output_dir: Optional[bool] = False + reuse_output_dir: bool | None = False """ Whether to allow overwrites and access to previous ensemble and workflow directories in subsequent runs. ``False`` by default to protect results. """ - workflow_dir_path: Optional[Union[str, Path]] = "." + workflow_dir_path: str | Path | None = "." """ Optional path to the workflow directory. """ - ensemble_dir_path: Optional[Union[str, Path]] = Path("ensemble") + ensemble_dir_path: str | Path | None = Path("ensemble") """ Path to main ensemble directory. Can serve as a single working directory for workers, or contain calculation directories """ - ensemble_copy_back: Optional[bool] = False + ensemble_copy_back: bool | None = False """ Whether to copy back contents of ``ensemble_dir_path`` to launch location. Useful if ``ensemble_dir_path`` is located on node-local storage. """ - use_worker_dirs: Optional[bool] = False + use_worker_dirs: bool | None = False """ Whether to organize calculation directories under worker-specific directories. """ - sim_dirs_make: Optional[bool] = False + sim_dirs_make: bool | None = False """ Whether to make calculation directories for each simulation function call. """ - sim_dir_copy_files: Optional[List[Union[str, Path]]] = [] + sim_dir_copy_files: list[str | Path] | None = [] """ Paths to copy into the working directory upon calling the simulation function. - List of strings or ``pathlib.Path`` objects. 
+ list of strings or ``pathlib.Path`` objects. """ - sim_dir_symlink_files: Optional[List[Union[str, Path]]] = [] + sim_dir_symlink_files: list[str | Path] | None = [] """ Paths to symlink into the working directory upon calling the simulation function. - List of strings or ``pathlib.Path`` objects. + list of strings or ``pathlib.Path`` objects. """ - sim_input_dir: Optional[Union[str, Path]] = None + sim_input_dir: str | Path | None = None """ Copy this directory's contents into the working directory upon calling the simulation function. Forms the base of a simulation directory. """ - gen_dirs_make: Optional[bool] = False + gen_dirs_make: bool | None = False """ Whether to make generator-specific calculation directories for each generator function call. """ - gen_dir_copy_files: Optional[List[Union[str, Path]]] = [] + gen_dir_copy_files: list[str | Path] | None = [] """ Paths to copy into the working directory upon calling the generator function. - List of strings or ``pathlib.Path`` objects + list of strings or ``pathlib.Path`` objects """ - gen_dir_symlink_files: Optional[List[Union[str, Path]]] = [] + gen_dir_symlink_files: list[str | Path] | None = [] """ Paths to symlink into the working directory upon calling the generator function. - List of strings or ``pathlib.Path`` objects. + list of strings or ``pathlib.Path`` objects. """ - gen_input_dir: Optional[Union[str, Path]] = None + gen_input_dir: str | Path | None = None """ Copy this directory's contents into the working directory upon calling the generator function. Forms the base of a generator directory. """ - calc_dir_id_width: Optional[int] = 4 + calc_dir_id_width: int | None = 4 """ The width of the numerical ID component of a calculation directory name. Leading zeros are padded to the sim/gen ID. """ - platform: Optional[str] = "" + platform: str | None = "" """Name of a known platform defined in the platforms module. - See :class:`Known Platforms List`. + See :class:`Known Platforms list`. 
Example: @@ -334,7 +334,7 @@ class LibeSpecs(BaseModel): See also option :attr:`platform_specs`. """ - platform_specs: Optional[Union[Platform, dict]] = {} + platform_specs: Platform | dict | None = {} """A Platform object or dictionary specifying settings for a platform. To use existing platform: @@ -345,7 +345,7 @@ class LibeSpecs(BaseModel): libE_specs["platform_specs"] = PerlmutterGPU() - See :class:`Known Platforms List`. + See :class:`Known Platforms list`. Or define a platform: @@ -369,82 +369,82 @@ class LibeSpecs(BaseModel): See also option :attr:`platform`. """ - profile: Optional[bool] = False + profile: bool | None = False """ Profile manager and worker logic using ``cProfile``. """ - disable_log_files: Optional[bool] = False + disable_log_files: bool | None = False """ Disable ``ensemble.log`` and ``libE_stats.txt`` log files. """ - safe_mode: Optional[bool] = False + safe_mode: bool | None = False """ Prevents user functions from overwriting protected History fields, but requires moderate overhead. """ - stats_fmt: Optional[dict] = {} + stats_fmt: dict | None = {} """ Options for formatting ``'libE_stats.txt'``. See 'Formatting libE_stats.txt'. """ - live_data: Optional[Any] = None + live_data: Any | None = None """ Add a live data capture object (e.g., for plotting). """ - workers: Optional[List[str]] = [] + workers: list[str] | None = [] """ TCP Only: A list of worker hostnames. """ - ip: Optional[str] = None + ip: str | None = None """ TCP Only: IP address for Manager's system. """ - port: Optional[int] = 0 + port: int | None = 0 """ TCP Only: Port number for Manager's system. """ - authkey: Optional[str] = f"libE_auth_{random.randrange(99999)}" + authkey: str | None = f"libE_auth_{random.randrange(99999)}" """ TCP Only: Authkey for Manager's system.""" - workerID: Optional[int] = None + workerID: int | None = None """ TCP Only: Worker ID number assigned to the new process. 
""" - worker_cmd: Optional[List[str]] = [] + worker_cmd: list[str] | None = [] """ TCP Only: Split string corresponding to worker/client Python process invocation. Contains a local Python path, calling script, and manager/server format-fields for ``manager_ip``, ``manager_port``, ``authkey``, and ``workerID``. ``nworkers`` is specified normally. """ - use_persis_return_gen: Optional[bool] = False + use_persis_return_gen: bool | None = False """ Adds persistent generator output fields to the History array on return. """ - use_persis_return_sim: Optional[bool] = False + use_persis_return_sim: bool | None = False """ Adds persistent simulator output fields to the History array on return. """ - final_gen_send: Optional[bool] = False + final_gen_send: bool | None = False """ Send final simulation results to persistent generators before shutdown. The results will be sent along with the ``PERSIS_STOP`` tag. """ - disable_resource_manager: Optional[bool] = False + disable_resource_manager: bool | None = False """ Disable the built-in resource manager, including automatic resource detection and/or assignment of resources to workers. ``"resource_info"`` will be ignored. """ - num_resource_sets: Optional[int] = 0 + num_resource_sets: int | None = 0 """ Total number of resource sets. Resources will be divided into this number. If not set, resources will be divided evenly (excluding zero_resource_workers). """ - gen_num_procs: Optional[int] = 0 + gen_num_procs: int | None = 0 """ The default number of processors (MPI ranks) required by generators. Unless overridden by the equivalent `persis_info` settings, generators will be allocated this many processors for applications launched via the MPIExecutor. """ - gen_num_gpus: Optional[int] = 0 + gen_num_gpus: int | None = 0 """ The default number of GPUs required by generators. Unless overridden by the equivalent `persis_info` settings, generators will be allocated this many GPUs. 
""" - gpus_per_group: Optional[int] = None + gpus_per_group: int | None = None """ Number of GPUs for each group in the scheduler. This can be used to deal with scenarios where nodes have different numbers of GPUs. In effect a @@ -452,13 +452,13 @@ class LibeSpecs(BaseModel): By default the GPUs on a node are treated as a group. """ - use_tiles_as_gpus: Optional[bool] = False + use_tiles_as_gpus: bool | None = False """ If ``True`` then treat a GPU tile as one GPU when GPU tiles is provided in platform specs or detected. """ - enforce_worker_core_bounds: Optional[bool] = False + enforce_worker_core_bounds: bool | None = False """ If ``False``, the Executor will permit the submission of tasks with a higher processor count than the CPUs available to the worker as @@ -467,40 +467,40 @@ class LibeSpecs(BaseModel): this argument is ignored """ - dedicated_mode: Optional[bool] = False + dedicated_mode: bool | None = False """ Instructs libEnsemble to not run applications on resources where libEnsemble processes (manager and workers) are running. """ - zero_resource_workers: Optional[List[int]] = [] + zero_resource_workers: list[int] | None = [] """ - List of workers that require no resources. For when a fixed mapping of workers + list of workers that require no resources. For when a fixed mapping of workers to resources is required. Otherwise, use ``num_resource_sets``. For use with supported allocation functions. """ - gen_workers: Optional[List[int]] = [] + gen_workers: list[int] | None = [] """ - List of workers that should only run generators. All other workers will only + list of workers that should only run generators. All other workers will only run simulator functions. """ - resource_info: Optional[dict] = {} + resource_info: dict | None = {} """ Resource information to override automatically detected resources. Allowed fields are given below in 'Overriding Resource Auto-detection'. Note that if ``disable_resource_manager`` is set then this option is ignored. 
""" - scheduler_opts: Optional[dict] = {} + scheduler_opts: dict | None = {} """ Options for the resource scheduler. See 'Scheduler Options' for more info """ class _EnsembleSpecs(BaseModel): """An all-encompassing model for a libEnsemble workflow.""" - H0: Optional[Any] = None # np.ndarray - avoids sphinx issue + H0: Any | None = None # np.ndarray - avoids sphinx issue """ A previous or preformatted libEnsemble History array to prepend. """ libE_specs: LibeSpecs @@ -509,20 +509,20 @@ class _EnsembleSpecs(BaseModel): sim_specs: SimSpecs """ Specifications for the simulation function. """ - gen_specs: Optional[GenSpecs] + gen_specs: GenSpecs | None """ Specifications for the generator function. """ exit_criteria: ExitCriteria """ Configurations for when to exit a workflow. """ - persis_info: Optional[dict] = None + persis_info: dict | None = None """ Per-worker information and structures to be passed between user function instances. """ - alloc_specs: Optional[AllocSpecs] = AllocSpecs() + alloc_specs: AllocSpecs | None = AllocSpecs() """ Specifications for the allocation function. """ -def input_fields(fields: List[str]): +def input_fields(fields: list[str]): """Decorates a user-function with a list of field names to pass in on initialization. Decorated functions don't need those fields specified in ``SimSpecs.inputs`` or ``GenSpecs.inputs``. @@ -550,7 +550,7 @@ def decorator(func): return decorator -def persistent_input_fields(fields: List[str]): +def persistent_input_fields(fields: list[str]): """Decorates a *persistent* user-function with a list of field names to send in throughout runtime. Decorated functions don't need those fields specified in ``SimSpecs.persis_in`` or ``GenSpecs.persis_in``. 
@@ -588,7 +588,7 @@ def decorator(func): return decorator -def output_data(fields: List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]]): +def output_data(fields: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]]): """Decorates a user-function with a list of tuples corresponding to NumPy dtypes for the function's output data. Decorated functions don't need those fields specified in ``SimSpecs.outputs`` or ``GenSpecs.outputs``. diff --git a/libensemble/utils/launcher.py b/libensemble/utils/launcher.py index 1633f65bb6..c88361f34a 100644 --- a/libensemble/utils/launcher.py +++ b/libensemble/utils/launcher.py @@ -9,10 +9,9 @@ import subprocess import time from itertools import chain -from typing import List, Optional, Union -def form_command(cmd_template: List[str], specs: dict) -> List[str]: +def form_command(cmd_template: list[str], specs: dict) -> list[str]: "Fill command parts with dict entries from specs; drop any missing." specs = {k: v for k, v in specs.items() if v is not None} @@ -26,7 +25,7 @@ def fill(fmt): return list(chain.from_iterable(filter(None, map(fill, cmd_template)))) -def launch(cmd_template: List[str], specs: dict = None, **kwargs) -> subprocess.Popen: +def launch(cmd_template: list[str], specs: dict = None, **kwargs) -> subprocess.Popen: "Launch a new subprocess (with command templating and Python 3 help)." cmd = form_command(cmd_template, specs) if specs is not None else cmd_template return subprocess.Popen(cmd, **kwargs) @@ -70,7 +69,7 @@ def process_is_stopped(process, timeout): return process.poll() is not None -def wait(process: subprocess.Popen, timeout: Optional[Union[int, float]] = None) -> Optional[int]: +def wait(process: subprocess.Popen, timeout: int | float | None = None) -> int | None: "Wait on a process with timeout (wait forever if None)." 
try: return process.wait(timeout=timeout) @@ -78,7 +77,7 @@ def wait(process: subprocess.Popen, timeout: Optional[Union[int, float]] = None) return None -def wait_and_kill(process: subprocess.Popen, timeout: Optional[Union[int, float]]) -> int: +def wait_and_kill(process: subprocess.Popen, timeout: int | float | None) -> int: "Give a grace period for a process to terminate, then kill it." rc = wait(process, timeout) if rc is not None: @@ -87,7 +86,7 @@ def wait_and_kill(process: subprocess.Popen, timeout: Optional[Union[int, float] return process.wait() -def cancel(process: subprocess.Popen, timeout: Optional[Union[int, float]] = 0) -> int: +def cancel(process: subprocess.Popen, timeout: int | float | None = 0) -> int: "Send a termination signal, give a grace period, then hard kill if needed." if timeout is not None and timeout > 0: terminatepg(process) diff --git a/libensemble/utils/output_directory.py b/libensemble/utils/output_directory.py index 3de0f20ddf..b43ee3491b 100644 --- a/libensemble/utils/output_directory.py +++ b/libensemble/utils/output_directory.py @@ -2,7 +2,6 @@ import re import shutil from pathlib import Path -from typing import Optional from libensemble.message_numbers import EVAL_SIM_TAG, calc_type_strings from libensemble.tools.fields_keys import libE_spec_calc_dir_misc, libE_spec_gen_dir_keys, libE_spec_sim_dir_keys @@ -40,7 +39,7 @@ class EnsembleDirectory: A LocationStack object from libEnsemble's internal libensemble.utils.loc_stack module. 
""" - def __init__(self, libE_specs: dict, loc_stack: Optional[LocationStack] = None): + def __init__(self, libE_specs: dict, loc_stack: LocationStack | None = None): self.specs = libE_specs self.loc_stack = loc_stack diff --git a/libensemble/utils/runners.py b/libensemble/utils/runners.py index 629c733b1b..1f82cdb679 100644 --- a/libensemble/utils/runners.py +++ b/libensemble/utils/runners.py @@ -1,7 +1,6 @@ import inspect import logging import logging.handlers -from typing import Optional import numpy.typing as npt @@ -28,7 +27,7 @@ def _truncate_args(self, calc_in: npt.NDArray, persis_info, libE_info): args = [calc_in, persis_info, self.specs, libE_info] return args[:nparams] - def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, Optional[int]): + def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, int | None): """User function called in-place""" args = self._truncate_args(calc_in, persis_info, libE_info) return self.f(*args) @@ -36,7 +35,7 @@ def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> ( def shutdown(self) -> None: pass - def run(self, calc_in: npt.NDArray, Work: dict) -> (npt.NDArray, dict, Optional[int]): + def run(self, calc_in: npt.NDArray, Work: dict) -> (npt.NDArray, dict, int | None): return self._result(calc_in, Work["persis_info"], Work["libE_info"]) @@ -56,7 +55,7 @@ def _get_globus_compute_executor(self): else: return Executor - def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, Optional[int]): + def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, int | None): from libensemble.worker import Worker libE_info["comm"] = None # 'comm' object not pickle-able @@ -75,7 +74,7 @@ def __init__(self, specs): super().__init__(specs) self.thread_handle = None - def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> 
(npt.NDArray, dict, Optional[int]): + def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, int | None): args = self._truncate_args(calc_in, persis_info, libE_info) self.thread_handle = QCommThread(self.f, None, *args, user_function=True) self.thread_handle.run() From de96b24ebf23fd906cbb16c9518c00f401a60afc Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 20 Feb 2025 14:57:46 -0600 Subject: [PATCH 03/39] presumptive last round of refactors, including many instances of "Any" with "object" --- libensemble/resources/node_resources.py | 5 ++--- libensemble/resources/worker_resources.py | 12 ++++++------ libensemble/specs.py | 24 +++++++++++------------ libensemble/tools/live_data/live_data.py | 6 ++---- libensemble/tools/persistent_support.py | 5 ++--- libensemble/utils/loc_stack.py | 13 ++++++------ libensemble/utils/validators.py | 2 +- 7 files changed, 30 insertions(+), 37 deletions(-) diff --git a/libensemble/resources/node_resources.py b/libensemble/resources/node_resources.py index 67eedea856..1c35b337f1 100644 --- a/libensemble/resources/node_resources.py +++ b/libensemble/resources/node_resources.py @@ -6,7 +6,6 @@ import collections import logging import os -from typing import Tuple import psutil @@ -29,7 +28,7 @@ def get_cpu_cores(hyperthreads: bool = False) -> int: return psutil.cpu_count(logical=hyperthreads) # This is ranks available per node -def _get_local_resources() -> Tuple[int, int, int]: +def _get_local_resources() -> tuple[int, int, int]: """Returns logical and physical cores and GPUs on the local node""" physical_cores = get_cpu_cores(hyperthreads=False) logical_cores = get_cpu_cores(hyperthreads=True) @@ -146,7 +145,7 @@ def _update_from_str(cores_info, cores_info_str): def get_sub_node_resources( launcher: str | None = None, remote_mode: bool = False, env_resources=None -) -> Tuple[int, int, int]: +) -> tuple[int, int, int]: """Returns logical and physical cores and GPUs per node as a tuple First 
checks for environment values, and and then for detected values. diff --git a/libensemble/resources/worker_resources.py b/libensemble/resources/worker_resources.py index fbd270fce9..b11dd7c35e 100644 --- a/libensemble/resources/worker_resources.py +++ b/libensemble/resources/worker_resources.py @@ -1,7 +1,7 @@ import logging import os from collections import Counter, OrderedDict -from typing import Any, Dict, List, Tuple, Union +from typing import Any import numpy as np @@ -97,7 +97,7 @@ def free_rsets(self, worker=None): self.nongpu_rsets_free += np.count_nonzero(~self.rsets["gpus"][rsets_to_free]) @staticmethod - def get_index_list(num_workers: int, num_rsets: int, zero_resource_list: List[Union[int, Any]]) -> List[int | None]: + def get_index_list(num_workers: int, num_rsets: int, zero_resource_list: list[int | Any]) -> list[int | None]: """Map WorkerID to index into a nodelist""" index = 0 index_list = [] @@ -125,7 +125,7 @@ class WorkerResources(RSetResources): :ivar int workerID: workerID for this worker. :ivar list local_nodelist: A list of all nodes assigned to this worker. - :ivar list rset_team: List of rset IDs currently assigned to this worker. + :ivar list rset_team: list of rset IDs currently assigned to this worker. :ivar int num_rsets: The number of resource sets assigned to this worker. :ivar dict slots: A dictionary with a list of slot IDs for each node. :ivar bool even_slots: True if each node has the same number of slots. 
@@ -292,7 +292,7 @@ def doihave_gpus(self): return self.all_rsets["gpus"][self.rset_team[0]] return False - def set_rset_team(self, rset_team: List[int]) -> None: + def set_rset_team(self, rset_team: list[int]) -> None: """Update worker team and local attributes Updates: rset_team @@ -346,8 +346,8 @@ def set_slot_count(self) -> None: @staticmethod def get_local_nodelist( - workerID: int, rset_team: List[int], split_list: List[List[str]], rsets_per_node: int - ) -> Tuple[List[str], Dict[str, List[int]]]: + workerID: int, rset_team: list[int], split_list: list[list[str]], rsets_per_node: int + ) -> tuple[list[str], dict[str, list[int]]]: """Returns the list of nodes available to the given worker and the slot dictionary""" if workerID is None: raise WorkerResourcesException("Worker has no workerID - aborting") diff --git a/libensemble/specs.py b/libensemble/specs.py index 3ae06d8b9b..1c7e366dcb 100644 --- a/libensemble/specs.py +++ b/libensemble/specs.py @@ -1,13 +1,11 @@ import random import warnings from pathlib import Path -from typing import Any, Callable import pydantic from pydantic import BaseModel, Field from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first -from libensemble.resources.platforms import Platform __all__ = ["SimSpecs", "GenSpecs", "AllocSpecs", "ExitCriteria", "LibeSpecs", "_EnsembleSpecs"] @@ -26,7 +24,7 @@ class SimSpecs(BaseModel): Specifications for configuring a Simulation Function. """ - sim_f: Callable = None + sim_f: object = None """ Python function matching the ``sim_f`` interface. Evaluates parameters produced by a generator function. @@ -45,7 +43,7 @@ class SimSpecs(BaseModel): """ # list of tuples for dtype construction - outputs: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]] = Field([], alias="out") + outputs: list[[tuple[str, type] | tuple[str, type, int | tuple]]] = Field([], alias="out") """ list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. 
@@ -78,7 +76,7 @@ class GenSpecs(BaseModel): Specifications for configuring a Generator Function. """ - gen_f: Callable | None = None + gen_f: object | None = None """ Python function matching the ``gen_f`` interface. Produces parameters for evaluation by a simulator function, and makes decisions based on simulator function output. @@ -96,7 +94,7 @@ class GenSpecs(BaseModel): throughout the run, following initialization. """ - outputs: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]] = Field([], alias="out") + outputs: list[[tuple[str, type] | tuple[str, type, int | tuple]]] = Field([], alias="out") """ list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Typically used to initialize an @@ -128,7 +126,7 @@ class AllocSpecs(BaseModel): Specifications for configuring an Allocation Function. """ - alloc_f: Callable = give_sim_work_first + alloc_f: object = give_sim_work_first """ Python function matching the ``alloc_f`` interface. Decides when simulator and generator functions should be called, and with what resources and parameters. @@ -140,7 +138,7 @@ class AllocSpecs(BaseModel): for customizing the allocation function. """ - outputs: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]] = Field([], alias="out") + outputs: list[[tuple[str, type] | tuple[str, type, int | tuple]]] = Field([], alias="out") """ list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Allocation functions that modify libEnsemble's History array with additional fields should list those @@ -187,7 +185,7 @@ class LibeSpecs(BaseModel): This generator function can access/modify user objects by reference. """ - mpi_comm: Any | None = None + mpi_comm: object | None = None """ libEnsemble MPI communicator. Default: ``MPI.COMM_WORLD``""" dry_run: bool | None = False @@ -334,7 +332,7 @@ class LibeSpecs(BaseModel): See also option :attr:`platform_specs`. 
""" - platform_specs: Platform | dict | None = {} + platform_specs: object | dict | None = {} """A Platform object or dictionary specifying settings for a platform. To use existing platform: @@ -381,7 +379,7 @@ class LibeSpecs(BaseModel): stats_fmt: dict | None = {} """ Options for formatting ``'libE_stats.txt'``. See 'Formatting libE_stats.txt'. """ - live_data: Any | None = None + live_data: object | None = None """ Add a live data capture object (e.g., for plotting). """ workers: list[str] | None = [] @@ -500,7 +498,7 @@ class LibeSpecs(BaseModel): class _EnsembleSpecs(BaseModel): """An all-encompassing model for a libEnsemble workflow.""" - H0: Any | None = None # np.ndarray - avoids sphinx issue + H0: object | None = None # np.ndarray - avoids sphinx issue """ A previous or preformatted libEnsemble History array to prepend. """ libE_specs: LibeSpecs @@ -588,7 +586,7 @@ def decorator(func): return decorator -def output_data(fields: list[[tuple[str, Any] | tuple[str, Any, int | tuple]]]): +def output_data(fields: list[[tuple[str, type] | tuple[str, type, int | tuple]]]): """Decorates a user-function with a list of tuples corresponding to NumPy dtypes for the function's output data. Decorated functions don't need those fields specified in ``SimSpecs.outputs`` or ``GenSpecs.outputs``. 
diff --git a/libensemble/tools/live_data/live_data.py b/libensemble/tools/live_data/live_data.py index 7d50d75a8b..2b7593e7d0 100644 --- a/libensemble/tools/live_data/live_data.py +++ b/libensemble/tools/live_data/live_data.py @@ -1,7 +1,5 @@ from abc import ABC, abstractmethod -import numpy.typing as npt - class LiveData(ABC): """A base class for capturing and processing data during an ensemble""" @@ -11,7 +9,7 @@ def __init__(self): pass @abstractmethod - def live_update(self, hist: npt.NDArray) -> None: + def live_update(self, hist: object) -> None: """Process history data after simulation results have been added to history Parameters @@ -21,7 +19,7 @@ def live_update(self, hist: npt.NDArray) -> None: A libEnsemble history type object. """ - def finalize(self, hist: npt.NDArray) -> None: + def finalize(self, hist: object) -> None: """ Finzalize live data processing diff --git a/libensemble/tools/persistent_support.py b/libensemble/tools/persistent_support.py index dca7d37ca5..7e9643e022 100644 --- a/libensemble/tools/persistent_support.py +++ b/libensemble/tools/persistent_support.py @@ -1,5 +1,4 @@ import logging -from typing import Any, Dict, List import numpy as np import numpy.typing as npt @@ -12,7 +11,7 @@ class PersistentSupport: """A helper class to assist with writing persistent user functions.""" - def __init__(self, libE_info: Dict[str, Dict[Any, Any]], calc_type: int) -> None: + def __init__(self, libE_info: dict[str, dict], calc_type: int) -> None: """ Instantiate a new PersistentSupport instance @@ -115,7 +114,7 @@ def send_recv(self, output: npt.NDArray, calc_status: int = UNSET_TAG) -> (int, self.send(output, calc_status) return self.recv() - def request_cancel_sim_ids(self, sim_ids: List[int]): + def request_cancel_sim_ids(self, sim_ids: list[int]): """Request cancellation of sim_ids. :param sim_ids: A list of sim_ids to cancel. 
diff --git a/libensemble/utils/loc_stack.py b/libensemble/utils/loc_stack.py index 67bbcb55bb..8eadf5244a 100644 --- a/libensemble/utils/loc_stack.py +++ b/libensemble/utils/loc_stack.py @@ -5,7 +5,6 @@ import os import shutil from pathlib import Path -from typing import List, Optional, Union class LocationStack: @@ -19,7 +18,7 @@ def __init__(self) -> None: def copy_file( self, destdir: Path, - copy_files: List[Path] = [], + copy_files: list[Path] = [], ignore_FileExists: bool = False, allow_overwrite: bool = False, ) -> None: @@ -48,7 +47,7 @@ def copy_file( def symlink_file( self, destdir: Path, - symlink_files: List[Path] = [], + symlink_files: list[Path] = [], ignore_FileExists: bool = False, allow_overwrite: bool = False, ) -> None: @@ -67,11 +66,11 @@ def symlink_file( def register_loc( self, - key: Union[str, int], + key: str | int, dirname: Path, - prefix: Optional[Path] = None, - copy_files: List[Path] = [], - symlink_files: List[Path] = [], + prefix: Path | None = None, + copy_files: list[Path] = [], + symlink_files: list[Path] = [], ignore_FileExists: bool = False, allow_overwrite: bool = False, ) -> str: diff --git a/libensemble/utils/validators.py b/libensemble/utils/validators.py index e91d06a171..1782e011e8 100644 --- a/libensemble/utils/validators.py +++ b/libensemble/utils/validators.py @@ -1,6 +1,6 @@ import os +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np From a51fd9c90099bd499423fb0b841aad23569297d8 Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 20 Feb 2025 15:20:52 -0600 Subject: [PATCH 04/39] simplify specs.outputs, add ipdb to dev environment --- libensemble/specs.py | 8 ++++---- pyproject.toml | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/libensemble/specs.py b/libensemble/specs.py index 1c7e366dcb..170435cd18 100644 --- a/libensemble/specs.py +++ b/libensemble/specs.py @@ -43,7 +43,7 @@ class SimSpecs(BaseModel): """ # list of tuples for dtype 
construction - outputs: list[[tuple[str, type] | tuple[str, type, int | tuple]]] = Field([], alias="out") + outputs: list[tuple] = Field([], alias="out") """ list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. @@ -94,7 +94,7 @@ class GenSpecs(BaseModel): throughout the run, following initialization. """ - outputs: list[[tuple[str, type] | tuple[str, type, int | tuple]]] = Field([], alias="out") + outputs: list[tuple] = Field([], alias="out") """ list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Typically used to initialize an @@ -138,7 +138,7 @@ class AllocSpecs(BaseModel): for customizing the allocation function. """ - outputs: list[[tuple[str, type] | tuple[str, type, int | tuple]]] = Field([], alias="out") + outputs: list[tuple] = Field([], alias="out") """ list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Allocation functions that modify libEnsemble's History array with additional fields should list those @@ -586,7 +586,7 @@ def decorator(func): return decorator -def output_data(fields: list[[tuple[str, type] | tuple[str, type, int | tuple]]]): +def output_data(fields: list[tuple]): """Decorates a user-function with a list of tuples corresponding to NumPy dtypes for the function's output data. Decorated functions don't need those fields specified in ``SimSpecs.outputs`` or ``GenSpecs.outputs``. 
diff --git a/pyproject.toml b/pyproject.toml index cf6d5a03bc..c21a9bf296 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -80,6 +80,7 @@ sphinx-copybutton = ">=0.5.2,<0.6" pre-commit = ">=4.0.1,<5" nlopt = ">=2.8.0,<3" scipy = ">=1.9.1,<2" +ipdb = ">=0.13.13,<0.14" [tool.pixi.dependencies] python = ">=3.10,<3.14" From 24c4afa8978bd86c8e28914d44772747cdab8ee7 Mon Sep 17 00:00:00 2001 From: jlnav Date: Fri, 21 Mar 2025 15:48:19 -0500 Subject: [PATCH 05/39] add type hints to tools.py, fix a docstring, use npt.NDArray where appropriate --- libensemble/tools/live_data/live_data.py | 6 ++++-- libensemble/tools/tools.py | 20 +++++++++++++++++--- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/libensemble/tools/live_data/live_data.py b/libensemble/tools/live_data/live_data.py index 2b7593e7d0..7d50d75a8b 100644 --- a/libensemble/tools/live_data/live_data.py +++ b/libensemble/tools/live_data/live_data.py @@ -1,5 +1,7 @@ from abc import ABC, abstractmethod +import numpy.typing as npt + class LiveData(ABC): """A base class for capturing and processing data during an ensemble""" @@ -9,7 +11,7 @@ def __init__(self): pass @abstractmethod - def live_update(self, hist: object) -> None: + def live_update(self, hist: npt.NDArray) -> None: """Process history data after simulation results have been added to history Parameters @@ -19,7 +21,7 @@ def live_update(self, hist: object) -> None: A libEnsemble history type object. 
""" - def finalize(self, hist: object) -> None: + def finalize(self, hist: npt.NDArray) -> None: """ Finzalize live data processing diff --git a/libensemble/tools/tools.py b/libensemble/tools/tools.py index 4038b5fafe..8cb81f3af3 100644 --- a/libensemble/tools/tools.py +++ b/libensemble/tools/tools.py @@ -10,6 +10,7 @@ import time import numpy as np +import numpy.typing as npt # Create logger logger = logging.getLogger(__name__) @@ -83,7 +84,16 @@ def _get_shortname(basename): # =================== save libE output to pickle and np ======================== -def save_libE_output(H, persis_info, basename, nworkers, dest_path=None, mess="Run completed", append_attrs=True): + +def save_libE_output( + H: npt.NDArray, + persis_info: dict, + basename: str, + nworkers: int, + dest_path: str = None, + mess: str = "Run completed", + append_attrs: bool = True, +) -> str: """ Writes out history array and persis_info to files. @@ -117,6 +127,10 @@ def save_libE_output(H, persis_info, basename, nworkers, dest_path=None, mess="R The number of workers in this ensemble. Added to output file names. + dest_path: :obj:`str`, optional + + The path to save the file to. + mess: :obj:`str` A message to print/log when saving the file. @@ -153,7 +167,7 @@ def save_libE_output(H, persis_info, basename, nworkers, dest_path=None, mess="R # ===================== per-process numpy random-streams ======================= -def add_unique_random_streams(persis_info, nstreams, seed=""): +def add_unique_random_streams(persis_info: dict, nstreams: int, seed: str = "") -> dict: """ Creates nstreams random number streams for the libE manager and workers when nstreams is num_workers + 1. Stream i is initialized with seed i by default. 
@@ -206,7 +220,7 @@ def add_unique_random_streams(persis_info, nstreams, seed=""): return persis_info -def check_npy_file_exists(filename: str, basename: bool = False, max_wait: int = 3): +def check_npy_file_exists(filename: str, basename: bool = False, max_wait: int = 3) -> bool: """Checks a file is created in a parallel environment Parameters From 5966164bef06a5a336fcd6397ee3153dbc90373d Mon Sep 17 00:00:00 2001 From: shudson Date: Mon, 7 Apr 2025 17:35:35 -0500 Subject: [PATCH 06/39] Add release notes for v1.5.0 --- CHANGELOG.rst | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b81a395af8..b827dc3d68 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,45 @@ GitHub issues are referenced, and can be viewed with hyperlinks on the `github r .. _`github releases page`: https://github.com/Libensemble/libensemble/releases +Release 1.5.0 +-------------- + +:Date: Apr 8, 2025 + +General Updates: + +* Migrate package build system to `pyproject.toml` (with `pixi` support). #1459 +* Improve handling when no MPI found. #1514 +* `ensemble.save_output()` can save without appending attributes `append_attrs=False`. #1531 +* Improved handling of worker-specific `persis_info` fields when they are not initially provided. #1531 + * Bugfix: Fix `final_gen_send` when there are no worker-specific `persis_info` fields. + * Handle worker-generated `persis_info` fields. + * Ensure `persis_info` is initialized to an empty dictionary in user functions instead of None. + +Examples: + +* Update Ax generator for `Ax v0.5.0`. #1508 +* gpCAM generators renamed. #1516 + * `persistent_gpCAM_ask_tell` to `persistent_gpCAM` + * `persistent_gpCAM_simple` to `persistent_gpCAM_covar` (in fact less simple) +* Persistent generators return `None` as first return value unless `H_o` is updated. #1515 + +Documentation: + +* Revamped Examples and HPC section of documentation. 
#1501, #1536, #1539 +* Added tutorial and notebook demonstrating surrogate model creation with gpCAM. #1531 +* Updated Aurora guide. #1510 +* Updated and documented APOSMM/WarpX example. #1543 + +:Note: + +* Tests were run on Linux and MacOS with Python versions 3.10, 3.11, 3.12, 3.13 +* Heterogeneous workflows tested on Aurora (ALCF), Polaris (ALCF), and Perlmutter (NERSC). + +:Known Issues: + +* See known issues section in the documentation. + Release 1.4.3 -------------- From 31a60692ab60a7d41b19af3b799732813a6c41b9 Mon Sep 17 00:00:00 2001 From: shudson Date: Mon, 7 Apr 2025 17:41:44 -0500 Subject: [PATCH 07/39] Update version and year --- .wci.yml | 4 ++-- LICENSE | 2 +- libensemble/version.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.wci.yml b/.wci.yml index 78b37075ff..af395fb9af 100644 --- a/.wci.yml +++ b/.wci.yml @@ -16,8 +16,8 @@ description: | language: Python release: - version: 1.4.3 - date: 2024-12-16 + version: 1.5.0 + date: 2025-04-08 documentation: general: https://libensemble.readthedocs.io diff --git a/LICENSE b/LICENSE index de0d1ca0ea..6a45c6a4cf 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2018-2024, UChicago Argonne, LLC and the libEnsemble Development Team +Copyright (c) 2018-2025, UChicago Argonne, LLC and the libEnsemble Development Team All Rights Reserved. 
Redistribution and use in source and binary forms, with or without diff --git a/libensemble/version.py b/libensemble/version.py index a5467c834a..5b60188613 100644 --- a/libensemble/version.py +++ b/libensemble/version.py @@ -1 +1 @@ -__version__ = "1.4.3+dev" +__version__ = "1.5.0" From 2dc5b4e9909a04367343ccba382637021ef8a0b3 Mon Sep 17 00:00:00 2001 From: shudson Date: Mon, 7 Apr 2025 18:06:41 -0500 Subject: [PATCH 08/39] Use present tense in release notes --- CHANGELOG.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b827dc3d68..9ff1a6c209 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,7 +18,7 @@ General Updates: * Migrate package build system to `pyproject.toml` (with `pixi` support). #1459 * Improve handling when no MPI found. #1514 * `ensemble.save_output()` can save without appending attributes `append_attrs=False`. #1531 -* Improved handling of worker-specific `persis_info` fields when they are not initially provided. #1531 +* Improve handling of worker-specific `persis_info` fields when they are not initially provided. #1531 * Bugfix: Fix `final_gen_send` when there are no worker-specific `persis_info` fields. * Handle worker-generated `persis_info` fields. * Ensure `persis_info` is initialized to an empty dictionary in user functions instead of None. @@ -26,17 +26,17 @@ General Updates: Examples: * Update Ax generator for `Ax v0.5.0`. #1508 -* gpCAM generators renamed. #1516 +* Rename gpCAM generators. #1516 * `persistent_gpCAM_ask_tell` to `persistent_gpCAM` * `persistent_gpCAM_simple` to `persistent_gpCAM_covar` (in fact less simple) * Persistent generators return `None` as first return value unless `H_o` is updated. #1515 Documentation: -* Revamped Examples and HPC section of documentation. #1501, #1536, #1539 -* Added tutorial and notebook demonstrating surrogate model creation with gpCAM. #1531 -* Updated Aurora guide. #1510 -* Updated and documented APOSMM/WarpX example. 
#1543
+* Revamp Examples and HPC section of documentation. #1501, #1536, #1539
+* Add tutorial and notebook demonstrating surrogate model creation with gpCAM. #1531
+* Update Aurora guide. #1510
+* Update and document APOSMM/WarpX example. #1543

 :Note:

From 178253b4010fcc392a0b075884311e70ed593f2e Mon Sep 17 00:00:00 2001
From: Jeffrey Larson
Date: Tue, 8 Apr 2025 11:03:19 -0500
Subject: [PATCH 09/39] Edit docstring

---
 .../test_persistent_aposmm_ibcdfo_pounders.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py b/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py
index cf7d611547..7523704a0b 100644
--- a/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py
+++ b/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py
@@ -14,9 +14,9 @@
 These values are then mapped to the normalized emittance - .

 Execute via one of the following commands:
-   mpiexec -np 3 python test_persistent_aposmm_ibcdfo.py
-   python test_persistent_aposmm_ibcdfo.py --nworkers 2
-Both will run with 1 manager, 1 worker running APOSMM+IBCDFO), and 1 worker
+   mpiexec -np 3 python test_persistent_aposmm_ibcdfo_pounders.py
+   python test_persistent_aposmm_ibcdfo_pounders.py --nworkers 2
+Both will run with 1 manager, 1 worker running APOSMM+IBCDFO, and 1 worker
 doing the simulation evaluations.
""" From 36f98a605a26dbc96f7541b470cf93569ca03b28 Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 11:05:21 -0500 Subject: [PATCH 10/39] Whitespace --- docs/examples/sim_funcs.rst | 4 ++-- docs/examples/sim_funcs/forces_simf_gpu.rst | 2 +- docs/platforms/example_scripts.rst | 2 -- docs/platforms/platforms_index.rst | 10 ---------- docs/tutorials/forces_gpu_tutorial.rst | 2 -- .../bebop_submit_pbs_central.sh | 1 - 6 files changed, 3 insertions(+), 18 deletions(-) diff --git a/docs/examples/sim_funcs.rst b/docs/examples/sim_funcs.rst index 3cdf8bdc11..dad2609049 100644 --- a/docs/examples/sim_funcs.rst +++ b/docs/examples/sim_funcs.rst @@ -36,8 +36,8 @@ Functions that run user applications These use the executor to launch applications and in some cases handle dynamic CPU/GPU allocation. -The ``Variable resources`` module contains basic examples, while the ``Template`` -examples use a simple MPI/OpenMP (with GPU offload option) application (``forces``) +The ``Variable resources`` module contains basic examples, while the ``Template`` +examples use a simple MPI/OpenMP (with GPU offload option) application (``forces``) to demonstrate libEnsemble’s capabilities on various HPC systems. The build_forces.sh_ file gives compile lines for building the simple ``forces`` application on various platforms (use -DGPU to build for GPU). diff --git a/docs/examples/sim_funcs/forces_simf_gpu.rst b/docs/examples/sim_funcs/forces_simf_gpu.rst index 38cb8630e8..4c74d254f1 100644 --- a/docs/examples/sim_funcs/forces_simf_gpu.rst +++ b/docs/examples/sim_funcs/forces_simf_gpu.rst @@ -1,4 +1,4 @@ -Template for GPU executables +Template for GPU executables ---------------------------- .. 
role:: underline diff --git a/docs/platforms/example_scripts.rst b/docs/platforms/example_scripts.rst index 072cc65f22..24da78afc8 100644 --- a/docs/platforms/example_scripts.rst +++ b/docs/platforms/example_scripts.rst @@ -18,7 +18,6 @@ for more information about the respective systems and configuration. or, if using dynamic resources, :doc:`in the generator<../examples/sim_funcs/forces_simf_gpu_vary_resources>`. - General examples ---------------- @@ -43,7 +42,6 @@ LSF - Basic :caption: /examples/libE_submission_scripts/submit_lsf_simple.sh :language: bash - System Examples --------------- diff --git a/docs/platforms/platforms_index.rst b/docs/platforms/platforms_index.rst index c56ab66aa9..a08a823dc8 100644 --- a/docs/platforms/platforms_index.rst +++ b/docs/platforms/platforms_index.rst @@ -46,14 +46,12 @@ which runs the generator on the manager (using a thread) as below. A SLURM batch script may include: - .. code-block:: bash #SBATCH --nodes 3 python run_libe_forces.py --nworkers 3 - When using **gen_on_manager**, set ``nworkers`` to the number of workers desired for running simulations. Dedicated Mode @@ -64,7 +62,6 @@ True, the MPI executor will not launch applications on nodes where libEnsemble P processes (manager and workers) are running. Workers launch applications onto the remaining nodes in the allocation. - .. list-table:: :widths: 60 40 @@ -84,14 +81,12 @@ remaining nodes in the allocation. A SLURM batch script may include: - .. code-block:: bash #SBATCH --nodes 3 python run_libe_forces.py --nworkers 3 - Note that **gen_on_manager** is not set in the above example. Distributed Running @@ -116,7 +111,6 @@ case, requires :ref:`a careful MPI rank placement `. This allows the libEnsemble worker to read files produced by the application on local node storage. 
- Configuring the Run ------------------- @@ -159,7 +153,6 @@ Varying resources libEnsemble also features :ref:`dynamic resource assignment`, whereby the number of processes and/or the number of GPUs can be a set for each simulation by the generator. - Overriding Auto-Detection ------------------------- @@ -172,8 +165,6 @@ libE_specs option. When using the MPI Executor, it is possible to override the detected information using the `custom_info` argument. See the :doc:`MPI Executor<../executor/mpi_executor>` for more. - - Systems with Launch/MOM Nodes ----------------------------- @@ -212,7 +203,6 @@ or *to entirely different systems*. Submission scripts for running on launch/MOM nodes and for using Balsam can be found in the :doc:`examples`. - .. _globus_compute_ref: Globus Compute - Remote User Functions diff --git a/docs/tutorials/forces_gpu_tutorial.rst b/docs/tutorials/forces_gpu_tutorial.rst index ab1ee121fc..870b634648 100644 --- a/docs/tutorials/forces_gpu_tutorial.rst +++ b/docs/tutorials/forces_gpu_tutorial.rst @@ -35,7 +35,6 @@ from the simple forces example are highlighted: # Optional - to print GPU settings from libensemble.tools.test_support import check_gpu_setting - def run_forces(H, persis_info, sim_specs, libE_info): """Launches the forces MPI app and auto-assigns ranks and GPU resources. @@ -154,7 +153,6 @@ and use this information however you want. output = np.zeros(1, dtype=sim_specs["out"]) output["energy"][0] = final_energy - return output The above code will assign a GPU to each worker on CUDA-capable systems, diff --git a/examples/libE_submission_scripts/bebop_submit_pbs_central.sh b/examples/libE_submission_scripts/bebop_submit_pbs_central.sh index 4d0aff5894..7fb474194f 100644 --- a/examples/libE_submission_scripts/bebop_submit_pbs_central.sh +++ b/examples/libE_submission_scripts/bebop_submit_pbs_central.sh @@ -5,7 +5,6 @@ #PBS -A [project] #PBS -N libE_example - cd $PBS_O_WORKDIR # Choose MPI backend. 
Note that the built mpi4py in your environment should match. module load oneapi/mpi From c37db76ddf71e95bb7ab58ae3c0ccad5940af13e Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 11:13:49 -0500 Subject: [PATCH 11/39] Underlines too short --- docs/tutorials/gpcam_tutorial.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/tutorials/gpcam_tutorial.rst b/docs/tutorials/gpcam_tutorial.rst index 09b523c7a3..a013c1b67e 100644 --- a/docs/tutorials/gpcam_tutorial.rst +++ b/docs/tutorials/gpcam_tutorial.rst @@ -10,7 +10,7 @@ In each iteration, a batch of points is produced for concurrent evaluation, maxi Ensure that libEnsemble, and gpCAM are installed via: ``pip install libensemble gpcam`` Generator function ------------------ +------------------ The gpCAM generator function is called ``persistent_gpCAM``. @@ -179,7 +179,7 @@ For running applications using parallel resources in the simulator see the `forc return term1 + term2 + term3 Calling Script -------------- +-------------- Our calling script configures libEnsemble, the generator function, and the simulator function. It then create the ensemble object and runs the ensemble. @@ -275,7 +275,7 @@ At the end of our calling script we run the ensemble. pprint(H[["sim_id", "x", "f"]][:16]) # See first 16 results Rerun and test model at known points ------------------------------------ +------------------------------------ To see how the accuracy of the surrogate model improves, we can use previously evaluated points as test points and run again with a different seed. @@ -292,7 +292,7 @@ To see how the accuracy of the surrogate model improves, we can use previously e print(persis_info) Viewing model progression ------------------------- +------------------------- Now we can check how our model's values compared against the values at known test points as the ensemble progresses. 
The comparison is based on the **mean squared error** between the gpCAM model and our known From d9a91c7ef1145899f393b1e39c45b3d9a19b95b7 Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 11:15:04 -0500 Subject: [PATCH 12/39] Replacing ~ with - --- docs/platforms/platforms_index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/platforms/platforms_index.rst b/docs/platforms/platforms_index.rst index a08a823dc8..15563cdf79 100644 --- a/docs/platforms/platforms_index.rst +++ b/docs/platforms/platforms_index.rst @@ -134,7 +134,7 @@ and partitions these to workers. The :doc:`MPI Executor<../executor/mpi_executor accesses the resources available to the current worker when launching tasks. Zero-resource workers -~~~~~~~~~~~~~~~~~~~~~ +--------------------- Users with persistent ``gen_f`` functions may notice that the persistent workers are still automatically assigned system resources. This can be resolved by using From 165c668c3473add3a273475a395913ec8362392d Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 11:35:23 -0500 Subject: [PATCH 13/39] Link update --- docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst b/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst index a190e99fea..8a491e8816 100644 --- a/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst +++ b/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst @@ -10,7 +10,7 @@ ranks and GPU resources as requested by the generator. This makes efficient use of each node as the expensive GPU simulations will use the GPUs on the node/s, while the rest of the CPU cores are assigned to the simple CPU-only simulations. -For a realistic use-case see https://journals.aps.org/prab/abstract/10.1103/PhysRevAccelBeams.26.084601 +See this publication_ for a real-world demonstration of these capabilities. .. 
automodule:: forces_multi_app.forces_simf :members: @@ -39,5 +39,6 @@ up by each worker and these will be used when the simulation is run, unless over More information is available in the :doc:`Forces GPU tutorial <../../tutorials/forces_gpu_tutorial>` and the video_ demonstration on Frontier_. -.. _video: https://www.youtube.com/watch?v=H2fmbZ6DnVc .. _Frontier: https://docs.olcf.ornl.gov/systems/frontier_user_guide.html +.. _publication: https://doi.org/10.1103/PhysRevAccelBeams.26.084601 +.. _video: https://www.youtube.com/watch?v=H2fmbZ6DnVc From c9ca535b82e28a9c804e1d39b867bfc502ad33fb Mon Sep 17 00:00:00 2001 From: shudson Date: Tue, 8 Apr 2025 11:48:34 -0500 Subject: [PATCH 14/39] Bypass label in include --- docs/examples/submission_scripts.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/examples/submission_scripts.rst b/docs/examples/submission_scripts.rst index 1f853585b5..7c37da40ea 100644 --- a/docs/examples/submission_scripts.rst +++ b/docs/examples/submission_scripts.rst @@ -1 +1,5 @@ .. include:: ../platforms/example_scripts.rst + :end-before: .. _slurm_mpi_distributed: + +.. include:: ../platforms/example_scripts.rst + :start-after: .. 
_slurm_mpi_distributed: From 71fa984a1de9be32253d9cd42151a8ca606f3665 Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 11:58:50 -0500 Subject: [PATCH 15/39] More docs edits --- docs/examples/sim_funcs.rst | 4 ++-- docs/platforms/example_scripts.rst | 6 +++--- docs/platforms/platforms_index.rst | 10 +++++----- docs/resource_manager/resource_detection.rst | 8 ++++---- docs/resource_manager/zero_resource_workers.rst | 2 +- docs/tutorials/forces_gpu_tutorial.rst | 4 ++-- libensemble/gen_funcs/persistent_gpCAM.py | 6 +++--- libensemble/tests/regression_tests/test_gpCAM.py | 2 +- .../test_persistent_gp_multitask_ax.py | 2 +- 9 files changed, 22 insertions(+), 22 deletions(-) diff --git a/docs/examples/sim_funcs.rst b/docs/examples/sim_funcs.rst index dad2609049..be4374d884 100644 --- a/docs/examples/sim_funcs.rst +++ b/docs/examples/sim_funcs.rst @@ -24,7 +24,7 @@ Ideal for simple debugging of generator processes or system testing. Borehole function with kills Chwirut1 vector-valued function Inverse Bayesian likelihood - Norm + Norm Rosenbrock test optimization function Six Hump Camel Test noisy function @@ -37,7 +37,7 @@ These use the executor to launch applications and in some cases handle dynamic CPU/GPU allocation. The ``Variable resources`` module contains basic examples, while the ``Template`` -examples use a simple MPI/OpenMP (with GPU offload option) application (``forces``) +examples use a simple MPI/OpenMP (with GPU offload option) application (``forces``) to demonstrate libEnsemble’s capabilities on various HPC systems. The build_forces.sh_ file gives compile lines for building the simple ``forces`` application on various platforms (use -DGPU to build for GPU). 
diff --git a/docs/platforms/example_scripts.rst b/docs/platforms/example_scripts.rst index 24da78afc8..d534f0c662 100644 --- a/docs/platforms/example_scripts.rst +++ b/docs/platforms/example_scripts.rst @@ -7,12 +7,12 @@ for more information about the respective systems and configuration. .. note:: It is **highly recommended** that the directive lines (e.g., #SBATCH) in batch - submission scripts do **NOT** specify processor, task, or GPU configuration info - --- these lines should only specify the number of nodes required. + submission scripts do **NOT** specify processor, task, or GPU configuration + information---these lines should only specify the number of nodes required. For example, do not specify ``#SBATCH --gpus-per-node=4`` in order to use four GPUs on the node, when each worker may use less than this, as this may assign - all of the GPUs to a single MPI invocation. Instead, the configuration should + all of the GPUs to a single MPI invocation. Instead, the configuration should be supplied either :doc:`in the simulation function<../examples/sim_funcs/forces_simf_gpu>` or, if using dynamic resources, diff --git a/docs/platforms/platforms_index.rst b/docs/platforms/platforms_index.rst index 15563cdf79..c06cdbe6fd 100644 --- a/docs/platforms/platforms_index.rst +++ b/docs/platforms/platforms_index.rst @@ -89,19 +89,19 @@ remaining nodes in the allocation. Note that **gen_on_manager** is not set in the above example. -Distributed Running --------------------- +Distributed Running +------------------- In the **distributed** approach, libEnsemble can be run using the **mpi4py** communicator, with workers distributed across nodes. This is most often used when workers run simulation code directly, via a Python interface. 
The user -script is invoked with an MPI runner, for example (using an `mpich` based MPI):: +script is invoked with an MPI runner, for example (using an `mpich`-based MPI):: mpirun -np 4 -ppn 1 python myscript.py The distributed approach, can also be used with the executor, to co-locate workers -with the applications they submit. To ensure workers are placed as required in this -case, requires :ref:`a careful MPI rank placement `. +with the applications they submit. Ensuring that workers are placed as required in this +case requires :ref:`a careful MPI rank placement `. .. image:: ../images/distributed_new_detailed.png :alt: distributed diff --git a/docs/resource_manager/resource_detection.rst b/docs/resource_manager/resource_detection.rst index 474cc1cade..2048eb2793 100644 --- a/docs/resource_manager/resource_detection.rst +++ b/docs/resource_manager/resource_detection.rst @@ -18,17 +18,17 @@ LSF LSB_HOSTS/LSB_MCPU_HOSTS PBS PBS_NODEFILE =========== =========================== -These environment variable names can be modified via the :ref:`resource_info` +These environment variable names can be modified via the :ref:`resource_info` :class:`libE_specs` option. -On other systems you may have to supply a node list in a file called **node_list** -in your run directory. For example, on ALCF system Cooley_, the session node list +On other systems, you may have to supply a node list in a file called **node_list** +in your run directory. For example, on the ALCF system Cooley_, the session node list can be obtained as follows:: cat $COBALT_NODEFILE > node_list Resource detection can be disabled by setting -``libE_specs["disable_resource_manager"] = True``, and users can simply supply run +``libE_specs["disable_resource_manager"] = True``, and users can supply run configuration options on the Executor submit line. 
This will usually work sufficiently on diff --git a/docs/resource_manager/zero_resource_workers.rst b/docs/resource_manager/zero_resource_workers.rst index 1dc62095e6..4c72cf5d7b 100644 --- a/docs/resource_manager/zero_resource_workers.rst +++ b/docs/resource_manager/zero_resource_workers.rst @@ -53,7 +53,7 @@ concurrency desired by the ensemble, taking into account generators and simulato Users can set generator resources using the *libE_specs* options ``gen_num_procs`` and/or ``gen_num_gpus``, which take integer values. -If only ``gen_num_gpus`` is set, then the number of processors is set to match. +If only ``gen_num_gpus`` is set, then the number of processors is set to match. To vary generator resources, ``persis_info`` settings can be used in allocation functions before calling the ``gen_work`` support function. This takes the diff --git a/docs/tutorials/forces_gpu_tutorial.rst b/docs/tutorials/forces_gpu_tutorial.rst index 870b634648..be487f33cc 100644 --- a/docs/tutorials/forces_gpu_tutorial.rst +++ b/docs/tutorials/forces_gpu_tutorial.rst @@ -7,9 +7,9 @@ to the GPU. The libEnsemble scripts in this example are available under forces_gpu_ in the libEnsemble repository. This example is based on the -:doc:`simple forces tutorial <../tutorials/executor_forces_tutorial>` with +:doc:`simple forces tutorial <../tutorials/executor_forces_tutorial>` with a slightly modified simulation function (to assign GPUs) and a greatly increased -number of particles (allows live GPU usage to be viewed). +number of particles (to allow real-time GPU usage to be viewed). In the first example, each worker will be using one GPU. The code will assign the GPUs available to each worker, using the appropriate method. 
This works on systems diff --git a/libensemble/gen_funcs/persistent_gpCAM.py b/libensemble/gen_funcs/persistent_gpCAM.py index f130950468..05b08bb5ed 100644 --- a/libensemble/gen_funcs/persistent_gpCAM.py +++ b/libensemble/gen_funcs/persistent_gpCAM.py @@ -212,10 +212,10 @@ def persistent_gpCAM_covar(H_in, persis_info, gen_specs, libE_info): (lb, ub) and on following iterations samples the GP posterior covariance function to find sample points. - If gen_specs["user"]["use_grid"] is set to True the parameter space is + If gen_specs["user"]["use_grid"] is set to True, the parameter space is divided into a mesh of candidate points (num_points in each dimension). - Subsequent points chosen by maximum covariance that are at least a distance - `r` away from each other to explore difference regions. + Subsequent points are chosen with maximum covariance that are at least a + distance `r` away from each other to explore difference regions. If gen_specs["user"]["test_points_file"] is set to a file of evaluated points, then the gpCAM predications are compared at these points to assess diff --git a/libensemble/tests/regression_tests/test_gpCAM.py b/libensemble/tests/regression_tests/test_gpCAM.py index b554752eba..218ecfc918 100644 --- a/libensemble/tests/regression_tests/test_gpCAM.py +++ b/libensemble/tests/regression_tests/test_gpCAM.py @@ -11,7 +11,7 @@ Runs three variants of gpCAM. The first two use the posterior covariance sampling method, whereby the second run uses the grid approach and uses -the points from the first run as it’s test points.The third run uses the +the points from the first run as it’s test points. The third run uses the gpCAM ask/tell interface. 
See libensemble.gen_funcs.persistent_gpCAM for more details about the diff --git a/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py b/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py index 8df0ae006d..8c589161ad 100644 --- a/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py +++ b/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py @@ -2,7 +2,7 @@ Example of multi-fidelity optimization using a persistent GP gen_func (calling Ax). -Test is set to use the gen_on_manager option (persistent generator runs on +This test uses the gen_on_manager option (persistent generator runs on a thread). Therefore nworkers is the number of simulation workers. Execute via one of the following commands: From f713522a26c48188d58ea1632cd3130a70f5b7a6 Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 12:06:14 -0500 Subject: [PATCH 16/39] nb-clean to ipynb files --- .../forces_with_executor/forces_tutorial_notebook.ipynb | 4 +--- examples/tutorials/gpcam_surrogate_model/gpcam.ipynb | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb b/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb index bf408258a4..b85222e445 100644 --- a/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb +++ b/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb @@ -587,9 +587,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "! 
ls -l ensemble/sim*" diff --git a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb index 097a391c91..228288c0ee 100644 --- a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb +++ b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb @@ -438,8 +438,7 @@ "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" + "pygments_lexer": "ipython3" } }, "nbformat": 4, From d5dcd521a5b7f328241116b3c1dd56f48288581c Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 12:08:28 -0500 Subject: [PATCH 17/39] Update CHANGELOG.rst --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9ff1a6c209..a4650e9058 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -21,7 +21,7 @@ General Updates: * Improve handling of worker-specific `persis_info` fields when they are not initially provided. #1531 * Bugfix: Fix `final_gen_send` when there are no worker-specific `persis_info` fields. * Handle worker-generated `persis_info` fields. - * Ensure `persis_info` is initialized to an empty dictionary in user functions instead of None. + * Ensure `persis_info` is initialized to an empty dictionary in user functions instead of `None`. Examples: From 5d1278f2c2650ce78b849f38d492b4b657608af3 Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Tue, 8 Apr 2025 12:09:30 -0500 Subject: [PATCH 18/39] mono --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9ff1a6c209..a4650e9058 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -21,7 +21,7 @@ General Updates: * Improve handling of worker-specific `persis_info` fields when they are not initially provided. #1531 * Bugfix: Fix `final_gen_send` when there are no worker-specific `persis_info` fields. * Handle worker-generated `persis_info` fields. 
- * Ensure `persis_info` is initialized to an empty dictionary in user functions instead of None. + * Ensure `persis_info` is initialized to an empty dictionary in user functions instead of `None`. Examples: From db26ac9c33819eb5747685090227928cf8d463dc Mon Sep 17 00:00:00 2001 From: jlnav Date: Wed, 9 Apr 2025 11:36:03 -0500 Subject: [PATCH 19/39] small fixes - adjust version in pyproject.toml, fix pydantic version listed in advanced_installation --- docs/advanced_installation.rst | 2 +- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/advanced_installation.rst b/docs/advanced_installation.rst index 4d436eb2cd..ad3131ee15 100644 --- a/docs/advanced_installation.rst +++ b/docs/advanced_installation.rst @@ -9,7 +9,7 @@ automatically installed alongside libEnsemble: * Python_ ``>= 3.10`` * NumPy_ ``>= 1.21`` * psutil_ ``>= 5.9.4`` -* `pydantic`_ ``<= 1.10.12`` +* `pydantic`_ ``>= 1.10.12`` * pyyaml_ ``>= v6.0`` * tomli_ ``>= 1.2.1`` diff --git a/pyproject.toml b/pyproject.toml index 7aca0ef7ed..12d06091fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", ] -version = "1.4.3+dev" +version = "1.5.0" [project.urls] Documentation = "https://libensemble.readthedocs.io/en/main/" From 1a9f3418ce069cfa04e0a982e30af909bfd4626c Mon Sep 17 00:00:00 2001 From: Stephen Hudson Date: Wed, 9 Apr 2025 12:49:15 -0500 Subject: [PATCH 20/39] Add LUMI support (#1546) --- libensemble/resources/platforms.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/libensemble/resources/platforms.py b/libensemble/resources/platforms.py index 46b357540e..062d0bab44 100644 --- a/libensemble/resources/platforms.py +++ b/libensemble/resources/platforms.py @@ -162,6 +162,19 @@ class GenericROCm(Platform): scheduler_match_slots: bool = True +class Lumi(Platform): + mpi_runner: str = "srun" + cores_per_node: 
int = 64 + logical_cores_per_node: int = 128 + + +class LumiGPU(Lumi): + gpus_per_node: int = 8 + gpu_setting_type: str = "env" + gpu_setting_name: str = "ROCR_VISIBLE_DEVICES" + scheduler_match_slots: bool = True + + class Perlmutter(Platform): mpi_runner: str = "srun" @@ -243,6 +256,8 @@ class Known_platforms(BaseModel): aurora: Aurora = Aurora() generic_rocm: GenericROCm = GenericROCm() frontier: Frontier = Frontier() + lumi: Lumi = Lumi() + lumi_g: LumiGPU = LumiGPU() perlmutter: Perlmutter = Perlmutter() perlmutter_c: PerlmutterCPU = PerlmutterCPU() perlmutter_g: PerlmutterGPU = PerlmutterGPU() @@ -272,6 +287,16 @@ def known_envs(): else: name = "perlmutter" logger.manager_warning("Perlmutter detected, but no compute partition detected. Are you on login nodes?") + if os.environ.get("SLURM_CLUSTER_NAME") == "lumi": + partition = os.environ.get("SLURM_JOB_PARTITION") + print(f"Lumi partition: {partition}") + if not partition: + logger.manager_warning("LUMI detected, but no compute partition detected. Are you on login nodes?") + if partition and partition.endswith("-g"): + name = "lumi_g" + print(f"Lumi GPU detected: {name}") + else: + name = "lumi" return name From cbabf5a698d7cd75d4b0f5c85d67fe911dac7ac3 Mon Sep 17 00:00:00 2001 From: shudson Date: Wed, 9 Apr 2025 13:03:35 -0500 Subject: [PATCH 21/39] Update release notes --- CHANGELOG.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index a4650e9058..799fa6dd8f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Examples: * `persistent_gpCAM_ask_tell` to `persistent_gpCAM` * `persistent_gpCAM_simple` to `persistent_gpCAM_covar` (in fact less simple) * Persistent generators return `None` as first return value unless `H_o` is updated. #1515 +* Add LUMI to known platforms. 
#1546 Documentation: @@ -41,7 +42,7 @@ Documentation: :Note: * Tests were run on Linux and MacOS with Python versions 3.10, 3.11, 3.12, 3.13 -* Heterogeneous workflows tested on Aurora (ALCF), Polaris (ALCF), and Perlmutter (NERSC). +* Heterogeneous workflows tested on Aurora (ALCF), Polaris (ALCF), LUMI ((EuroHPC JU)), and Perlmutter (NERSC). :Known Issues: From 877f52be365db7df71aec30efc37ef883a24181e Mon Sep 17 00:00:00 2001 From: jlnav Date: Wed, 9 Apr 2025 13:10:51 -0500 Subject: [PATCH 22/39] make version dynamic in pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 12d06091fc..c9c478e191 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", ] -version = "1.5.0" +dynamic = ["version"] [project.urls] Documentation = "https://libensemble.readthedocs.io/en/main/" From 21f6175e7659de557d4e1c12ca6b33f8d4d01adc Mon Sep 17 00:00:00 2001 From: shudson Date: Wed, 9 Apr 2025 13:44:22 -0500 Subject: [PATCH 23/39] Revert "make version dynamic in pyproject.toml" This reverts commit 877f52be365db7df71aec30efc37ef883a24181e. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index c9c478e191..12d06091fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", ] -dynamic = ["version"] +version = "1.5.0" [project.urls] Documentation = "https://libensemble.readthedocs.io/en/main/" From fa951d7acd8bf7fe64368407ecfe1a1371a38438 Mon Sep 17 00:00:00 2001 From: jlnav Date: Wed, 9 Apr 2025 15:38:46 -0500 Subject: [PATCH 24/39] libE_specs *may* always have a workflow_dir_path, as we're getting the attribute from the object. try the dynamic versioning again... 
--- libensemble/ensemble.py | 19 ++++++++----------- pyproject.toml | 5 ++++- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/libensemble/ensemble.py b/libensemble/ensemble.py index 9b581a1d1a..412b98c495 100644 --- a/libensemble/ensemble.py +++ b/libensemble/ensemble.py @@ -578,14 +578,11 @@ def save_output(self, basename: str, append_attrs: bool = True): Format: ``_results_History_length=_evals=_ranks=`` """ if self.is_manager: - if self._get_option("libE_specs", "workflow_dir_path"): - save_libE_output( - self.H, - self.persis_info, - basename, - self.nworkers, - dest_path=self.libE_specs.workflow_dir_path, - append_attrs=append_attrs, - ) - else: - save_libE_output(self.H, self.persis_info, basename, self.nworkers, append_attrs=append_attrs) + save_libE_output( + self.H, + self.persis_info, + basename, + self.nworkers, + dest_path=self.libE_specs.workflow_dir_path, + append_attrs=append_attrs, + ) diff --git a/pyproject.toml b/pyproject.toml index 12d06091fc..c6a7ea9f36 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", ] -version = "1.5.0" +dynamic = ["version"] [project.urls] Documentation = "https://libensemble.readthedocs.io/en/main/" @@ -45,6 +45,9 @@ requires = ["setuptools", "wheel", "pip>=24.3.1,<26", "setuptools>=75.1.0,<79", where = ["."] include = ["libensemble*"] +[tool.setuptools.dynamic] +version = {attr = "libensemble.version.__version__"} + [tool.pixi.project] channels = ["conda-forge"] platforms = ["osx-arm64", "linux-64", "osx-64"] From 714e7e510535d74bf9b6b10c20f74e9a7839a759 Mon Sep 17 00:00:00 2001 From: jlnav Date: Wed, 9 Apr 2025 15:46:09 -0500 Subject: [PATCH 25/39] __all__ in tasmanian doesnt need definition if we'll never do: from libensemble.gen_funcs.persistent_tasmanian import * --- libensemble/gen_funcs/persistent_tasmanian.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) 
diff --git a/libensemble/gen_funcs/persistent_tasmanian.py b/libensemble/gen_funcs/persistent_tasmanian.py index ac491cf6a0..5afb9be1bc 100644 --- a/libensemble/gen_funcs/persistent_tasmanian.py +++ b/libensemble/gen_funcs/persistent_tasmanian.py @@ -10,10 +10,10 @@ from libensemble.tools import parse_args from libensemble.tools.persistent_support import PersistentSupport -__all__ = [ - "sparse_grid_batched", - "sparse_grid_async", -] +# __all__ = [ +# "sparse_grid_batched", +# "sparse_grid_async", +# ] def lex_le(x, y, tol=1e-12): From b57d80c639f56a066a11c8fee0f2eca676da556f Mon Sep 17 00:00:00 2001 From: jlnav Date: Wed, 9 Apr 2025 15:49:00 -0500 Subject: [PATCH 26/39] remove lumi prints in platforms.py --- libensemble/resources/platforms.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/libensemble/resources/platforms.py b/libensemble/resources/platforms.py index 062d0bab44..cf4dde324a 100644 --- a/libensemble/resources/platforms.py +++ b/libensemble/resources/platforms.py @@ -289,12 +289,10 @@ def known_envs(): logger.manager_warning("Perlmutter detected, but no compute partition detected. Are you on login nodes?") if os.environ.get("SLURM_CLUSTER_NAME") == "lumi": partition = os.environ.get("SLURM_JOB_PARTITION") - print(f"Lumi partition: {partition}") if not partition: logger.manager_warning("LUMI detected, but no compute partition detected. 
Are you on login nodes?") if partition and partition.endswith("-g"): name = "lumi_g" - print(f"Lumi GPU detected: {name}") else: name = "lumi" return name From cd178e3deaeebb9cd4debedce140549917590471 Mon Sep 17 00:00:00 2001 From: shudson Date: Wed, 9 Apr 2025 18:21:36 -0500 Subject: [PATCH 27/39] Update gpCAM notebook --- CHANGELOG.rst | 2 +- examples/tutorials/gpcam_surrogate_model/gpcam.ipynb | 8 ++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 799fa6dd8f..71cb1119d4 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -42,7 +42,7 @@ Documentation: :Note: * Tests were run on Linux and MacOS with Python versions 3.10, 3.11, 3.12, 3.13 -* Heterogeneous workflows tested on Aurora (ALCF), Polaris (ALCF), LUMI ((EuroHPC JU)), and Perlmutter (NERSC). +* Heterogeneous workflows tested on Aurora (ALCF), Polaris (ALCF), LUMI (EuroHPC JU), and Perlmutter (NERSC). :Known Issues: diff --git a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb index 228288c0ee..75b0c44041 100644 --- a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb +++ b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb @@ -20,10 +20,7 @@ "Ensure that libEnsemble, and gpCAM are installed via: `pip install libensemble gpcam`\n", " \n", "> **Note that for notebooks** the multiprocessing start method should be set to `fork` (default on Linux).\n", - "> To use with `spawn` (default on Windows and macOS), use the `multiprocess` library.\n", - "\n", - "> **Note:** If using **Colab** the cell below installs gpCAM and prevents Colab downgrading numpy due to pre-installs.\n", - "> Restart session when prompted (the warnings can be ignored)." + "> To use with `spawn` (default on Windows and macOS), use the `multiprocess` library." 
] }, { @@ -35,8 +32,7 @@ "import sys\n", "if 'google.colab' in sys.modules:\n", " !pip install libensemble\n", - " # Prevent downgraded numpy in colab due to preinstalls\n", - " !pip install --upgrade --force-reinstall numpy==2.1.1 scipy gpcam fvgp" + " !pip install gpcam" ] }, { From 79bcd3771cb434c524ff948a528e76916dc51cdd Mon Sep 17 00:00:00 2001 From: shudson Date: Wed, 9 Apr 2025 20:33:24 -0500 Subject: [PATCH 28/39] Replace flashing live animation with clean post-run version --- .../aposmm/aposmm_tutorial_notebook.ipynb | 117 +++++++++++++----- 1 file changed, 88 insertions(+), 29 deletions(-) diff --git a/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb b/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb index 7898ba7661..38eca21677 100644 --- a/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb +++ b/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb @@ -226,28 +226,7 @@ "source": [ "## Run the Ensemble\n", "\n", - "Optionally run the next cell to set up a live graphic of the optimization progress during execution.\n", - "\n", - "**WARNING**: The graphic may flicker when the ensemble is running." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Configure to view live progress\n", - "from libensemble.tools.live_data.plot2n import Plot2N\n", - "\n", - "libE_specs[\"live_data\"] = Plot2N(plot_type=\"2d\") # Alt: '3d'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, set `persis_info` (to provide random seeds to workers) and run the ensemble:" + "Finally, set persis_info (to provide random seeds to workers) and run the ensemble." 
] }, { @@ -290,29 +269,109 @@ " \n", "The first six values correspond to the local minima for the Six-Hump Camel simulation function.\n", "\n", - "The 7th value is a repeat minimum, as APOSMM will continue to start local optimization runs.\n", - "\n", - "Please see the [API reference](https://libensemble.readthedocs.io/en/main/examples/aposmm.html) for more APOSMM configuration options and other information.\n", + "The 7th value is a repeat minimum, as APOSMM will continue to start local optimization runs." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Viewing Animation\n", "\n", + "The following cell produces a 3D animation showing the random sampling points, \n", + "and points produced by the optimization runs, under the local Minima. It may take\n", + "a few seconds to produce the animation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib.animation as animation\n", + "from IPython.display import HTML\n", + "from matplotlib.lines import Line2D\n", + "\n", + "def animate_aposmm_3d(H, batch_size):\n", + " x_vals = np.linspace(-2, 2, 50)\n", + " y_vals = np.linspace(-1, 1.1, 50)\n", + " X, Y = np.meshgrid(x_vals, y_vals)\n", + " Z = np.array([six_hump_camel_func(np.array([x, y])) for x, y in zip(X.ravel(), Y.ravel())]).reshape(X.shape)\n", + " fig = plt.figure(figsize=(10, 8))\n", + " ax = fig.add_subplot(111, projection=\"3d\")\n", + " ax.plot_surface(X, Y, Z, cmap=\"winter\", edgecolor=\"none\", alpha=0.3)\n", + " sc_normal = ax.scatter3D([], [], [], s=6, color=\"black\", marker=\"o\", label=\"Point\")\n", + " sc_localp = ax.scatter3D([], [], [], s=40, color=\"red\", marker=\"^\", label=\"Optimization point\")\n", + " custom_M_marker = Line2D([0], [0], linestyle='None', marker='$\\\\mathrm{M}$',\n", + " markersize=8, markerfacecolor='black', markeredgecolor='black', color='white')\n", 
+ " ax.legend([sc_normal, sc_localp, custom_M_marker], [\"Point\", \"Optimization point\", \"Local minimum\"],loc=\"upper left\")\n", + " fig.tight_layout()\n", + " annotations = []\n", + "\n", + " def update(frame):\n", + " for ann in annotations:\n", + " ann.remove()\n", + " annotations.clear()\n", + " end = min((frame + 1) * batch_size, len(H))\n", + " H_sub = H[:end]\n", + " masks = [~H_sub[\"local_pt\"] & ~H_sub[\"local_min\"], H_sub[\"local_pt\"], H_sub[\"local_min\"]]\n", + " (x_n, y_n, f_n), (x_lp, y_lp, f_lp), (x_lm, y_lm, f_lm) = [\n", + " (H_sub[\"x\"][m, 0], H_sub[\"x\"][m, 1], H_sub[\"f\"][m]) for m in masks\n", + " ]\n", + " sc_normal._offsets3d = (x_n, y_n, f_n)\n", + " sc_localp._offsets3d = (x_lp, y_lp, f_lp)\n", + " for i in range(len(x_lm)):\n", + " annotations.append(ax.text(x_lm[i], y_lm[i], f_lm[i], \"M\", color=\"white\", fontsize=12,\n", + " bbox=dict(facecolor=\"black\", alpha=0.7, pad=2), zorder=999\n", + " ))\n", + " return sc_normal, sc_localp\n", + " total_frames = (len(H) + batch_size - 1) // batch_size\n", + " ani = animation.FuncAnimation(fig, update, frames=total_frames, interval=500, blit=False, repeat=False)\n", + " plt.close(fig)\n", + " return HTML(ani.to_jshtml())\n", + "\n", + "# Reduce batch_size for more refined steps\n", + "animate_aposmm_3d(H, batch_size=50)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "## Applications\n", "\n", "APOSMM is not limited to evaluating minima from pure Python simulation functions.\n", "Many common libEnsemble use-cases involve using libEnsemble's Executor to launch user\n", "applications with parameters requested by APOSMM, then evaluate their output using\n", "APOSMM, and repeat until minima are identified. 
A currently supported example\n", - "can be found in libEnsemble's [WarpX Scaling Test](https://github.com/Libensemble/libensemble/tree/main/libensemble/tests/scaling_tests/warpx)" + "can be found in libEnsemble's [WarpX Scaling Test](https://github.com/Libensemble/libensemble/tree/main/libensemble/tests/scaling_tests/warpx)\n", + "\n", + "Please see the [API reference](https://libensemble.readthedocs.io/en/main/examples/aposmm.html) for more APOSMM configuration options and other information." ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", + "language": "python", "name": "python3" }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } From 34339f4a61b3c9d68468bef794d2a989f268bdbf Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 10 Apr 2025 09:22:23 -0500 Subject: [PATCH 29/39] add unit test to ensure persis_info being None gets set to empty dict inside function --- .../tests/unit_tests/test_ufunc_runners.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/libensemble/tests/unit_tests/test_ufunc_runners.py b/libensemble/tests/unit_tests/test_ufunc_runners.py index 1d3cbb4b2c..09f17b07ec 100644 --- a/libensemble/tests/unit_tests/test_ufunc_runners.py +++ b/libensemble/tests/unit_tests/test_ufunc_runners.py @@ -54,6 +54,20 @@ def tupilize(arg1, arg2): simrunner.shutdown() +def test_persis_info_from_none(): + calc_in, sim_specs, gen_specs = get_ufunc_args() + + def tupilize(arg1, arg2): + return (arg1, arg2) + + sim_specs["sim_f"] = tupilize + simrunner = Runner(sim_specs) + libE_info = {"H_rows": np.array([2, 3, 4]), "workerID": 1, "comm": "fakecomm"} + + result = simrunner.run(calc_in, {"libE_info": libE_info, 
"persis_info": None, "tag": 1}) + assert result == (calc_in, {}) + + @pytest.mark.extra def test_globus_compute_runner_init(): calc_in, sim_specs, gen_specs = get_ufunc_args() @@ -122,6 +136,7 @@ def test_globus_compute_runner_fail(): if __name__ == "__main__": test_normal_runners() test_thread_runners() + test_persis_info_from_none() test_globus_compute_runner_init() test_globus_compute_runner_pass() test_globus_compute_runner_fail() From d30259eb1b63eb484e3348fda648a4ee09273bc3 Mon Sep 17 00:00:00 2001 From: jlnav Date: Thu, 10 Apr 2025 11:23:45 -0500 Subject: [PATCH 30/39] enable coverage for ax and gpcam, disable coverage for tasmanian --- .codecov.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index 18ef408010..bf866b089c 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -3,5 +3,4 @@ ignore: - "libensemble/tools/forkable_pdb.py" - "libensemble/tools/live_data/*" - "libensemble/sim_funcs/executor_hworld.py" - - "libensemble/gen_funcs/persistent_ax_multitask.py" - - "libensemble/gen_funcs/persistent_gpCAM.py" + - "libensemble/gen_funcs/persistent_tasmanian.py" From deb58af61d1bd978166c161b78de78b71efa2a2a Mon Sep 17 00:00:00 2001 From: Jeffrey Larson Date: Thu, 10 Apr 2025 11:28:38 -0500 Subject: [PATCH 31/39] Updating input fields --- libensemble/gen_funcs/persistent_sampling.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libensemble/gen_funcs/persistent_sampling.py b/libensemble/gen_funcs/persistent_sampling.py index 44611d06b6..ce4fe51515 100644 --- a/libensemble/gen_funcs/persistent_sampling.py +++ b/libensemble/gen_funcs/persistent_sampling.py @@ -29,8 +29,8 @@ def _get_user_params(user_specs): return b, n, lb, ub -@persistent_input_fields(["f", "x", "sim_id"]) -@output_data([("x", float, (2,))]) +@persistent_input_fields(["sim_id"]) +@output_data([("x", float, (2,))]) # The dimesion 2 is only a default... 
and is overwritten def persistent_uniform(_, persis_info, gen_specs, libE_info): """ This generation function always enters into persistent mode and returns From 026371cd338182feb8b9d4f152953f597181ec7b Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 10 Apr 2025 12:30:56 -0500 Subject: [PATCH 32/39] Add grid lines and up opacity --- .../aposmm/aposmm_tutorial_notebook.ipynb | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb b/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb index 38eca21677..594057f3c0 100644 --- a/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb +++ b/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb @@ -6,7 +6,7 @@ "source": [ "# Parallel Optimization with APOSMM\n", "\n", - "This tutorial demonstrates libEnsemble’s capability to identify multiple minima of simulation output using the built-in APOSMM (Asynchronously Parallel Optimization Solver for finding Multiple Minima) generator function (`gen_f`). In this tutorial, we’ll create a simple simulation function (`sim_f`) that defines a function with multiple minima, then write a libEnsemble calling script that imports APOSMM and parameterizes it to check for minima over a domain of outputs from our `sim_f`.\n", + "This tutorial demonstrates libEnsemble’s capability to identify multiple minima from simulation outputs using the built-in APOSMM (Asynchronously Parallel Optimization Solver for finding Multiple Minima) generator function (`gen_f`). 
In this tutorial, we’ll create a simple simulation function (`sim_f`) that defines a function with multiple minima, then write a libEnsemble calling script that imports APOSMM and parameterizes it to check for minima over a domain of outputs from our `sim_f`.\n", "\n", "Besides libEnsemble and NumPy, SciPy and mpmath are also required dependencies.\n", "\n", @@ -43,7 +43,6 @@ "outputs": [], "source": [ "# Define our simulation function\n", - "\n", "import numpy as np\n", "\n", "\n", @@ -122,9 +121,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This allocation function starts a single Persistent APOSMM routine and provides ``sim_f`` output for points requested by APOSMM. Points can be sampled points or points from local optimization runs.\n", + "This allocation function starts a single Persistent APOSMM generator to generate points (simulation input parameters), and returns the resulting values from each simulation (run in parallel). Points can be sampled points or points from the parallel local optimization runs.\n", "\n", - "APOSMM supports a wide variety of external optimizers. The following statements set optimizer settings to ``'scipy'`` to indicate to APOSMM which optimization method to use, and help prevent unnecessary imports or package installations:" + "APOSMM supports a wide variety of external optimizers. 
The following statement sets the optimizer settings to ``'scipy'`` to indicate to APOSMM which optimization method to use, so it is imported at global scope:" ] }, { @@ -142,6 +141,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "This script uses the dictionary interface to configure the run (the newer object interface is equally valid).\n", "Set up ``nworkers``, ``libE_specs``, ``sim_specs``, ``gen_specs``, and ``alloc_specs``:" ] }, @@ -279,8 +279,9 @@ "## Viewing Animation\n", "\n", "The following cell produces a 3D animation showing the random sampling points, \n", - "and points produced by the optimization runs, under the local Minima. It may take\n", - "a few seconds to produce the animation." + "the points produced by the optimization runs, and the local Minima.\n", + "\n", + "This may take up to about 30 seconds to produce the 3D animation, depending on system." ] }, { @@ -302,7 +303,7 @@ " Z = np.array([six_hump_camel_func(np.array([x, y])) for x, y in zip(X.ravel(), Y.ravel())]).reshape(X.shape)\n", " fig = plt.figure(figsize=(10, 8))\n", " ax = fig.add_subplot(111, projection=\"3d\")\n", - " ax.plot_surface(X, Y, Z, cmap=\"winter\", edgecolor=\"none\", alpha=0.3)\n", + " ax.plot_surface(X, Y, Z, cmap=\"winter\", edgecolor='k', linewidth=0.1, antialiased=True, alpha=0.5) \n", " sc_normal = ax.scatter3D([], [], [], s=6, color=\"black\", marker=\"o\", label=\"Point\")\n", " sc_localp = ax.scatter3D([], [], [], s=40, color=\"red\", marker=\"^\", label=\"Optimization point\")\n", " custom_M_marker = Line2D([0], [0], linestyle='None', marker='$\\\\mathrm{M}$',\n", From 8da45e886e7bc7f3a99fc881597742c77a97cf69 Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 10 Apr 2025 13:42:22 -0500 Subject: [PATCH 33/39] Add back gpcam colab import line --- examples/tutorials/gpcam_surrogate_model/gpcam.ipynb | 9 +++++++-- libensemble/gen_funcs/persistent_sampling.py | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git 
a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb index 75b0c44041..29616f582e 100644 --- a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb +++ b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb @@ -20,7 +20,10 @@ "Ensure that libEnsemble, and gpCAM are installed via: `pip install libensemble gpcam`\n", " \n", "> **Note that for notebooks** the multiprocessing start method should be set to `fork` (default on Linux).\n", - "> To use with `spawn` (default on Windows and macOS), use the `multiprocess` library." + "> To use with `spawn` (default on Windows and macOS), use the `multiprocess` library.\n", + "\n", + "> **Note:** If using **Colab** the cell below installs gpCAM and prevents Colab downgrading numpy due to pre-installs.\n", + "> Restart session when prompted (the warnings can be ignored)." ] }, { @@ -32,7 +35,9 @@ "import sys\n", "if 'google.colab' in sys.modules:\n", " !pip install libensemble\n", - " !pip install gpcam" + " # !pip install gpcam\n", + " # Prevent downgraded numpy in colab due to preinstalls\n", + " !pip install --upgrade --force-reinstall numpy==2.1.1 scipy gpcam fvgp" ] }, { diff --git a/libensemble/gen_funcs/persistent_sampling.py b/libensemble/gen_funcs/persistent_sampling.py index ce4fe51515..401ccdaa94 100644 --- a/libensemble/gen_funcs/persistent_sampling.py +++ b/libensemble/gen_funcs/persistent_sampling.py @@ -30,7 +30,7 @@ def _get_user_params(user_specs): @persistent_input_fields(["sim_id"]) -@output_data([("x", float, (2,))]) # The dimesion 2 is only a default...
and is overwritten +@output_data([("x", float, (2,))]) # The dimension of 2 is a default and can be overwritten def persistent_uniform(_, persis_info, gen_specs, libE_info): """ This generation function always enters into persistent mode and returns From ced94fc36bf4ff85a88b3bbd508a51fa010bf432 Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 10 Apr 2025 14:06:03 -0500 Subject: [PATCH 34/39] Revert ensemble.py to release/v_1.5.0 version --- libensemble/ensemble.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/libensemble/ensemble.py b/libensemble/ensemble.py index 412b98c495..9b581a1d1a 100644 --- a/libensemble/ensemble.py +++ b/libensemble/ensemble.py @@ -578,11 +578,14 @@ def save_output(self, basename: str, append_attrs: bool = True): Format: ``_results_History_length=_evals=_ranks=`` """ if self.is_manager: - save_libE_output( - self.H, - self.persis_info, - basename, - self.nworkers, - dest_path=self.libE_specs.workflow_dir_path, - append_attrs=append_attrs, - ) + if self._get_option("libE_specs", "workflow_dir_path"): + save_libE_output( + self.H, + self.persis_info, + basename, + self.nworkers, + dest_path=self.libE_specs.workflow_dir_path, + append_attrs=append_attrs, + ) + else: + save_libE_output(self.H, self.persis_info, basename, self.nworkers, append_attrs=append_attrs) From edfa3c1723ac90fe01347f21f1266ad052ddf304 Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 10 Apr 2025 14:08:57 -0500 Subject: [PATCH 35/39] Keep __all__ lines for docs --- libensemble/gen_funcs/persistent_tasmanian.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libensemble/gen_funcs/persistent_tasmanian.py b/libensemble/gen_funcs/persistent_tasmanian.py index 5afb9be1bc..ac491cf6a0 100644 --- a/libensemble/gen_funcs/persistent_tasmanian.py +++ b/libensemble/gen_funcs/persistent_tasmanian.py @@ -10,10 +10,10 @@ from libensemble.tools import parse_args from libensemble.tools.persistent_support import
PersistentSupport -# __all__ = [ -# "sparse_grid_batched", -# "sparse_grid_async", -# ] +__all__ = [ + "sparse_grid_batched", + "sparse_grid_async", +] def lex_le(x, y, tol=1e-12): From 58735ab04802ccf8a9c35aa944c156d51e9e6437 Mon Sep 17 00:00:00 2001 From: shudson Date: Thu, 10 Apr 2025 14:17:13 -0500 Subject: [PATCH 36/39] Set date for release 1.5.0 --- .wci.yml | 2 +- CHANGELOG.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.wci.yml b/.wci.yml index af395fb9af..f03aa0c635 100644 --- a/.wci.yml +++ b/.wci.yml @@ -17,7 +17,7 @@ language: Python release: version: 1.5.0 - date: 2025-04-08 + date: 2025-04-10 documentation: general: https://libensemble.readthedocs.io diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 71cb1119d4..bbb2bee549 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -11,7 +11,7 @@ GitHub issues are referenced, and can be viewed with hyperlinks on the `github r Release 1.5.0 -------------- -:Date: Apr 8, 2025 +:Date: Apr 10, 2025 General Updates: From 914ca07d5bd5990e528aa99d7a26fc7e6cc1a310 Mon Sep 17 00:00:00 2001 From: jlnav Date: Fri, 11 Apr 2025 11:44:12 -0500 Subject: [PATCH 37/39] trying out 'if TYPE_CHECKING' condition for importing modules only for type hints --- libensemble/resources/mpi_resources.py | 19 +++++++++++++++---- libensemble/resources/rset_resources.py | 6 +++++- libensemble/resources/worker_resources.py | 7 +++++-- libensemble/worker.py | 10 +++++++--- 4 files changed, 32 insertions(+), 10 deletions(-) diff --git a/libensemble/resources/mpi_resources.py b/libensemble/resources/mpi_resources.py index c978dc247e..9ea4a16ea4 100644 --- a/libensemble/resources/mpi_resources.py +++ b/libensemble/resources/mpi_resources.py @@ -6,6 +6,11 @@ import os import platform import subprocess +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from libensemble.resources.resources import Resources + from libensemble.resources.worker_resources import WorkerResources class MPIResourcesException(Exception): @@ 
-120,7 +125,7 @@ def task_partition( return num_procs, num_nodes, procs_per_node -def _max_rsets_per_node(worker_resources): +def _max_rsets_per_node(worker_resources: WorkerResources) -> int: """Return the maximum rsets per node for any node on this worker""" rset_team = worker_resources.rset_team local_rsets_list = worker_resources.local_rsets_list @@ -128,7 +133,13 @@ def _max_rsets_per_node(worker_resources): return max(rsets_on_node) -def get_resources(resources, num_procs=None, num_nodes=None, procs_per_node=None, hyperthreads=False): +def get_resources( + resources: Resources, + num_procs: int = None, + num_nodes: int = None, + procs_per_node: int = None, + hyperthreads: bool = False, +) -> tuple[int, int, int]: """Reconciles user-supplied options with available worker resources to produce run configuration. @@ -221,7 +232,7 @@ def get_resources(resources, num_procs=None, num_nodes=None, procs_per_node=None def create_machinefile( - resources: "resources.Resources", # noqa: F821 + resources: Resources, machinefile: str | None = None, num_procs: int = None, num_nodes: int | None = None, @@ -250,7 +261,7 @@ def create_machinefile( return built_mfile, num_procs, num_nodes, procs_per_node -def get_hostlist(resources, num_nodes=None): +def get_hostlist(resources: Resources, num_nodes=None): """Creates a hostlist based on user-supplied config options. completed by detected machine resources diff --git a/libensemble/resources/rset_resources.py b/libensemble/resources/rset_resources.py index e629547859..d93d1a8f93 100644 --- a/libensemble/resources/rset_resources.py +++ b/libensemble/resources/rset_resources.py @@ -1,7 +1,11 @@ import logging +from typing import TYPE_CHECKING import numpy as np +if TYPE_CHECKING: + from libensemble.resources.resources import Resources + logger = logging.getLogger(__name__) # To change logging level for just this module # logger.setLevel(logging.DEBUG) @@ -30,7 +34,7 @@ class RSetResources: # ('pool', int), # Pool ID (eg. 
separate gen/sim resources) - not yet used. ] - def __init__(self, num_workers, resources): + def __init__(self, num_workers: int, resources: Resources): """Initializes a new RSetResources instance Determines the compute resources available for each resource set. diff --git a/libensemble/resources/worker_resources.py b/libensemble/resources/worker_resources.py index b11dd7c35e..a082d9858a 100644 --- a/libensemble/resources/worker_resources.py +++ b/libensemble/resources/worker_resources.py @@ -1,12 +1,15 @@ import logging import os from collections import Counter, OrderedDict -from typing import Any +from typing import TYPE_CHECKING, Any import numpy as np from libensemble.resources.rset_resources import RSetResources +if TYPE_CHECKING: + from libensemble.resources.resources import GlobalResources + logger = logging.getLogger(__name__) # To change logging level for just this module # logger.setLevel(logging.DEBUG) @@ -26,7 +29,7 @@ class ResourceManager(RSetResources): # Holds the ID of the worker this rset is assigned to or zero man_rset_dtype = np.dtype(RSetResources.rset_dtype + [("assigned", int)]) - def __init__(self, num_workers: int, resources: "GlobalResources") -> None: # noqa: F821 + def __init__(self, num_workers: int, resources: GlobalResources) -> None: """Initializes a new ResourceManager instance Instantiates the numpy structured array that holds information for each diff --git a/libensemble/worker.py b/libensemble/worker.py index ec1793881f..eab211e8df 100644 --- a/libensemble/worker.py +++ b/libensemble/worker.py @@ -10,6 +10,10 @@ from pathlib import Path from traceback import format_exc from traceback import format_exception_only as format_exc_msg +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from libensemble.comms.comms import Comm import numpy as np import numpy.typing as npt @@ -42,7 +46,7 @@ def worker_main( - comm: "communicator", # noqa: F821 + comm: Comm, sim_specs: dict, gen_specs: dict, libE_specs: dict, @@ -50,7 +54,7 @@ def 
worker_main( log_comm: bool = True, resources: Resources = None, executor: Executor = None, -) -> None: # noqa: F821 +) -> None: """Evaluates calculations given to it by the manager. Creates a worker object, receives work from manager, runs worker, @@ -153,7 +157,7 @@ class Worker: def __init__( self, - comm: "communicator", # noqa: F821 + comm: Comm, dtypes: npt.DTypeLike, workerID: int, sim_specs: dict, From 59ef749c071972fee48e0621569c524899528ef8 Mon Sep 17 00:00:00 2001 From: jlnav Date: Fri, 11 Apr 2025 13:21:32 -0500 Subject: [PATCH 38/39] from __future__ import annotations --- libensemble/resources/mpi_resources.py | 2 ++ libensemble/resources/rset_resources.py | 2 ++ libensemble/resources/worker_resources.py | 2 ++ libensemble/worker.py | 2 ++ 4 files changed, 8 insertions(+) diff --git a/libensemble/resources/mpi_resources.py b/libensemble/resources/mpi_resources.py index 9ea4a16ea4..33b62ce3c4 100644 --- a/libensemble/resources/mpi_resources.py +++ b/libensemble/resources/mpi_resources.py @@ -2,6 +2,8 @@ Manages libensemble resources related to MPI tasks launched from nodes. 
""" +from __future__ import annotations + import logging import os import platform diff --git a/libensemble/resources/rset_resources.py b/libensemble/resources/rset_resources.py index d93d1a8f93..d35cdaee8b 100644 --- a/libensemble/resources/rset_resources.py +++ b/libensemble/resources/rset_resources.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import logging from typing import TYPE_CHECKING diff --git a/libensemble/resources/worker_resources.py b/libensemble/resources/worker_resources.py index a082d9858a..5033b2aeee 100644 --- a/libensemble/resources/worker_resources.py +++ b/libensemble/resources/worker_resources.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import logging import os from collections import Counter, OrderedDict diff --git a/libensemble/worker.py b/libensemble/worker.py index eab211e8df..f7421e48e5 100644 --- a/libensemble/worker.py +++ b/libensemble/worker.py @@ -3,6 +3,8 @@ ==================================================== """ +from __future__ import annotations + import logging import logging.handlers import socket From d802ad328ed1010f1018ad1314ccce566a164efd Mon Sep 17 00:00:00 2001 From: jlnav Date: Fri, 11 Apr 2025 13:41:38 -0500 Subject: [PATCH 39/39] other postponed annotations --- libensemble/executors/balsam_executor.py | 2 ++ libensemble/worker.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/libensemble/executors/balsam_executor.py b/libensemble/executors/balsam_executor.py index af1f88e88d..54c9f78263 100644 --- a/libensemble/executors/balsam_executor.py +++ b/libensemble/executors/balsam_executor.py @@ -74,6 +74,8 @@ class HelloApp(ApplicationDefinition): .. 
_Globus: https://www.globus.org/ """ +from __future__ import annotations + import datetime import logging import os diff --git a/libensemble/worker.py b/libensemble/worker.py index f7421e48e5..44d5f0ddeb 100644 --- a/libensemble/worker.py +++ b/libensemble/worker.py @@ -165,7 +165,7 @@ def __init__( sim_specs: dict, gen_specs: dict, libE_specs: dict, - ) -> None: # noqa: F821 + ) -> None: """Initializes new worker object""" self.comm = comm self.dtypes = dtypes @@ -204,7 +204,7 @@ def _set_rset_team(libE_info: dict) -> bool: return False @staticmethod - def _set_executor(workerID: int, comm: "communicator") -> bool: # noqa: F821 + def _set_executor(workerID: int, comm: Comm) -> bool: """Sets worker ID in the executor, return True if set""" exctr = Executor.executor if isinstance(exctr, Executor): @@ -215,7 +215,7 @@ def _set_executor(workerID: int, comm: "communicator") -> bool: # noqa: F821 return False @staticmethod - def _set_resources(workerID, comm: "communicator") -> bool: # noqa: F821 + def _set_resources(workerID, comm: Comm) -> bool: """Sets worker ID in the resources, return True if set""" resources = Resources.resources if isinstance(resources, Resources):