diff --git a/.codecov.yml b/.codecov.yml index 18ef408010..bf866b089c 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -3,5 +3,4 @@ ignore: - "libensemble/tools/forkable_pdb.py" - "libensemble/tools/live_data/*" - "libensemble/sim_funcs/executor_hworld.py" - - "libensemble/gen_funcs/persistent_ax_multitask.py" - - "libensemble/gen_funcs/persistent_gpCAM.py" + - "libensemble/gen_funcs/persistent_tasmanian.py" diff --git a/.wci.yml b/.wci.yml index 78b37075ff..f03aa0c635 100644 --- a/.wci.yml +++ b/.wci.yml @@ -16,8 +16,8 @@ description: | language: Python release: - version: 1.4.3 - date: 2024-12-16 + version: 1.5.0 + date: 2025-04-10 documentation: general: https://libensemble.readthedocs.io diff --git a/CHANGELOG.rst b/CHANGELOG.rst index b81a395af8..bbb2bee549 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,46 @@ GitHub issues are referenced, and can be viewed with hyperlinks on the `github r .. _`github releases page`: https://github.com/Libensemble/libensemble/releases +Release 1.5.0 +-------------- + +:Date: Apr 10, 2025 + +General Updates: + +* Migrate package build system to `pyproject.toml` (with `pixi` support). #1459 +* Improve handling when no MPI found. #1514 +* `ensemble.save_output()` can save without appending attributes `append_attrs=False`. #1531 +* Improve handling of worker-specific `persis_info` fields when they are not initially provided. #1531 + * Bugfix: Fix `final_gen_send` when there are no worker-specific `persis_info` fields. + * Handle worker-generated `persis_info` fields. + * Ensure `persis_info` is initialized to an empty dictionary in user functions instead of `None`. + +Examples: + +* Update Ax generator for `Ax v0.5.0`. #1508 +* Rename gpCAM generators. #1516 + * `persistent_gpCAM_ask_tell` to `persistent_gpCAM` + * `persistent_gpCAM_simple` to `persistent_gpCAM_covar` (in fact less simple) +* Persistent generators return `None` as first return value unless `H_o` is updated. #1515 +* Add LUMI to known platforms. 
#1546 + +Documentation: + +* Revamp Examples and HPC section of documentation. #1501, #1536, #1539 +* Add tutorial and notebook demonstrating surrogate model creation with gpCAM. #1531 +* Update Aurora guide. #1510 +* Update and documented APOSMM/WarpX example. #1543 + +:Note: + +* Tests were run on Linux and MacOS with Python versions 3.10, 3.11, 3.12, 3.13 +* Heterogeneous workflows tested on Aurora (ALCF), Polaris (ALCF), LUMI (EuroHPC JU), and Perlmutter (NERSC). + +:Known Issues: + +* See known issues section in the documentation. + Release 1.4.3 -------------- diff --git a/LICENSE b/LICENSE index de0d1ca0ea..6a45c6a4cf 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2018-2024, UChicago Argonne, LLC and the libEnsemble Development Team +Copyright (c) 2018-2025, UChicago Argonne, LLC and the libEnsemble Development Team All Rights Reserved. Redistribution and use in source and binary forms, with or without diff --git a/docs/advanced_installation.rst b/docs/advanced_installation.rst index 4d436eb2cd..ad3131ee15 100644 --- a/docs/advanced_installation.rst +++ b/docs/advanced_installation.rst @@ -9,7 +9,7 @@ automatically installed alongside libEnsemble: * Python_ ``>= 3.10`` * NumPy_ ``>= 1.21`` * psutil_ ``>= 5.9.4`` -* `pydantic`_ ``<= 1.10.12`` +* `pydantic`_ ``>= 1.10.12`` * pyyaml_ ``>= v6.0`` * tomli_ ``>= 1.2.1`` diff --git a/docs/data_structures/libE_specs.rst b/docs/data_structures/libE_specs.rst index c0ca141403..caa7b2eda8 100644 --- a/docs/data_structures/libE_specs.rst +++ b/docs/data_structures/libE_specs.rst @@ -205,14 +205,14 @@ libEnsemble is primarily customized by setting options within a ``LibeSpecs`` cl **save_H_and_persis_on_abort** [bool] = ``True``: Save states of ``H`` and ``persis_info`` to file on aborting after an exception. - **save_H_on_completion** Optional[bool] = ``False`` + **save_H_on_completion** bool | None = ``False`` Save state of ``H`` to file upon completing a workflow. 
Also enabled when either ``save_every_k_sims`` or ``save_every_k_gens`` is set. - **save_H_with_date** Optional[bool] = ``False`` + **save_H_with_date** bool | None = ``False`` Save ``H`` filename contains date and timestamp. - **H_file_prefix** Optional[str] = ``"libE_history"`` + **H_file_prefix** str | None = ``"libE_history"`` Prefix for ``H`` filename. **use_persis_return_gen** [bool] = ``False``: diff --git a/docs/examples/sim_funcs.rst b/docs/examples/sim_funcs.rst index 3cdf8bdc11..be4374d884 100644 --- a/docs/examples/sim_funcs.rst +++ b/docs/examples/sim_funcs.rst @@ -24,7 +24,7 @@ Ideal for simple debugging of generator processes or system testing. Borehole function with kills Chwirut1 vector-valued function Inverse Bayesian likelihood - Norm + Norm Rosenbrock test optimization function Six Hump Camel Test noisy function @@ -36,8 +36,8 @@ Functions that run user applications These use the executor to launch applications and in some cases handle dynamic CPU/GPU allocation. -The ``Variable resources`` module contains basic examples, while the ``Template`` -examples use a simple MPI/OpenMP (with GPU offload option) application (``forces``) +The ``Variable resources`` module contains basic examples, while the ``Template`` +examples use a simple MPI/OpenMP (with GPU offload option) application (``forces``) to demonstrate libEnsemble’s capabilities on various HPC systems. The build_forces.sh_ file gives compile lines for building the simple ``forces`` application on various platforms (use -DGPU to build for GPU). diff --git a/docs/examples/sim_funcs/forces_simf_gpu.rst b/docs/examples/sim_funcs/forces_simf_gpu.rst index 38cb8630e8..4c74d254f1 100644 --- a/docs/examples/sim_funcs/forces_simf_gpu.rst +++ b/docs/examples/sim_funcs/forces_simf_gpu.rst @@ -1,4 +1,4 @@ -Template for GPU executables +Template for GPU executables ---------------------------- .. 
role:: underline diff --git a/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst b/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst index a190e99fea..8a491e8816 100644 --- a/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst +++ b/docs/examples/sim_funcs/forces_simf_gpu_multi_app.rst @@ -10,7 +10,7 @@ ranks and GPU resources as requested by the generator. This makes efficient use of each node as the expensive GPU simulations will use the GPUs on the node/s, while the rest of the CPU cores are assigned to the simple CPU-only simulations. -For a realistic use-case see https://journals.aps.org/prab/abstract/10.1103/PhysRevAccelBeams.26.084601 +See this publication_ for a real-world demonstration of these capabilities. .. automodule:: forces_multi_app.forces_simf :members: @@ -39,5 +39,6 @@ up by each worker and these will be used when the simulation is run, unless over More information is available in the :doc:`Forces GPU tutorial <../../tutorials/forces_gpu_tutorial>` and the video_ demonstration on Frontier_. -.. _video: https://www.youtube.com/watch?v=H2fmbZ6DnVc .. _Frontier: https://docs.olcf.ornl.gov/systems/frontier_user_guide.html +.. _publication: https://doi.org/10.1103/PhysRevAccelBeams.26.084601 +.. _video: https://www.youtube.com/watch?v=H2fmbZ6DnVc diff --git a/docs/examples/submission_scripts.rst b/docs/examples/submission_scripts.rst index 1f853585b5..7c37da40ea 100644 --- a/docs/examples/submission_scripts.rst +++ b/docs/examples/submission_scripts.rst @@ -1 +1,5 @@ .. include:: ../platforms/example_scripts.rst + :end-before: .. _slurm_mpi_distributed: + +.. include:: ../platforms/example_scripts.rst + :start-after: .. _slurm_mpi_distributed: diff --git a/docs/platforms/example_scripts.rst b/docs/platforms/example_scripts.rst index 072cc65f22..d534f0c662 100644 --- a/docs/platforms/example_scripts.rst +++ b/docs/platforms/example_scripts.rst @@ -7,18 +7,17 @@ for more information about the respective systems and configuration. .. 
note:: It is **highly recommended** that the directive lines (e.g., #SBATCH) in batch - submission scripts do **NOT** specify processor, task, or GPU configuration info - --- these lines should only specify the number of nodes required. + submission scripts do **NOT** specify processor, task, or GPU configuration + information---these lines should only specify the number of nodes required. For example, do not specify ``#SBATCH --gpus-per-node=4`` in order to use four GPUs on the node, when each worker may use less than this, as this may assign - all of the GPUs to a single MPI invocation. Instead, the configuration should + all of the GPUs to a single MPI invocation. Instead, the configuration should be supplied either :doc:`in the simulation function<../examples/sim_funcs/forces_simf_gpu>` or, if using dynamic resources, :doc:`in the generator<../examples/sim_funcs/forces_simf_gpu_vary_resources>`. - General examples ---------------- @@ -43,7 +42,6 @@ LSF - Basic :caption: /examples/libE_submission_scripts/submit_lsf_simple.sh :language: bash - System Examples --------------- diff --git a/docs/platforms/platforms_index.rst b/docs/platforms/platforms_index.rst index c56ab66aa9..c06cdbe6fd 100644 --- a/docs/platforms/platforms_index.rst +++ b/docs/platforms/platforms_index.rst @@ -46,14 +46,12 @@ which runs the generator on the manager (using a thread) as below. A SLURM batch script may include: - .. code-block:: bash #SBATCH --nodes 3 python run_libe_forces.py --nworkers 3 - When using **gen_on_manager**, set ``nworkers`` to the number of workers desired for running simulations. Dedicated Mode @@ -64,7 +62,6 @@ True, the MPI executor will not launch applications on nodes where libEnsemble P processes (manager and workers) are running. Workers launch applications onto the remaining nodes in the allocation. - .. list-table:: :widths: 60 40 @@ -84,29 +81,27 @@ remaining nodes in the allocation. A SLURM batch script may include: - .. 
code-block:: bash #SBATCH --nodes 3 python run_libe_forces.py --nworkers 3 - Note that **gen_on_manager** is not set in the above example. -Distributed Running --------------------- +Distributed Running +------------------- In the **distributed** approach, libEnsemble can be run using the **mpi4py** communicator, with workers distributed across nodes. This is most often used when workers run simulation code directly, via a Python interface. The user -script is invoked with an MPI runner, for example (using an `mpich` based MPI):: +script is invoked with an MPI runner, for example (using an `mpich`-based MPI):: mpirun -np 4 -ppn 1 python myscript.py The distributed approach, can also be used with the executor, to co-locate workers -with the applications they submit. To ensure workers are placed as required in this -case, requires :ref:`a careful MPI rank placement `. +with the applications they submit. Ensuring that workers are placed as required in this +case requires :ref:`a careful MPI rank placement `. .. image:: ../images/distributed_new_detailed.png :alt: distributed @@ -116,7 +111,6 @@ case, requires :ref:`a careful MPI rank placement `. This allows the libEnsemble worker to read files produced by the application on local node storage. - Configuring the Run ------------------- @@ -140,7 +134,7 @@ and partitions these to workers. The :doc:`MPI Executor<../executor/mpi_executor accesses the resources available to the current worker when launching tasks. Zero-resource workers -~~~~~~~~~~~~~~~~~~~~~ +--------------------- Users with persistent ``gen_f`` functions may notice that the persistent workers are still automatically assigned system resources. This can be resolved by using @@ -159,7 +153,6 @@ Varying resources libEnsemble also features :ref:`dynamic resource assignment`, whereby the number of processes and/or the number of GPUs can be a set for each simulation by the generator. 
- Overriding Auto-Detection ------------------------- @@ -172,8 +165,6 @@ libE_specs option. When using the MPI Executor, it is possible to override the detected information using the `custom_info` argument. See the :doc:`MPI Executor<../executor/mpi_executor>` for more. - - Systems with Launch/MOM Nodes ----------------------------- @@ -212,7 +203,6 @@ or *to entirely different systems*. Submission scripts for running on launch/MOM nodes and for using Balsam can be found in the :doc:`examples`. - .. _globus_compute_ref: Globus Compute - Remote User Functions diff --git a/docs/resource_manager/resource_detection.rst b/docs/resource_manager/resource_detection.rst index 474cc1cade..2048eb2793 100644 --- a/docs/resource_manager/resource_detection.rst +++ b/docs/resource_manager/resource_detection.rst @@ -18,17 +18,17 @@ LSF LSB_HOSTS/LSB_MCPU_HOSTS PBS PBS_NODEFILE =========== =========================== -These environment variable names can be modified via the :ref:`resource_info` +These environment variable names can be modified via the :ref:`resource_info` :class:`libE_specs` option. -On other systems you may have to supply a node list in a file called **node_list** -in your run directory. For example, on ALCF system Cooley_, the session node list +On other systems, you may have to supply a node list in a file called **node_list** +in your run directory. For example, on the ALCF system Cooley_, the session node list can be obtained as follows:: cat $COBALT_NODEFILE > node_list Resource detection can be disabled by setting -``libE_specs["disable_resource_manager"] = True``, and users can simply supply run +``libE_specs["disable_resource_manager"] = True``, and users can supply run configuration options on the Executor submit line. 
This will usually work sufficiently on diff --git a/docs/resource_manager/zero_resource_workers.rst b/docs/resource_manager/zero_resource_workers.rst index 1dc62095e6..4c72cf5d7b 100644 --- a/docs/resource_manager/zero_resource_workers.rst +++ b/docs/resource_manager/zero_resource_workers.rst @@ -53,7 +53,7 @@ concurrency desired by the ensemble, taking into account generators and simulato Users can set generator resources using the *libE_specs* options ``gen_num_procs`` and/or ``gen_num_gpus``, which take integer values. -If only ``gen_num_gpus`` is set, then the number of processors is set to match. +If only ``gen_num_gpus`` is set, then the number of processors is set to match. To vary generator resources, ``persis_info`` settings can be used in allocation functions before calling the ``gen_work`` support function. This takes the diff --git a/docs/tutorials/forces_gpu_tutorial.rst b/docs/tutorials/forces_gpu_tutorial.rst index ab1ee121fc..be487f33cc 100644 --- a/docs/tutorials/forces_gpu_tutorial.rst +++ b/docs/tutorials/forces_gpu_tutorial.rst @@ -7,9 +7,9 @@ to the GPU. The libEnsemble scripts in this example are available under forces_gpu_ in the libEnsemble repository. This example is based on the -:doc:`simple forces tutorial <../tutorials/executor_forces_tutorial>` with +:doc:`simple forces tutorial <../tutorials/executor_forces_tutorial>` with a slightly modified simulation function (to assign GPUs) and a greatly increased -number of particles (allows live GPU usage to be viewed). +number of particles (to allow real-time GPU usage to be viewed). In the first example, each worker will be using one GPU. The code will assign the GPUs available to each worker, using the appropriate method. 
This works on systems @@ -35,7 +35,6 @@ from the simple forces example are highlighted: # Optional - to print GPU settings from libensemble.tools.test_support import check_gpu_setting - def run_forces(H, persis_info, sim_specs, libE_info): """Launches the forces MPI app and auto-assigns ranks and GPU resources. @@ -154,7 +153,6 @@ and use this information however you want. output = np.zeros(1, dtype=sim_specs["out"]) output["energy"][0] = final_energy - return output The above code will assign a GPU to each worker on CUDA-capable systems, diff --git a/docs/tutorials/gpcam_tutorial.rst b/docs/tutorials/gpcam_tutorial.rst index 09b523c7a3..a013c1b67e 100644 --- a/docs/tutorials/gpcam_tutorial.rst +++ b/docs/tutorials/gpcam_tutorial.rst @@ -10,7 +10,7 @@ In each iteration, a batch of points is produced for concurrent evaluation, maxi Ensure that libEnsemble, and gpCAM are installed via: ``pip install libensemble gpcam`` Generator function ------------------ +------------------ The gpCAM generator function is called ``persistent_gpCAM``. @@ -179,7 +179,7 @@ For running applications using parallel resources in the simulator see the `forc return term1 + term2 + term3 Calling Script -------------- +-------------- Our calling script configures libEnsemble, the generator function, and the simulator function. It then create the ensemble object and runs the ensemble. @@ -275,7 +275,7 @@ At the end of our calling script we run the ensemble. pprint(H[["sim_id", "x", "f"]][:16]) # See first 16 results Rerun and test model at known points ------------------------------------ +------------------------------------ To see how the accuracy of the surrogate model improves, we can use previously evaluated points as test points and run again with a different seed. 
@@ -292,7 +292,7 @@ To see how the accuracy of the surrogate model improves, we can use previously e print(persis_info) Viewing model progression ------------------------- +------------------------- Now we can check how our model's values compared against the values at known test points as the ensemble progresses. The comparison is based on the **mean squared error** between the gpCAM model and our known diff --git a/examples/libE_submission_scripts/bebop_submit_pbs_central.sh b/examples/libE_submission_scripts/bebop_submit_pbs_central.sh index 4d0aff5894..7fb474194f 100644 --- a/examples/libE_submission_scripts/bebop_submit_pbs_central.sh +++ b/examples/libE_submission_scripts/bebop_submit_pbs_central.sh @@ -5,7 +5,6 @@ #PBS -A [project] #PBS -N libE_example - cd $PBS_O_WORKDIR # Choose MPI backend. Note that the built mpi4py in your environment should match. module load oneapi/mpi diff --git a/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb b/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb index 7898ba7661..594057f3c0 100644 --- a/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb +++ b/examples/tutorials/aposmm/aposmm_tutorial_notebook.ipynb @@ -6,7 +6,7 @@ "source": [ "# Parallel Optimization with APOSMM\n", "\n", - "This tutorial demonstrates libEnsemble’s capability to identify multiple minima of simulation output using the built-in APOSMM (Asynchronously Parallel Optimization Solver for finding Multiple Minima) generator function (`gen_f`). In this tutorial, we’ll create a simple simulation function (`sim_f`) that defines a function with multiple minima, then write a libEnsemble calling script that imports APOSMM and parameterizes it to check for minima over a domain of outputs from our `sim_f`.\n", + "This tutorial demonstrates libEnsemble’s capability to identify multiple minima from simulation outputs using the built-in APOSMM (Asynchronously Parallel Optimization Solver for finding Multiple Minima) generator function (`gen_f`). 
In this tutorial, we’ll create a simple simulation function (`sim_f`) that defines a function with multiple minima, then write a libEnsemble calling script that imports APOSMM and parameterizes it to check for minima over a domain of outputs from our `sim_f`.\n", "\n", "Besides libEnsemble and NumPy, SciPy and mpmath are also required dependencies.\n", "\n", @@ -43,7 +43,6 @@ "outputs": [], "source": [ "# Define our simulation function\n", - "\n", "import numpy as np\n", "\n", "\n", @@ -122,9 +121,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This allocation function starts a single Persistent APOSMM routine and provides ``sim_f`` output for points requested by APOSMM. Points can be sampled points or points from local optimization runs.\n", + "This allocation function starts a single Persistent APOSMM generator to generate points (simulation input parameters), and returns the resulting values from each simulation (run in parallel). Points can be sampled points or points from the parallel local optimization runs.\n", "\n", - "APOSMM supports a wide variety of external optimizers. The following statements set optimizer settings to ``'scipy'`` to indicate to APOSMM which optimization method to use, and help prevent unnecessary imports or package installations:" + "APOSMM supports a wide variety of external optimizers. 
The following statement sets the optimizer settings to ``'scipy'`` to indicate to APOSMM which optimization method to use, so it is imported at global scope:" ] }, { @@ -142,6 +141,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "This script uses the dictionary interface to configure the run (the newer object interface is equally valid).\n", "Set up ``nworkers``, ``libE_specs``, ``sim_specs``, ``gen_specs``, and ``alloc_specs``:" ] }, @@ -226,28 +226,7 @@ "source": [ "## Run the Ensemble\n", "\n", - "Optionally run the next cell to set up a live graphic of the optimization progress during execution.\n", - "\n", - "**WARNING**: The graphic may flicker when the ensemble is running." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Configure to view live progress\n", - "from libensemble.tools.live_data.plot2n import Plot2N\n", - "\n", - "libE_specs[\"live_data\"] = Plot2N(plot_type=\"2d\") # Alt: '3d'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, set `persis_info` (to provide random seeds to workers) and run the ensemble:" + "Finally, set persis_info (to provide random seeds to workers) and run the ensemble." ] }, { @@ -290,29 +269,110 @@ " \n", "The first six values correspond to the local minima for the Six-Hump Camel simulation function.\n", "\n", - "The 7th value is a repeat minimum, as APOSMM will continue to start local optimization runs.\n", + "The 7th value is a repeat minimum, as APOSMM will continue to start local optimization runs." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Viewing Animation\n", "\n", - "Please see the [API reference](https://libensemble.readthedocs.io/en/main/examples/aposmm.html) for more APOSMM configuration options and other information.\n", + "The following cell produces a 3D animation showing the random sampling points, \n", + "the points produced by the optimization runs, and the local Minima.\n", "\n", + "This may take up to about 30 seconds to produce the 3D animation, depending on system." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import matplotlib.animation as animation\n", + "from IPython.display import HTML\n", + "from matplotlib.lines import Line2D\n", + "\n", + "def animate_aposmm_3d(H, batch_size):\n", + " x_vals = np.linspace(-2, 2, 50)\n", + " y_vals = np.linspace(-1, 1.1, 50)\n", + " X, Y = np.meshgrid(x_vals, y_vals)\n", + " Z = np.array([six_hump_camel_func(np.array([x, y])) for x, y in zip(X.ravel(), Y.ravel())]).reshape(X.shape)\n", + " fig = plt.figure(figsize=(10, 8))\n", + " ax = fig.add_subplot(111, projection=\"3d\")\n", + " ax.plot_surface(X, Y, Z, cmap=\"winter\", edgecolor='k', linewidth=0.1, antialiased=True, alpha=0.5) \n", + " sc_normal = ax.scatter3D([], [], [], s=6, color=\"black\", marker=\"o\", label=\"Point\")\n", + " sc_localp = ax.scatter3D([], [], [], s=40, color=\"red\", marker=\"^\", label=\"Optimization point\")\n", + " custom_M_marker = Line2D([0], [0], linestyle='None', marker='$\\\\mathrm{M}$',\n", + " markersize=8, markerfacecolor='black', markeredgecolor='black', color='white')\n", + " ax.legend([sc_normal, sc_localp, custom_M_marker], [\"Point\", \"Optimization point\", \"Local minimum\"],loc=\"upper left\")\n", + " fig.tight_layout()\n", + " annotations = []\n", + "\n", + " def update(frame):\n", + " for ann in annotations:\n", + " ann.remove()\n", + " 
annotations.clear()\n", + " end = min((frame + 1) * batch_size, len(H))\n", + " H_sub = H[:end]\n", + " masks = [~H_sub[\"local_pt\"] & ~H_sub[\"local_min\"], H_sub[\"local_pt\"], H_sub[\"local_min\"]]\n", + " (x_n, y_n, f_n), (x_lp, y_lp, f_lp), (x_lm, y_lm, f_lm) = [\n", + " (H_sub[\"x\"][m, 0], H_sub[\"x\"][m, 1], H_sub[\"f\"][m]) for m in masks\n", + " ]\n", + " sc_normal._offsets3d = (x_n, y_n, f_n)\n", + " sc_localp._offsets3d = (x_lp, y_lp, f_lp)\n", + " for i in range(len(x_lm)):\n", + " annotations.append(ax.text(x_lm[i], y_lm[i], f_lm[i], \"M\", color=\"white\", fontsize=12,\n", + " bbox=dict(facecolor=\"black\", alpha=0.7, pad=2), zorder=999\n", + " ))\n", + " return sc_normal, sc_localp\n", + " total_frames = (len(H) + batch_size - 1) // batch_size\n", + " ani = animation.FuncAnimation(fig, update, frames=total_frames, interval=500, blit=False, repeat=False)\n", + " plt.close(fig)\n", + " return HTML(ani.to_jshtml())\n", + "\n", + "# Reduce batch_size for more refined steps\n", + "animate_aposmm_3d(H, batch_size=50)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "## Applications\n", "\n", "APOSMM is not limited to evaluating minima from pure Python simulation functions.\n", "Many common libEnsemble use-cases involve using libEnsemble's Executor to launch user\n", "applications with parameters requested by APOSMM, then evaluate their output using\n", "APOSMM, and repeat until minima are identified. A currently supported example\n", - "can be found in libEnsemble's [WarpX Scaling Test](https://github.com/Libensemble/libensemble/tree/main/libensemble/tests/scaling_tests/warpx)" + "can be found in libEnsemble's [WarpX Scaling Test](https://github.com/Libensemble/libensemble/tree/main/libensemble/tests/scaling_tests/warpx)\n", + "\n", + "Please see the [API reference](https://libensemble.readthedocs.io/en/main/examples/aposmm.html) for more APOSMM configuration options and other information." 
] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", + "language": "python", "name": "python3" }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.1" } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb b/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb index bf408258a4..b85222e445 100644 --- a/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb +++ b/examples/tutorials/forces_with_executor/forces_tutorial_notebook.ipynb @@ -587,9 +587,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ "! ls -l ensemble/sim*" diff --git a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb index 097a391c91..29616f582e 100644 --- a/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb +++ b/examples/tutorials/gpcam_surrogate_model/gpcam.ipynb @@ -35,8 +35,9 @@ "import sys\n", "if 'google.colab' in sys.modules:\n", " !pip install libensemble\n", + " # !pip install gpcam\n", " # Prevent downgraded numpy in colab due to preinstalls\n", - " !pip install --upgrade --force-reinstall numpy==2.1.1 scipy gpcam fvgp" + " !pip install --upgrade --force-reinstall numpy==2.1.1 scipy gpcam fvgp\"" ] }, { @@ -438,8 +439,7 @@ "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" + "pygments_lexer": "ipython3" } }, "nbformat": 4, diff --git a/libensemble/alloc_funcs/give_sim_work_first.py b/libensemble/alloc_funcs/give_sim_work_first.py index f001006f3a..7ac4d75e5e 100644 --- 
a/libensemble/alloc_funcs/give_sim_work_first.py +++ b/libensemble/alloc_funcs/give_sim_work_first.py @@ -1,5 +1,4 @@ import time -from typing import Tuple import numpy as np import numpy.typing as npt @@ -15,7 +14,7 @@ def give_sim_work_first( alloc_specs: dict, persis_info: dict, libE_info: dict, -) -> Tuple[dict]: +) -> tuple[dict]: """ Decide what should be given to workers. This allocation function gives any available simulation work first, and only when all simulations are diff --git a/libensemble/ensemble.py b/libensemble/ensemble.py index 9b581a1d1a..0ceba2ab33 100644 --- a/libensemble/ensemble.py +++ b/libensemble/ensemble.py @@ -1,7 +1,6 @@ import importlib import json import logging -from typing import Optional import numpy.typing as npt import tomli @@ -270,15 +269,15 @@ class Ensemble: def __init__( self, - sim_specs: Optional[SimSpecs] = SimSpecs(), - gen_specs: Optional[GenSpecs] = GenSpecs(), - exit_criteria: Optional[ExitCriteria] = {}, - libE_specs: Optional[LibeSpecs] = LibeSpecs(), - alloc_specs: Optional[AllocSpecs] = AllocSpecs(), - persis_info: Optional[dict] = {}, - executor: Optional[Executor] = None, - H0: Optional[npt.NDArray] = None, - parse_args: Optional[bool] = False, + sim_specs: SimSpecs | None = SimSpecs(), + gen_specs: GenSpecs | None = GenSpecs(), + exit_criteria: ExitCriteria | None = {}, + libE_specs: LibeSpecs | None = LibeSpecs(), + alloc_specs: AllocSpecs | None = AllocSpecs(), + persis_info: dict | None = {}, + executor: Executor | None = None, + H0: npt.NDArray | None = None, + parse_args: bool | None = False, ): self.sim_specs = sim_specs self.gen_specs = gen_specs diff --git a/libensemble/executors/balsam_executor.py b/libensemble/executors/balsam_executor.py index 6c97da4ccf..54c9f78263 100644 --- a/libensemble/executors/balsam_executor.py +++ b/libensemble/executors/balsam_executor.py @@ -74,11 +74,12 @@ class HelloApp(ApplicationDefinition): .. 
_Globus: https://www.globus.org/ """ +from __future__ import annotations + import datetime import logging import os import time -from typing import Any, Dict, List, Optional, Union from balsam import util @@ -106,9 +107,9 @@ class BalsamTask(Task): def __init__( self, - app: Optional[Application] = None, + app: Application | None = None, app_args: dict = None, - workdir: Optional[str] = None, + workdir: str | None = None, stdout: str = None, stderr: str = None, workerid: int = None, @@ -122,7 +123,7 @@ def __init__( # May want to override workdir with Balsam value when it exists Task.__init__(self, app, app_args, workdir, stdout, stderr, workerid) - def _get_time_since_balsam_submit(self) -> Union[int, float]: + def _get_time_since_balsam_submit(self) -> int: """Return time since balsam task entered ``RUNNING`` state""" event_query = EventLog.objects.filter(job_id=self.process.id, to_state="RUNNING") if not len(event_query): @@ -203,7 +204,7 @@ def poll(self) -> None: self.state = "FAILED" self._set_complete() - def wait(self, timeout: Optional[int] = None) -> None: + def wait(self, timeout: int | None = None) -> None: """Waits on completion of the task or raises ``TimeoutExpired``. Status attributes of task are updated on completion. @@ -280,10 +281,10 @@ def add_app(self, *args) -> None: def register_app( self, BalsamApp: ApplicationDefinition, - app_name: Optional[str] = None, - calc_type: Optional[str] = None, + app_name: str | None = None, + calc_type: str | None = None, desc: str = None, - precedent: Optional[str] = None, + precedent: str | None = None, ) -> None: """Registers a Balsam ``ApplicationDefinition`` to libEnsemble. This class instance *must* have a ``site`` and ``command_template`` specified. 
See @@ -331,9 +332,9 @@ def submit_allocation( job_mode: str = "mpi", queue: str = "local", project: str = "local", - optional_params: Dict[Any, Any] = {}, - filter_tags: Dict[Any, Any] = {}, - partitions: List[Any] = [], + optional_params: dict = {}, + filter_tags: dict = {}, + partitions: list = [], ) -> BatchJob: """ Submits a Balsam ``BatchJob`` machine allocation request to Balsam. @@ -435,14 +436,14 @@ def set_resources(self, resources: str) -> None: def submit( self, - calc_type: Optional[str] = None, - app_name: Optional[str] = None, + calc_type: str | None = None, + app_name: str | None = None, app_args: dict = None, num_procs: int = None, num_nodes: int = None, procs_per_node: int = None, max_tasks_per_node: int = None, - machinefile: Optional[str] = None, + machinefile: str | None = None, gpus_per_rank: int = 0, transfers: dict = {}, workdir: str = "", diff --git a/libensemble/executors/executor.py b/libensemble/executors/executor.py index f90d07aca5..d9cf6f428d 100644 --- a/libensemble/executors/executor.py +++ b/libensemble/executors/executor.py @@ -12,7 +12,7 @@ import sys import time from pathlib import Path -from typing import Any, Optional, Union +from typing import Any import libensemble.utils.launcher as launcher from libensemble.message_numbers import ( @@ -24,7 +24,6 @@ WORKER_DONE, WORKER_KILL_ON_TIMEOUT, ) -from libensemble.resources.resources import Resources from libensemble.utils.timer import TaskTimer logger = logging.getLogger(__name__) @@ -78,10 +77,10 @@ class Application: def __init__( self, full_path: str, - name: Optional[str] = None, - calc_type: Optional[str] = "sim", - desc: Optional[str] = None, - pyobj: Optional[Any] = None, # used by balsam_executor to store ApplicationDefinition + name: str | None = None, + calc_type: str | None = "sim", + desc: str | None = None, + pyobj: Any | None = None, # used by balsam_executor to store ApplicationDefinition precedent: str = "", ) -> None: """Instantiates a new Application instance.""" 
@@ -101,7 +100,7 @@ def __init__( self.app_cmd = " ".join(filter(None, [self.precedent, self.full_path])) -def jassert(test: Optional[Union[Application, bool]], *args) -> None: +def jassert(test: Application | bool | None, *args) -> None: "Version of assert that raises a ExecutorException" if not test: raise ExecutorException(*args) @@ -170,7 +169,7 @@ def _add_to_env(self, key, value): """Add to task environment - overwrites if already set""" self.env[key] = value - def workdir_exists(self) -> Optional[bool]: + def workdir_exists(self) -> bool | None: """Returns true if the task's workdir exists""" return self.workdir and os.path.exists(self.workdir) @@ -260,7 +259,7 @@ def poll(self) -> None: self._set_complete() - def wait(self, timeout: Optional[float] = None) -> None: + def wait(self, timeout: float | None = None) -> None: """Waits on completion of the task or raises TimeoutExpired exception Status attributes of task are updated on completion. @@ -288,7 +287,7 @@ def wait(self, timeout: Optional[float] = None) -> None: self._set_complete() - def result(self, timeout: Optional[Union[int, float]] = None) -> str: + def result(self, timeout: int | float | None = None) -> str: """Wrapper for task.wait() that also returns the task's status on completion. Parameters @@ -303,7 +302,7 @@ def result(self, timeout: Optional[Union[int, float]] = None) -> str: self.wait(timeout=timeout) return self.state - def exception(self, timeout: Optional[Union[int, float]] = None): + def exception(self, timeout: int | float | None = None): """Wrapper for task.wait() that instead returns the task's error code on completion. Parameters @@ -386,7 +385,7 @@ class Executor: executor = None - def _wait_on_start(self, task: Task, fail_time: Optional[int] = None) -> None: + def _wait_on_start(self, task: Task, fail_time: int | None = None) -> None: """Called by submit when wait_on_start is True. Blocks until task polls as having started. 
@@ -472,7 +471,7 @@ def default_app(self, calc_type: str) -> Application: jassert(app, f"Default {calc_type} app is not set") return app - def set_resources(self, resources: Resources): + def set_resources(self, resources): # Does not use resources pass @@ -493,9 +492,9 @@ def set_gen_procs_gpus(self, libE_info): def register_app( self, full_path: str, - app_name: Optional[str] = None, - calc_type: Optional[str] = None, - desc: Optional[str] = None, + app_name: str | None = None, + calc_type: str | None = None, + desc: str | None = None, precedent: str = "", ) -> None: """Registers a user application to libEnsemble. @@ -571,7 +570,7 @@ def manager_kill_received(self) -> bool: return False def polling_loop( - self, task: Task, timeout: Optional[int] = None, delay: float = 0.1, poll_manager: bool = False + self, task: Task, timeout: int | None = None, delay: float = 0.1, poll_manager: bool = False ) -> int: """Optional, blocking, generic task status polling loop. Operates until the task finishes, times out, or is optionally killed via a manager signal. 
On completion, returns a @@ -637,7 +636,7 @@ def polling_loop( return calc_status - def get_task(self, taskid: Union[str, int]) -> Optional[Task]: + def get_task(self, taskid: str | int) -> Task | None: """Returns the task object for the supplied task ID""" task = next((j for j in self.list_of_tasks if j.id == taskid), None) if task is None: @@ -681,14 +680,14 @@ def _check_app_exists(self, full_path: str) -> None: def submit( self, - calc_type: Optional[str] = None, - app_name: Optional[str] = None, - app_args: Optional[str] = None, - stdout: Optional[str] = None, - stderr: Optional[str] = None, - dry_run: Optional[bool] = False, - wait_on_start: Optional[bool] = False, - env_script: Optional[str] = None, + calc_type: str | None = None, + app_name: str | None = None, + app_args: str | None = None, + stdout: str | None = None, + stderr: str | None = None, + dry_run: bool | None = False, + wait_on_start: bool | None = False, + env_script: str | None = None, ) -> Task: """Create a new task and run as a local serial subprocess. 
diff --git a/libensemble/executors/mpi_executor.py b/libensemble/executors/mpi_executor.py index 28d1fb6f97..9b167ddaa1 100644 --- a/libensemble/executors/mpi_executor.py +++ b/libensemble/executors/mpi_executor.py @@ -15,13 +15,11 @@ import logging import os import time -from typing import List, Optional, Union import libensemble.utils.launcher as launcher from libensemble.executors.executor import Executor, ExecutorException, Task from libensemble.executors.mpi_runner import MPIRunner from libensemble.resources.mpi_resources import get_MPI_variant -from libensemble.resources.resources import Resources logger = logging.getLogger(__name__) # To change logging level for just this module @@ -137,11 +135,11 @@ def set_gen_procs_gpus(self, libE_info): self.gen_nprocs = libE_info.get("num_procs") self.gen_ngpus = libE_info.get("num_gpus") - def set_resources(self, resources: Resources) -> None: + def set_resources(self, resources) -> None: self.resources = resources def _launch_with_retries( - self, task: Task, subgroup_launch: bool, wait_on_start: bool, run_cmd: List[str], use_shell: bool + self, task: Task, subgroup_launch: bool, wait_on_start: bool, run_cmd: list[str], use_shell: bool ) -> None: """Launch task with retry mechanism""" retry_count = 0 @@ -189,25 +187,25 @@ def _launch_with_retries( def submit( self, - calc_type: Optional[str] = None, - app_name: Optional[str] = None, - num_procs: Optional[int] = None, - num_nodes: Optional[int] = None, - procs_per_node: Optional[int] = None, - num_gpus: Optional[int] = None, - machinefile: Optional[str] = None, - app_args: Optional[str] = None, - stdout: Optional[str] = None, - stderr: Optional[str] = None, - stage_inout: Optional[str] = None, - hyperthreads: Optional[bool] = False, - dry_run: Optional[bool] = False, - wait_on_start: Optional[bool] = False, - extra_args: Optional[str] = None, - auto_assign_gpus: Optional[bool] = False, - match_procs_to_gpus: Optional[bool] = False, - env_script: Optional[str] = None, - 
mpi_runner_type: Optional[Union[str, dict]] = None, + calc_type: str | None = None, + app_name: str | None = None, + num_procs: int | None = None, + num_nodes: int | None = None, + procs_per_node: int | None = None, + num_gpus: int | None = None, + machinefile: str | None = None, + app_args: str | None = None, + stdout: str | None = None, + stderr: str | None = None, + stage_inout: str | None = None, + hyperthreads: bool | None = False, + dry_run: bool | None = False, + wait_on_start: bool | None = False, + extra_args: str | None = None, + auto_assign_gpus: bool | None = False, + match_procs_to_gpus: bool | None = False, + env_script: str | None = None, + mpi_runner_type: str | dict | None = None, ) -> Task: """Creates a new task, and either executes or schedules execution. diff --git a/libensemble/gen_funcs/persistent_ax_multitask.py b/libensemble/gen_funcs/persistent_ax_multitask.py index 451f14ad9a..6838750003 100644 --- a/libensemble/gen_funcs/persistent_ax_multitask.py +++ b/libensemble/gen_funcs/persistent_ax_multitask.py @@ -21,14 +21,12 @@ import os import warnings from copy import deepcopy -from typing import Optional import numpy as np import pandas as pd import torch from ax import Metric, Runner from ax.core.data import Data -from ax.core.experiment import Experiment from ax.core.generator_run import GeneratorRun from ax.core.multi_type_experiment import MultiTypeExperiment from ax.core.objective import Objective @@ -45,19 +43,36 @@ AxParameterWarning = Warning from ax.modelbridge.factory import get_sobol -from ax.modelbridge.registry import MBM_X_trans, Models, ST_MTGP_trans +from ax.modelbridge.registry import Models, ST_MTGP_trans from ax.modelbridge.torch import TorchModelBridge -from ax.modelbridge.transforms.convert_metric_names import ConvertMetricNames, tconfig_from_mt_experiment -from ax.modelbridge.transforms.derelativize import Derelativize -from ax.modelbridge.transforms.stratified_standardize_y import StratifiedStandardizeY -from 
ax.modelbridge.transforms.task_encode import TaskChoiceToIntTaskChoice -from ax.modelbridge.transforms.trial_as_task import TrialAsTask +from ax.modelbridge.transforms.convert_metric_names import tconfig_from_mt_experiment from ax.runners import SyntheticRunner from ax.storage.json_store.save import save_experiment from ax.storage.metric_registry import register_metrics from ax.storage.runner_registry import register_runner from ax.utils.common.result import Ok +try: + # For Ax >= 0.5.0 + from ax.modelbridge.registry import MBM_X_trans + from ax.modelbridge.transforms.convert_metric_names import ConvertMetricNames + from ax.modelbridge.transforms.derelativize import Derelativize + from ax.modelbridge.transforms.stratified_standardize_y import StratifiedStandardizeY + from ax.modelbridge.transforms.task_encode import TaskChoiceToIntTaskChoice + from ax.modelbridge.transforms.trial_as_task import TrialAsTask + + MT_MTGP_trans = list(MBM_X_trans) + [ + Derelativize, + ConvertMetricNames, + TrialAsTask, + StratifiedStandardizeY, + TaskChoiceToIntTaskChoice, + ] + +except ImportError: + # For Ax < 0.5.0 + from ax.modelbridge.registry import MT_MTGP_trans + from libensemble.message_numbers import EVAL_GEN_TAG, FINISHED_PERSISTENT_GEN_TAG, PERSIS_STOP, STOP_TAG from libensemble.tools.persistent_support import PersistentSupport @@ -75,21 +90,13 @@ category=AxParameterWarning, ) -MT_MTGP_trans = list(MBM_X_trans) + [ - Derelativize, - ConvertMetricNames, - TrialAsTask, - StratifiedStandardizeY, - TaskChoiceToIntTaskChoice, -] - # get_MTGP based on https://ax.dev/docs/tutorials/multi_task/ def get_MTGP( - experiment: Experiment, + experiment, data: Data, - search_space: Optional[SearchSpace] = None, - trial_index: Optional[int] = None, + search_space: SearchSpace | None = None, # noqa: MDA501 + trial_index: int | None = None, # noqa: MDA501 device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.double, ) -> TorchModelBridge: @@ -102,9 +109,7 @@ def get_MTGP( 
""" if isinstance(experiment, MultiTypeExperiment): - trial_index_to_type = { - t.index: t.trial_type for t in experiment.trials.values() - } + trial_index_to_type = {t.index: t.trial_type for t in experiment.trials.values()} transforms = MT_MTGP_trans transform_configs = { "TrialAsTask": {"trial_level_map": {"trial_type": trial_index_to_type}}, @@ -275,9 +280,7 @@ def persistent_gp_mt_ax_gen_f(H, persis_info, gen_specs, libE_info): if not os.path.exists("model_history"): os.mkdir("model_history") # Register metric and runner in order to be able to save to json. - _, encoder_registry, decoder_registry = register_metrics( - {AxMetric: None} - ) + _, encoder_registry, decoder_registry = register_metrics({AxMetric: None}) _, encoder_registry, decoder_registry = register_runner( AxRunner, encoder_registry=encoder_registry, diff --git a/libensemble/gen_funcs/persistent_gpCAM.py b/libensemble/gen_funcs/persistent_gpCAM.py index f130950468..05b08bb5ed 100644 --- a/libensemble/gen_funcs/persistent_gpCAM.py +++ b/libensemble/gen_funcs/persistent_gpCAM.py @@ -212,10 +212,10 @@ def persistent_gpCAM_covar(H_in, persis_info, gen_specs, libE_info): (lb, ub) and on following iterations samples the GP posterior covariance function to find sample points. - If gen_specs["user"]["use_grid"] is set to True the parameter space is + If gen_specs["user"]["use_grid"] is set to True, the parameter space is divided into a mesh of candidate points (num_points in each dimension). - Subsequent points chosen by maximum covariance that are at least a distance - `r` away from each other to explore difference regions. + Subsequent points are chosen with maximum covariance that are at least a + distance `r` away from each other to explore difference regions. 
If gen_specs["user"]["test_points_file"] is set to a file of evaluated points, then the gpCAM predications are compared at these points to assess diff --git a/libensemble/gen_funcs/persistent_sampling.py b/libensemble/gen_funcs/persistent_sampling.py index 44611d06b6..401ccdaa94 100644 --- a/libensemble/gen_funcs/persistent_sampling.py +++ b/libensemble/gen_funcs/persistent_sampling.py @@ -29,8 +29,8 @@ def _get_user_params(user_specs): return b, n, lb, ub -@persistent_input_fields(["f", "x", "sim_id"]) -@output_data([("x", float, (2,))]) +@persistent_input_fields(["sim_id"]) +@output_data([("x", float, (2,))]) # The dimension of 2 is a default and can be overwritten def persistent_uniform(_, persis_info, gen_specs, libE_info): """ This generation function always enters into persistent mode and returns diff --git a/libensemble/libE.py b/libensemble/libE.py index 2762890bc3..af302d13c8 100644 --- a/libensemble/libE.py +++ b/libensemble/libE.py @@ -121,7 +121,6 @@ import sys import traceback from pathlib import Path -from typing import Callable, Dict import numpy as np @@ -154,11 +153,11 @@ def libE( sim_specs: SimSpecs, gen_specs: GenSpecs, exit_criteria: ExitCriteria, - persis_info: Dict = {}, + persis_info: dict = {}, alloc_specs: AllocSpecs = AllocSpecs(), libE_specs: LibeSpecs = {}, H0=None, -) -> (np.ndarray, Dict, int): +) -> (np.ndarray, dict, int): """ Parameters ---------- @@ -273,8 +272,8 @@ def manager( alloc_specs, libE_specs, hist: History, - on_abort: Callable = None, - on_cleanup: Callable = None, + on_abort=None, + on_cleanup=None, ): """Generic manager routine run.""" logger.info("Logger initializing: [workerID] precedes each line.
[0] = Manager") diff --git a/libensemble/manager.py b/libensemble/manager.py index c0cf02500e..b12b96a774 100644 --- a/libensemble/manager.py +++ b/libensemble/manager.py @@ -12,7 +12,7 @@ import sys import time import traceback -from typing import Any, Union +from typing import Any import numpy as np import numpy.typing as npt @@ -291,7 +291,7 @@ def term_test_stop_val(self, stop_val: Any) -> bool: H = self.hist.H return np.any(filter_nans(H[key][H["sim_ended"]]) <= val) - def term_test(self, logged: bool = True) -> Union[bool, int]: + def term_test(self, logged: bool = True) -> bool | int: """Checks termination criteria""" for retval, key, testf in self.term_tests: if key in self.exit_criteria: diff --git a/libensemble/resources/env_resources.py b/libensemble/resources/env_resources.py index 5086c5793e..47b3d78624 100644 --- a/libensemble/resources/env_resources.py +++ b/libensemble/resources/env_resources.py @@ -6,7 +6,7 @@ import os import re from collections import OrderedDict -from typing import Any, List, Optional, Tuple, Union +from typing import Any logger = logging.getLogger(__name__) @@ -38,11 +38,11 @@ class EnvResources: def __init__( self, - nodelist_env_slurm: Optional[str] = None, - nodelist_env_cobalt: Optional[str] = None, - nodelist_env_pbs: Optional[str] = None, - nodelist_env_lsf: Optional[str] = None, - nodelist_env_lsf_shortform: Optional[str] = None, + nodelist_env_slurm: str | None = None, + nodelist_env_cobalt: str | None = None, + nodelist_env_pbs: str | None = None, + nodelist_env_lsf: str | None = None, + nodelist_env_lsf_shortform: str | None = None, ) -> None: """Initializes a new EnvResources instance @@ -93,7 +93,7 @@ def __init__( self.scheduler = env break - def get_nodelist(self) -> List[Union[str, Any]]: + def get_nodelist(self) -> list[str | Any]: """Returns nodelist from environment or an empty list""" if self.scheduler: env = self.scheduler @@ -105,19 +105,19 @@ def get_nodelist(self) -> List[Union[str, Any]]: return [] 
@staticmethod - def abbrev_nodenames(node_list: List[str], prefix: str = None) -> List[str]: + def abbrev_nodenames(node_list: list[str], prefix: str = None) -> list[str]: """Returns nodelist with only string up to first dot""" newlist = [s.split(".", 1)[0] for s in node_list] return newlist @staticmethod - def cobalt_abbrev_nodenames(node_list: List[str], prefix: str = "nid") -> List[str]: + def cobalt_abbrev_nodenames(node_list: list[str], prefix: str = "nid") -> list[str]: """Returns nodelist with prefix and leading zeros stripped""" newlist = [s.lstrip(prefix) for s in node_list] newlist = [s.lstrip("0") for s in newlist] return newlist - def shortnames(self, node_list: List[str]) -> List[str]: + def shortnames(self, node_list: list[str]) -> list[str]: """Returns nodelist with entries in abbreviated form""" if self.scheduler == "Cobalt": return EnvResources.cobalt_abbrev_nodenames(node_list) @@ -126,7 +126,7 @@ def shortnames(self, node_list: List[str]) -> List[str]: return node_list @staticmethod - def _range_split(s: str) -> Tuple[int, int, int]: + def _range_split(s: str) -> tuple[int, int, int]: """Splits ID range string""" ab = s.split("-", 1) nnum_len = len(ab[0]) @@ -138,7 +138,7 @@ def _range_split(s: str) -> Tuple[int, int, int]: return a, b, nnum_len @staticmethod - def _noderange_append(prefix: str, nidstr: str, suffix: str) -> List[str]: + def _noderange_append(prefix: str, nidstr: str, suffix: str) -> list[str]: """Formats and appends nodes to overall nodelist""" nidlst = [] for nidgroup in nidstr.split(","): @@ -148,7 +148,7 @@ def _noderange_append(prefix: str, nidstr: str, suffix: str) -> List[str]: return nidlst @staticmethod - def get_slurm_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_slurm_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the Slurm environment""" fullstr = os.environ[node_list_env] if not fullstr: @@ -172,7 +172,7 @@ def get_slurm_nodelist(node_list_env: str) -> 
List[Union[str, Any]]: return sorted(nidlst) @staticmethod - def get_cobalt_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_cobalt_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the Cobalt environment""" nidlst = [] nidstr = os.environ[node_list_env] @@ -185,7 +185,7 @@ def get_cobalt_nodelist(node_list_env: str) -> List[Union[str, Any]]: return sorted(nidlst, key=int) @staticmethod - def get_pbs_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_pbs_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist path from PBS environment""" nidstr_path = os.environ[node_list_env] if not nidstr_path: @@ -201,7 +201,7 @@ def get_pbs_nodelist(node_list_env: str) -> List[Union[str, Any]]: return unique_nodelist_shortnames @staticmethod - def get_lsf_nodelist(node_list_env: str) -> List[Union[str, Any]]: + def get_lsf_nodelist(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the LSF environment""" full_list = os.environ[node_list_env] entries = full_list.split() @@ -211,7 +211,7 @@ def get_lsf_nodelist(node_list_env: str) -> List[Union[str, Any]]: return nodes @staticmethod - def get_lsf_nodelist_frm_shortform(node_list_env: str) -> List[Union[str, Any]]: + def get_lsf_nodelist_frm_shortform(node_list_env: str) -> list[str | Any]: """Gets global libEnsemble nodelist from the LSF environment from short-form version""" full_list = os.environ[node_list_env] entries = full_list.split() diff --git a/libensemble/resources/mpi_resources.py b/libensemble/resources/mpi_resources.py index 33a759ab59..33b62ce3c4 100644 --- a/libensemble/resources/mpi_resources.py +++ b/libensemble/resources/mpi_resources.py @@ -2,18 +2,24 @@ Manages libensemble resources related to MPI tasks launched from nodes. 
""" +from __future__ import annotations + import logging import os import platform import subprocess -from typing import Optional, Tuple, Union +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from libensemble.resources.resources import Resources + from libensemble.resources.worker_resources import WorkerResources class MPIResourcesException(Exception): """Resources module exception""" -def rassert(test: Optional[Union[int, bool]], *args) -> None: +def rassert(test: int | bool | None, *args) -> None: if not test: raise MPIResourcesException(*args) @@ -89,8 +95,8 @@ def get_MPI_runner(mpi_runner=None) -> str: def task_partition( - num_procs: Optional[int], num_nodes: Optional[int], procs_per_node: Optional[int], machinefile: Optional[str] = None -) -> Union[Tuple[None, None, None], Tuple[int, int, int]]: + num_procs: int | None, num_nodes: int | None, procs_per_node: int | None, machinefile: str | None = None +) -> tuple[None, None, None] | tuple[int, int, int]: """Takes provided nprocs/nodes/ranks and outputs working configuration of procs/nodes/ranks or error """ @@ -121,7 +127,7 @@ def task_partition( return num_procs, num_nodes, procs_per_node -def _max_rsets_per_node(worker_resources): +def _max_rsets_per_node(worker_resources: WorkerResources) -> int: """Return the maximum rsets per node for any node on this worker""" rset_team = worker_resources.rset_team local_rsets_list = worker_resources.local_rsets_list @@ -129,7 +135,13 @@ def _max_rsets_per_node(worker_resources): return max(rsets_on_node) -def get_resources(resources, num_procs=None, num_nodes=None, procs_per_node=None, hyperthreads=False): +def get_resources( + resources: Resources, + num_procs: int = None, + num_nodes: int = None, + procs_per_node: int = None, + hyperthreads: bool = False, +) -> tuple[int, int, int]: """Reconciles user-supplied options with available worker resources to produce run configuration. 
@@ -222,13 +234,13 @@ def get_resources(resources, num_procs=None, num_nodes=None, procs_per_node=None def create_machinefile( - resources: "resources.Resources", # noqa: F821 - machinefile: Optional[str] = None, + resources: Resources, + machinefile: str | None = None, num_procs: int = None, - num_nodes: Optional[int] = None, - procs_per_node: Optional[int] = None, + num_nodes: int | None = None, + procs_per_node: int | None = None, hyperthreads: bool = False, -) -> Tuple[bool, None, int, int]: +) -> tuple[bool, None, int, int]: """Creates a machinefile based on user-supplied config options, completed by detected machine resources """ @@ -251,7 +263,7 @@ def create_machinefile( return built_mfile, num_procs, num_nodes, procs_per_node -def get_hostlist(resources, num_nodes=None): +def get_hostlist(resources: Resources, num_nodes=None): """Creates a hostlist based on user-supplied config options. completed by detected machine resources diff --git a/libensemble/resources/node_resources.py b/libensemble/resources/node_resources.py index 55d3ef5d94..1c35b337f1 100644 --- a/libensemble/resources/node_resources.py +++ b/libensemble/resources/node_resources.py @@ -6,11 +6,9 @@ import collections import logging import os -from typing import Optional, Tuple import psutil -from libensemble.resources.env_resources import EnvResources from libensemble.resources.gpu_detect import get_gpus_from_env, get_num_gpus logger = logging.getLogger(__name__) @@ -30,7 +28,7 @@ def get_cpu_cores(hyperthreads: bool = False) -> int: return psutil.cpu_count(logical=hyperthreads) # This is ranks available per node -def _get_local_resources() -> Tuple[int, int, int]: +def _get_local_resources() -> tuple[int, int, int]: """Returns logical and physical cores and GPUs on the local node""" physical_cores = get_cpu_cores(hyperthreads=False) logical_cores = get_cpu_cores(hyperthreads=True) @@ -52,7 +50,7 @@ def _get_remote_resources(launcher): return output.decode() -def 
_get_cpu_resources_from_env(env_resources: Optional[EnvResources] = None) -> Optional[Tuple[int, int]]: +def _get_cpu_resources_from_env(env_resources=None) -> tuple[int, int] | None: """Returns logical and physical cores per node by querying environment or None""" if not env_resources: return None @@ -146,8 +144,8 @@ def _update_from_str(cores_info, cores_info_str): def get_sub_node_resources( - launcher: Optional[str] = None, remote_mode: bool = False, env_resources: Optional[EnvResources] = None -) -> Tuple[int, int, int]: + launcher: str | None = None, remote_mode: bool = False, env_resources=None +) -> tuple[int, int, int]: """Returns logical and physical cores and GPUs per node as a tuple First checks for environment values, and and then for detected values. diff --git a/libensemble/resources/platforms.py b/libensemble/resources/platforms.py index 46b357540e..6bddbe6d61 100644 --- a/libensemble/resources/platforms.py +++ b/libensemble/resources/platforms.py @@ -11,7 +11,6 @@ import logging import os import subprocess -from typing import Optional from pydantic import BaseModel @@ -33,28 +32,28 @@ class Platform(BaseModel): All are optional, and any not defined will be determined by libEnsemble's auto-detection. """ - mpi_runner: Optional[str] = None + mpi_runner: str | None = None """MPI runner: One of ``"mpich"``, ``"openmpi"``, ``"aprun"``, ``"srun"``, ``"jsrun"``, ``"msmpi"``, ``"custom"`` """ - runner_name: Optional[str] = None + runner_name: str | None = None """Literal string of MPI runner command. 
Only needed if different to the default Note that ``"mpich"`` and ``"openmpi"`` runners have the default command ``"mpirun"`` """ - cores_per_node: Optional[int] = None + cores_per_node: int | None = None """Number of physical CPU cores on a compute node of the platform""" - logical_cores_per_node: Optional[int] = None + logical_cores_per_node: int | None = None """Number of logical CPU cores on a compute node of the platform""" - gpus_per_node: Optional[int] = None + gpus_per_node: int | None = None """Number of GPU devices on a compute node of the platform""" - tiles_per_gpu: Optional[int] = None + tiles_per_gpu: int | None = None """Number of tiles on a GPU""" - gpu_setting_type: Optional[str] = None + gpu_setting_type: str | None = None """ How GPUs will be assigned. @@ -91,14 +90,14 @@ class Platform(BaseModel): """ - gpu_setting_name: Optional[str] = None + gpu_setting_name: str | None = None """Name of GPU setting See :attr:`gpu_setting_type` for more details. """ - gpu_env_fallback: Optional[str] = None + gpu_env_fallback: str | None = None """GPU fallback environment setting if not using an MPI runner. For example: @@ -115,7 +114,7 @@ class Platform(BaseModel): """ - scheduler_match_slots: Optional[bool] = True + scheduler_match_slots: bool | None = True """ Whether the libEnsemble resource scheduler should only assign matching slots when there are multiple (partial) nodes assigned to a sim function. 
@@ -162,6 +161,19 @@ class GenericROCm(Platform): scheduler_match_slots: bool = True +class Lumi(Platform): + mpi_runner: str = "srun" + cores_per_node: int = 64 + logical_cores_per_node: int = 128 + + +class LumiGPU(Lumi): + gpus_per_node: int = 8 + gpu_setting_type: str = "env" + gpu_setting_name: str = "ROCR_VISIBLE_DEVICES" + scheduler_match_slots: bool = True + + class Perlmutter(Platform): mpi_runner: str = "srun" @@ -243,6 +255,8 @@ class Known_platforms(BaseModel): aurora: Aurora = Aurora() generic_rocm: GenericROCm = GenericROCm() frontier: Frontier = Frontier() + lumi: Lumi = Lumi() + lumi_g: LumiGPU = LumiGPU() perlmutter: Perlmutter = Perlmutter() perlmutter_c: PerlmutterCPU = PerlmutterCPU() perlmutter_g: PerlmutterGPU = PerlmutterGPU() @@ -272,6 +286,14 @@ def known_envs(): else: name = "perlmutter" logger.manager_warning("Perlmutter detected, but no compute partition detected. Are you on login nodes?") + if os.environ.get("SLURM_CLUSTER_NAME") == "lumi": + partition = os.environ.get("SLURM_JOB_PARTITION") + if not partition: + logger.manager_warning("LUMI detected, but no compute partition detected. Are you on login nodes?") + if partition and partition.endswith("-g"): + name = "lumi_g" + else: + name = "lumi" return name diff --git a/libensemble/resources/rset_resources.py b/libensemble/resources/rset_resources.py index e629547859..d35cdaee8b 100644 --- a/libensemble/resources/rset_resources.py +++ b/libensemble/resources/rset_resources.py @@ -1,7 +1,13 @@ +from __future__ import annotations + import logging +from typing import TYPE_CHECKING import numpy as np +if TYPE_CHECKING: + from libensemble.resources.resources import Resources + logger = logging.getLogger(__name__) # To change logging level for just this module # logger.setLevel(logging.DEBUG) @@ -30,7 +36,7 @@ class RSetResources: # ('pool', int), # Pool ID (eg. separate gen/sim resources) - not yet used. 
] - def __init__(self, num_workers, resources): + def __init__(self, num_workers: int, resources: Resources): """Initializes a new RSetResources instance Determines the compute resources available for each resource set. diff --git a/libensemble/resources/worker_resources.py b/libensemble/resources/worker_resources.py index 8ec3d8f4bb..5033b2aeee 100644 --- a/libensemble/resources/worker_resources.py +++ b/libensemble/resources/worker_resources.py @@ -1,12 +1,17 @@ +from __future__ import annotations + import logging import os from collections import Counter, OrderedDict -from typing import Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any import numpy as np from libensemble.resources.rset_resources import RSetResources +if TYPE_CHECKING: + from libensemble.resources.resources import GlobalResources + logger = logging.getLogger(__name__) # To change logging level for just this module # logger.setLevel(logging.DEBUG) @@ -26,7 +31,7 @@ class ResourceManager(RSetResources): # Holds the ID of the worker this rset is assigned to or zero man_rset_dtype = np.dtype(RSetResources.rset_dtype + [("assigned", int)]) - def __init__(self, num_workers: int, resources: "GlobalResources") -> None: # noqa: F821 + def __init__(self, num_workers: int, resources: GlobalResources) -> None: """Initializes a new ResourceManager instance Instantiates the numpy structured array that holds information for each @@ -97,9 +102,7 @@ def free_rsets(self, worker=None): self.nongpu_rsets_free += np.count_nonzero(~self.rsets["gpus"][rsets_to_free]) @staticmethod - def get_index_list( - num_workers: int, num_rsets: int, zero_resource_list: List[Union[int, Any]] - ) -> List[Optional[int]]: + def get_index_list(num_workers: int, num_rsets: int, zero_resource_list: list[int | Any]) -> list[int | None]: """Map WorkerID to index into a nodelist""" index = 0 index_list = [] @@ -127,7 +130,7 @@ class WorkerResources(RSetResources): :ivar int workerID: workerID for this worker. 
:ivar list local_nodelist: A list of all nodes assigned to this worker. - :ivar list rset_team: List of rset IDs currently assigned to this worker. + :ivar list rset_team: list of rset IDs currently assigned to this worker. :ivar int num_rsets: The number of resource sets assigned to this worker. :ivar dict slots: A dictionary with a list of slot IDs for each node. :ivar bool even_slots: True if each node has the same number of slots. @@ -294,7 +297,7 @@ def doihave_gpus(self): return self.all_rsets["gpus"][self.rset_team[0]] return False - def set_rset_team(self, rset_team: List[int]) -> None: + def set_rset_team(self, rset_team: list[int]) -> None: """Update worker team and local attributes Updates: rset_team @@ -348,8 +351,8 @@ def set_slot_count(self) -> None: @staticmethod def get_local_nodelist( - workerID: int, rset_team: List[int], split_list: List[List[str]], rsets_per_node: int - ) -> Tuple[List[str], Dict[str, List[int]]]: + workerID: int, rset_team: list[int], split_list: list[list[str]], rsets_per_node: int + ) -> tuple[list[str], dict[str, list[int]]]: """Returns the list of nodes available to the given worker and the slot dictionary""" if workerID is None: raise WorkerResourcesException("Worker has no workerID - aborting") diff --git a/libensemble/specs.py b/libensemble/specs.py index aa70018362..308491303d 100644 --- a/libensemble/specs.py +++ b/libensemble/specs.py @@ -1,13 +1,11 @@ import random import warnings from pathlib import Path -from typing import Any, Callable, List, Optional, Tuple, Union import pydantic from pydantic import BaseModel, Field from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first -from libensemble.resources.platforms import Platform __all__ = ["SimSpecs", "GenSpecs", "AllocSpecs", "ExitCriteria", "LibeSpecs", "_EnsembleSpecs"] @@ -26,47 +24,47 @@ class SimSpecs(BaseModel): Specifications for configuring a Simulation Function. 
""" - sim_f: Callable = None + sim_f: object = None """ Python function matching the ``sim_f`` interface. Evaluates parameters produced by a generator function. """ - inputs: Optional[List[str]] = Field(default=[], alias="in") + inputs: list[str] | None = Field(default=[], alias="in") """ - List of **field names** out of the complete history to pass + list of **field names** out of the complete history to pass into the simulation function upon calling. """ - persis_in: Optional[List[str]] = [] + persis_in: list[str] | None = [] """ - List of **field names** to send to a persistent simulation function + list of **field names** to send to a persistent simulation function throughout the run, following initialization. """ # list of tuples for dtype construction - outputs: Optional[List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]]] = Field([], alias="out") + outputs: list[tuple] = Field([], alias="out") """ - List of 2- or 3-tuples corresponding to NumPy dtypes. + list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Typically used to initialize an output array within the simulation function: ``out = np.zeros(100, dtype=sim_specs["out"])``. Also necessary to construct libEnsemble's history array. """ - globus_compute_endpoint: Optional[str] = "" + globus_compute_endpoint: str | None = "" """ A Globus Compute (https://www.globus.org/compute) ID corresponding to an active endpoint on a remote system. libEnsemble's workers will submit simulator function instances to this endpoint instead of calling them locally. """ - threaded: Optional[bool] = False + threaded: bool | None = False """ Instruct Worker process to launch user function to a thread. """ - user: Optional[dict] = {} + user: dict | None = {} """ A user-data dictionary to place bounds, constants, settings, or other parameters for customizing the simulator function. 
@@ -78,45 +76,45 @@ class GenSpecs(BaseModel): Specifications for configuring a Generator Function. """ - gen_f: Optional[Callable] = None + gen_f: object | None = None """ Python function matching the ``gen_f`` interface. Produces parameters for evaluation by a simulator function, and makes decisions based on simulator function output. """ - inputs: Optional[List[str]] = Field(default=[], alias="in") + inputs: list[str] | None = Field(default=[], alias="in") """ - List of **field names** out of the complete history to pass + list of **field names** out of the complete history to pass into the generator function upon calling. """ - persis_in: Optional[List[str]] = [] + persis_in: list[str] | None = [] """ - List of **field names** to send to a persistent generator function + list of **field names** to send to a persistent generator function throughout the run, following initialization. """ - outputs: Optional[List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]]] = Field([], alias="out") + outputs: list[tuple] = Field([], alias="out") """ - List of 2- or 3-tuples corresponding to NumPy dtypes. + list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Typically used to initialize an output array within the generator: ``out = np.zeros(100, dtype=gen_specs["out"])``. Also used to construct libEnsemble's history array. """ - globus_compute_endpoint: Optional[str] = "" + globus_compute_endpoint: str | None = "" """ A Globus Compute (https://www.globus.org/compute) ID corresponding to an active endpoint on a remote system. libEnsemble's workers will submit generator function instances to this endpoint instead of calling them locally. """ - threaded: Optional[bool] = False + threaded: bool | None = False """ Instruct Worker process to launch user function to a thread. 
""" - user: Optional[dict] = {} + user: dict | None = {} """ A user-data dictionary to place bounds, constants, settings, or other parameters for customizing the generator function @@ -128,21 +126,21 @@ class AllocSpecs(BaseModel): Specifications for configuring an Allocation Function. """ - alloc_f: Callable = give_sim_work_first + alloc_f: object = give_sim_work_first """ Python function matching the ``alloc_f`` interface. Decides when simulator and generator functions should be called, and with what resources and parameters. """ - user: Optional[dict] = {"num_active_gens": 1} + user: dict | None = {"num_active_gens": 1} """ A user-data dictionary to place bounds, constants, settings, or other parameters for customizing the allocation function. """ - outputs: List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]] = Field([], alias="out") + outputs: list[tuple] = Field([], alias="out") """ - List of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. + list of 2- or 3-tuples corresponding to NumPy dtypes. e.g. ``("dim", int, (3,))``, or ``("path", str)``. Allocation functions that modify libEnsemble's History array with additional fields should list those fields here. Also used to construct libEnsemble's history array. """ @@ -154,16 +152,16 @@ class ExitCriteria(BaseModel): Specifications for configuring when libEnsemble should stop a given run. 
""" - sim_max: Optional[int] = None + sim_max: int | None = None """Stop when this many new points have been evaluated by simulation functions.""" - gen_max: Optional[int] = None + gen_max: int | None = None """Stop when this many new points have been generated by generator functions.""" - wallclock_max: Optional[float] = None + wallclock_max: float | None = None """Stop when this many seconds has elapsed since the manager initialized.""" - stop_val: Optional[Tuple[str, float]] = None + stop_val: tuple[str, float] | None = None """Stop when ``H[str] < float`` for the given ``(str, float)`` pair.""" @@ -172,152 +170,152 @@ class LibeSpecs(BaseModel): Specifications for configuring libEnsemble's runtime behavior. """ - comms: Optional[str] = "mpi" + comms: str | None = "mpi" """ Manager/Worker communications mode. ``'mpi'``, ``'local'``, ``'threads'``, or ``'tcp'`` If ``nworkers`` is specified, then ``local`` comms will be used unless a parallel MPI environment is detected. """ - nworkers: Optional[int] = 0 + nworkers: int | None = 0 """ Number of worker processes in ``"local"``, ``"threads"``, or ``"tcp"``.""" - gen_on_manager: Optional[bool] = False + gen_on_manager: bool | None = False """ Instructs Manager process to run generator functions. This generator function can access/modify user objects by reference. """ - mpi_comm: Optional[Any] = None + mpi_comm: object | None = None """ libEnsemble MPI communicator. Default: ``MPI.COMM_WORLD``""" - dry_run: Optional[bool] = False + dry_run: bool | None = False """ Whether libEnsemble should immediately exit after validating all inputs. """ - abort_on_exception: Optional[bool] = True + abort_on_exception: bool | None = True """ In MPI mode, whether to call ``MPI_ABORT`` on an exception. If ``False``, an exception will be raised by the manager. """ - save_every_k_sims: Optional[int] = 0 + save_every_k_sims: int | None = 0 """ Save history array to file after every k evaluated points. 
""" - save_every_k_gens: Optional[int] = 0 + save_every_k_gens: int | None = 0 """ Save history array to file after every k generated points. """ - save_H_and_persis_on_abort: Optional[bool] = True + save_H_and_persis_on_abort: bool | None = True """ Save states of ``H`` and ``persis_info`` to file on aborting after an exception.""" - save_H_on_completion: Optional[bool] = False + save_H_on_completion: bool | None = False """ Save state of ``H`` to file upon completing a workflow. Also enabled when either ``save_every_k_sims`` or ``save_every_k_gens`` is set. """ - save_H_with_date: Optional[bool] = False + save_H_with_date: bool | None = False """ ``H`` filename contains date and timestamp.""" - H_file_prefix: Optional[str] = "libE_history" + H_file_prefix: str | None = "libE_history" """ Prefix for ``H`` filename.""" - worker_timeout: Optional[int] = 1 + worker_timeout: int | None = 1 """ On libEnsemble shutdown, number of seconds after which workers considered timed out, then terminated. """ - kill_canceled_sims: Optional[bool] = False + kill_canceled_sims: bool | None = False """ Try to kill sims with ``"cancel_requested"`` set ``True``. If ``False``, the manager avoids this moderate overhead. """ - use_workflow_dir: Optional[bool] = False + use_workflow_dir: bool | None = False """ Whether to place *all* log files, dumped arrays, and default output directories in a separate `workflow` directory. Each run will be suffixed with a hash. If copying back an ensemble directory from a scratch space, the copy is placed here. """ - reuse_output_dir: Optional[bool] = False + reuse_output_dir: bool | None = False """ Whether to allow overwrites and access to previous ensemble and workflow directories in subsequent runs. ``False`` by default to protect results. """ - workflow_dir_path: Optional[Union[str, Path]] = "." + workflow_dir_path: str | Path | None = "." """ Optional path to the workflow directory. 
""" - ensemble_dir_path: Optional[Union[str, Path]] = Path("ensemble") + ensemble_dir_path: str | Path | None = Path("ensemble") """ Path to main ensemble directory. Can serve as a single working directory for workers, or contain calculation directories """ - ensemble_copy_back: Optional[bool] = False + ensemble_copy_back: bool | None = False """ Whether to copy back contents of ``ensemble_dir_path`` to launch location. Useful if ``ensemble_dir_path`` is located on node-local storage. """ - use_worker_dirs: Optional[bool] = False + use_worker_dirs: bool | None = False """ Whether to organize calculation directories under worker-specific directories. """ - sim_dirs_make: Optional[bool] = False + sim_dirs_make: bool | None = False """ Whether to make calculation directories for each simulation function call. """ - sim_dir_copy_files: Optional[List[Union[str, Path]]] = [] + sim_dir_copy_files: list[str | Path] | None = [] """ Paths to copy into the working directory upon calling the simulation function. - List of strings or ``pathlib.Path`` objects. + list of strings or ``pathlib.Path`` objects. """ - sim_dir_symlink_files: Optional[List[Union[str, Path]]] = [] + sim_dir_symlink_files: list[str | Path] | None = [] """ Paths to symlink into the working directory upon calling the simulation function. - List of strings or ``pathlib.Path`` objects. + list of strings or ``pathlib.Path`` objects. """ - sim_input_dir: Optional[Union[str, Path]] = None + sim_input_dir: str | Path | None = None """ Copy this directory's contents into the working directory upon calling the simulation function. Forms the base of a simulation directory. """ - gen_dirs_make: Optional[bool] = False + gen_dirs_make: bool | None = False """ Whether to make generator-specific calculation directories for each generator function call. 
""" - gen_dir_copy_files: Optional[List[Union[str, Path]]] = [] + gen_dir_copy_files: list[str | Path] | None = [] """ Paths to copy into the working directory upon calling the generator function. - List of strings or ``pathlib.Path`` objects + list of strings or ``pathlib.Path`` objects """ - gen_dir_symlink_files: Optional[List[Union[str, Path]]] = [] + gen_dir_symlink_files: list[str | Path] | None = [] """ Paths to symlink into the working directory upon calling the generator function. - List of strings or ``pathlib.Path`` objects. + list of strings or ``pathlib.Path`` objects. """ - gen_input_dir: Optional[Union[str, Path]] = None + gen_input_dir: str | Path | None = None """ Copy this directory's contents into the working directory upon calling the generator function. Forms the base of a generator directory. """ - calc_dir_id_width: Optional[int] = 4 + calc_dir_id_width: int | None = 4 """ The width of the numerical ID component of a calculation directory name. Leading zeros are padded to the sim/gen ID. """ - platform: Optional[str] = "" + platform: str | None = "" """Name of a known platform defined in the platforms module. - See :class:`Known Platforms List`. + See :class:`Known Platforms list`. Example: @@ -334,7 +332,7 @@ class LibeSpecs(BaseModel): See also option :attr:`platform_specs`. """ - platform_specs: Optional[Union[Platform, dict]] = {} + platform_specs: object | dict | None = {} """A Platform object or dictionary specifying settings for a platform. To use existing platform: @@ -345,7 +343,7 @@ class LibeSpecs(BaseModel): libE_specs["platform_specs"] = PerlmutterGPU() - See :class:`Known Platforms List`. + See :class:`Known Platforms list`. Or define a platform: @@ -369,82 +367,82 @@ class LibeSpecs(BaseModel): See also option :attr:`platform`. """ - profile: Optional[bool] = False + profile: bool | None = False """ Profile manager and worker logic using ``cProfile``. 
""" - disable_log_files: Optional[bool] = False + disable_log_files: bool | None = False """ Disable ``ensemble.log`` and ``libE_stats.txt`` log files. """ - safe_mode: Optional[bool] = False + safe_mode: bool | None = False """ Prevents user functions from overwriting protected History fields, but requires moderate overhead. """ - stats_fmt: Optional[dict] = {} + stats_fmt: dict | None = {} """ Options for formatting ``'libE_stats.txt'``. See 'Formatting libE_stats.txt'. """ - live_data: Optional[Any] = None + live_data: object | None = None """ Add a live data capture object (e.g., for plotting). """ - workers: Optional[List[str]] = [] + workers: list[str] | None = [] """ TCP Only: A list of worker hostnames. """ - ip: Optional[str] = None + ip: str | None = None """ TCP Only: IP address for Manager's system. """ - port: Optional[int] = 0 + port: int | None = 0 """ TCP Only: Port number for Manager's system. """ - authkey: Optional[str] = f"libE_auth_{random.randrange(99999)}" + authkey: str | None = f"libE_auth_{random.randrange(99999)}" """ TCP Only: Authkey for Manager's system.""" - workerID: Optional[int] = None + workerID: int | None = None """ TCP Only: Worker ID number assigned to the new process. """ - worker_cmd: Optional[List[str]] = [] + worker_cmd: list[str] | None = [] """ TCP Only: Split string corresponding to worker/client Python process invocation. Contains a local Python path, calling script, and manager/server format-fields for ``manager_ip``, ``manager_port``, ``authkey``, and ``workerID``. ``nworkers`` is specified normally. """ - use_persis_return_gen: Optional[bool] = False + use_persis_return_gen: bool | None = False """ Adds persistent generator output fields to the History array on return. """ - use_persis_return_sim: Optional[bool] = False + use_persis_return_sim: bool | None = False """ Adds persistent simulator output fields to the History array on return. 
""" - final_gen_send: Optional[bool] = False + final_gen_send: bool | None = False """ Send final simulation results to persistent generators before shutdown. The results will be sent along with the ``PERSIS_STOP`` tag. """ - disable_resource_manager: Optional[bool] = False + disable_resource_manager: bool | None = False """ Disable the built-in resource manager, including automatic resource detection and/or assignment of resources to workers. ``"resource_info"`` will be ignored. """ - num_resource_sets: Optional[int] = 0 + num_resource_sets: int | None = 0 """ Total number of resource sets. Resources will be divided into this number. If not set, resources will be divided evenly (excluding zero_resource_workers). """ - gen_num_procs: Optional[int] = 0 + gen_num_procs: int | None = 0 """ The default number of processors (MPI ranks) required by generators. Unless overridden by the equivalent `persis_info` settings, generators will be allocated this many processors for applications launched via the MPIExecutor. """ - gen_num_gpus: Optional[int] = 0 + gen_num_gpus: int | None = 0 """ The default number of GPUs required by generators. Unless overridden by the equivalent `persis_info` settings, generators will be allocated this many GPUs. """ - gpus_per_group: Optional[int] = None + gpus_per_group: int | None = None """ Number of GPUs for each group in the scheduler. This can be used to deal with scenarios where nodes have different numbers of GPUs. In effect a @@ -452,13 +450,13 @@ class LibeSpecs(BaseModel): By default the GPUs on a node are treated as a group. """ - use_tiles_as_gpus: Optional[bool] = False + use_tiles_as_gpus: bool | None = False """ If ``True`` then treat a GPU tile as one GPU when GPU tiles is provided in platform specs or detected. 
""" - enforce_worker_core_bounds: Optional[bool] = False + enforce_worker_core_bounds: bool | None = False """ If ``False``, the Executor will permit the submission of tasks with a higher processor count than the CPUs available to the worker as @@ -467,40 +465,40 @@ class LibeSpecs(BaseModel): this argument is ignored """ - dedicated_mode: Optional[bool] = False + dedicated_mode: bool | None = False """ Instructs libEnsemble’s MPI executor not to run applications on nodes where libEnsemble processes (manager and workers) are running. """ - zero_resource_workers: Optional[List[int]] = [] + zero_resource_workers: list[int] | None = [] """ - List of workers that require no resources. For when a fixed mapping of workers + list of workers that require no resources. For when a fixed mapping of workers to resources is required. Otherwise, use ``num_resource_sets``. For use with supported allocation functions. """ - gen_workers: Optional[List[int]] = [] + gen_workers: list[int] | None = [] """ - List of workers that should only run generators. All other workers will only + list of workers that should only run generators. All other workers will only run simulator functions. """ - resource_info: Optional[dict] = {} + resource_info: dict | None = {} """ Resource information to override automatically detected resources. Allowed fields are given below in 'Overriding Resource Auto-detection'. Note that if ``disable_resource_manager`` is set then this option is ignored. """ - scheduler_opts: Optional[dict] = {} + scheduler_opts: dict | None = {} """ Options for the resource scheduler. See 'Scheduler Options' for more info """ class _EnsembleSpecs(BaseModel): """An all-encompassing model for a libEnsemble workflow.""" - H0: Optional[Any] = None # np.ndarray - avoids sphinx issue + H0: object | None = None # np.ndarray - avoids sphinx issue """ A previous or preformatted libEnsemble History array to prepend. 
""" libE_specs: LibeSpecs @@ -509,20 +507,20 @@ class _EnsembleSpecs(BaseModel): sim_specs: SimSpecs """ Specifications for the simulation function. """ - gen_specs: Optional[GenSpecs] + gen_specs: GenSpecs | None """ Specifications for the generator function. """ exit_criteria: ExitCriteria """ Configurations for when to exit a workflow. """ - persis_info: Optional[dict] = None + persis_info: dict | None = None """ Per-worker information and structures to be passed between user function instances. """ - alloc_specs: Optional[AllocSpecs] = AllocSpecs() + alloc_specs: AllocSpecs | None = AllocSpecs() """ Specifications for the allocation function. """ -def input_fields(fields: List[str]): +def input_fields(fields: list[str]): """Decorates a user-function with a list of field names to pass in on initialization. Decorated functions don't need those fields specified in ``SimSpecs.inputs`` or ``GenSpecs.inputs``. @@ -550,7 +548,7 @@ def decorator(func): return decorator -def persistent_input_fields(fields: List[str]): +def persistent_input_fields(fields: list[str]): """Decorates a *persistent* user-function with a list of field names to send in throughout runtime. Decorated functions don't need those fields specified in ``SimSpecs.persis_in`` or ``GenSpecs.persis_in``. @@ -588,7 +586,7 @@ def decorator(func): return decorator -def output_data(fields: List[Union[Tuple[str, Any], Tuple[str, Any, Union[int, Tuple]]]]): +def output_data(fields: list[tuple]): """Decorates a user-function with a list of tuples corresponding to NumPy dtypes for the function's output data. Decorated functions don't need those fields specified in ``SimSpecs.outputs`` or ``GenSpecs.outputs``. diff --git a/libensemble/tests/regression_tests/test_gpCAM.py b/libensemble/tests/regression_tests/test_gpCAM.py index b554752eba..218ecfc918 100644 --- a/libensemble/tests/regression_tests/test_gpCAM.py +++ b/libensemble/tests/regression_tests/test_gpCAM.py @@ -11,7 +11,7 @@ Runs three variants of gpCAM. 
The first two use the posterior covariance
sampling method, whereby the second run uses the grid approach and uses
-the points from the first run as it’s test points.The third run uses the
+the points from the first run as its test points. The third run uses the
gpCAM ask/tell interface.

See libensemble.gen_funcs.persistent_gpCAM for more details about the
diff --git a/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py b/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py
index cf7d611547..7523704a0b 100644
--- a/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py
+++ b/libensemble/tests/regression_tests/test_persistent_aposmm_ibcdfo_pounders.py
@@ -14,9 +14,9 @@ These values are then mapped to the normalized emittance - .
 Execute via one of the following commands:
-   mpiexec -np 3 python test_persistent_aposmm_ibcdfo.py
-   python test_persistent_aposmm_ibcdfo.py --nworkers 2
-Both will run with 1 manager, 1 worker running APOSMM+IBCDFO), and 1 worker
+   mpiexec -np 3 python test_persistent_aposmm_ibcdfo_pounders.py
+   python test_persistent_aposmm_ibcdfo_pounders.py --nworkers 2
+Both will run with 1 manager, 1 worker running APOSMM+IBCDFO, and 1 worker
 doing the simulation evaluations.
 """
diff --git a/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py b/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py
index 8df0ae006d..8c589161ad 100644
--- a/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py
+++ b/libensemble/tests/regression_tests/test_persistent_gp_multitask_ax.py
@@ -2,7 +2,7 @@
 Example of multi-fidelity optimization using a persistent GP gen_func (calling
 Ax).

-Test is set to use the gen_on_manager option (persistent generator runs on
+This test uses the gen_on_manager option (persistent generator runs on
 a thread). Therefore nworkers is the number of simulation workers.
Execute via one of the following commands: diff --git a/libensemble/tests/unit_tests/test_ufunc_runners.py b/libensemble/tests/unit_tests/test_ufunc_runners.py index 1d3cbb4b2c..09f17b07ec 100644 --- a/libensemble/tests/unit_tests/test_ufunc_runners.py +++ b/libensemble/tests/unit_tests/test_ufunc_runners.py @@ -54,6 +54,20 @@ def tupilize(arg1, arg2): simrunner.shutdown() +def test_persis_info_from_none(): + calc_in, sim_specs, gen_specs = get_ufunc_args() + + def tupilize(arg1, arg2): + return (arg1, arg2) + + sim_specs["sim_f"] = tupilize + simrunner = Runner(sim_specs) + libE_info = {"H_rows": np.array([2, 3, 4]), "workerID": 1, "comm": "fakecomm"} + + result = simrunner.run(calc_in, {"libE_info": libE_info, "persis_info": None, "tag": 1}) + assert result == (calc_in, {}) + + @pytest.mark.extra def test_globus_compute_runner_init(): calc_in, sim_specs, gen_specs = get_ufunc_args() @@ -122,6 +136,7 @@ def test_globus_compute_runner_fail(): if __name__ == "__main__": test_normal_runners() test_thread_runners() + test_persis_info_from_none() test_globus_compute_runner_init() test_globus_compute_runner_pass() test_globus_compute_runner_fail() diff --git a/libensemble/tools/persistent_support.py b/libensemble/tools/persistent_support.py index dca7d37ca5..7e9643e022 100644 --- a/libensemble/tools/persistent_support.py +++ b/libensemble/tools/persistent_support.py @@ -1,5 +1,4 @@ import logging -from typing import Any, Dict, List import numpy as np import numpy.typing as npt @@ -12,7 +11,7 @@ class PersistentSupport: """A helper class to assist with writing persistent user functions.""" - def __init__(self, libE_info: Dict[str, Dict[Any, Any]], calc_type: int) -> None: + def __init__(self, libE_info: dict[str, dict], calc_type: int) -> None: """ Instantiate a new PersistentSupport instance @@ -115,7 +114,7 @@ def send_recv(self, output: npt.NDArray, calc_status: int = UNSET_TAG) -> (int, self.send(output, calc_status) return self.recv() - def 
request_cancel_sim_ids(self, sim_ids: List[int]): + def request_cancel_sim_ids(self, sim_ids: list[int]): """Request cancellation of sim_ids. :param sim_ids: A list of sim_ids to cancel. diff --git a/libensemble/tools/tools.py b/libensemble/tools/tools.py index 4038b5fafe..8cb81f3af3 100644 --- a/libensemble/tools/tools.py +++ b/libensemble/tools/tools.py @@ -10,6 +10,7 @@ import time import numpy as np +import numpy.typing as npt # Create logger logger = logging.getLogger(__name__) @@ -83,7 +84,16 @@ def _get_shortname(basename): # =================== save libE output to pickle and np ======================== -def save_libE_output(H, persis_info, basename, nworkers, dest_path=None, mess="Run completed", append_attrs=True): + +def save_libE_output( + H: npt.NDArray, + persis_info: dict, + basename: str, + nworkers: int, + dest_path: str = None, + mess: str = "Run completed", + append_attrs: bool = True, +) -> str: """ Writes out history array and persis_info to files. @@ -117,6 +127,10 @@ def save_libE_output(H, persis_info, basename, nworkers, dest_path=None, mess="R The number of workers in this ensemble. Added to output file names. + dest_path: :obj:`str`, optional + + The path to save the file to. + mess: :obj:`str` A message to print/log when saving the file. @@ -153,7 +167,7 @@ def save_libE_output(H, persis_info, basename, nworkers, dest_path=None, mess="R # ===================== per-process numpy random-streams ======================= -def add_unique_random_streams(persis_info, nstreams, seed=""): +def add_unique_random_streams(persis_info: dict, nstreams: int, seed: str = "") -> dict: """ Creates nstreams random number streams for the libE manager and workers when nstreams is num_workers + 1. Stream i is initialized with seed i by default. 
@@ -206,7 +220,7 @@ def add_unique_random_streams(persis_info, nstreams, seed=""): return persis_info -def check_npy_file_exists(filename: str, basename: bool = False, max_wait: int = 3): +def check_npy_file_exists(filename: str, basename: bool = False, max_wait: int = 3) -> bool: """Checks a file is created in a parallel environment Parameters diff --git a/libensemble/utils/launcher.py b/libensemble/utils/launcher.py index 1633f65bb6..c88361f34a 100644 --- a/libensemble/utils/launcher.py +++ b/libensemble/utils/launcher.py @@ -9,10 +9,9 @@ import subprocess import time from itertools import chain -from typing import List, Optional, Union -def form_command(cmd_template: List[str], specs: dict) -> List[str]: +def form_command(cmd_template: list[str], specs: dict) -> list[str]: "Fill command parts with dict entries from specs; drop any missing." specs = {k: v for k, v in specs.items() if v is not None} @@ -26,7 +25,7 @@ def fill(fmt): return list(chain.from_iterable(filter(None, map(fill, cmd_template)))) -def launch(cmd_template: List[str], specs: dict = None, **kwargs) -> subprocess.Popen: +def launch(cmd_template: list[str], specs: dict = None, **kwargs) -> subprocess.Popen: "Launch a new subprocess (with command templating and Python 3 help)." cmd = form_command(cmd_template, specs) if specs is not None else cmd_template return subprocess.Popen(cmd, **kwargs) @@ -70,7 +69,7 @@ def process_is_stopped(process, timeout): return process.poll() is not None -def wait(process: subprocess.Popen, timeout: Optional[Union[int, float]] = None) -> Optional[int]: +def wait(process: subprocess.Popen, timeout: int | float | None = None) -> int | None: "Wait on a process with timeout (wait forever if None)." 
try: return process.wait(timeout=timeout) @@ -78,7 +77,7 @@ def wait(process: subprocess.Popen, timeout: Optional[Union[int, float]] = None) return None -def wait_and_kill(process: subprocess.Popen, timeout: Optional[Union[int, float]]) -> int: +def wait_and_kill(process: subprocess.Popen, timeout: int | float | None) -> int: "Give a grace period for a process to terminate, then kill it." rc = wait(process, timeout) if rc is not None: @@ -87,7 +86,7 @@ def wait_and_kill(process: subprocess.Popen, timeout: Optional[Union[int, float] return process.wait() -def cancel(process: subprocess.Popen, timeout: Optional[Union[int, float]] = 0) -> int: +def cancel(process: subprocess.Popen, timeout: int | float | None = 0) -> int: "Send a termination signal, give a grace period, then hard kill if needed." if timeout is not None and timeout > 0: terminatepg(process) diff --git a/libensemble/utils/loc_stack.py b/libensemble/utils/loc_stack.py index 67bbcb55bb..8eadf5244a 100644 --- a/libensemble/utils/loc_stack.py +++ b/libensemble/utils/loc_stack.py @@ -5,7 +5,6 @@ import os import shutil from pathlib import Path -from typing import List, Optional, Union class LocationStack: @@ -19,7 +18,7 @@ def __init__(self) -> None: def copy_file( self, destdir: Path, - copy_files: List[Path] = [], + copy_files: list[Path] = [], ignore_FileExists: bool = False, allow_overwrite: bool = False, ) -> None: @@ -48,7 +47,7 @@ def copy_file( def symlink_file( self, destdir: Path, - symlink_files: List[Path] = [], + symlink_files: list[Path] = [], ignore_FileExists: bool = False, allow_overwrite: bool = False, ) -> None: @@ -67,11 +66,11 @@ def symlink_file( def register_loc( self, - key: Union[str, int], + key: str | int, dirname: Path, - prefix: Optional[Path] = None, - copy_files: List[Path] = [], - symlink_files: List[Path] = [], + prefix: Path | None = None, + copy_files: list[Path] = [], + symlink_files: list[Path] = [], ignore_FileExists: bool = False, allow_overwrite: bool = False, ) -> 
str: diff --git a/libensemble/utils/output_directory.py b/libensemble/utils/output_directory.py index 3de0f20ddf..b43ee3491b 100644 --- a/libensemble/utils/output_directory.py +++ b/libensemble/utils/output_directory.py @@ -2,7 +2,6 @@ import re import shutil from pathlib import Path -from typing import Optional from libensemble.message_numbers import EVAL_SIM_TAG, calc_type_strings from libensemble.tools.fields_keys import libE_spec_calc_dir_misc, libE_spec_gen_dir_keys, libE_spec_sim_dir_keys @@ -40,7 +39,7 @@ class EnsembleDirectory: A LocationStack object from libEnsemble's internal libensemble.utils.loc_stack module. """ - def __init__(self, libE_specs: dict, loc_stack: Optional[LocationStack] = None): + def __init__(self, libE_specs: dict, loc_stack: LocationStack | None = None): self.specs = libE_specs self.loc_stack = loc_stack diff --git a/libensemble/utils/runners.py b/libensemble/utils/runners.py index 93a2cd1afe..9554b11769 100644 --- a/libensemble/utils/runners.py +++ b/libensemble/utils/runners.py @@ -1,7 +1,6 @@ import inspect import logging import logging.handlers -from typing import Optional import numpy.typing as npt @@ -28,7 +27,7 @@ def _truncate_args(self, calc_in: npt.NDArray, persis_info, libE_info): args = [calc_in, persis_info, self.specs, libE_info] return args[:nparams] - def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, Optional[int]): + def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, int | None): """User function called in-place""" args = self._truncate_args(calc_in, persis_info, libE_info) return self.f(*args) @@ -36,7 +35,7 @@ def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> ( def shutdown(self) -> None: pass - def run(self, calc_in: npt.NDArray, Work: dict) -> (npt.NDArray, dict, Optional[int]): + def run(self, calc_in: npt.NDArray, Work: dict) -> (npt.NDArray, dict, int | None): if Work["persis_info"] is 
None: Work["persis_info"] = {} return self._result(calc_in, Work["persis_info"], Work["libE_info"]) @@ -58,7 +57,7 @@ def _get_globus_compute_executor(self): else: return Executor - def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, Optional[int]): + def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, int | None): from libensemble.worker import Worker libE_info["comm"] = None # 'comm' object not pickle-able @@ -77,7 +76,7 @@ def __init__(self, specs): super().__init__(specs) self.thread_handle = None - def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, Optional[int]): + def _result(self, calc_in: npt.NDArray, persis_info: dict, libE_info: dict) -> (npt.NDArray, dict, int | None): args = self._truncate_args(calc_in, persis_info, libE_info) self.thread_handle = QCommThread(self.f, None, *args, user_function=True) self.thread_handle.run() diff --git a/libensemble/utils/validators.py b/libensemble/utils/validators.py index e91d06a171..1782e011e8 100644 --- a/libensemble/utils/validators.py +++ b/libensemble/utils/validators.py @@ -1,6 +1,6 @@ import os +from collections.abc import Callable from pathlib import Path -from typing import Callable import numpy as np diff --git a/libensemble/version.py b/libensemble/version.py index a5467c834a..5b60188613 100644 --- a/libensemble/version.py +++ b/libensemble/version.py @@ -1 +1 @@ -__version__ = "1.4.3+dev" +__version__ = "1.5.0" diff --git a/libensemble/worker.py b/libensemble/worker.py index ec1793881f..44d5f0ddeb 100644 --- a/libensemble/worker.py +++ b/libensemble/worker.py @@ -3,6 +3,8 @@ ==================================================== """ +from __future__ import annotations + import logging import logging.handlers import socket @@ -10,6 +12,10 @@ from pathlib import Path from traceback import format_exc from traceback import format_exception_only as format_exc_msg +from 
typing import TYPE_CHECKING + +if TYPE_CHECKING: + from libensemble.comms.comms import Comm import numpy as np import numpy.typing as npt @@ -42,7 +48,7 @@ def worker_main( - comm: "communicator", # noqa: F821 + comm: Comm, sim_specs: dict, gen_specs: dict, libE_specs: dict, @@ -50,7 +56,7 @@ def worker_main( log_comm: bool = True, resources: Resources = None, executor: Executor = None, -) -> None: # noqa: F821 +) -> None: """Evaluates calculations given to it by the manager. Creates a worker object, receives work from manager, runs worker, @@ -153,13 +159,13 @@ class Worker: def __init__( self, - comm: "communicator", # noqa: F821 + comm: Comm, dtypes: npt.DTypeLike, workerID: int, sim_specs: dict, gen_specs: dict, libE_specs: dict, - ) -> None: # noqa: F821 + ) -> None: """Initializes new worker object""" self.comm = comm self.dtypes = dtypes @@ -198,7 +204,7 @@ def _set_rset_team(libE_info: dict) -> bool: return False @staticmethod - def _set_executor(workerID: int, comm: "communicator") -> bool: # noqa: F821 + def _set_executor(workerID: int, comm: Comm) -> bool: """Sets worker ID in the executor, return True if set""" exctr = Executor.executor if isinstance(exctr, Executor): @@ -209,7 +215,7 @@ def _set_executor(workerID: int, comm: "communicator") -> bool: # noqa: F821 return False @staticmethod - def _set_resources(workerID, comm: "communicator") -> bool: # noqa: F821 + def _set_resources(workerID, comm: Comm) -> bool: """Sets worker ID in the resources, return True if set""" resources = Resources.resources if isinstance(resources, Resources): diff --git a/pyproject.toml b/pyproject.toml index 7aca0ef7ed..49a7cc7f48 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,7 +30,7 @@ classifiers = [ "Topic :: Scientific/Engineering", "Topic :: Software Development :: Libraries :: Python Modules", ] -version = "1.4.3+dev" +dynamic = ["version"] [project.urls] Documentation = "https://libensemble.readthedocs.io/en/main/" @@ -45,6 +45,9 @@ requires = 
["setuptools", "wheel", "pip>=24.3.1,<26", "setuptools>=75.1.0,<79", where = ["."] include = ["libensemble*"] +[tool.setuptools.dynamic] +version = {attr = "libensemble.version.__version__"} + [tool.pixi.project] channels = ["conda-forge"] platforms = ["osx-arm64", "linux-64", "osx-64"] @@ -82,6 +85,7 @@ scipy = ">=1.15.2,<2" ax-platform = ">=0.5.0,<0.6" sphinxcontrib-spelling = ">=8.0.1,<9" autodoc-pydantic = ">=2.1.0,<3" +ipdb = ">=0.13.13,<0.14" [tool.pixi.dependencies] python = ">=3.10,<3.14" @@ -132,4 +136,4 @@ noy = "noy" extend-exclude = ["*.bib", "*.xml", "docs/nitpicky"] [dependency-groups] -dev = ["pyenchant", "enchant>=0.0.1,<0.0.2"] +dev = ["pyenchant", "enchant>=0.0.1,<0.0.2", "flake8-modern-annotations>=1.6.0,<2", "flake8-type-checking>=3.0.0,<4"]