diff --git a/README.md b/README.md index 3134e98..ff1be02 100644 --- a/README.md +++ b/README.md @@ -243,6 +243,6 @@ There are unit tests included with the code which also require downloading test - [ ] Interactive regime (`hvplot`, `bokeh`, `panel`) - [x] Ghost cells support - [x] Usage examples -- [ ] Parse the log file with timings +- [x] Parse the log file with timings - [x] Raw reader - [x] 3.14-compatible parallel output diff --git a/dist/nt2py-1.5.0-py3-none-any.whl b/dist/nt2py-1.5.0-py3-none-any.whl new file mode 100644 index 0000000..23f57bb Binary files /dev/null and b/dist/nt2py-1.5.0-py3-none-any.whl differ diff --git a/dist/nt2py-1.5.0.tar.gz b/dist/nt2py-1.5.0.tar.gz new file mode 100644 index 0000000..2b748bc Binary files /dev/null and b/dist/nt2py-1.5.0.tar.gz differ diff --git a/nt2/__init__.py b/nt2/__init__.py index 684950b..c9f6533 100644 --- a/nt2/__init__.py +++ b/nt2/__init__.py @@ -1,4 +1,4 @@ -__version__ = "1.4.0" +__version__ = "1.5.0" import nt2.containers.data as nt2_data diff --git a/nt2/containers/data.py b/nt2/containers/data.py index ad96400..0758956 100644 --- a/nt2/containers/data.py +++ b/nt2/containers/data.py @@ -13,6 +13,7 @@ def override(method): from nt2.utils import ToHumanReadable import xarray as xr +import pandas as pd from nt2.utils import ( DetermineDataFormat, @@ -26,6 +27,7 @@ def override(method): from nt2.containers.fields import Fields from nt2.containers.particles import Particles from nt2.containers.spectra import Spectra +from nt2.containers.diagnostics import Diagnostics import nt2.plotters.polar as acc_polar import nt2.plotters.particles as acc_particles @@ -246,6 +248,7 @@ def __init__( self.__coordinate_system = coord_system super(Data, self).__init__(path=path, reader=self.__reader, remap=remap) + self.__diagnostics = Diagnostics(path) def makeMovie( self, @@ -308,6 +311,11 @@ def attrs(self) -> Dict[str, Any]: """dict[str, Any]: The attributes of the data.""" return self.__attrs + @property + def 
from typing import Dict, List, Union

import pandas as pd


def _to_ns(value: float, unit: str) -> float:
    """Convert a timing *value* expressed in *unit* to nanoseconds.

    Supported units are "s", "ms", "µs" and "ns"; anything else raises
    ValueError.
    """
    factors = {"s": 1e9, "ms": 1e6, "µs": 1e3, "ns": 1.0}
    try:
        return value * factors[unit]
    except KeyError:
        raise ValueError(f"Unknown time unit: {unit}") from None


class Diagnostics:
    """Parse a simulation `.out` log file with per-step timings.

    Scans `path` for the first file ending in ".out" (alphabetically, for
    determinism) and extracts, via regexes:

    * ``Step: N...[``  lines -> integer step numbers,
    * ``Time: T...[``  lines -> float physical times,
    * ``  Name...V unit`` lines -> per-substep timings, normalized to ns,
    * ``  species I (...)...N [p% : q%  MIN : MAX]`` lines -> particle
      counts per species, with optional min/max counts.

    The result is exposed as ``df`` (a pandas DataFrame indexed by step),
    or None when no ".out" file is found in ``path``.
    """

    # Parsed diagnostics table, or None when no .out file exists.
    df: Union[pd.DataFrame, None]

    def __init__(self, path: str):
        import logging
        import os
        import re

        # sorted() makes the pick deterministic when several .out files exist
        outfiles = sorted(o for o in os.listdir(path) if o.endswith(".out"))
        if not outfiles:
            logging.warning(f"No .out files found in {path}")
            self.df = None
            return

        self.outfile = os.path.join(path, outfiles[0])
        with open(self.outfile, "r") as f:
            content = f.read()

        steps = [int(s) for s in re.findall(r"Step:\s+(\d+)\.+\[", content)]
        times = [float(t) for t in re.findall(r"Time:\s+([\d.]+\d)\.+\[", content)]
        assert len(steps) == len(times), "Number of steps and times do not match"

        # Per-substep timings, e.g. "  Fields......1.5 ms", converted to ns.
        substeps: Dict[str, List[float]] = {}
        for name, value, unit in re.findall(
            r"\s+([A-Za-z]+)\.+([\d.]+)\s+([mµn]?s)", content
        ):
            substeps.setdefault(name, []).append(_to_ns(float(value), unit))
        for name, entries in substeps.items():
            assert len(entries) == len(
                steps
            ), f"Number of substep entries for {name} does not match number of steps"

        # Per-species particle counts; the min/max groups are optional and
        # come back from findall as EMPTY STRINGS when absent (findall with
        # N groups always yields N-tuples), so they must be truth-tested --
        # NOT length-tested -- before conversion.
        species: Dict[str, List[int]] = {}
        species_min: Dict[str, List[int]] = {}
        species_max: Dict[str, List[int]] = {}
        for m in re.findall(
            r"\s+species\s+(\d+)\s+\(.+\)\.+([\deE+-.]+)"
            r"(\s+\d+\%\s:\s\d+\%\s+)?([\deE+-.]+)?( : )?([\deE+-.]+)?",
            content,
        ):
            sp = m[0]
            species.setdefault(sp, []).append(int(float(m[1])))
            species_min.setdefault(sp, [])
            species_max.setdefault(sp, [])
            if m[3] and m[5]:
                species_min[sp].append(int(float(m[3])))
                species_max[sp].append(int(float(m[5])))

        for sp, counts in species.items():
            assert len(counts) == len(
                steps
            ), f"Number of species entries for {sp} does not match number of steps"
            # min/max are either reported for every step or not at all
            assert not species_min[sp] or len(species_min[sp]) == len(
                steps
            ), f"Number of species min entries for {sp} does not match number of steps"
            assert not species_max[sp] or len(species_max[sp]) == len(
                steps
            ), f"Number of species max entries for {sp} does not match number of steps"

        self.df = pd.DataFrame(index=steps)
        self.df["Step"] = steps
        self.df["Time"] = times
        for name, entries in substeps.items():
            self.df[name] = entries
        for sp, counts in species.items():
            self.df[f"species_{sp}"] = counts
            # only add min/max columns when the log actually reported them;
            # assigning an empty list to a non-empty frame would raise
            if species_min[sp]:
                self.df[f"species_{sp}_min"] = species_min[sp]
            if species_max[sp]:
                self.df[f"species_{sp}_max"] = species_max[sp]
"tqdm", - "contourpy", - "typer", - "loky", -] +description = "Post-processing & visualization toolkit for the Entity PIC code" +readme = "README.md" requires-python = ">=3.8" +license-files = ["LICENSE"] authors = [{ name = "Hayk", email = "haykh.astro@gmail.com" }] maintainers = [{ name = "Hayk", email = "haykh.astro@gmail.com" }] -description = "Post-processing & visualization toolkit for the Entity PIC code" -readme = "README.md" -license = { file = "LICENSE" } classifiers = [ "Development Status :: 5 - Production/Stable", - "Intended Audience :: Science/Research", "Intended Audience :: Education", - "Topic :: Scientific/Engineering :: Physics", - "Topic :: Scientific/Engineering :: Astronomy", + "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.8", @@ -40,11 +18,27 @@ classifiers = [ "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Topic :: Scientific/Engineering :: Astronomy", + "Topic :: Scientific/Engineering :: Physics", ] - -[project.optional-dependencies] -hdf5 = ["h5py"] -dev = ["black", "pytest"] +dependencies = [ + "adios2", + "bokeh", + "contourpy", + "dask[complete]", + "loky", + "matplotlib", + "numpy", + "pandas", + "scipy", + "tqdm", + "typer", + "types-setuptools", + "typing_extensions", + "xarray", +] +dynamic = ["version"] [project.urls] Repository = "https://github.com/entity-toolkit/nt2py" @@ -52,6 +46,14 @@ Repository = "https://github.com/entity-toolkit/nt2py" [project.scripts] nt2 = "nt2.cli.main:app" +[project.optional-dependencies] +dev = ["black", "pytest"] +hdf5 = ["h5py"] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + [tool.hatch.version] path = "nt2/__init__.py"