Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions examples/sph/run_sph_rendering.py
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,16 @@ def H_profile(r):
model.change_htolerances(coarse=1.3, fine=1.1)
model.timestep()
model.change_htolerances(coarse=1.1, fine=1.1)
model.dump("tmp.sham")


ctx = shamrock.Context()
model = shamrock.get_Model_SPH(context=ctx, vector_type="f64_3", sph_kernel="M4")
model.load_from_dump("tmp.sham")

model.dump("tmp2.sham")

exit()
Comment on lines +216 to +225
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

critical

This block of code appears to be for testing the dump/load functionality and includes an exit() call. This prevents the rest of the example script from running, effectively breaking it. This test code should be removed before merging.

for i in range(5):
model.timestep()

Expand Down
8 changes: 3 additions & 5 deletions src/shammodels/gsph/include/shammodels/gsph/Model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -344,10 +344,9 @@ namespace shammodels::gsph {
logger::info_ln("GSPH", "Loading state from dump", fname);
}

std::string metadata_user{};
shamrock::load_shamrock_dump(fname, metadata_user, ctx);
nlohmann::json j;
shamrock::load_shamrock_dump(fname, j, ctx);

nlohmann::json j = nlohmann::json::parse(metadata_user);
j.at("solver_config").get_to(solver.solver_config);

solver.init_ghost_layout();
Expand All @@ -372,8 +371,7 @@ namespace shammodels::gsph {
nlohmann::json metadata;
metadata["solver_config"] = solver.solver_config;

shamrock::write_shamrock_dump(
fname, metadata.dump(4), shambase::get_check_ref(ctx.sched));
shamrock::write_shamrock_dump(fname, metadata, shambase::get_check_ref(ctx.sched));
}

////////////////////////////////////////////////////////////////////////////////////////////
Expand Down
10 changes: 4 additions & 6 deletions src/shammodels/ramses/include/shammodels/ramses/Model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -131,8 +131,7 @@ namespace shammodels::basegodunov {
nlohmann::json metadata;
metadata["solver_config"] = solver.solver_config;

shamrock::write_shamrock_dump(
fname, metadata.dump(4), shambase::get_check_ref(ctx.sched));
shamrock::write_shamrock_dump(fname, metadata, shambase::get_check_ref(ctx.sched));
}

/**
Expand All @@ -145,11 +144,10 @@ namespace shammodels::basegodunov {
logger::info_ln("Godunov", "Loading state from dump", fname);
}

// Load the context state and recover user metadata
std::string metadata_user{};
shamrock::load_shamrock_dump(fname, metadata_user, ctx);
// load the dump and recover the user metadata as json
nlohmann::json j;
shamrock::load_shamrock_dump(fname, j, ctx);

nlohmann::json j = nlohmann::json::parse(metadata_user);
j.at("solver_config").get_to(solver.solver_config);

// modules::GhostZones gz(ctx, solver.solver_config, storage);
Expand Down
9 changes: 3 additions & 6 deletions src/shammodels/sph/include/shammodels/sph/Model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -836,11 +836,9 @@ namespace shammodels::sph {
}

// Load the context state and recover user metadata
std::string metadata_user{};
shamrock::load_shamrock_dump(fname, metadata_user, ctx);
nlohmann::json j;
shamrock::load_shamrock_dump(fname, j, ctx);

/// TODO: load solver config from metadata
nlohmann::json j = nlohmann::json::parse(metadata_user);
// std::cout << j << std::endl;
j.at("solver_config").get_to(solver.solver_config);

Expand Down Expand Up @@ -887,8 +885,7 @@ namespace shammodels::sph {

// Dump the state of the SPH model to a file
/// TODO: replace supplied metadata by solver config json
shamrock::write_shamrock_dump(
fname, metadata.dump(4), shambase::get_check_ref(ctx.sched));
shamrock::write_shamrock_dump(fname, metadata, shambase::get_check_ref(ctx.sched));
}

////////////////////////////////////////////////////////////////////////////////////////////
Expand Down
6 changes: 4 additions & 2 deletions src/shamrock/include/shamrock/io/ShamrockDump.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
*
*/

#include "nlohmann/json.hpp"
#include "shamalgs/collective/io.hpp"
#include "shamcomm/io.hpp"
#include "shamrock/scheduler/PatchScheduler.hpp"
Expand All @@ -34,7 +35,8 @@ namespace shamrock {
* @param metadata_user The user-provided metadata to add to the dump
* @param sched The patch scheduler to dump
*/
void write_shamrock_dump(std::string fname, std::string metadata_user, PatchScheduler &sched);
void write_shamrock_dump(
std::string fname, const nlohmann::json &metadata_user, PatchScheduler &sched);

/**
 * @brief Load a Shamrock dump file and restore the state of the patches and retrieve user
Expand All @@ -43,6 +45,6 @@ namespace shamrock {
* @param metadata_user The user-provided metadata to store
* @param ctx The Shamrock context to restore
*/
void load_shamrock_dump(std::string fname, std::string &metadata_user, ShamrockCtx &ctx);
void load_shamrock_dump(std::string fname, nlohmann::json &metadata_user, ShamrockCtx &ctx);

} // namespace shamrock
55 changes: 29 additions & 26 deletions src/shamrock/src/io/ShamrockDump.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,20 @@
#include "shamcomm/logs.hpp"
#include "shamrock/io/ShamrockDump.hpp"

/// Currently this is the only valid tag, since version "1.0" of Shamrock dumps did not
/// include any versioning information.
#define WRITER_VERSION_TAG "2.0"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

Using a #define for a string constant is generally discouraged in modern C++. It's better to use a constexpr variable, which is type-safe and respects scope. This improves maintainability and prevents potential issues with macro expansion.

Suggested change
#define WRITER_VERSION_TAG "2.0"
static constexpr auto WRITER_VERSION_TAG = "2.0";


bool fused_metadata = true;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

The global variable fused_metadata is introduced but it's not used anywhere in the codebase. This unused variable should be removed to improve code clarity and avoid confusion.


namespace shamrock {

void write_shamrock_dump(std::string fname, std::string metadata_user, PatchScheduler &sched) {
void write_shamrock_dump(
std::string fname, const nlohmann::json &metadata_user, PatchScheduler &sched) {

StackEntry stack_loc{};

std::string metadata_patch = sched.serialize_patch_metadata().dump(4);
nlohmann::json metadata_patch = sched.serialize_patch_metadata();

using namespace shamrock::patch;

Expand Down Expand Up @@ -63,9 +70,12 @@ namespace shamrock {
using namespace nlohmann;

json j;
j["pids"] = all_pids;
j["bytecounts"] = all_bytecounts;
j["offsets"] = all_offsets;
j["pids"] = all_pids;
j["bytecounts"] = all_bytecounts;
j["offsets"] = all_offsets;
j["metadata_patch"] = metadata_patch;
j["metadata_user"] = metadata_user;
j["versioning"] = WRITER_VERSION_TAG;

std::string sout = j.dump(4);

Expand All @@ -82,19 +92,14 @@ namespace shamrock {
// do some perf investigation before enabling preallocation
bool preallocate = false;
if (preallocate) {
MPI_Offset tot_byte = all_offsets.back() + all_bytecounts.back() + metadata_user.size()
+ metadata_patch.size() + sout.size() + sizeof(std::size_t) * 3;
MPI_Offset tot_byte
= all_offsets.back() + all_bytecounts.back() + sout.size() + sizeof(std::size_t);
MPICHECK(MPI_File_preallocate(mfile, tot_byte));
}

shamalgs::collective::write_header(mfile, metadata_user, head_ptr);
shamalgs::collective::write_header(mfile, metadata_patch, head_ptr);
shamalgs::collective::write_header(mfile, sout, head_ptr);

shamlog_debug_ln(
"ShamrockDump",
shambase::format(
"table sizes {} {} {}", metadata_patch.size(), metadata_user.size(), sout.size()));
shamlog_debug_ln("ShamrockDump", shambase::format("table sizes {}", sout.size()));

if (/*do check*/ true) {
auto check_same_mpi = [](std::string s) {
Expand All @@ -112,8 +117,6 @@ namespace shamrock {
}
};

check_same_mpi(metadata_user);
check_same_mpi(metadata_patch);
check_same_mpi(sout);
}

Expand Down Expand Up @@ -159,7 +162,7 @@ namespace shamrock {
}
}

void load_shamrock_dump(std::string fname, std::string &metadata_user, ShamrockCtx &ctx) {
void load_shamrock_dump(std::string fname, nlohmann::json &metadata_user, ShamrockCtx &ctx) {

StackEntry stack_loc{};

Expand All @@ -171,12 +174,9 @@ namespace shamrock {
shambase::Timer timer;
timer.start();

std::string metadata_patch{};
std::string patchdata_infos{};
std::string metadata{};

metadata_user = shamalgs::collective::read_header(mfile, head_ptr);
metadata_patch = shamalgs::collective::read_header(mfile, head_ptr);
patchdata_infos = shamalgs::collective::read_header(mfile, head_ptr);
metadata = shamalgs::collective::read_header(mfile, head_ptr);

if (!shamcmdopt::getenv_str("SHAMDUMP_OFFSET_MODE_OLD").has_value()) {
// reset MPI view
Expand All @@ -186,8 +186,11 @@ namespace shamrock {

using namespace nlohmann;

json jmeta_patch = json::parse(metadata_patch);
json jpdat_info = json::parse(patchdata_infos);
json jmeta = json::parse(metadata);

metadata_user = jmeta.at("metadata_user");

auto jmeta_patch = jmeta.at("metadata_patch");
Comment on lines +189 to +193
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

high

The new dump format includes a versioning field, but this field is not checked during the loading process in load_shamrock_dump. This breaks backward compatibility with older dump files and misses an opportunity to validate the dump format.

The current implementation will fail when trying to load old dumps because it expects a single unified JSON header.

To make the loading process more robust, you should check for the presence and value of the versioning field. This would allow you to either support older formats or provide a clear error message if an unsupported format is detected.

For example:

json jmeta = json::parse(metadata);

if (jmeta.contains("versioning")) {
    // New format
    auto version = jmeta.at("versioning").get<std::string>();
    if (version != WRITER_VERSION_TAG) {
        logger::warn_ln("ShamrockDump", "Loading dump with version " + version + ", which differs from writer version " + WRITER_VERSION_TAG);
    }
    // ... proceed with loading new format
    metadata_user = jmeta.at("metadata_user");
    auto jmeta_patch = jmeta.at("metadata_patch");
    // ...
} else {
    // Old format or corrupted file
    shambase::throw_with_loc<std::runtime_error>(
        "Unsupported dump format: versioning information is missing.");
}


ctx.pdata_layout_new();
*ctx.pdl = jmeta_patch.at("patchdata_layout").get<patch::PatchDataLayerLayout>();
Expand Down Expand Up @@ -217,9 +220,9 @@ namespace shamrock {
std::vector<u64> all_pids;
std::vector<u64> all_bytecounts;

all_bytecounts = jpdat_info.at("bytecounts").get<std::vector<u64>>();
all_offsets = jpdat_info.at("offsets").get<std::vector<u64>>();
all_pids = jpdat_info.at("pids").get<std::vector<u64>>();
all_bytecounts = jmeta.at("bytecounts").get<std::vector<u64>>();
all_offsets = jmeta.at("offsets").get<std::vector<u64>>();
all_pids = jmeta.at("pids").get<std::vector<u64>>();

struct PatchFileOffset {
u64 offset, bytecount;
Expand Down