diff --git a/.bazelrc b/.bazelrc index 7a20ee7a3..6ca7f1554 100644 --- a/.bazelrc +++ b/.bazelrc @@ -20,7 +20,7 @@ common --http_timeout_scaling=2.0 common --@score_baselibs//score/json:base_library=nlohmann -common --@score_baselibs//score/memory/shared/flags:use_typedshmd=False +common --//score/memory/shared/flags:use_typedshmd=False common --@score_logging//score/mw/log/flags:KRemote_Logging=False common --//score/mw/com/flags:tracing_library=@score_baselibs//score/analysis/tracing/generic_trace_library/stub_implementation common --extra_toolchains=@gcc_toolchain_x86_64//:cc_toolchain diff --git a/MODULE.bazel b/MODULE.bazel index 1c45fc9e1..76eb7499b 100644 --- a/MODULE.bazel +++ b/MODULE.bazel @@ -182,6 +182,7 @@ bazel_dep(name = "buildifier_prebuilt", version = "8.2.1.2", dev_dependency = Tr bazel_dep(name = "score_crates", version = "0.0.7", repo_name = "score_communication_crate_index") bazel_dep(name = "boost.program_options", version = "1.87.0") +bazel_dep(name = "boost.container", version = "1.87.0") bazel_dep(name = "boost.interprocess", version = "1.87.0") bazel_dep(name = "download_utils", version = "1.2.2", dev_dependency = True) @@ -227,12 +228,13 @@ single_version_override( patches = ["//third_party/rule_doxygen:Doxyfile.patch"], ) -# Apply patch to fix visibility issue with interprocess_notification target +# NOTE: DO NOT MERGE — this local_path_override points at a developer-machine-specific +# absolute path and exists only for local development. Once baselibs merges the +# interprocess_mutex visibility fix, replace with a git_override pinned to that commit. 
bazel_dep(name = "score_baselibs", version = "0.2.4") -git_override( +local_path_override( module_name = "score_baselibs", - commit = "052c2f271be4239f97182b164f4903b8c88d6c72", - remote = "https://github.com/eclipse-score/baselibs.git", + path = "/home/q551424/score/baselibs", ) bazel_dep(name = "score_logging", version = "0.1.0") diff --git a/score/memory/.clang-tidy-extra b/score/memory/.clang-tidy-extra new file mode 100644 index 000000000..967b10c77 --- /dev/null +++ b/score/memory/.clang-tidy-extra @@ -0,0 +1,22 @@ +--- +# +# This configuration file contains extra clang-tidy +# checks which shall get performed for all source files +# residing in this directory as well as its subdirectories. +# + +# NOTE: Please *NEVER* specify a wildcard pattern for enabling checks, +# such as `bugprone-*` or `performance-*`! Only disabling checks +# would be acceptable, e.g. `-bugprone-*` or `-performance-*` etc. +# Reason for such guideline is that, in case wildcards get used for +# enabling checks, upgrading the clang-tidy binary to a newer version +# would then implicitly enable the new checks available in the upgraded +# clang-tidy binary. And then our (voting) CI jobs which are performing +# the extra clang-tidy checks are highly subject to fail and code would +# have to be made compliant again first. And that would block and also +# prolong the version upgrade unnecessarily. Furthermore it is easier +# to immediately spot which checks exactly will get performed in +# case each one gets listed explicitly, as done below. 
+Checks: > + -*, + modernize-concat-nested-namespaces, diff --git a/score/memory/BUILD b/score/memory/BUILD new file mode 100644 index 000000000..ebb04a43d --- /dev/null +++ b/score/memory/BUILD @@ -0,0 +1,58 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@score_baselibs//:bazel/unit_tests.bzl", "cc_unit_test_suites_for_host_and_qnx") +load("@score_baselibs//score/quality/clang_tidy:extra_checks.bzl", "clang_tidy_extra_checks") + +package(default_visibility = [ + "//score/memory:__subpackages__", +]) + +alias( + name = "string_literal", + actual = "@score_baselibs//score/string:string_literal", + deprecation = "Use @score_baselibs//score/string:string_literal directly. This alias will be removed.", + visibility = ["//visibility:public"], +) + +alias( + name = "string_comparison_adaptor", + actual = "@score_baselibs//score/string:string_comparison_adaptor", + deprecation = "Use @score_baselibs//score/string:string_comparison_adaptor directly. This alias will be removed.", + visibility = ["//visibility:public"], +) + +alias( + name = "split_string_view", + actual = "@score_baselibs//score/string:split_string_view", + deprecation = "Use @score_baselibs//score/string:split_string_view directly. 
This alias will be removed.", + visibility = ["//visibility:public"], +) + +cc_unit_test_suites_for_host_and_qnx( + name = "unit_test_suite", + test_suites_from_sub_packages = [ + "//score/memory/shared:unit_test_suite", + ], + visibility = [ + "//visibility:public", + ], +) + +clang_tidy_extra_checks( + name = "clang_tidy_extra_checks", + extra_features = [ + "spp_code_style_check_header_guards", + ], + tidy_config_file = ".clang-tidy-extra", +) diff --git a/score/memory/README.md b/score/memory/README.md new file mode 100644 index 000000000..a6f198b8a --- /dev/null +++ b/score/memory/README.md @@ -0,0 +1,6 @@ +# Memory + +We need different utility libraries to handle memory. One of them being custom allocators but also handling shared +memory is a common problem. + +This library shall be a single place to abstract common memory related use-cases. diff --git a/score/memory/design/README.md b/score/memory/design/README.md new file mode 100644 index 000000000..25d03ba1f --- /dev/null +++ b/score/memory/design/README.md @@ -0,0 +1,5 @@ +# Software Design Description + +This library holds an implementation to work with shared memory (see shared_memory folder). + +Besides that it holds multiple utility functions to work with different parts of memory. diff --git a/score/memory/design/shared_memory/OffsetPtrDesign.md b/score/memory/design/shared_memory/OffsetPtrDesign.md new file mode 100644 index 000000000..c05859268 --- /dev/null +++ b/score/memory/design/shared_memory/OffsetPtrDesign.md @@ -0,0 +1,208 @@ +# Offset Pointer + +## Overview + +When mapping shared memory into different processes, the shared memory block will be mapped to a different +virtual address space in each process. +While it is theoretically possible to enforce that the shared memory block is always mapped at the same base address in +each process, this is rather impractical, since it cannot be ensured that any given memory address is still free for use +in each process. 
+ +This means that using pointers or using complex data structures that rely on pointers in shared memory is non-trivial. +A pointer created in shared memory in one process pointing to an address in the same shared memory region will +not be valid in another process. `boost::interprocess` solves this problem with introducing a so-called +[OffsetPtr](https://www.boost.org/doc/libs/1_64_0/doc/html/boost/interprocess/offset_ptr.html). +The C++-Standard names such pointers also `fancy pointer`. + +The idea of an `OffsetPtr` is that instead of storing an address of the pointed-to object (like a normal pointer), it +stores the offset between the address of the pointed-to object and the address of the `OffsetPtr` itself. +This offset is the same in all processes, thus, a valid pointer can be calculated as the sum of the base address of the +`OffsetPtr` and the offset that it stores. + +Validity of the `OffsetPtr` depends on the validity of the pointed to object. +This means, that the absolute pointer handed to the constructor of `OffsetPtr` must point to a valid object of type `T` (or derived). +Users must make sure that the `OffsetPtr` is valid before dereferencing it. +E.g. make sure that the `OffsetPtr` is not dereferenced after the object is destructed or moved-from. + +The available public member methods are taken over from the `boost::interprocess::offset_ptr` implementation. +In order to reuse this pointer also with stl-based containers it shall implement the requirements stated by +[std::pointer_traits](https://en.cppreference.com/w/cpp/memory/pointer_traits). 
+ +### Bounds Checking OffsetPtr + +For safety reasons, it is important that when accessing the memory pointed to by an `OffsetPtr` (either by dereferencing +the `OffsetPtr` or getting a raw pointer from the `OffsetPtr` and dereferencing that), the *entire* pointed-to object +must lie inside the original memory region in which the `OffsetPtr` was created ( +See [this](../../../../docs/features/ipc/lola/ipnext_README.md#shared-memory-handling) for an explanation of why bounds +checking must be done). + +From a safety perspective, the point of bounds checking is to prevent a lower safety rated process from interfering with +the memory of a higher safety rated process. +Currently, this is only an issue when dealing with shared memory. +However, in the future, we may have other memory resources which also require bounds checking. +e.g. memory pools in which we want to make sure that the `OffsetPtr` is not pointing to an address outside that pool. +Therefore, we have a generic interface for bounds checking that doesn’t depend on the type of memory. + +Since we may have multiple memory resources which should be bounds checked, the `MemoryResourceRegistry` provides the +public interface for these checks. +An `OffsetPtr` does not know in which region / type of memory it has been allocated, so it is up to the +`MemoryResourceRegistry` to determine the relevant memory resource and memory bounds, if there are any, associated with +a given `OffsetPtr`. +This also means that bounds checking has to be attempted every time an `OffsetPtr` is dereferenced, even if the +`OffsetPtr` is in a type of memory that doesn’t need to be bounds checked. +Each class deriving from `ManagedMemoryResource` (e.g. `SharedMemoryResource`) can decide whether the memory that it is +managing should be bounds checked by an `OffsetPtr`. +It does this by implementing the function `ManagedMemoryResource::IsOffsetPtrBoundsCheckBypassingEnabled()`. 
+ +[Bounds checking](./generated/svg/bounds_checking.svg) +contains a minimalistic UML diagram of the bounds checking. + +#### Bounds Checking Performance - Memory Bounds Lookup + +Our simple integration tests and feedback from customers revealed that the bounds checking functionality will be hit +very frequently! +In our 1st straight forward implementation approach, the MemoryResourceRegistry::GetBoundsFromAddress() function +acquired a reader-lock as we had to care for consistency between readers asking for bounds and writers, which update the +current bounds by inserting/removing resources. +But this solution based on a reader-writer lock turned out to be a big performance penalty. + +Our current solution to access the bounds within the `MemoryResourceRegistry` concurrently between readers and writers +is the following: + +* there can be only one writer active at a time. So writers get "serialized" by a "normal" mutex. I.e. all writer + activity to the bounds are routed through one of the following APIs, which already care for writer serialization: + `insert_resource()`, `remove_resource()`, `clear()` +* there can be an arbitrary number of readers active, which do a bounds-lookup (this happens during OffsetPtr deref) +* the algo to synchronize the access between the single writer and the multiple readers of the bounds is a lock-free + algo based on versioning and is detailed in the next subchapter. + +So as we have lock-free access to the bounds for our readers, the footprint/runtime during the (high-frequency) +bounds-checking is very low, which some benchmarks also revealed (see [here](../../shared/test/performance)). + +##### Lock-Free bounds-check algorithm + +The known bounds (aka known regions) are stored in a map (`std::map`) containing the start +address of the region as key and its end address as value. 
+For our lock-free algo, we are maintaining N versions of this known regions/map and an indicator, which of the N +versions is the current/most recent one. + +## OffsetPtr Implementation + +Points to be considered in implementation can be seen +in [Problems to solve](./offset_ptr_problems.md#problems-to-solve). + +### Bounds checking - OffsetPtr in shared memory + +When an `OffsetPtr` is in a shared memory region, we can perform bounds checks by getting the memory bounds of that +region from the MemoryResourceRegistry using the address of the OffsetPtr (via +`MemoryResourceRegistry::GetBoundsFromAddress`). +We then check that the start address and end address of the pointed-to object lie within the retrieved memory bounds. +We also check that the entire `OffsetPtr` fits within the shared memory region. + +### Bounds checking - OffsetPtr on stack + +If the `OffsetPtr` is copied out of the memory region in which it was originally created, we still need to perform +bounds checks before dereferencing / getting a raw pointer from the `OffsetPtr`. +Therefore, when copying an `OffsetPtr` from shared memory to the stack, we get the `MemoryResourceIdentifier` of the +memory resource from the `MemoryResourceRegistry` and store it within the `OffsetPtr`. +When dereferencing / getting a raw pointer from an `OffsetPtr` on the stack, we can get the memory bounds of the +`OffsetPtr`'s memory region with `MemoryResourceRegistry::GetBoundsFromIdentifier`. +We can use these bounds to check that the pointed-to object is still within that memory region. + +When the `OffsetPtr` is copied back into shared memory, the `MemoryResourceIdentifier` is no longer used, since it can +be corrupted by another process, so we have to again use `MemoryResourceRegistry::GetBoundsFromAddress` to look up +memory bounds for bounds checking. +If the `OffsetPtr` is copied back to the stack, then the `MemoryResourceIdentifier` will be looked up again. 
+ +### Dereferencing / Getting OffsetPtr\<void\> + +An `OffsetPtr` can be templated with `void`. +This can be useful for applications in which type-erasure of the pointed-to type is required. +However, this means that the `OffsetPtr` does not know the size of the pointed-to object, which is required for checking +that the start **and** end address of the pointed-to object lies within the correct memory region. +Therefore, we provide two additional `get()` overloads when the pointed-to type is void to allow the user to provide the +size information used to check that the end address of the pointed-to object also lies within the correct memory region: + +* `get<PointedType>()`: This allows the caller to provide the actual PointedType as a template argument. +* `get(explicit_pointed_type_size)`: This allows the caller to provide the size of the PointedType as a function + argument. This is useful if the size of the pointed-to object is not known at compile time (and hence cannot be + derived from a type), e.g. if we have an OffsetPtr pointing to a type erased array of dynamic size. + +### Copying OffsetPtr + +As outlined in [One-past-the-end-iterators](./offset_ptr_problems.md#definitions--background), doing a bounds check on a +one-past-the-end iterator may fail if the container lies at the end of the memory region. +However, we want to support the ability to copy a one-past-the-end iterator. +Therefore, we have to make sure that copying an `OffsetPtr` does not perform bounds checking (even when copying out of +shared memory). +Since bounds checking only needs to be done before getting a raw pointer from the `OffsetPtr` or dereferencing it (which +can also be done [if the `OffsetPtr` has been copied to the stack](#dereferencing--getting-offsetptr-on-the-stack)), we +can simply avoid doing any bounds checks when copying without violating any safety goals. 
+ +### Bounds check "race conditions" + +Since an OffsetPtr residing in shared memory could be corrupted *during* bounds checking, we must ensure that the offset +value (or any other value which resides in shared memory such as a `MemoryResourceIdentifier`) is first copied to the +stack where it cannot be corrupted by another process. +This copy should be used for bounds checking and once checked, it should be used for dereferencing, getting a raw +pointer etc. + +### Pointer Arithmetic Considerations + +In the implementation of an `OffsetPtr` as described above, we need to perform pointer arithmetic in two places: + +1. When constructing or copying an `OffsetPtr`, we need to subtract the address of the `OffsetPtr` itself from the + address of the pointed-to object. +2. When dereferencing an `OffsetPtr`, we need to add the calculated offset to the address of the `OffsetPtr`. + +In (1.), subtracting two pointers which do not point to elements of the same array is undefined behaviour according to +the [standard](https://timsong-cpp.github.io/cppwp/n4659/expr.add#5). +In (2.), if adding an integral type to a pointer results in an address which does not point to an element of the same +array, then this is also undefined behaviour according to +the [standard](https://timsong-cpp.github.io/cppwp/n4659/expr.add#4). +To deal with these issues, we first cast the address to an integral type, and then do the addition / subtraction on the +integral types instead of pointers. +We can then cast the integral type back to a pointer, if required. +The conversion of a pointer to an integral type and an integral type to a pointer are implementation +defined: https://timsong-cpp.github.io/cppwp/n4659/expr.reinterpret.cast#4 +and https://timsong-cpp.github.io/cppwp/n4659/expr.reinterpret.cast#5, respectively. +In this way, all "pointer arithmetic" is now actually integer arithmetic which is implementation defined. 
+We rely on having sufficient tests to ensure that the implementation behaves as we expect. + +## DynamicArray Considerations + +### Bounds checking iterators / element access + +LoLa uses [DynamicArrays](../../../containers/dynamic_array.h) for +its [ServiceDataStorage](../../../../mw/com/impl/bindings/lola/service_data_storage.h) +and [ServiceDataControl](../../../../mw/com/impl/bindings/lola/service_data_control.h). +A `DynamicArray` is a fixed-size array data structure whose size can be dynamically set at construction. +Since these both reside in shared memory, the underlying pointer type used by `DynamicArray` must be an `OffsetPtr`. +The `DynamicArray` is therefore susceptible to similar issues of memory corruption as an `OffsetPtr`. + +For example, if the `OffsetPtr` to the underlying array is corrupted, then it may point to an address outside the +correct memory region or to an address that begins within the memory region, but the end address of the array (i.e. the +start address + the array size) would reside outside the memory region. + +When accessing any elements via `at()` or `operator[]`, we must check that the element lies in the correct memory +region. +This is automatically done since we use an `OffsetPtr` to point to the array, so dereferencing an element will already +perform bounds checking. +However, when getting any iterators or pointers from the `DynamicArray`, we must first check that the entire underlying +array lies in the correct memory region. +We can do this by performing an `OffsetPtr` bounds check on the first and last elements of the array. +Since the array is contiguous, if the first and last elements are within the region, then all elements are. +We do the check on the first **and** last elements since the iterators return raw pointers which can be incremented / +decremented to dereference any element of the array. 
+ +### One-past-the-end-iterator + +As outlined in [One-past-the-end-iterators](./offset_ptr_problems.md#definitions--background), doing a bounds check on a +one-past-the-end iterator may fail if the container lies at the end of the memory region. +Since the `DynamicArray` uses raw pointers as iterators, it needs to get a raw pointer from the one-past-the-end +`OffsetPtr` (e.g. in `end()`) which does bounds checking which may fail. +Therefore, we provide an additional `get()` overload called `GetWithoutBoundsCheck()` which the `DynamicArray` can use * +*only** for getting the raw pointer from the one-past-the-end `OffsetPtr`. +To prevent the user from decrementing this iterator and dereferencing it without any bounds checks, the `DynamicArray` +manually does bounds-checking on the start and end elements as +described [above](#bounds-checking-iterators--element-access). diff --git a/score/memory/design/shared_memory/README.md b/score/memory/design/shared_memory/README.md new file mode 100644 index 000000000..52e0d917d --- /dev/null +++ b/score/memory/design/shared_memory/README.md @@ -0,0 +1,367 @@ +# Shared Memory +In order to use shared memory more easily and also in combination with different dynamic containers, +an abstraction layer is introduced. + +## Use Cases / Customer Functions +There are no direct Customer Functions associated with this part of `ara::core`. +This is caused by the fact that the shared memory abstraction represents an implementation detail, +which is necessary to fulfill the [Basic Architectural thoughts](../../../../mw/com/design/README.md) of `ara::com`. + +In fact, the usage of shared memory or its allocators shall be fully transparent for a user of the `ara`-API. + +## Shared Memory based allocation +The following section gives a textual reasoning and explanation of the class diagram that can be seen [here](./generated/svg/memory_allocation.svg). 
+ +![Memory Allocation](./generated/svg/memory_allocation.svg) + +Further also some [guidance](#guidance-for-data-types-in-shared-memory) is given, which data types can be stored +in shared memory. + +### Offset Pointer +See [OffsetPtr Design](./OffsetPtrDesign.md). + +### Polymorphic OffsetPtr Allocator +A user of the `ara::com`-API is able to acquire a so called `AllocateePtr` [SWS_CM_00308]. This way he shifts the +responsibility of allocating memory for a specific data type towards the middleware. Depending on the used network binding, +it can be necessary to allocate the memory either directly in shared memory (to enable truly zero-copy mechanisms) or +on the heap (to serialize the data and send over network sockets). It shall be highlighted that the `AllocateePtr` in +both cases will point to the same data type, since this is a runtime decision (based on the results of the service discovery). + +In order to support this behaviour, polymorphic allocation needs to be introduced. The +`std::pmr::polymorphic_allocator` (or its respective implementations of AMP) cannot be reused, because it will +allocate raw pointers. As explained in [Offset Pointer](#offset-pointer) this is not suitable for the shared memory use case +and thus a respective allocator needs to be introduced. It shall be pointed out that a classical usage of polymorphic +allocation is not working out. The classical way would pass a memory resource into the allocator as a raw pointer. Even if +we would swap the raw pointer with an `OffsetPtr` it would mean that the memory resource would need to be stored inside +the shared memory. This again is not applicable for some reasons: +* some memory resource classes may contain state/data, which only has a meaning in a specific process (e.g. a top-level +shared memory resource might hold a file descriptor (fd)) to control the (POSIX) shared memory object, but this fd is +different per process. 
+* our memory resources are designed in a certain inheritance hierarchy (abstract super class being +[ManagedMemoryResource](#managed-memory-resources)), +because we want to have runtime polymorphism to decide, which kind of memory resource implementation is providing storage. +Storing instances of polymorphic classes into shared memory is tricky, because they contain v-tables! But v-tables contain +raw pointers, which are again process specific! + +These issues can only be solved by introducing a custom implemented indirection in the form of `MemoryResourceProxy`, +which is explained in more detail in the following chapter. Our polymorphic allocator class is `PolymorphicOffsetPtrAllocator` +and it gets such a `MemoryResourceProxy` as its "memory resource" on which it operates. + +### Managed Memory Resources + +The first part is the `ManagedMemoryResource`. Inspired by [`std::memory_resource`](https://en.cppreference.com/w/cpp/memory/memory_resource) +`ManagedMemoryResource` represents the interface which is used by any container to allocate memory. Its respective +implementations then either allocate the memory on shared memory (`SharedMemoryResource`) or heap (`HeapMemoryResource`). +It shall be noted that the important difference between an `std::memory_resource` and the newly mentioned +`ManagedMemoryResource` is, that it offers the possibility to get an `MemoryResourceProxy` for a specific resource +instance. +This `MemoryResourceProxy`, which each `ManagedMemoryResource` subclass provides, is the needed indirection mechanism, +we talked about in the previous chapter! +The idea behind the `MemoryResourceProxy` is, that it builds up a non-virtual class that can be stored +in shared memory and identifies a specific `ManagedMemoryResource` using a process-specific global instance of `MemoryResourceRegistry`. +One can think of it as a custom shared memory safe implementation of a v-table. 
In order for this to work, on construction +of a memory resource, it needs to register itself at the `MemoryResourceRegistry`. Then, when returning the `MemoryResourceProxy` +it needs to be constructed with the same identifier. This workflow is further illustrated in [Memory Allocation Workflow](./generated/svg//memory_allocation_workflow_seq.svg). +On a second process, that did not create the shared memory, the workflow would look the same, with the only difference, +that the `MemoryResourceProxy` is not created, but rather reinterpreted from the shared memory region. + +![Memory Allocation](./generated/svg/memory_allocation_workflow_seq.svg) + +The key idea of this `MemoryResourceRegistry` concept is, that the keys used for registering a `ManagedMemoryResource` into +the registry (and being the essential part of the proxy) is globally (across all processes) unique and deterministic. +But this is _easily_ achievable. E.g.: +* for a `SharedMemoryResource` the key will be generated from its file system path +* for a `HeapMemoryResource` the key will be fixed to 0. +* for a [SubResource](#subresource) the key will be generated from a combination of the key of its parent resource and a +running number + +While the `HeapMemoryResource` can be a trivial implementation using the standard global allocator, `SharedMemoryResource` +has more specifics that need to be explained. The most important one being, that it cannot be publicly constructed nor copied. +This is necessary to avoid that the same shared memory is mapped multiple times in one process. A factory +(`SharedMemoryFactory`) will take care of a safe creation and ownership transfer. Another specific is, that +`SharedMemoryResource` will interact with some operating system abstraction library (`osabstraction`) in order to +manage the shared memory physically (opening, truncating, closing). 
+ +#### SubResource + +Besides the direct subclasses `SharedMemoryResource` and `HeapMemoryResource` of `ManagedMemoryResource` we also do foresee +another subclass `SubResource`, which allows multi-level/hierarchic stacking of memory resources. +Sample use case: We want to subdivide the memory of a shared memory object (represented by a `SharedMemoryResource`). +For instance in case of events of a specific data type DT1, where we know its max size requirements ex ante and eventually +also the max number of clients/subscribers the memory allocation strategy could be massively optimized! In such a scenario +we would create a specific/suitable `SubResource` within a `SharedMemoryResource` and for events of type DT1, we would attach an +`PolymorphicOffsetPtrAllocator` instance, which gets an `MemoryResourceProxy`, which references this `SubResource`. + +**_Note_**: Usage of such `SubResource`s is an optimization feature, which we might not yet use in our first POC. + +**Writer** + +The writer, which wants to update the known regions, 1st needs to find a version among the N versions of known regions, +which is currently **not** used/accessed by any reader. For this each known regions version has an atomic `std::uint32_t` +as ref-counter, which reflects how many readers are currently doing a bounds-check lookup on this known regions version. +Therefore, the writer checks each version, starting with the oldest, and if the ref-count is 0, then tries to atomically +change it to some specific marker value `INVALID_REF_COUNT_VAL_START` via atomic `compare_exchange` operation. +If the writer succeeds with the change, he has now unique ownership of this version, copies the map/known regions from +the current most recent version to this acquired version, does the bounds-update there, atomically sets the ref-count to +0 and finally with an atomic store operation declares this acquired version as the new current most recent version. 
+ +`INVALID_REF_COUNT_VAL_START` is chosen as being `std::numeric_limits<std::uint32_t>::max() / 2U`, i.e. it +is halfway in the range of the ref-counter type. This means that any ref-count value between +0..`INVALID_REF_COUNT_VAL_START` are valid ref-counter values being used by readers (see below) to signal their current +usage. So we allow up to approx. 2*10⁹ threads concurrently accessing a known regions version. + +**Reader** + +A reader, which wants to access for bounds-checking the most recent known regions version does the following steps: +1. atomically load/read the indicator, which version is the most recent. +2. atomically increment the usage/reference counter of the known regions version read in step 1. and check the result + (previous ref counter) of the atomic increment. + +There are three outcomes in 2., i.e. the ref count **before** the atomic increment: +1. "Good" case: *old_ref_count < `INVALID_REF_COUNT_VAL_START` - 1.* + + This is the "good" case where the reader successfully acquired this version of known regions for read. Once a + version has been created by a writer, its ref_count will be 0. The ref_count will then be incremented every time a + reader is currently accessing that version, and then decremented again when it's finished. Therefore, if the old + old_ref_count is less than "`INVALID_REF_COUNT_VAL_START` - 1", it is in this safe-for-reading state. + + *Result*: Reader acquires the version. Other readers can also acquire it but a writer cannot write to it. + +2. "Retry" case: *`INVALID_REF_COUNT_VAL_START` <= old_ref_count < `INVALID_REF_COUNT_VAL_END`* + + A reader gets the latest version index which currently has no other readers. At this point, this thread blocks or + runs slowly. A writer updates another version, changing the latest version index to that newly modified version, so + the reader thread has loaded a version index which no longer corresponds to the true latest version. 
The writer or a + series of writers do this enough times that the next time that a writer acquires a version, it acquires the same + version index that the reader is accessing. But since the reader has not yet incremented the ref count of that + version, the writer acquires it. The writer will update the ref count to `INVALID_REF_COUNT_VAL_START` and begin + modifying the version. The reader thread finally unblocks and increments the ref count, but will see that the + old_ref_count is now `INVALID_REF_COUNT_VAL_START`. Therefore, it knows that a writer has acquired this version and + it should check for the new latest version index and try to acquire that version. + + *Result*: In this case, the reader will retry a specified number of times until it can acquire a version for + reading. If it cannot acquire a version for reading after these retries, it returns an empty value and the caller + can handle this case. + +3. "Failure" cases: + + * (A) *old_ref_count == `INVALID_REF_COUNT_VAL_START` - 1.* + + If the old ref-counter was equal to `INVALID_REF_COUNT_VAL_START` - 1 before incrementing, the new ref_count + after incrementing would now be `INVALID_REF_COUNT_VAL_START`. This is the value used by the writer to indicate + that it is currently writing to this version, which will prevent other readers from accessing this version. It + is also the initial value for an unused version (i.e. one that has no readers), so a writer will assume that it is + free to write to this version which could lead to the version being updated *while* the reader is still reading + it. + + This case will occur if we have almost 2x10⁹ readers concurrently accessing the same version. + Alternatively, if the decrement-logic of the ref_count (when a reader is finished with the version) is broken. 
+
+   * (B) *old_ref_count == `INVALID_REF_COUNT_VAL_END`*
+
+     If the case described in the "Retry" case occurs, then the increment operation by the reader will cause the
+     ref_count to be (`INVALID_REF_COUNT_VAL_START` + 1). If this occurs enough times, then eventually the ref_count
+     will reach `INVALID_REF_COUNT_VAL_END`. If another reader tries to increment the ref_count of this version, then
+     it will overflow to 0, despite the fact that the writer is still updating the region.
+
+     This case will occur if this retry case is encountered almost 2x10⁷ times on the same version.
+
+   *Result*: In both these cases, we terminate.
+
+**_Note_**: The reason that we use a vast range of "invalid refcounts" from `INVALID_REF_COUNT_VAL_START` to
+`INVALID_REF_COUNT_VAL_END` instead of just using one marker/sentinel value `INVALID_REF_COUNT_VAL`, is that we want
+to use solely `atomic_increment` in our algo instead of a pair of
+`atomic_load`/`atomic_compare_exchange(loaded_val, loaded_val + 1)`! With just one marker/sentinel, we would have to
+use semantically/from algo perspective such a pair of operations, which has the following downside/problem we saw in
+load-tests!: If we have a concurrency/contention in reader threads between the `atomic_load` and the upcoming
+`atomic_compare_exchange`, the `atomic_compare_exchange` fails, forcing us/the reader into a retry! Under simulated
+heavy load, the number of needed re-tries for a reader to finally succeed in updating the ref-counter was **huge**!
+
+### Usage
+
+Since `ara::core` needs to implement different container types like `Vector` or `String` it shall be possible to reuse
+standard library containers. This is possible by overloading the standard library container with a custom allocator
+that follows the requirements specified in [`std::allocator_traits`](https://en.cppreference.com/w/cpp/memory/allocator_traits).
+This custom allocator is called `PolymorphicOffsetPtrAllocator` and depends on the previously defined memory resource proxy
+`MemoryResourceProxy`, which will then resolve the correct memory resource to use. In order to support multi-level
+allocations (e.g. vector in vector) the custom allocator needs to be wrapped in `std::scoped_allocator_adaptor`.
+
+All in all an example usage and implementation of `ara::core::Vector` could look like this:
+```c++
+template <typename T>
+using ara::core::Vector = std::vector<T, std::scoped_allocator_adaptor<score::memory::shared::PolymorphicOffsetPtrAllocator<T>>>;
+auto memory_resource = score::memory::shared::SharedMemoryFactory::getInstance().Open("/my_shm_name");
+ara::core::Vector<std::uint8_t> myVector(memory_resource->getProxy());
+
+myVector.push_back(42u); // Will land on shared memory
+
+ara::core::Vector<std::uint8_t> onHeap{};
+onHeap.push_back(42u); // Will land on heap
+```
+
+### Guidance for data types in shared memory
+Due to the fact that shared memory is interpreted by two processes, there are some limitations on data types
+that can be stored meaningfully in the shared memory. One point already mentioned in [Offset Pointer](#offset-pointer) is
+that no raw pointer can be stored in shared memory, since the pointer will be invalid in other processes.
+
+In order to store any data type in shared memory without serialization, it needs to be ensured that alignment and overall
+memory layout will not differ between the processes that access it. Strictly speaking both processes need to be built
+with the same compiler / linker and also need to use the same options for them. This includes that they use the same
+standard library implementation.
+
+Further it is not possible to store objects with virtual functions within the shared memory. This can be explained again
+by the Offset Pointer problem. The pointers within the v-table will just be invalid in the other process.
+
+When storing templated types in shared memory, it needs to be ensured that their respective symbol names are mangled in
+the same manner in both processes and no conflicts arise.
+
+Last but not least, when instantiating data types in shared memory, placement new shall be used. Copy-Construction shall
+be avoided and move construction does not seem to be possible. This leaves us with a problem on the processes that read
+the data but do not create them. They have to use a `reinterpret_cast` to get the respective data types from the raw memory
+they opened. This causes undefined behaviour since the C++-Standard states that such casts are only defined if the
+object started life already in this process. The notion of shared memory is not considered by the C++-Standard. In
+practice the cast will work, but for ASIL-B software this behaviour needs to be assured by the compiler vendor.
+
+At the end the interaction with shared memory can look like listed:
+```c++
+// 1. Process:
+void* ptr = shm_open(...);
+int* value = new(ptr) int; // using placement new to store data type
+*value = 5;
+
+// 2. Process
+void* ptr = shm_open(...);
+int* value = reinterpret_cast<int*>(ptr);
+std::cout << *value << std::endl; // will print 5
+*value = 42; // undefined behaviour, because reinterpret_cast is only valid if the object would live there already
+```
+### SharedMemoryResource allocation
+`SharedMemoryResource` objects can be requested in two different ways:
+1. As named `SharedMemoryResource` objects with an entry in the path namespace (`/dev/shmem`)
+2. As anonymous `SharedMemoryResource` objects that are accessed using a `shm_handle_t`
+
+A `SharedMemoryResource` can be created by calling either of the following `Create APIs`
+- `SharedMemoryFactory::Create(...)`
+- `SharedMemoryFactory::CreateOrOpen(...)`
+- `SharedMemoryFactory::CreateAnonymous(...)`
+
+Shared memory is allocated in either typed memory or OS-system memory based on the `prefer_typed_memory` parameter of the above APIs; when set to true, the shared memory will be allocated in a typed memory region.
The [score::memory::shared::TypedMemory](../../shared/typedshm/typedshm_wrapper/typed_memory.h) class acts as a wrapper that uses the [`typed_memory_daemon`](../../../../intc/typedmemd/README.md) client interface[`score::tmd::TypedSharedMemory`](../../../../intc/typedmemd/code/clientlib/typedsharedmemory.h) APIs to allocate shared memory in typed memory. +If allocation in typed memory fails, the allocation of shared memory will fall back to the OS-system memory ([DMA Accessible Memory Fallback](broken_link_c/issue/31034619)). + +The `TypedMemory` wrapper provides the following APIs: +- `AllocateNamedTypedMemory()`: Allocates a named shared memory object in typed memory region +- `AllocateAndOpenAnonymousTypedMemory()`: Allocates an anonymous shared memory object in typed memory region +- `Unlink()`: Unlinks a named shared memory object allocated in typed memory. Only the original creator of the SHM object can unlink it. The creator is verified by checking the internal ownership map maintained by `typed_memory_daemon` +- `GetCreatorUid()`: Retrieves the creator UID of a named shared memory object allocated in typed memory. The creator UID is retrieved from the internal ownership map maintained by `typed_memory_daemon` + +### User permissions of Shared Memory +The `permissions` parameter in the `Create APIs`, allows the user to specify the access rights for the `SharedMemoryResource`. + +For the Named Shared Memory it can be set to one of the following: +- `WorldReadable`: allows read/write access for the user and read-only access for others. +- `WorldWritable`: allows read/write access for both the user and all other users. +- `UserPermissionsMap`: allows read/write access for the user and for named shared memory, sets specific permissions for additional users listed in the `UserPermissionsMap` using Access Control Lists ([ACLs](https://www.qnx.com/developers/docs/7.1/#com.qnx.doc.security.system/topic/manual/access_control.html)). 
+ +For the Anonymous Shared Memory check the details below [Anonymous Shared Memory](#anonymous-shared-memory) + +### Named Shared Memory +Named shared memory allocated in typed memory will inherit the effective `UID/GID` of the `typed_memory_daemon`. Whereas, named shared memory allocated in OS-system memory will have the effective `UID/GID` of the user. + +For named shared memory allocated in OS-system memory with `world-writable` mode, permissions will be enforced using `fchmod` to ensure `world-writable` access. + +The underlying `shm_open()` call uses the `O_EXCL` and `O_CREAT` flags, ensuring that duplicate names result in an error. The file permissions are determined by the `permissions` parameter[User permissions of Shared Memory](#user-permissions-of-shared-memory) passed to the `Create APIs`, and are applied via the `mode` argument in the `shm_open()` call. + +From a safety standpoint, `ASIL-B` applications should avoid creating shared memory objects with `world-readable` or `world-writable` permissions in order to reduce security risks. For more details check [Access Control concepts](broken_link_a/ui/api/v1/download/contentBrowsing/ipnext-platform-documentation/master/html/features/dac/README.html) + +![Named memory allocation](./generated/svg/named_memory_allocation_seq.svg) + +### Anonymous Shared Memory +Anonymous shared memory created with `SharedMemoryFactory::CreateAnonymous(...)` does not have a representation in the file system. In fact, there is no way for a random process to identify an anonymous shared memory object. Thus, there are no corresponding implementations of `SharedMemoryFactory::Open(...)` and `SharedMemoryFactory::CreateOrOpen(...)`. 
To share an anonymous shared memory object with another process it must be actively shared at runtime by:
+- Creating a handle using `shm_create_handle(...)`
+- Sharing this handle with the other process
+- Opening the handle in the other process using `shm_open_handle(...)`
+
+It is important to note that anonymous shared memory is currently only implemented for the QNX environment. In the current design/implementation, there is no API available to share the `shm_handle_t` with other processes. Instead, the `SharedMemoryResource` holds the file descriptor of the anonymous shared memory object.
+
+Anonymous shared memory allocated in typed memory is created with read/write access for the user. Whereas, for anonymous shared memory allocated in OS-system memory,
+the underlying `shm_open()` call is made with the `mode` argument based on the `permissions` parameter [User permissions of Shared Memory](#user-permissions-of-shared-memory) passed to the `SharedMemoryFactory::CreateAnonymous(...)`.
+
+Anonymous Shared Memory objects are created without `SHM_CREATE_HANDLE_OPT_NOFD`, such that the user is able to create further `shm_handle_t` for other processes.
+These objects are then sealed by calling `shm_ctl()` with the `SHMCTL_SEAL` flag to prevent the object's layout (e.g., its size and backing memory) and attributes from being modified, so that no process (including the object's creator) can modify the layout or change any attributes.
+
+![Anonymous memory allocation](./generated/svg/anonymous_memory_allocation_seq.svg)
+
+## Memory Management Algorithm
+The allocated shared memory needs to be managed in some way. Meaning, freed memory needs to be reused before
+new memory is allocated and the shared memory segment is enlarged.
+
+For the proof of concept we only need a monotonic allocator. Meaning, it will only increase and not free any memory.
+ +## Lifetime of SharedMemoryResource and MemoryResourceRegistry + +### Background - Static construction / destruction sequence +All static objects will be destroyed at program end (after all other non-static objects have been destroyed). The order in which static objects are destroyed will be the inverse of the order in which they're created. E.g. if static object A is created and the static object B is created, then B will be destroyed before A. + +### Lifetime of SharedMemoryResource in application code +The MemoryResourceRegistry is a singleton which is created when MemoryResourceRegistry::getInstance() is called for the first time. This will be called during the construction of a SharedMemoryResource, so it is guaranteed to be created if a SharedMemoryResource is created. The destructor of SharedMemoryResource also calls MemoryResourceRegistry::getInstance(). This means that the MemoryResourceRegistry should be destroyed only after the last SharedMemoryResource has been destroyed. + +In application code, if the lifetime of a SharedMemoryResource is linked to the lifetime of a static object (e.g. it's destroyed in the destructor of the static object), then the SharedMemoryResource will be destroyed at some point during the static destruction sequence at the earliest. In this case, the user must ensure that MemoryResourceRegistry::getInstance() is called before the static object owning the shared_ptr is created. This can be solved via a singleton approach e.g. + +``` +// Assuming that UserSharedMemoryResourceOwner is created as a static variable +class UserSharedMemoryResourceOwner +{ + public: + UserSharedMemoryResourceOwner() + { + // The MemoryResourceRegistry will be created before the UserSharedMemoryResourceOwner. + // Therefore, it will be destroyed after the UserSharedMemoryResourceOwner + MemoryResourceRegistry::getInstance(); + + // Create the SharedMemoryResource and assign to memory_resource_... 
+ } + + ~UserSharedMemoryResourceOwner() { + // If the ref count of the memory_resource_ is greater than 1, the SharedMemoryResource will not + // be destroyed here! + assert(memory_resource_.ref_count() == 1); + + // The MemoryResourceRegistry is still alive here + } + + private: + // Created in the constructor and destroyed in the destructor of UserSharedMemoryResourceOwner. + std::shared_ptr memory_resource_; +} +``` + +Obviously, since the SharedMemoryResource is contained within a shared_ptr, in the example above, the user must ensure that the ref count of the shared_ptr goes to 0 when the static object is destroyed so that the SharedMemoryResource itself is destroyed. + +## Ownership of the SharedMemoryResource + +Getting the `uid` of the creator of the underlying memory managed by a `SharedMemoryResource` is essential to ensure that only memory with the expected allowed providers can be opened by a user of the library as this feature is used by some ASIL B components (for example see this [requirement](broken_link_c/issue/33047276) or this [one](broken_link_c/issue/8742625)). +This library only supports opening a shared memory area based on the path therefore this concept is only valid for the named memory use case, annonymous memory is not considered. + +There are 2 possible use-cases : + +1. Memory is allocated in System RAM: + +The owner `uid` of the allocated memory will be the `euid` (effective user ID) of the allocating process. +Therefore, when another process opens the memory it only needs to perform a simple check by using `fstat` to get the `uid` for the memory and compare that `uid` with the passed expected provider. + +2. 
Memory is allocated in [Typed Memory](../../../../intc/typedmemd/README.md): + +Due to safety considerations, typed memory cannot be directly allocated by a process using the POSIX primitives, so the allocation is delegated to an ASIL B application called [`typed_memory_daemon`](broken_link_g/swh/safe-posix-platform/blob/master/platform/aas/intc/typedmemd/README.md). +The `typed_memory_daemon` will allocate the memory with its `euid` as the owner `uid`. +For safety and security reasons the `typed_memory_daemon` cannot transfer ownership of the memory using `chown` to the application's `euid` from which it got the delegation, so it will use Access Control Lists ([ACLs](https://www.qnx.com/developers/docs/7.1/#com.qnx.doc.security.system/topic/manual/access_control.html)) to give read/write permissions to the requestor's `euid` as well as any other needed `uid`. \ +In this case, to be able to identify the requestor process, the `typed_memory_daemon` maintains an internal ownership map that tracks the creator UID for each shared memory object. +When another process opens the `SharedMemoryResource` it will then check if the `uid` of the memory is identical to the `typed_memory_daemon`'s `euid`. +If the check is successful it means that the memory is in typed memory and the corresponding internal flag (`is_shm_in_typed_memory_`) will also be updated. +Then it will query the `typed_memory_daemon` via `TypedMemory::GetCreatorUid()` to retrieve the creator UID from the internal ownership map and compare it with the passed expected provider. +If the creator UID cannot be retrieved (e.g., the shared memory object was not allocated by `typed_memory_daemon`), the application will be terminated. + +This solution of querying the `typed_memory_daemon` for the creator UID was chosen as it provides a reliable way to identify the owner/creator of the memory without relying on ACL inspection for ownership determination. 
+ +The complete sequence to find the owner is: + +![GetOwnerUid sequence](./generated/svg/get_owner_uid_seq.svg) diff --git a/score/memory/design/shared_memory/anonymous_memory_allocation_seq.puml b/score/memory/design/shared_memory/anonymous_memory_allocation_seq.puml new file mode 100644 index 000000000..613e7d567 --- /dev/null +++ b/score/memory/design/shared_memory/anonymous_memory_allocation_seq.puml @@ -0,0 +1,145 @@ +@startuml anonymous_memory_allocation_seq +' Sequence diagram for anonymous memory allocation + +participant InstanceP1 as Instance +participant SharedMemoryFactoryP1 as Factory +participant SharedMemoryResourceP1 as Resource +participant TypedMemoryP1 as TypedMemory +participant tmd_ITypedSharedMemory as ITypedSharedMemory + +participant os_Stat as Stat + + +participant os_utils_AccessControlList as ACL +participant os_Unistd as Unistd + +participant os_Mman as Mman +participant MemoryResourceProxyP1 as Proxy +participant AnonymousSharedMemoryObject as AnonymousObject +participant MemoryResourceRegistryP1 as Registry + + + + + +activate Instance +activate Factory + +Instance -> Factory: CreateAnonymous(prefer_typed_memory, permissions, size) + +activate TypedMemory +Factory -> TypedMemory: TypedMemory::Default() +TypedMemory --> Factory: return TypedMemoryP1 + + +activate Resource + +Factory -> Resource: CreateAnonymous(TypedMemoryP1, permissions, size) + + +deactivate Resource + + +alt typed_memory_ptr_ != nullptr (AllocateInTypedmemory) + + + + Resource -> TypedMemory: AllocateAndOpenAnonymousTypedMemory(size) + activate Resource + + TypedMemory --> Resource: file_descriptor + TypedMemory -> ITypedSharedMemory: AllocateAndOpenAnonymousTypedMemory(size) + activate ITypedSharedMemory + ITypedSharedMemory --> TypedMemory: return shm_handle + deactivate ITypedSharedMemory + activate Mman + TypedMemory -> Mman: shm_open_handle(shm_handle, O_RDWR) + + Mman --> TypedMemory: return file descriptor + deactivate Mman + deactivate TypedMemory + 
deactivate Resource + +end + + + + +alt AllocateInTypedmemory failed or AllocateInSysram (typed_memory_ptr_ == nullptr) + Resource -> Resource: mode = calcStatModeForPermissions(permissions) + activate Resource + + deactivate Resource + + + + Resource -> Mman: shm_open(SHM_ANON, O_RDWR | O_CREAT | O_ANON, mode) + activate Resource + activate Mman + Mman --> Resource: file_descriptor + deactivate Mman + deactivate Resource + + alt mode == world-readable + Resource -> Stat: fchmod(file_descriptor, mode) + activate Resource + activate Stat + Stat --> Resource + deactivate Stat + else mode == UserPermissions + Resource -> ACL: AllowUser(file_descriptor, os::Acl::Permission) + activate ACL + ACL --> Resource + deactivate ACL + deactivate Resource + end + + + Resource -> Mman: shm_ctl(fd, SHMCTL_ANON | SHMCTL_SEAL, 0UL, size) + activate Resource + Mman --> Resource + deactivate Resource + + Resource -> Unistd: ftruncate(file_descriptor, size) + activate Resource + activate Unistd + Unistd --> Resource + deactivate Unistd + deactivate Resource + +end + +Resource -> Resource: GetOwnerUidAndSizeOf(file_descriptor) +activate Resource + + + + +Resource -> Mman: mmap(map memory into process) +activate Mman +Mman --> Resource +deactivate Mman + +Resource -> Proxy: Instantiate(Unique ID) +activate Proxy +Proxy --> Resource +deactivate Proxy + +Resource -> Registry: insert_resource(uniqueID, this) +activate Registry +Registry --> Resource +deactivate Registry + +Resource -> AnonymousObject: initializeInternalsInSharedMemory() - MemoryResourceProxyP1, alreadyAllocatedBytes, mutex +activate AnonymousObject +AnonymousObject --> Resource +deactivate AnonymousObject + + + +Resource --> Factory: instance +Factory --> Instance: instance +deactivate Resource +deactivate Factory +deactivate Instance +@enduml diff --git a/score/memory/design/shared_memory/bounds_checking.puml b/score/memory/design/shared_memory/bounds_checking.puml new file mode 100644 index 000000000..bc4987840 --- 
/dev/null +++ b/score/memory/design/shared_memory/bounds_checking.puml @@ -0,0 +1,92 @@ +@startuml bounds_checking +skinparam linetype ortho + + +class "score::memory::shared::MemoryResourceProxy" as Proxy { + Notes: + Unchanged +} + + + +class "score::memory::shared::MemoryResourceRegistry" as Registry { + known_regions: MemoryRegionMap + -- + + insert_resource(std::pair): void + + remove_resource(std::uint64_t identifier): void + + GetBoundsFromAddress(uintptr_t): optional> + + GetBoundsFromIdentifier(uint64 identifier): Result + -- + Notes: + * known_regions is a map for memory regions (i.e. storing the start and end + addresses for each memory region) which provides lock-free access to one + writer and multiple concurrent readers. + * GetBoundsFromAddress() checks if the passed pointer is within a known + memory region. If so, returns the bounds of that memory region. Else, + returns pair of null ptrs. We return memory bounds rather than a bool + value indicating whether the OffsetPtr is within the memory bounds to + avoid explicitly coupling it with an OffsetPtr. +} + +class "score::memory::shared::SharedMemoryResource" as SharedResource { + - Create(StringLiteral, InitializeCallback, std::size_t, UserPermissions permissions = {}) noexcept: std::shared_ptr + -- + Notes: + Calls MemoryResourceRegistry::insert_resource() in Create() after mapping the shared memory into the process. +} + +class "score::memory::shared::HeapMemoryResource" as HeapResource { + -- + Notes: + Since we don't want to bounds check, don't + call MemoryResourceRegistry::insert_memory_range(). 
+} + +class "score::memory::shared::OffsetPtr" as OffsetPtr { + + get() const: PointedType* + + get(size_t) const: PointedType* + + get(): ExplicitPointedType* + + GetWithoutBoundsCheck(): PointedType* + -- + Notes: + If EnableOffsetPtrBoundsChecking() has not been called + with enable=false, when get() is called, gets the memory + bounds of the region in which the offset pointer is allocated + with MemoryResourceRegistry::get_bounds_for(this). If non-null + bounds are returned, terminates if pointed-to-object is not + inside memory bounds. Otherwise, returns as normal. +} + +class "score::memory::shared::PolymorphicOffsetPtrAllocator" as PolymorphicOffsetPtrAllocator { + Notes: + Unchanged + -- +} + +class "score::memory::shared::ManagedMemoryResource" as ManagedMemoryResource { + -- + Notes: + Unchanged +} + + +Proxy -l-> Registry : lookup dispatch \ntarget +ManagedMemoryResource -u-o Registry: holds +HeapResource -l.|> ManagedMemoryResource +SharedResource -u.|> ManagedMemoryResource +Proxy -d-> ManagedMemoryResource: dispatch allocate deallocate +Proxy -d-o PolymorphicOffsetPtrAllocator: allocates memory \nusing +PolymorphicOffsetPtrAllocator -d-> OffsetPtr: constructs and \nreturns +OffsetPtr -l-> Registry: get memory bounds + + +note as N1 + +-- +Global Functions +-- +EnableOffsetPtrBoundsChecking(): +- Sets or clears flag which to allow bounds checking. +end note + +@enduml diff --git a/score/memory/design/shared_memory/generate_diagrams.sh b/score/memory/design/shared_memory/generate_diagrams.sh new file mode 100755 index 000000000..ef49ac759 --- /dev/null +++ b/score/memory/design/shared_memory/generate_diagrams.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* +set -e + +# Configuration +PLANTUML_VERSION="1.2025.10" +PLANTUML_JAR="plantuml-${PLANTUML_VERSION}.jar" +PLANTUML_PATH="${HOME}/.cache/plantuml/${PLANTUML_JAR}" +SVG_OUTPUT_DIR="./generated/svg" + +# Download PlantUML if needed +if [ ! -f "$PLANTUML_PATH" ]; then + mkdir -p "$(dirname "$PLANTUML_PATH")" + echo "Downloading PlantUML ${PLANTUML_VERSION}..." + URL="https://github.com/plantuml/plantuml/releases/download/v${PLANTUML_VERSION}/${PLANTUML_JAR}" + wget -q --show-progress -O "$PLANTUML_PATH" "$URL" || curl -L -o "$PLANTUML_PATH" "$URL" +fi + +# Generate diagrams +rm -rf "$SVG_OUTPUT_DIR" +mkdir -p "$SVG_OUTPUT_DIR" +for file in ./*.puml; do + [ -f "$file" ] && java -jar "$PLANTUML_PATH" -svg -charset UTF-8 -o "$SVG_OUTPUT_DIR" "$file" +done + +# Fix line endings +find "$SVG_OUTPUT_DIR" -name "*.svg" -exec sed -i 's/\r$//' {} \; 2>/dev/null + +echo "Diagrams generated with PlantUML ${PLANTUML_VERSION}." 
diff --git a/score/memory/design/shared_memory/generated/svg/anonymous_memory_allocation_seq.svg b/score/memory/design/shared_memory/generated/svg/anonymous_memory_allocation_seq.svg new file mode 100644 index 000000000..d30798a85 --- /dev/null +++ b/score/memory/design/shared_memory/generated/svg/anonymous_memory_allocation_seq.svg @@ -0,0 +1 @@ +InstanceP1SharedMemoryFactoryP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1TypedMemoryP1tmd_ITypedSharedMemoryos_Statos_utils_AccessControlListos_Unistdos_Mmanos_Mmanos_MmanMemoryResourceProxyP1AnonymousSharedMemoryObjectMemoryResourceRegistryP1InstanceP1SharedMemoryFactoryP1SharedMemoryResourceP1TypedMemoryP1tmd_ITypedSharedMemoryos_Statos_utils_AccessControlListos_Unistdos_MmanMemoryResourceProxyP1AnonymousSharedMemoryObjectMemoryResourceRegistryP1InstanceP1InstanceP1SharedMemoryFactoryP1SharedMemoryFactoryP1SharedMemoryResourceP1SharedMemoryResourceP1TypedMemoryP1TypedMemoryP1tmd_ITypedSharedMemorytmd_ITypedSharedMemoryos_Statos_Statos_utils_AccessControlListos_utils_AccessControlListos_Unistdos_Unistdos_Mmanos_MmanMemoryResourceProxyP1MemoryResourceProxyP1AnonymousSharedMemoryObjectAnonymousSharedMemoryObjectMemoryResourceRegistryP1MemoryResourceRegistryP1InstanceP1SharedMemoryFactoryP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1TypedMemoryP1tmd_ITypedSharedMemoryos_Statos_utils_AccessControlListos_Unistdos_Mmanos_Mmanos_MmanMemoryResourceProxyP1AnonymousSharedMemoryObjectMemoryResourceRegistryP1CreateAnonymous(prefer_typed_memory, permissions, size)TypedMemory::Default()return TypedMemoryP1CreateAnonymous(TypedMemoryP1, permissions, size)alt[typed_memory_ptr_ != nullptr 
(AllocateInTypedmemory)]AllocateAndOpenAnonymousTypedMemory(size)file_descriptorAllocateAndOpenAnonymousTypedMemory(size)return shm_handleshm_open_handle(shm_handle, O_RDWR)return file descriptoralt[AllocateInTypedmemory failed or AllocateInSysram (typed_memory_ptr_ == nullptr)]mode = calcStatModeForPermissions(permissions)shm_open(SHM_ANON, O_RDWR | O_CREAT | O_ANON, mode)file_descriptoralt[mode == world-readable]fchmod(file_descriptor, mode)[mode == UserPermissions]AllowUser(file_descriptor, os::Acl::Permission)shm_ctl(fd, SHMCTL_ANON | SHMCTL_SEAL, 0UL, size)ftruncate(file_descriptor, size)GetOwnerUidAndSizeOf(file_descriptor)mmap(map memory into process)Instantiate(Unique ID)insert_resource(uniqueID, this)initializeInternalsInSharedMemory() - MemoryResourceProxyP1, alreadyAllocatedBytes, mutexinstanceinstance \ No newline at end of file diff --git a/score/memory/design/shared_memory/generated/svg/bounds_checking.svg b/score/memory/design/shared_memory/generated/svg/bounds_checking.svg new file mode 100644 index 000000000..959fd86b1 --- /dev/null +++ b/score/memory/design/shared_memory/generated/svg/bounds_checking.svg @@ -0,0 +1 @@ +bmw::memory::shared::MemoryResourceProxyNotes:Unchangedbmw::memory::shared::MemoryResourceRegistryknown_regions: MemoryRegionMapinsert_resource(std::pair<std::unit64_t, ManagedMemoryResource*>): voidremove_resource(std::uint64_t identifier): voidGetBoundsFromAddress(uintptr_t): optional<pair<MemoryRegionBounds, MemoryResourceIdentifier>>GetBoundsFromIdentifier(uint64 identifier): Result<MemoryRegionBounds>Notes:known_regions is a map for memory regions (i.e. storing the start and endaddresses for each memory region) which provides lock-free access to onewriter and multiple concurrent readers.GetBoundsFromAddress() checks if the passed pointer is within a knownmemory region. If so, returns the bounds of that memory region. Else,returns pair of null ptrs. 
We return memory bounds rather than a boolvalue indicating whether the OffsetPtr is within the memory bounds toavoid explicitly coupling it with an OffsetPtr.bmw::memory::shared::SharedMemoryResourceCreate(StringLiteral, InitializeCallback, std::size_t, UserPermissions permissions = {}) noexcept: std::shared_ptr<ManagedMemoryResource>Notes:Calls MemoryResourceRegistry::insert_resource() in Create() after mapping the shared memory into the process.bmw::memory::shared::HeapMemoryResourceNotes:Since we don't want to bounds check, don'tcall MemoryResourceRegistry::insert_memory_range().bmw::memory::shared::OffsetPtrPointedTypeget() const: PointedType*get(size_t) const: PointedType*get<ExplicitPointedType>(): ExplicitPointedType*GetWithoutBoundsCheck(): PointedType*Notes:If EnableOffsetPtrBoundsChecking() has not been calledwith enable=false, when get() is called, gets the memorybounds of the region in which the offset pointer is allocatedwith MemoryResourceRegistry::get_bounds_for(this). If non-nullbounds are returned, terminates if pointed-to-object is notinside memory bounds. 
Otherwise, returns as normal.bmw::memory::shared::PolymorphicOffsetPtrAllocatorT:classNotes:Unchangedbmw::memory::shared::ManagedMemoryResourceNotes:Unchanged Global FunctionsEnableOffsetPtrBoundsChecking():- Sets or clears flag which to allow bounds checking.lookup dispatchtargetholdsdispatch allocate deallocateallocates memoryusingconstructs andreturnsget memory bounds \ No newline at end of file diff --git a/score/memory/design/shared_memory/generated/svg/get_owner_uid_seq.svg b/score/memory/design/shared_memory/generated/svg/get_owner_uid_seq.svg new file mode 100644 index 000000000..859047d1c --- /dev/null +++ b/score/memory/design/shared_memory/generated/svg/get_owner_uid_seq.svg @@ -0,0 +1 @@ +SharedMemoryResourceos..StatTypedMemorytyped_memory_daemonSharedMemoryResourceSharedMemoryResourceos::Statos::StatTypedMemoryTypedMemorytyped_memory_daemontyped_memory_daemonGetOwnerUid(fd)fstat(fd)return StatBufferset owner_uid = StatBuffer.uidalt[StatBuffer.uid == typed memory manager uid && memory is named]GetCreatorUid(shm_name)query ownership mapreturn creator_uidalt[creator_uid retrieval failed]Unable to retrieve creator UID fromtyped_memory_daemon ownership mapProcess is terminatedstd::terminate()return creator_uidowner_uid = creator_uidreturn owner_uid \ No newline at end of file diff --git a/score/memory/design/shared_memory/generated/svg/memory_allocation.svg b/score/memory/design/shared_memory/generated/svg/memory_allocation.svg new file mode 100644 index 000000000..bc530bf4a --- /dev/null +++ b/score/memory/design/shared_memory/generated/svg/memory_allocation.svg @@ -0,0 +1 @@ +Usage:using ara::core::Vector<T> = std::vector<T, std::scoped_allocator_adaptor<bmw::memory::shared::PolymorphicOffsetAllocator<T>>>\nauto top_level_memory_ressource = bmw::memory::shared::SharedMemoryFactory("/my_shm_name");\nauto sub_memory_resource_event1 = top_level_memory_ressource.createSubResource(10000, 20000);\nara::core::Vector<std::uint8_t> 
myVector(sub_memory_resource_event1.getProxy()());\nmyVector.push_back(42u); // Will land on shared memory\nara::core::Vector<std::uint8_t> onHeap();\n+onHeap.push_back(42u); // Will land on heapamp::pmr::memory_resourcememory_resource()memory_resource(const MemoryResource&)memory_resource() allocate(std::size_t bytes, std::size_t alignment = alignof(max_align_t)): void*deallocate(void* p, std::size_t bytes, std::size_t alignment = alignof(max_align_t)): voidis_equal(const memory_resource& other) const noexcept: bool do_allocate(std::size_t bytes, std::size_t alignment): void*do_allocate(void* p, std::size_t bytes, std::size_t alignment): voiddo_is_equal(const offsetmemory_resource& other) const noexcept: boolNotes:This data type shall apply requirements stated byhttps://en.cppreference.com/w/cpp/memory/memory_resourcebmw::memory::shared::MemoryResourceRegistryMemoryResourceRegistry()getInstance(): MemoryResourceRegistry&at(std::unit64_t): ManagedMemoryResource*insert_resource(std::pair<std::uint64_t, ManagedMemoryResource*>): voidremove_resource(std::uint64_t identifier): voidget_bounds_for(void*): std::pair<void*, void*>get_bounds_for(uint64 identifier): std::pair<void*, void*>Notes:Not copyable, not movable  bmw::memory::shared::ManagedMemoryResourcesubresources: std::map<std::uint64_t, SubResource>getMemoryResourceProxy(): MemoryResourceProxy*construct<T*>(args...):T*destruct<T*>(T*): voidgetBaseAddress() = 0: void*getUsableBaseAddress() = 0: void*getEndAddress() = 0: void*  bmw::memory::shared::SharedMemoryResourcefd: intbase: void*SharedMemoryResource(StringLiteral, ...)getMemoryResourceProxy(): MemoryResourceProxy*getBaseAddress(): void*getUsableBaseAddress(): void*getEndAddress(): void*getPath(): std::string* do_allocate(std::size_t bytes, std::size_t alignment): void*do_deallocate(void* p, std::size_t bytes, std::size_t alignment): voiddo_is_equal(const offset_memory_resource& other) const noexcept: boolNotes:Not copyable or movable and not public 
constructableonly Factory can createbmw::memory::shared::SharedMemoryFactoryalreadyOpenedFilesstatic getInstance() static Open(...) noexcept: std::shared_ptr<ManagedMemoryResource>static Create(...) noexcept: std::shared_ptr<ManagedMemoryResource>static CreateAnonymous(...) noexcept: std::shared_ptr<ManagedMemoryResource>static CreateOrOpen(...) noexcept: std::shared_ptr<ManagedMemoryResource>static Remove(std:string path): voidNotes:SharedMemoryFactory is not movable or copyable.Factory Singleton, ensures that SharedMemoryResource is created only once.bmw::memory::shared::MemoryResourceProxymemory_identifier_: std::unit64_tMemoryResourceProxy(const std::uint64_t& memoryId)allocate(std::size_t n ): void*deallocate(void* p, std::size_t n ): voidNotes:MemoryResourceProxy is not movable or copyable.Only does the dispatching to right MemoryResource via MemoryResourceRegistry.Dispatching is needed as we can't put instances of real memory resources into shared memory asthey have vtables.  bmw::memory::shared::PolymorphicOffsetPtrAllocatorT:classproxy: offset_ptr<MemoryResourceProxy>PolymorphicOffsetPtrAllocator(ManagedMemoryResource&)PolymorphicOffsetPtrAllocator()allocate( std::size_t n ): offset_ptr<T>deallocate(offset_ptr<T> p, std::size_t n ): voidgetMemoryResourceProxy(): offset_ptr<MemoryResourceProxy>Notes:This data type shall apply requirementsstated byhttps://en.cppreference.com/w/cpp/memory/allocator_traitsIf default constructed, it will not use any indirection via aMemoryResourceProxy to the MemoryResourceRegistry. Instead, it willsimply allocate memory on the heap.  
bmw::memory::shared::OffsetPtrPointedTypeoffset: std::ptrdiff_toffset_ptr() noexcept;offset_ptr(pointer) noexcept;Notes:In addition to the specified members,this data type shall apply requirements stated byhttps://en.cppreference.com/w/cpp/memory/pointer_traitsbmw::memory::shared::SubResourceparent: ManagedMemoryResource&SubResource(ManagedMemoryResource& parent, std::size_t initialSize, void* memory_start, std::size_t maxSize)Notes:Not copyable and not public constructableonly parent resource can create.This entity/class is used to create multi levels of memory resources.bmw::memory::shared::HeapMemoryResourceosabstractionholdslookup dispatchtargetdispatchallocatedeallocateallocates memoryusingconstructs andreturnsmanageshared memoryCreates \ No newline at end of file diff --git a/score/memory/design/shared_memory/generated/svg/memory_allocation_workflow_seq.svg b/score/memory/design/shared_memory/generated/svg/memory_allocation_workflow_seq.svg new file mode 100644 index 000000000..d0d3983ab --- /dev/null +++ b/score/memory/design/shared_memory/generated/svg/memory_allocation_workflow_seq.svg @@ -0,0 +1 @@ 
+InstanceP1SharedMemoryFactoryP1PolymorphicOffsetAllocatorPolymorphicOffsetAllocatorMemoryResourceProxySharedMemoryResourceP1SharedMemoryResourceP1FooSharedMemoryObjectFooSharedMemoryObjectMemoryResourceRegistryP1MemoryResourceRegistryP1InstanceP1SharedMemoryFactoryP1PolymorphicOffsetAllocatorMemoryResourceProxySharedMemoryResourceP1FooSharedMemoryObjectMemoryResourceRegistryP1InstanceP1InstanceP1SharedMemoryFactoryP1SharedMemoryFactoryP1PolymorphicOffsetAllocatorPolymorphicOffsetAllocatorMemoryResourceProxyMemoryResourceProxySharedMemoryResourceP1SharedMemoryResourceP1FooSharedMemoryObjectFooSharedMemoryObjectMemoryResourceRegistryP1MemoryResourceRegistryP1InstanceP1SharedMemoryFactoryP1PolymorphicOffsetAllocatorPolymorphicOffsetAllocatorMemoryResourceProxySharedMemoryResourceP1SharedMemoryResourceP1FooSharedMemoryObjectFooSharedMemoryObjectMemoryResourceRegistryP1MemoryResourceRegistryP1open("/foo")Check if instances was already openedalt[Open new instance]instantiateshm_open(open)GetOwnerUid()insert_resource(uniqueID, this)read(shared state)reinterprete(shared state)InsertResourceIntoMapalt[AllowedProviderCheck]getowneruid()instanceinstantiate(Resource)allocate(Bytes)allocate(Bytes)at(unique)ManagedMemoryResource*allocate(Bytes)ftruncate(Bytes)void*void*offsetPtr<void> \ No newline at end of file diff --git a/score/memory/design/shared_memory/generated/svg/named_memory_allocation_seq.svg b/score/memory/design/shared_memory/generated/svg/named_memory_allocation_seq.svg new file mode 100644 index 000000000..8f2fb65aa --- /dev/null +++ b/score/memory/design/shared_memory/generated/svg/named_memory_allocation_seq.svg @@ -0,0 +1 @@ 
+InstanceP1SharedMemoryFactoryP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1TypedMemoryP1os_Statos_utils_AccessControlListos_Unistdos_Mmanos_Mmanos_MmanMemoryResourceRegistryP1FooSharedMemoryObjectMemoryResourceProxyInstanceP1SharedMemoryFactoryP1SharedMemoryResourceP1TypedMemoryP1tmd_ITypedSharedMemoryos_Statos_utils_AccessControlListos_Unistdos_MmanMemoryResourceRegistryP1FooSharedMemoryObjectMemoryResourceProxyInstanceP1InstanceP1SharedMemoryFactoryP1SharedMemoryFactoryP1SharedMemoryResourceP1SharedMemoryResourceP1TypedMemoryP1TypedMemoryP1tmd_ITypedSharedMemorytmd_ITypedSharedMemoryos_Statos_Statos_utils_AccessControlListos_utils_AccessControlListos_Unistdos_Unistdos_Mmanos_MmanMemoryResourceRegistryP1MemoryResourceRegistryP1FooSharedMemoryObjectFooSharedMemoryObjectMemoryResourceProxyMemoryResourceProxyInstanceP1SharedMemoryFactoryP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1SharedMemoryResourceP1TypedMemoryP1os_Statos_utils_AccessControlListos_Unistdos_Mmanos_Mmanos_MmanMemoryResourceRegistryP1FooSharedMemoryObjectMemoryResourceProxyCreate("/foo", permissions, size, prefer_typed_memory)Check if instances was already createdTypedMemory::Default()return TypedMemoryP1Create("/foo", permissions, size, TypedMemoryP1)mode = calcStatModeForPermissions(permissions)AllocateNamedTypedMemory("/foo", permissions, size)altreturn existing instancealt[AllocateInTypedmemory(typed_memory_ptr_ != nullptr)]AllocateNamedTypedMemory("/foo", permissions, size)successAllocateNamedTypedMemory("/foo", permissions, size)shm_open(O_RDWR|O_CREAT|O_EXCL, mode)file_descriptoralt[AllocateInTypedmemory failed or AllocateInSysram(typed_memory_ptr_ == nullptr)]shm_open(O_RDWR|O_EXCL, mode)file_descriptoralt[mode == world-readable]fchmod(file_descriptor, 
mode)[mode==UserPermissions]AllowUser(file_descriptor, os::Acl::Permission)ftruncate(file_descriptor, size)GetOwnerUidAndSizeOf(file_descriptor)mmap(map memory into process)Instantiate(Unique ID)insert_resource(uniqueID, this)return resultinitializeInternalsInSharedMemory() - Proxy, alreadyAllocatedBytes, mutexinstance \ No newline at end of file diff --git a/score/memory/design/shared_memory/get_owner_uid_seq.puml b/score/memory/design/shared_memory/get_owner_uid_seq.puml new file mode 100644 index 000000000..d025cafdc --- /dev/null +++ b/score/memory/design/shared_memory/get_owner_uid_seq.puml @@ -0,0 +1,35 @@ +@startuml get_owner_uid_seq + +participant SharedMemoryResource +participant "os::Stat" as os +participant TypedMemory +participant "typed_memory_daemon" as tmd + +[-> SharedMemoryResource: GetOwnerUid(fd) +SharedMemoryResource -> os : fstat(fd) +os -> SharedMemoryResource: return StatBuffer + +SharedMemoryResource -> SharedMemoryResource: set owner_uid = StatBuffer.uid + +alt StatBuffer.uid == typed memory manager uid && memory is named + SharedMemoryResource -> TypedMemory: GetCreatorUid(shm_name) + TypedMemory -> tmd: query ownership map + tmd -> TypedMemory: return creator_uid + + alt creator_uid retrieval failed + note over SharedMemoryResource + Unable to retrieve creator UID from + typed_memory_daemon ownership map + Process is terminated + endnote + SharedMemoryResource -> SharedMemoryResource: std::terminate() + destroy SharedMemoryResource + end + + TypedMemory -> SharedMemoryResource: return creator_uid + SharedMemoryResource -> SharedMemoryResource: owner_uid = creator_uid +end + +SharedMemoryResource ->[: return owner_uid + +@enduml diff --git a/score/memory/design/shared_memory/memory_allocation.puml b/score/memory/design/shared_memory/memory_allocation.puml new file mode 100644 index 000000000..87687475e --- /dev/null +++ b/score/memory/design/shared_memory/memory_allocation.puml @@ -0,0 +1,186 @@ +@startuml memory_allocation +allowmixing 
+skinparam usecaseBorderType dashed +skinparam linetype ortho + + +note as N1 #yellow +Usage: +using ara::core::Vector = std::vector>>\n +auto top_level_memory_ressource = score::memory::shared::SharedMemoryFactory("/my_shm_name");\n +auto sub_memory_resource_event1 = top_level_memory_ressource.createSubResource(10000, 20000);\n +ara::core::Vector myVector(sub_memory_resource_event1.getProxy()());\n +myVector.push_back(42u); // Will land on shared memory\n +ara::core::Vector onHeap();\n+onHeap.push_back(42u); // Will land on heap +end note + + +class "score::cpp::pmr::memory_resource" as MemoryResource +{ + -- + memory_resource() + memory_resource(const MemoryResource&) + ~memory_resource() + + + allocate(std::size_t bytes, std::size_t alignment = alignof(max_align_t)): void* + + deallocate(void* p, std::size_t bytes, std::size_t alignment = alignof(max_align_t)): void + + is_equal(const memory_resource& other) const noexcept: bool + + -do_allocate(std::size_t bytes, std::size_t alignment): void* + -do_allocate(void* p, std::size_t bytes, std::size_t alignment): void + -do_is_equal(const offsetmemory_resource& other) const noexcept: bool + -- + Notes: + This data type shall apply requirements stated by \nhttps://en.cppreference.com/w/cpp/memory/memory_resource +} + +class "score::memory::shared::MemoryResourceRegistry" as MemoryResourceRegistry +{ + - MemoryResourceRegistry() + + getInstance(): MemoryResourceRegistry& + + at(std::unit64_t): ManagedMemoryResource* + + insert_resource(std::pair): void + + remove_resource(std::uint64_t identifier): void + + get_bounds_for(void*): std::pair + + get_bounds_for(uint64 identifier): std::pair + -- + Notes: + Not copyable, not movable + + +} + +class "score::memory::shared::ManagedMemoryResource" as ManagedMemoryResource +{ + -subresources: std::map + -- + -getMemoryResourceProxy(): MemoryResourceProxy* + +construct(args...):T* + +destruct(T*): void + +getBaseAddress() = 0: void* + +getUsableBaseAddress() = 0: void* + 
-getEndAddress() = 0: void* + + +} + +class "score::memory::shared::SharedMemoryResource" as SharedMemoryResource +{ + fd: int + base: void* + -- + # SharedMemoryResource(StringLiteral, ...) + - getMemoryResourceProxy(): MemoryResourceProxy* + + getBaseAddress(): void* + + getUsableBaseAddress(): void* + - getEndAddress(): void* + + getPath(): std::string* + + - do_allocate(std::size_t bytes, std::size_t alignment): void* + - do_deallocate(void* p, std::size_t bytes, std::size_t alignment): void + - do_is_equal(const offset_memory_resource& other) const noexcept: bool + -- + Notes: + Not copyable or movable and not public constructable\nonly Factory can create +} + +class "score::memory::shared::SharedMemoryFactory" as SharedMemoryFactory +{ + # alreadyOpenedFiles + -- + +static getInstance() + + +static Open(...) noexcept: std::shared_ptr + +static Create(...) noexcept: std::shared_ptr + +static CreateAnonymous(...) noexcept: std::shared_ptr + +static CreateOrOpen(...) noexcept: std::shared_ptr + +static Remove(std:string path): void + -- + Notes: + SharedMemoryFactory is not movable or copyable.\nFactory Singleton, ensures that SharedMemoryResource is created only once. +} + +class "score::memory::shared::MemoryResourceProxy" as MemoryResourceProxy +{ + - memory_identifier_: std::unit64_t + -- + + MemoryResourceProxy(const std::uint64_t& memoryId) + + allocate(std::size_t n ): void* + + deallocate(void* p, std::size_t n ): void + -- + Notes: + MemoryResourceProxy is not movable or copyable. + Only does the dispatching to right MemoryResource via MemoryResourceRegistry. + Dispatching is needed as we can't put instances of real memory resources into shared memory as + they have vtables. 
+ + +} + +class "score::memory::shared::PolymorphicOffsetPtrAllocator" as PolymorphicOffsetPtrAllocator +{ + - proxy: offset_ptr + -- + + PolymorphicOffsetPtrAllocator(ManagedMemoryResource&) + + PolymorphicOffsetPtrAllocator() + + allocate( std::size_t n ): offset_ptr + + deallocate(offset_ptr p, std::size_t n ): void + - getMemoryResourceProxy(): offset_ptr + -- + Notes: + This data type shall apply requirements + stated by + https://en.cppreference.com/w/cpp/memory/allocator_traits + If default constructed, it will not use any indirection via a + MemoryResourceProxy to the MemoryResourceRegistry. Instead, it will + simply allocate memory on the heap. + + +} + +class "score::memory::shared::OffsetPtr" as OffsetPtr +{ + - offset: std::ptrdiff_t + -- + + offset_ptr() noexcept; + + offset_ptr(pointer) noexcept; + -- + Notes: + In addition to the specified members, + this data type shall apply requirements stated by + https://en.cppreference.com/w/cpp/memory/pointer_traits +} + +class "score::memory::shared::SubResource" as SubResource +{ + - parent: ManagedMemoryResource& + -- + + SubResource(ManagedMemoryResource& parent, std::size_t initialSize, void* memory_start, std::size_t maxSize) + -- + Notes: + Not copyable and not public constructable + only parent resource can create. + This entity/class is used to create multi levels of memory resources. 
+} + +class "score::memory::shared::HeapMemoryResource" as HeapMemoryResource + + +usecase osabstraction + + +ManagedMemoryResource -u-o MemoryResourceRegistry : holds +ManagedMemoryResource .u.|> MemoryResource +MemoryResourceProxy -d-o MemoryResourceRegistry : lookup dispatch \ntarget +MemoryResourceProxy -d-> ManagedMemoryResource : dispatch\nallocate\ndeallocate +MemoryResourceProxy -d-o PolymorphicOffsetPtrAllocator : allocates memory \nusing +PolymorphicOffsetPtrAllocator -d-> OffsetPtr : constructs and \nreturns +SharedMemoryResource -u.|> ManagedMemoryResource +HeapMemoryResource -l.|> ManagedMemoryResource +SubResource -l.|> ManagedMemoryResource +SharedMemoryResource -r-> osabstraction : manage\nshared memory + +SharedMemoryFactory -u-> SharedMemoryResource : Creates + + +@enduml diff --git a/score/memory/design/shared_memory/memory_allocation_workflow_seq.puml b/score/memory/design/shared_memory/memory_allocation_workflow_seq.puml new file mode 100644 index 000000000..ed0fb3681 --- /dev/null +++ b/score/memory/design/shared_memory/memory_allocation_workflow_seq.puml @@ -0,0 +1,89 @@ +@startuml memory_allocation_workflow_seq +' Sequence diagram for memory allocation workflow + +participant InstanceP1 as Instance +participant SharedMemoryFactoryP1 as Factory +participant PolymorphicOffsetAllocator as Allocator +participant MemoryResourceProxy as Proxy +participant SharedMemoryResourceP1 as Resource +participant FooSharedMemoryObject as FooObject +participant MemoryResourceRegistryP1 as Registry + + +activate Instance +activate Factory +Instance -> Factory: open("/foo") +Factory -> Factory: Check if instances was already opened + + +alt Open new instance + Factory -> Resource: instantiate + activate Resource + Resource -> FooObject: shm_open(open) + + Resource -> Resource: GetOwnerUid() + + Resource -> Registry: insert_resource(uniqueID, this) + activate Registry + Registry --> Resource + + + Resource -> FooObject: read(shared state) + activate FooObject + 
FooObject --> Resource + deactivate FooObject + + Resource -> Proxy: reinterprete(shared state) + + Resource --> Factory + + Factory -> Factory: InsertResourceIntoMap + +end + + +deactivate Registry + + +alt AllowedProviderCheck + Factory-> Resource: getowneruid() + Resource --> Factory + +end +deactivate Resource + +Factory --> Instance: instance + +Instance -> Allocator: instantiate(Resource) +activate Allocator +activate Allocator +Instance -> Allocator: allocate(Bytes) +Allocator -> Proxy: allocate(Bytes) +activate Proxy +Proxy -> Registry: at(unique) +activate Registry +Registry --> Proxy: ManagedMemoryResource* +deactivate Registry + +Proxy -> Resource: allocate(Bytes) +activate Resource +Resource -> FooObject: ftruncate(Bytes) +activate FooObject +deactivate FooObject + +Resource --> Proxy: void* +deactivate Resource + +Proxy --> Allocator: void* +deactivate Proxy + +Allocator --> Instance: offsetPtr +deactivate Allocator + + + + + +deactivate Factory +deactivate Instance +@enduml diff --git a/score/memory/design/shared_memory/named_memory_allocation_seq.puml b/score/memory/design/shared_memory/named_memory_allocation_seq.puml new file mode 100644 index 000000000..a79121dc9 --- /dev/null +++ b/score/memory/design/shared_memory/named_memory_allocation_seq.puml @@ -0,0 +1,134 @@ +@startuml named_memory_allocation_seq +' Sequence diagram for named memory allocation + + +participant InstanceP1 as Instance +participant SharedMemoryFactoryP1 as Factory +participant SharedMemoryResourceP1 as Resource +participant TypedMemoryP1 as TypedMemory +participant tmd_ITypedSharedMemory as ITypedSharedMemory + +participant os_Stat as Stat +participant os_utils_AccessControlList as ACL +participant os_Unistd as Unistd + +participant os_Mman as Mman + +participant MemoryResourceRegistryP1 as Registry +participant FooSharedMemoryObject as FooObject +participant MemoryResourceProxy as Proxy + +activate Instance +activate Factory + +Instance -> Factory: Create("/foo", 
permissions, size, prefer_typed_memory) +Factory -> Factory: Check if instances was already created +Factory -> TypedMemory: TypedMemory::Default() +activate TypedMemory +TypedMemory --> Factory: return TypedMemoryP1 +Factory -> Resource: Create("/foo", permissions, size, TypedMemoryP1) +activate Resource +Resource -> Resource: mode = calcStatModeForPermissions(permissions) + +deactivate Resource + + +TypedMemory -> ITypedSharedMemory: AllocateNamedTypedMemory("/foo", permissions, size) +alt + Factory --> Instance: return existing instance + +end + +alt AllocateInTypedmemory(typed_memory_ptr_ != nullptr) + Resource -> TypedMemory: AllocateNamedTypedMemory("/foo", permissions, size) + activate Resource + TypedMemory --> Resource: success + + TypedMemory -> ITypedSharedMemory: AllocateNamedTypedMemory("/foo", permissions, size) + ITypedSharedMemory --> TypedMemory + + deactivate TypedMemory + + + Resource -> Mman: shm_open(O_RDWR|O_CREAT|O_EXCL, mode) + activate Mman + Mman --> Resource: file_descriptor + deactivate Mman + deactivate Resource +end + +alt AllocateInTypedmemory failed or AllocateInSysram(typed_memory_ptr_ == nullptr) + + Resource -> Mman: shm_open(O_RDWR|O_EXCL, mode) + activate Resource + activate Mman + Mman --> Resource: file_descriptor + deactivate Mman + deactivate Resource + + alt mode == world-readable + Resource -> Stat: fchmod(file_descriptor, mode) + activate Resource + activate Stat + Stat --> Resource + deactivate Stat + deactivate Resource + else mode==UserPermissions + + Resource -> ACL: AllowUser(file_descriptor, os::Acl::Permission) + activate Resource + activate ACL + ACL --> Resource + deactivate ACL + deactivate Resource + end + + + + Resource -> Unistd: ftruncate(file_descriptor, size) + activate Resource + activate Unistd + Unistd --> Resource + deactivate Unistd + deactivate Resource + + +end + +Resource -> Resource: GetOwnerUidAndSizeOf(file_descriptor) +activate Resource +activate Mman +Resource -> Mman: mmap(map memory into 
process) +Mman --> Resource +deactivate Mman + +Resource -> Proxy : Instantiate(Unique ID) +activate Proxy +Proxy --> Resource +deactivate Proxy + +Resource -> Registry: insert_resource(uniqueID, this) +activate Registry +Registry --> Resource: return result +deactivate Registry + + +Resource -> FooObject: initializeInternalsInSharedMemory() - Proxy, alreadyAllocatedBytes, mutex +activate FooObject +FooObject --> Resource +deactivate FooObject + +Resource --> Factory: instance +Factory --> Instance + +deactivate Resource +deactivate Factory +deactivate Instance +destroy Mman +destroy Registry +destroy FooObject +destroy Proxy +destroy Resource +destroy Factory +destroy Instance +@enduml diff --git a/score/memory/design/shared_memory/offset_ptr_problems.md b/score/memory/design/shared_memory/offset_ptr_problems.md new file mode 100644 index 000000000..fb8afc6ae --- /dev/null +++ b/score/memory/design/shared_memory/offset_ptr_problems.md @@ -0,0 +1,55 @@ +# OffsetPtr problems to solve + +## Definitions / background: +- Start check: Checking that the start address of a pointed-to object lies in the same shared memory region in which the OffsetPtr was created. +- End checks: Checking that the end address of a pointed-to object lies in the same shared memory region in which the OffsetPtr was created. +- Bounds check: Start and end check. + +One-past-the-end-iterators: +- The size of memory allocated for a container usually doesn't include the size of a pointed-to object starting at a one-past-the-end iterator address. Therefore, doing an end check on a one-past-the-end iterator may fail. + - It is legal to create and copy such a pointer. + - It's illegal to dereference such a pointer. + - It's legal to decrement such a pointer and then dereference it. +- The standard library containers that we're using (i.e. std::unordered_map) are creating and copying one-past-the-end pointers.
+ +## Required checks: + +OffsetPtr +- Dereferencing a pointer - operator* + - We need to do a Bounds check. +- Getting raw pointer - get(), operator pointer(), operator-> + - Although we're not dereferencing the pointer, so we are not violating any safety goals by creating the raw pointer, we cannot control the bounds checking after this point (i.e. making sure that bounds checking is done before the raw pointer is dereferenced). Therefore, we need to do the same checks as if we were dereferencing the pointer. +- Copying OffsetPtr + - We don't _need_ to do any checks here. The copied-to OffsetPtr will do any required bounds checking when it's dereferenced. + +OffsetPtr +- Dereferencing a pointer - operator* + - We cannot dereference a void*. +- Getting raw pointer - get(), operator pointer(), operator-> + - We do not know the size of the pointed-to object and therefore can only perform a start check. + - It must be ensured that a full bounds check is done before the user dereferences the pointer (after they've cast it to a type). i.e. if we do a start check when getting the pointer, the user just has to do the end check. +- Copying OffsetPtr + - We don't _need_ to do any checks here. The copied-to OffsetPtr / subsequent user checks before dereferencing will do any required bounds checking. + + +## Problems to solve: + +1. UB in pointer arithmetic when calculating offset and getting absolute pointer from offset. +2. Copying offset pointer should not do end check (will fail for one-past-the-end iterator). +3. Getting a void pointer cannot do end check. Dereferencing retrieved pointer will therefore bypass end check. +4. Copying an OffsetPtr to the stack will no longer perform bounds checking. Therefore, we must be able to do bounds checking when getting / dereferencing a stack OffsetPtr. + +## Proposed solution: + +1. 
Cast addresses to integral types (implementation defined: `https://timsong-cpp.github.io/cppwp/n4659/expr.reinterpret.cast#4`, `https://timsong-cpp.github.io/cppwp/n4659/expr.reinterpret.cast#5`) and then do all arithmetic on integral types to avoid UB. + - When calculating the offset, cast the OffsetPtr and pointed-to addresses to integral type and subtract integers. + - When getting raw pointer from offset, cast the OffsetPtr address to integral type and add the offset to it. Cast the resulting address to a pointer. + - When copying an OffsetPtr, cast the copied-from and copied-to OffsetPtr addresses to integral types. Add the difference between the two integers to the currently stored offset and store in the copied-to OffsetPtr. +2. Copying offset pointer can now calculate the offset in an implementation defined manner (even when copying out of shared memory) so doesn't have to do any bounds checking. Therefore, copying one-past-the-end iterator will do no end check and never fail. +3. OffsetPtr could contain a templated get() (which would be the only one enabled for PointedType=void) which is templated with the real size of the type (which is currently pointed to via a void pointer). It will then do the bounds check using the size of the type and return a void* or even the type itself. + - Limitation of this is that we use OffsetPtr instead of void*. We would have to change ManagedMemoryResource getUsableBaseAddress, getEndAddress etc. to return OffsetPtr instead of void*. We wanted to anyway make these private, so it might not be a big issue. + - Other option would be to have getUnsafe() and require the caller to manually do the bounds checking. +4. When copying an OffsetPtr to the stack, we must store the MemoryResource identifier to do the bounds checking on dereferencing (since we can't use the address of the OffsetPtr itself to identify the memory resource, like when the OffsetPtr is in shared memory).
+ +## Misc: +- MemoryResourceRegistry can store uintptr_t instead of void* for memory bounds. diff --git a/score/memory/shared/BUILD b/score/memory/shared/BUILD new file mode 100644 index 000000000..c9a4446ed --- /dev/null +++ b/score/memory/shared/BUILD @@ -0,0 +1,957 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@rules_cc//cc:defs.bzl", "cc_library") +load("@score_baselibs//:bazel/unit_tests.bzl", "cc_gtest_unit_test", "cc_unit_test", "cc_unit_test_suites_for_host_and_qnx") +load("@score_baselibs//score/language/safecpp:toolchain_features.bzl", "COMPILER_WARNING_FEATURES") + +# ============================================================================= +# Aliases — these targets moved to new packages but kept here for compatibility +# ============================================================================= + +cc_library( + name = "atomic_interface", + hdrs = ["i_atomic.h"], + deprecation = "Use @score_baselibs//score/concurrency/atomic:atomic_interface directly. This forwarding header will be removed.", + visibility = ["//visibility:public"], + deps = ["@score_baselibs//score/concurrency/atomic:atomic_interface"], +) + +cc_library( + name = "atomic_indirector", + hdrs = ["atomic_indirector.h"], + deprecation = "Use @score_baselibs//score/concurrency/atomic:atomic_indirector directly. 
This forwarding header will be removed.", + visibility = ["//visibility:public"], + deps = ["@score_baselibs//score/concurrency/atomic:atomic_indirector"], +) + +cc_library( + name = "atomic_indirector_mock_binding", + hdrs = ["atomic_mock.h"], + deprecation = "Use @score_baselibs//score/concurrency/atomic:atomic_indirector_mock_binding directly. This forwarding header will be removed.", + testonly = True, + visibility = ["//visibility:public"], + deps = ["@score_baselibs//score/concurrency/atomic:atomic_indirector_mock_binding"], +) + +# ============================================================================= +# Primitives — these stay in baselibs +# ============================================================================= + +cc_library( + name = "pointer_arithmetic_util", + srcs = ["pointer_arithmetic_util.cpp"], + hdrs = ["pointer_arithmetic_util.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + "@score_baselibs//score/language/safecpp/safe_math", + "@score_baselibs//score/memory:data_type_size_info", + "@score_baselibs//score/mw/log:frontend", + ], +) + +cc_library( + name = "allocation_algorithm", + srcs = ["allocation_algorithm.cpp"], + hdrs = ["allocation_algorithm.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":pointer_arithmetic_util", + ], +) + +cc_library( + name = "managed_memory_resource", + srcs = ["managed_memory_resource.cpp"], + hdrs = ["managed_memory_resource.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = ["@score_baselibs//score/language/futurecpp"], +) + +cc_library( + name = "user_permission", + hdrs = ["user_permission.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = ["//visibility:public"], + deps = ["@score_baselibs//score/os/utils/acl"], +) + +cc_library( + name = 
"i_shared_memory_resource", + srcs = ["i_shared_memory_resource.cpp"], + hdrs = ["i_shared_memory_resource.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":managed_memory_resource", + ":user_permission", + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "memory_region_bounds", + srcs = ["memory_region_bounds.cpp"], + hdrs = ["memory_region_bounds.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "memory_region_map", + srcs = ["memory_region_map.cpp"], + hdrs = ["memory_region_map.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "@score_baselibs//score/mw/log:frontend", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + "@score_baselibs//score/concurrency/atomic:atomic_indirector", + ":memory_region_bounds", + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "memory_resource_registry", + srcs = ["memory_resource_registry.cpp"], + hdrs = ["memory_resource_registry.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + ":pointer_arithmetic_util", + ":shared_memory_error", + "@score_baselibs//score/mw/log:frontend", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":managed_memory_resource", + ":memory_region_bounds", + ":memory_region_map", + "@score_baselibs//score/language/futurecpp", + "@score_baselibs//score/result", + ], +) + +cc_library( + name = "memory_resource_proxy", + srcs = ["memory_resource_proxy.cpp"], + hdrs = ["memory_resource_proxy.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + ":memory_region_bounds", + ":memory_resource_registry", + ":pointer_arithmetic_util", + "@score_baselibs//score/language/futurecpp", + 
"@score_baselibs//score/mw/log:frontend", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], +) + +cc_library( + name = "offset_ptr", + srcs = [ + "offset_ptr.cpp", + ], + hdrs = [ + "offset_ptr.h", + ], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":memory_region_bounds", + ":memory_resource_registry", + ":offset_ptr_bounds_check", + ":pointer_arithmetic_util", + "@score_baselibs//score/language/futurecpp", + "@score_baselibs//score/language/safecpp/safe_math", + "@score_baselibs//score/quality/compiler_warnings", + ], +) + +cc_library( + name = "offset_ptr_bounds_check", + srcs = [ + "offset_ptr_bounds_check.cpp", + ], + hdrs = [ + "offset_ptr_bounds_check.h", + ], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + ":pointer_arithmetic_util", + "@score_baselibs//score/mw/log:frontend", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":memory_region_map", + ":memory_resource_registry", + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "polymorphic_offset_ptr_allocator", + srcs = ["polymorphic_offset_ptr_allocator.cpp"], + hdrs = ["polymorphic_offset_ptr_allocator.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":memory_resource_proxy", + ":offset_ptr", + "@score_baselibs//score/language/safecpp/safe_math", + ], +) + +cc_library( + name = "map", + srcs = ["map.cpp"], + hdrs = ["map.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":polymorphic_offset_ptr_allocator", + ] + select({ + "@platforms//os:linux": [ + "@boost.container", + "@boost.interprocess", + ], + "@platforms//os:qnx": [], + }), +) + +cc_library( + name = "string", + srcs = ["string.cpp"], + hdrs = ["string.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ 
+ "//visibility:public", + ], + deps = [ + ":polymorphic_offset_ptr_allocator", + "@score_baselibs//score/language/safecpp/string_view:char_traits_wrapper", + ], +) + +cc_library( + name = "vector", + srcs = ["vector.cpp"], + hdrs = ["vector.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":polymorphic_offset_ptr_allocator", + ], +) + +cc_library( + name = "lock_file", + srcs = ["lock_file.cpp"], + hdrs = ["lock_file.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "@score_baselibs//score/mw/log:frontend", + "@score_baselibs//score/os:fcntl", + "@score_baselibs//score/os:stat", + "@score_baselibs//score/os:unistd", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "shared_memory_error", + srcs = ["shared_memory_error.cpp"], + hdrs = ["shared_memory_error.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = ["//visibility:public"], + deps = [ + "@score_baselibs//score/result", + ], +) + +cc_library( + name = "types", + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":managed_memory_resource", + ":map", + ":memory_region_map", + ":memory_resource_proxy", + ":memory_resource_registry", + ":offset_ptr", + ":polymorphic_offset_ptr_allocator", + ":shared_memory_error", + ":string", + ":vector", + ], +) + +cc_library( + name = "shared_memory_resource", + srcs = ["shared_memory_resource.cpp"], + hdrs = ["shared_memory_resource.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "@score_baselibs//score/language/safecpp/string_view:zstring_view", + ], + tags = ["FFI"], + visibility = [ + "//score/memory/shared/fake:__subpackages__", + ], + deps = [ + ":i_shared_memory_resource", + ":lock_file", + ":pointer_arithmetic_util", + ":types", + 
"@score_baselibs//score/language/futurecpp", + "@score_baselibs//score/language/safecpp/safe_math", + "//score/memory/shared/sealedshm/sealedshm_wrapper:sealedshm", + "//score/memory/shared/typedshm/typedshm_wrapper:typedmemory", + "//score/memory/shared/typedshm/utils:typed_memory_utils", + "@score_baselibs//score/mw/log:frontend", + "@score_baselibs//score/os:errno_logging", + "@score_baselibs//score/os:fcntl", + "@score_baselibs//score/os:mman", + "@score_baselibs//score/os:stat", + "@score_baselibs//score/os:unistd", + "@score_baselibs//score/os/utils/acl", + "@score_baselibs//score/os/utils/interprocess:interprocess_mutex", + ], +) + +cc_library( + name = "i_shared_memory_factory", + srcs = ["i_shared_memory_factory.cpp"], + hdrs = ["i_shared_memory_factory.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + deps = [ + ":i_shared_memory_resource", + ":types", + "@score_baselibs//score/language/futurecpp", + "//score/memory/shared/typedshm/typedshm_wrapper:typedmemory", + ], +) + +cc_library( + name = "shared_memory_factory_mock", + testonly = True, + srcs = ["shared_memory_factory_mock.cpp"], + hdrs = ["shared_memory_factory_mock.h"], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "//visibility:public", + ], + deps = [ + ":i_shared_memory_factory", + ":types", + "@googletest//:gtest", + "//score/memory/shared/typedshm/typedshm_wrapper:typedmemory", + ], +) + +cc_library( + name = "shared_memory_factory_impl", + srcs = ["shared_memory_factory_impl.cpp"], + hdrs = ["shared_memory_factory_impl.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + deps = [ + ":i_shared_memory_factory", + ":shared_memory_resource", + ":types", + "@score_baselibs//score/language/futurecpp", + "//score/memory/shared/typedshm/typedshm_wrapper:typedmemory", + "//score/memory/shared/typedshm/utils:typed_memory_utils", + "@score_baselibs//score/mw/log:frontend", + "@score_baselibs//score/os:mman", + "@score_baselibs//score/os:unistd", + 
"@score_baselibs//score/os/utils/acl", + ], +) + +cc_library( + name = "shared_memory_factory", + srcs = ["shared_memory_factory.cpp"], + hdrs = ["shared_memory_factory.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "shared_memory_factory_impl", + "@score_baselibs//score/mw/log:frontend", + "@score_baselibs//score/os:errno_logging", + "@score_baselibs//score/os:unistd", + ], + tags = ["FFI"], + deps = [ + ":i_shared_memory_factory", + ":i_shared_memory_resource", + ":types", + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "shared", + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":shared_memory_factory", + ":shared_memory_resource", + ], +) + +cc_library( + name = "shared_memory_resource_heap_allocator_mock", + testonly = True, + srcs = ["shared_memory_resource_heap_allocator_mock.cpp"], + hdrs = ["shared_memory_resource_heap_allocator_mock.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":i_shared_memory_resource", + ":new_delete_delegate_resource", + "@googletest//:gtest", + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "shared_memory_resource_mock", + testonly = True, + srcs = ["shared_memory_resource_mock.cpp"], + hdrs = ["shared_memory_resource_mock.h"], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "//visibility:public", + ], + deps = [ + ":i_shared_memory_resource", + "@googletest//:gtest", + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "new_delete_delegate_resource", + srcs = ["new_delete_delegate_resource.cpp"], + hdrs = ["new_delete_delegate_resource.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "@score_baselibs//score/language/safecpp/safe_math", + "@score_baselibs//score/mw/log:frontend", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", + 
], + deps = [ + ":pointer_arithmetic_util", + ":shared", + ":types", + "@score_baselibs//score/language/futurecpp", + ], +) + +cc_library( + name = "shared_memory_test_resources", + testonly = True, + srcs = [ + "shared_memory_test_resources.cpp", + ], + hdrs = [ + "shared_memory_test_resources.h", + ], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = ["//score/memory/shared:__subpackages__"], + deps = [ + ":shared", + "@score_baselibs//score/language/futurecpp", + "//score/memory/shared/sealedshm/sealedshm_wrapper:sealedshm_mock", + "//score/memory/shared/typedshm/typedshm_wrapper/test:typedmemory_mock", + "@score_baselibs//score/os/mocklib:acl_mock", + "@score_baselibs//score/os/mocklib:fcntl_mock", + "@score_baselibs//score/os/mocklib:mman_mock", + "@score_baselibs//score/os/mocklib:stat_mock", + "@score_baselibs//score/os/mocklib:unistd_mock", + ], +) + +# ============================================================================= +# Tests for primitives +# ============================================================================= + +cc_unit_test( + name = "pointer_arithmetic_util_precondition_violation_test", + srcs = [ + "pointer_arithmetic_util_test.cpp", + ], + features = COMPILER_WARNING_FEATURES + [ + "-aborts_upon_exception", + ], + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":pointer_arithmetic_util", + "@googletest//:gtest", + "@googletest//:gtest_main", + "@score_baselibs//score/language/futurecpp:futurecpp_test_support", + "@score_baselibs//score/language/safecpp/coverage_termination_handler", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_unit_test( + name = "pointer_arithmetic_util_calculate_aligned_size_test", + srcs = [ + "pointer_arithmetic_util_calculate_aligned_size_test.cpp", + ], + features = COMPILER_WARNING_FEATURES + [ + "-aborts_upon_exception", + ], + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + 
":pointer_arithmetic_util", + "@googletest//:gtest", + "@googletest//:gtest_main", + "@score_baselibs//score/language/futurecpp:futurecpp_test_support", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "memory_region_bounds_test", + srcs = [ + "memory_region_bounds_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":memory_region_bounds", + ], +) + +cc_gtest_unit_test( + name = "memory_resource_registry_test", + srcs = [ + "memory_resource_registry_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":memory_resource_registry", + ":pointer_arithmetic_util", + ":shared_memory_error", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + + +cc_gtest_unit_test( + name = "map_test", + srcs = [ + "map_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":map", + ":vector", + "//score/memory/shared/fake:fake_memory_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "memory_region_map_test", + srcs = [ + "memory_region_map_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":memory_region_map", + ":shared_memory_test_resources", + "@score_baselibs//score/concurrency/atomic:atomic_indirector_mock_binding", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "memory_resource_proxy_test", + srcs = [ + "memory_resource_proxy_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":memory_resource_proxy", + ":shared_memory_test_resources", + "//score/memory/shared/fake:fake_memory_resources", + 
"@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "polymorphic_offset_ptr_allocator_test", + srcs = [ + "polymorphic_offset_ptr_allocator_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":polymorphic_offset_ptr_allocator", + "//score/memory/shared/fake:fake_memory_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_error_test", + srcs = [ + "shared_memory_error_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":shared_memory_error", + ], +) + +cc_gtest_unit_test( + name = "vector_test", + srcs = [ + "vector_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":vector", + "//score/memory/shared/fake:fake_memory_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "string_test", + srcs = [ + "string_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + target_compatible_with = [ + "@platforms//os:linux", + ], + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":string", + "//score/memory/shared/fake:fake_memory_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_factory_test", + srcs = [ + "shared_memory_factory_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":shared_memory_factory", + ":shared_memory_test_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "lock_file_test", + srcs = [ + "lock_file_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + 
"@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":lock_file", + ":shared_memory_test_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "managed_memory_resource_test", + srcs = ["managed_memory_resource_test.cpp"], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":managed_memory_resource", + ":shared_memory_test_resources", + "//score/memory/shared/fake:fake_memory_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "new_delete_delegate_resource_test", + srcs = [ + "new_delete_delegate_resource_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":new_delete_delegate_resource", + ":shared_memory_test_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_resource_allocate_test", + srcs = [ + "shared_memory_resource_allocate_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":pointer_arithmetic_util", + ":shared_memory_test_resources", + "//score/memory/shared/fake:fake_memory_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_resource_create_anonymous_test", + srcs = [ + "shared_memory_resource_create_anonymous_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":shared_memory_test_resources", + "//score/memory/shared/sealedshm/sealedshm_wrapper:sealedshm", + "@score_baselibs//score/mw/log:backend_stub_testutil", + "@score_baselibs//score/os/utils/acl:mock", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_resource_create_or_open_test", + srcs = [ + 
"shared_memory_resource_create_or_open_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":shared_memory_test_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_resource_create_test", + srcs = [ + "shared_memory_resource_create_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":pointer_arithmetic_util", + ":shared_memory_resource", + ":shared_memory_test_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + "@score_baselibs//score/os/utils/acl:mock", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_resource_misc_test", + srcs = [ + "shared_memory_resource_misc_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":pointer_arithmetic_util", + ":shared_memory_test_resources", + "//score/memory/shared/fake:fake_memory_resources", + "//score/memory/shared/sealedshm/sealedshm_wrapper:sealedshm_mock", + "@score_baselibs//score/mw/log:backend_stub_testutil", + ], +) + +cc_gtest_unit_test( + name = "shared_memory_resource_open_test", + srcs = [ + "shared_memory_resource_open_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], + deps = [ + ":shared_memory_test_resources", + "//score/memory/shared/fake:fake_memory_resources", + "@score_baselibs//score/mw/log:backend_stub_testutil", + "@score_baselibs//score/os/utils/acl:mock", + ], +) + +# Separate test suite for string test as it should not be executed on qnx +test_suite( + name = "unit_tests_host", + tests = [ + ":string_test", + ], + visibility = [ + "@score_baselibs//score/memory:__pkg__", + ], +) + +cc_unit_test_suites_for_host_and_qnx( + name = "unit_test_suite", + cc_unit_tests = [ + ":lock_file_test", + 
":managed_memory_resource_test", + ":map_test", + ":memory_region_bounds_test", + ":memory_region_map_test", + ":memory_resource_proxy_test", + ":memory_resource_registry_test", + ":new_delete_delegate_resource_test", + ":polymorphic_offset_ptr_allocator_test", + ":pointer_arithmetic_util_precondition_violation_test", + ":pointer_arithmetic_util_calculate_aligned_size_test", + ":shared_memory_error_test", + ":shared_memory_factory_test", + ":shared_memory_resource_allocate_test", + ":shared_memory_resource_create_anonymous_test", + ":shared_memory_resource_create_or_open_test", + ":shared_memory_resource_create_test", + ":shared_memory_resource_misc_test", + ":shared_memory_resource_open_test", + ":vector_test", + ], + test_suites_from_sub_packages = [ + "//score/memory/shared/flock:unit_test_suite", + "//score/memory/shared/sealedshm/sealedshm_wrapper:unit_test_suite", + "//score/memory/shared/typedshm/typedshm_wrapper:unit_test_suite", + "//score/memory/shared/test_offset_ptr:unit_test_suite", + ], + visibility = [ + "//score/memory:__pkg__", + ], +) diff --git a/score/memory/shared/allocation_algorithm.cpp b/score/memory/shared/allocation_algorithm.cpp new file mode 100644 index 000000000..9eb32a713 --- /dev/null +++ b/score/memory/shared/allocation_algorithm.cpp @@ -0,0 +1,43 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/allocation_algorithm.h" + +#include "score/memory/shared/pointer_arithmetic_util.h" + +#include <memory> + +namespace score::memory::shared +{ + +namespace detail +{ + +void* do_allocation_algorithm(const void* const alloc_start, + const void* const alloc_end, + const std::size_t bytes, + const std::size_t alignment) noexcept +{ + // Suppress "AUTOSAR C++14 A5-2-3" rule finding: A cast shall not remove any const or volatile + // qualification from the type of a pointer or reference. + // Rationale : std::align does not modify the underlying memory of aligned_address + // (https://timsong-cpp.github.io/cppwp/n4659/ptr.align#lib:align) so the const_cast will not result in undefined + // behaviour. + // coverity[autosar_cpp14_a5_2_3_violation] + void* aligned_address = const_cast<void*>(alloc_start); + auto buffer_space = static_cast<std::size_t>(SubtractPointersBytes(alloc_end, alloc_start)); + return std::align(alignment, bytes, aligned_address, buffer_space); +} + +} // namespace detail + +} // namespace score::memory::shared diff --git a/score/memory/shared/allocation_algorithm.h b/score/memory/shared/allocation_algorithm.h new file mode 100644 index 000000000..d1417c31f --- /dev/null +++ b/score/memory/shared/allocation_algorithm.h @@ -0,0 +1,39 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership.
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_ALLOCATION_ALGORITHM_H +#define SCORE_LIB_MEMORY_SHARED_ALLOCATION_ALGORITHM_H + +#include <cstddef> + +namespace score::memory::shared +{ + +namespace detail +{ + +/// \brief Implementation of a simplistic monotonic allocation algo as used by do_allocate(). +/// \param alloc_start address where allocation can start (start of free buffer space) +/// \param alloc_end address where allocation shall end (end of free buffer space) +/// \param bytes how many bytes to allocate +/// \param alignment +/// \return If allocation is successful a valid pointer is returned, otherwise a nullptr +void* do_allocation_algorithm(const void* const alloc_start, + const void* const alloc_end, + const std::size_t bytes, + const std::size_t alignment) noexcept; + +} // namespace detail + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_ALLOCATION_ALGORITHM_H diff --git a/score/memory/shared/atomic_indirector.h b/score/memory/shared/atomic_indirector.h new file mode 100644 index 000000000..a03018319 --- /dev/null +++ b/score/memory/shared/atomic_indirector.h @@ -0,0 +1,22 @@ +// ******************************************************************************* +// Copyright (c) 2025 Contributors to the Eclipse Foundation +// +// See the NOTICE file(s) distributed with this work for additional +// information regarding copyright ownership.
+// +// This program and the accompanying materials are made available under the +// terms of the Apache License Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0 +// +// SPDX-License-Identifier: Apache-2.0 +// ******************************************************************************* + +#ifndef SCORE_MEMORY_SHARED_ATOMIC_INDIRECTOR_H +#define SCORE_MEMORY_SHARED_ATOMIC_INDIRECTOR_H + +#warning \ + "score/memory/shared/atomic_indirector.h is deprecated. Include score/concurrency/atomic/atomic_indirector.h instead." + +#include "score/concurrency/atomic/atomic_indirector.h" + +#endif // SCORE_MEMORY_SHARED_ATOMIC_INDIRECTOR_H diff --git a/score/memory/shared/atomic_mock.h b/score/memory/shared/atomic_mock.h new file mode 100644 index 000000000..bf3c29f6a --- /dev/null +++ b/score/memory/shared/atomic_mock.h @@ -0,0 +1,21 @@ +// ******************************************************************************* +// Copyright (c) 2025 Contributors to the Eclipse Foundation +// +// See the NOTICE file(s) distributed with this work for additional +// information regarding copyright ownership. +// +// This program and the accompanying materials are made available under the +// terms of the Apache License Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0 +// +// SPDX-License-Identifier: Apache-2.0 +// ******************************************************************************* + +#ifndef SCORE_MEMORY_SHARED_ATOMIC_MOCK_H +#define SCORE_MEMORY_SHARED_ATOMIC_MOCK_H + +#warning "score/memory/shared/atomic_mock.h is deprecated. Include score/concurrency/atomic/atomic_mock.h instead." 
+ +#include "score/concurrency/atomic/atomic_mock.h" + +#endif // SCORE_MEMORY_SHARED_ATOMIC_MOCK_H diff --git a/score/memory/shared/fake/BUILD b/score/memory/shared/fake/BUILD new file mode 100644 index 000000000..c86ba18cd --- /dev/null +++ b/score/memory/shared/fake/BUILD @@ -0,0 +1,57 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@rules_cc//cc:defs.bzl", "cc_library") +load("@score_baselibs//score/language/safecpp:toolchain_features.bzl", "COMPILER_WARNING_FEATURES") + +cc_library( + name = "fake_memory_resources", + testonly = True, + srcs = [ + "my_bounded_memory_resource.cpp", + "my_memory_resource.cpp", + ], + hdrs = [ + "my_bounded_memory_resource.h", + "my_memory_resource.h", + ], + features = COMPILER_WARNING_FEATURES, + visibility = ["//visibility:public"], + deps = [ + "@score_baselibs//score/language/futurecpp", + "//score/memory/shared:allocation_algorithm", + "//score/memory/shared:managed_memory_resource", + "//score/memory/shared:memory_resource_proxy", + "//score/memory/shared:memory_resource_registry", + "//score/memory/shared:pointer_arithmetic_util", + ], +) + +cc_library( + name = "my_bounded_shared_memory_resource", + testonly = True, + srcs = [ + "my_bounded_shared_memory_resource.cpp", + ], + hdrs = [ + "my_bounded_shared_memory_resource.h", + ], + features = COMPILER_WARNING_FEATURES, + visibility = ["//visibility:public"], + deps = [ + ":fake_memory_resources", + 
"@score_baselibs//score/language/futurecpp", + "//score/memory/shared:i_shared_memory_resource", + "//score/memory/shared:types", + ], +) diff --git a/score/memory/shared/fake/my_bounded_memory_resource.cpp b/score/memory/shared/fake/my_bounded_memory_resource.cpp new file mode 100644 index 000000000..0532bd0dd --- /dev/null +++ b/score/memory/shared/fake/my_bounded_memory_resource.cpp @@ -0,0 +1,130 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/fake/my_bounded_memory_resource.h" + +#include "score/memory/shared/memory_resource_proxy.h" + +#include "score/memory/shared/memory_resource_registry.h" +#include "score/memory/shared/pointer_arithmetic_util.h" +#include "score/memory/shared/allocation_algorithm.h" + +#include "score/language/safecpp/safe_math/safe_math.h" + +#include <cstdlib> + +#include <cstdint> +#include <new> + +namespace score::memory::shared::test +{ + +namespace +{ + +std::pair<void*, void*> AllocateMemoryRange(const std::size_t memory_resource_size) noexcept +{ + auto* memory_allocation = static_cast<std::uint8_t*>(std::malloc(memory_resource_size)); + SCORE_LANGUAGE_FUTURECPP_ASSERT_MESSAGE(memory_allocation != nullptr, "Malloc must return allocated memory!"); + return {memory_allocation, memory_allocation + memory_resource_size}; +} + +} // namespace + +std::uint64_t MyBoundedMemoryResource::instanceId = 0U; +std::size_t MyBoundedMemoryResource::memoryResourceProxyAllocationSize_{ + CalculateAlignedSize(sizeof(MemoryResourceProxy),
alignof(std::max_align_t))}; + +MyBoundedMemoryResource::MyBoundedMemoryResource(const std::size_t memory_resource_size, + const bool register_resource_with_registry) + : MyBoundedMemoryResource{AllocateMemoryRange(memory_resource_size + memoryResourceProxyAllocationSize_), + register_resource_with_registry} +{ + should_free_memory_on_destruction_ = true; +} + +/// \brief Construct MyBoundedMemoryResource using an underlying memory region owned by the caller (i.e. will not be +/// created or freed within the lifecyle of this class) +MyBoundedMemoryResource::MyBoundedMemoryResource(const std::pair memory_range, + const bool register_resource_with_registry) + // Suppress "AUTOSAR C++14 M5-2-8" rule finding: An object with integer type or pointer to void type shall not + // be converted to an object with pointer type. + // Rationale : fresh allocated memory needs to be converted to the type that shall be stored in it + // coverity[autosar_cpp14_m5_2_8_violation] + : baseAddress_{static_cast(memory_range.first)}, + endAddress_{static_cast(memory_range.second)}, + virtual_address_space_to_reserve_{ + static_cast(SubtractPointersBytes(memory_range.second, memory_range.first))}, + already_allocated_bytes_{0U}, + deallocatedMemory_{0U}, + memoryResourceId_{instanceId++}, + manager_{nullptr}, + should_free_memory_on_destruction_{false} +{ + if (register_resource_with_registry) + { + const bool registration_result = + MemoryResourceRegistry::getInstance().insert_resource({memoryResourceId_, this}); + SCORE_LANGUAGE_FUTURECPP_ASSERT_MESSAGE(registration_result, "Could not register memory resource with registry"); + } + manager_ = AllocateMemoryResourceProxy(memoryResourceId_); +} + +MyBoundedMemoryResource::~MyBoundedMemoryResource() +{ + if (should_free_memory_on_destruction_) + { + std::free(baseAddress_); + } + MemoryResourceRegistry::getInstance().remove_resource(memoryResourceId_); +} + +void* MyBoundedMemoryResource::do_allocate(const std::size_t bytes, std::size_t 
alignment) +{ + SCORE_LANGUAGE_FUTURECPP_ASSERT_PRD(alignment <= alignof(std::max_align_t)); + void* const allocation_start_address = AddOffsetToPointer(baseAddress_, already_allocated_bytes_); + void* const allocation_end_address = AddOffsetToPointer(this->baseAddress_, virtual_address_space_to_reserve_); + void* const new_address_aligned = + detail::do_allocation_algorithm(allocation_start_address, allocation_end_address, bytes, alignment); + + if (new_address_aligned == nullptr) + { + score::mw::log::LogFatal("shm") << "Cannot allocate memory block of size" << bytes << " at: [" + << PointerToLogValue(new_address_aligned) << ":" + << PointerToLogValue(AddOffsetToPointer(new_address_aligned, bytes)) + << "]. Does not fit within shared memory segment: [" + << PointerToLogValue(baseAddress_) << ":" + << PointerToLogValue(this->getEndAddress()) << "]"; + std::terminate(); + } + const auto padding = SubtractPointersBytes(new_address_aligned, allocation_start_address); + + const auto total_allocated_bytes = safe_math::Add(bytes, padding).value(); + already_allocated_bytes_ += total_allocated_bytes; + + return new_address_aligned; +} + +void MyBoundedMemoryResource::do_deallocate(void* /*memory*/, const std::size_t bytes, std::size_t) +{ + deallocatedMemory_ += bytes; +} + +MemoryResourceProxy* MyBoundedMemoryResource::AllocateMemoryResourceProxy(const std::uint64_t memory_resource_id) +{ + // We allocate the MemoryResourceProxy using worst case alignment so that any further allocations will start at an + // aligned memory address. This is important so that GetUserAllocatedBytes() is never affected by the allocation of + // the MemoryResourceProxy. 
+ auto* storage = do_allocate(memoryResourceProxyAllocationSize_, alignof(std::max_align_t)); + return new (storage) MemoryResourceProxy(memory_resource_id); +} + +} // namespace score::memory::shared::test diff --git a/score/memory/shared/fake/my_bounded_memory_resource.h b/score/memory/shared/fake/my_bounded_memory_resource.h new file mode 100644 index 000000000..9e203410b --- /dev/null +++ b/score/memory/shared/fake/my_bounded_memory_resource.h @@ -0,0 +1,125 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_FAKE_MYBOUNDEDMEMORYRESOURCE_H +#define SCORE_LIB_MEMORY_SHARED_FAKE_MYBOUNDEDMEMORYRESOURCE_H + +#include "score/memory/shared/managed_memory_resource.h" +#include "score/memory/shared/memory_resource_proxy.h" +#include "score/memory/shared/pointer_arithmetic_util.h" + +#include +#include + +namespace score::memory::shared::test +{ + +class MyBoundedMemoryResource final : public ManagedMemoryResource +{ + friend class MyBoundedSharedMemoryResource; + + public: + /// \brief Construct MyBoundedMemoryResource which owns the underlying memory region (i.e. will create and free it + /// within the lifecyle of this class) + MyBoundedMemoryResource(const std::size_t memory_resource_size = 200U, + const bool register_resource_with_registry = true); + + /// \brief Construct MyBoundedMemoryResource using an underlying memory region owned by the caller (i.e. 
will not be + /// created or freed within the lifecyle of this class) + MyBoundedMemoryResource(const std::pair memory_range, + const bool register_resource_with_registry = true); + + ~MyBoundedMemoryResource() final; + + MyBoundedMemoryResource(const MyBoundedMemoryResource&) noexcept = default; + MyBoundedMemoryResource(MyBoundedMemoryResource&&) noexcept = default; + MyBoundedMemoryResource& operator=(const MyBoundedMemoryResource&) noexcept = default; + MyBoundedMemoryResource& operator=(MyBoundedMemoryResource&&) noexcept = default; + + MemoryResourceProxy* getMemoryResourceProxy() noexcept override + { + return manager_; + } + + void* getBaseAddress() const noexcept override + { + return baseAddress_; + } + + void* getUsableBaseAddress() const noexcept override + { + // Memory is allocated on construction to store a MemoryResourceProxy in the memory region. This is part of the + // "ControlBlock" of the memory region and therefore set the BaseAddress after it. + return AddOffsetToPointer(baseAddress_, memoryResourceProxyAllocationSize_); + } + + const void* getEndAddress() const noexcept override + { + return endAddress_; + } + + size_t getAllocatedMemory() const + { + return already_allocated_bytes_; + }; + + std::size_t GetUserAllocatedBytes() const noexcept override + { + // Memory is allocated on construction to store a MemoryResourceProxy in the memory region. This is part of the + // "ControlBlock" of the memory region and therefore we don't take it into account in the number of user + // allocated bytes. 
+ return already_allocated_bytes_ - memoryResourceProxyAllocationSize_; + }; + + std::size_t GetUserDeAllocatedBytes() const noexcept + { + return deallocatedMemory_; + } + + bool IsOffsetPtrBoundsCheckBypassingEnabled() const noexcept override + { + return false; + } + + std::uint64_t getMemoryResourceId() const + { + return memoryResourceId_; + }; + + private: + void* do_allocate(const std::size_t bytes, std::size_t) override; + + void do_deallocate(void* /*memory*/, const std::size_t bytes, std::size_t) override; + + bool do_is_equal(const memory_resource&) const noexcept override + { + return false; + } + + MemoryResourceProxy* AllocateMemoryResourceProxy(const std::uint64_t memory_resource_id); + + static std::uint64_t instanceId; + static std::size_t memoryResourceProxyAllocationSize_; + + void* baseAddress_; + void* endAddress_; + std::size_t virtual_address_space_to_reserve_; + std::size_t already_allocated_bytes_; + std::size_t deallocatedMemory_; + std::uint64_t memoryResourceId_; + MemoryResourceProxy* manager_; + bool should_free_memory_on_destruction_; +}; + +} // namespace score::memory::shared::test + +#endif // SCORE_LIB_MEMORY_SHARED_FAKE_MYBOUNDEDMEMORYRESOURCE_H diff --git a/score/memory/shared/fake/my_bounded_shared_memory_resource.cpp b/score/memory/shared/fake/my_bounded_shared_memory_resource.cpp new file mode 100644 index 000000000..9b5c0d310 --- /dev/null +++ b/score/memory/shared/fake/my_bounded_shared_memory_resource.cpp @@ -0,0 +1,30 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/fake/my_bounded_shared_memory_resource.h" + +namespace score::memory::shared::test +{ + +MyBoundedSharedMemoryResource::MyBoundedSharedMemoryResource(const std::size_t memory_resource_size, + const bool register_resource_with_registry) + : resource_{memory_resource_size, register_resource_with_registry} +{ +} + +MyBoundedSharedMemoryResource::MyBoundedSharedMemoryResource(const std::pair memory_range, + const bool register_resource_with_registry) + : resource_{memory_range, register_resource_with_registry} +{ +} + +} // namespace score::memory::shared::test diff --git a/score/memory/shared/fake/my_bounded_shared_memory_resource.h b/score/memory/shared/fake/my_bounded_shared_memory_resource.h new file mode 100644 index 000000000..17bb4057c --- /dev/null +++ b/score/memory/shared/fake/my_bounded_shared_memory_resource.h @@ -0,0 +1,123 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_FAKE_MY_BOUNDED_SHARED_MEMORY_RESOURCE_H +#define SCORE_LIB_MEMORY_SHARED_FAKE_MY_BOUNDED_SHARED_MEMORY_RESOURCE_H + +#include "score/memory/shared/fake/my_bounded_memory_resource.h" +#include "score/memory/shared/i_shared_memory_resource.h" + +namespace score::memory::shared::test +{ + +class MyBoundedSharedMemoryResource final : public ISharedMemoryResource +{ + public: + /// \brief Construct MyBoundedSharedMemoryResource which owns the underlying memory region (i.e. will create and + /// free it within the lifecyle of this class) + MyBoundedSharedMemoryResource(const std::size_t memory_resource_size = 200U, + const bool register_resource_with_registry = true); + + /// \brief Construct MyBoundedSharedMemoryResource using an underlying memory region owned by the caller (i.e. 
will + /// not be created or freed within the lifecyle of this class) + MyBoundedSharedMemoryResource(const std::pair memory_range, + const bool register_resource_with_registry = true); + + const std::string* getPath() const noexcept override + { + return nullptr; + } + + std::string_view GetIdentifier() const noexcept override + { + return "id: 123"; + }; + + void UnlinkFilesystemEntry() const noexcept override {} + + FileDescriptor GetFileDescriptor() const noexcept override + { + return FileDescriptor{1}; + } + + bool IsShmInTypedMemory() const noexcept override + { + return false; + } + + MemoryResourceProxy* getMemoryResourceProxy() noexcept override + { + return resource_.getMemoryResourceProxy(); + } + + void* getBaseAddress() const noexcept override + { + return resource_.getBaseAddress(); + } + + void* getUsableBaseAddress() const noexcept override + { + return resource_.getUsableBaseAddress(); + } + + const void* getEndAddress() const noexcept override + { + return resource_.getEndAddress(); + } + + size_t getAllocatedMemory() const + { + return resource_.getAllocatedMemory(); + }; + + std::size_t GetUserAllocatedBytes() const noexcept override + { + return resource_.GetUserAllocatedBytes(); + }; + + std::size_t GetUserDeAllocatedBytes() const noexcept + { + return resource_.GetUserDeAllocatedBytes(); + } + + bool IsOffsetPtrBoundsCheckBypassingEnabled() const noexcept override + { + return resource_.IsOffsetPtrBoundsCheckBypassingEnabled(); + } + + std::uint64_t getMemoryResourceId() const + { + return resource_.getMemoryResourceId(); + }; + + private: + void* do_allocate(const std::size_t bytes, std::size_t alignment) override + { + return resource_.do_allocate(bytes, alignment); + } + + void do_deallocate(void* memory, const std::size_t bytes, std::size_t alignment) override + { + resource_.do_deallocate(memory, bytes, alignment); + } + + bool do_is_equal(const memory_resource& resource) const noexcept override + { + return 
resource_.do_is_equal(resource); + } + + MyBoundedMemoryResource resource_; +}; + +} // namespace score::memory::shared::test + +#endif // SCORE_LIB_MEMORY_SHARED_FAKE_MY_BOUNDED_SHARED_MEMORY_RESOURCE_H diff --git a/score/memory/shared/fake/my_memory_resource.cpp b/score/memory/shared/fake/my_memory_resource.cpp new file mode 100644 index 000000000..f68f08b20 --- /dev/null +++ b/score/memory/shared/fake/my_memory_resource.cpp @@ -0,0 +1,20 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/fake/my_memory_resource.h" + +namespace score::memory::shared::test +{ + +std::uint64_t MyMemoryResource::instanceId = 0U; + +} // namespace score::memory::shared::test diff --git a/score/memory/shared/fake/my_memory_resource.h b/score/memory/shared/fake/my_memory_resource.h new file mode 100644 index 000000000..658fb04d3 --- /dev/null +++ b/score/memory/shared/fake/my_memory_resource.h @@ -0,0 +1,136 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_FAKE_MYMEMORYRESOURCE_H +#define SCORE_LIB_MEMORY_SHARED_FAKE_MYMEMORYRESOURCE_H + +#include "score/memory/shared/managed_memory_resource.h" +#include "score/memory/shared/memory_resource_proxy.h" +#include "score/memory/shared/memory_resource_registry.h" +#include "score/memory/shared/pointer_arithmetic_util.h" + +#include + +#include +#include +#include + +namespace score::memory::shared::test +{ + +class MyMemoryResource : public ManagedMemoryResource +{ + public: + // Set the default memory range to cover all possible memory addresses so that bounds checking of the + // MemoryResourceProxy is guaranteed to pass. We use start address 1 so that (getBaseAddress() == nullptr) returns + // false. In tests where bounds checking should be considered, use MyBoundedMemoryResource. 
+ MyMemoryResource(const std::pair memory_range = + {std::uintptr_t{1U}, std::numeric_limits::max()}) noexcept + : baseAddress_{CastIntegerToPointer(memory_range.first)}, + endAddress_{CastIntegerToPointer(memory_range.second)}, + allocationPossible_{true}, + allocatedMemory_{0U}, + memoryResourceId_{instanceId++}, + manager_{memoryResourceId_} + { + } + + MemoryResourceProxy* getMemoryResourceProxy() noexcept override + { + MemoryResourceRegistry::getInstance().clear(); + score::cpp::ignore = MemoryResourceRegistry::getInstance().insert_resource({memoryResourceId_, this}); + return &this->manager_; + } + + void* getBaseAddress() const noexcept override + { + return baseAddress_; + } + + void* getUsableBaseAddress() const noexcept override + { + return baseAddress_; + } + + std::size_t GetUserAllocatedBytes() const noexcept override + { + return allocatedMemory_; + }; + + const void* getEndAddress() const noexcept override + { + return endAddress_; + } + + bool IsOffsetPtrBoundsCheckBypassingEnabled() const noexcept override + { + return true; + } + + size_t getAllocatedMemory() const + { + return allocatedMemory_; + }; + + std::uint64_t getMemoryResourceId() const + { + return memoryResourceId_; + }; + + bool isAllocationPossible() const + { + return allocationPossible_; + }; + + void setAllocationPossible(const bool allocation_possible) + { + allocationPossible_ = allocation_possible; + }; + + private: + void* do_allocate(const std::size_t bytes, std::size_t) override + { + if (allocationPossible_) + { + allocatedMemory_ += bytes; + return std::malloc(bytes); + } + else + { + throw std::bad_alloc{}; + } + } + + void do_deallocate(void* memory, const std::size_t bytes, std::size_t) override + { + allocatedMemory_ -= bytes; + std::free(memory); + } + + bool do_is_equal(const memory_resource&) const noexcept override + { + return false; + } + + static std::uint64_t instanceId; + + void* const baseAddress_; + void* const endAddress_; + bool allocationPossible_; + 
std::size_t allocatedMemory_; + const std::uint64_t memoryResourceId_; + MemoryResourceProxy manager_; +}; + +} // namespace score::memory::shared::test + +#endif // SCORE_LIB_MEMORY_SHARED_FAKE_MYMEMORYRESOURCE_H diff --git a/score/memory/shared/flags/BUILD b/score/memory/shared/flags/BUILD new file mode 100644 index 000000000..1f69d11b5 --- /dev/null +++ b/score/memory/shared/flags/BUILD @@ -0,0 +1,30 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") + +bool_flag( + name = "use_typedshmd", + build_setting_default = True, +) + +config_setting( + name = "config_use_typedshmd", + flag_values = { + ":use_typedshmd": "True", + }, + visibility = [ + "//score/memory/shared/typedshm/typedshm_wrapper:__subpackages__", + "//score/memory/shared/typedshm/utils:__subpackages__", + ], +) diff --git a/score/memory/shared/flock/BUILD b/score/memory/shared/flock/BUILD new file mode 100644 index 000000000..ec431ea3a --- /dev/null +++ b/score/memory/shared/flock/BUILD @@ -0,0 +1,110 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +load("@rules_cc//cc:defs.bzl", "cc_library") +load("@score_baselibs//:bazel/unit_tests.bzl", "cc_gtest_unit_test", "cc_unit_test_suites_for_host_and_qnx") +load("@score_baselibs//score/language/safecpp:toolchain_features.bzl", "COMPILER_WARNING_FEATURES") + +cc_library( + name = "flock_mutex", + srcs = ["flock_mutex.cpp"], + hdrs = ["flock_mutex.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "@score_baselibs//score/mw/log:frontend", + "@score_baselibs//score/os:fcntl", + ], + tags = ["FFI"], + deps = [ + "//score/memory/shared:lock_file", + ], +) + +cc_library( + name = "exclusive_flock_mutex", + srcs = ["exclusive_flock_mutex.cpp"], + hdrs = ["exclusive_flock_mutex.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "@score_baselibs//score/os:fcntl", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", # platform_only + ], + deps = [ + ":flock_mutex", + "//score/memory/shared:lock_file", + ], +) + +cc_library( + name = "shared_flock_mutex", + srcs = ["shared_flock_mutex.cpp"], + hdrs = ["shared_flock_mutex.h"], + features = COMPILER_WARNING_FEATURES, + implementation_deps = [ + "@score_baselibs//score/os:fcntl", + ], + tags = ["FFI"], + visibility = [ + "//visibility:public", # platform_only + ], + deps = [ + ":flock_mutex", + "//score/memory/shared:lock_file", + ], +) + +cc_library( + name = "flock_mutex_and_lock", + srcs = ["flock_mutex_and_lock.cpp"], + hdrs = ["flock_mutex_and_lock.h"], + features = COMPILER_WARNING_FEATURES, + tags = ["FFI"], + visibility = [ + "//visibility:public", # platform_only + ], + deps = [ + "//score/memory/shared:lock_file", + ], +) + +cc_gtest_unit_test( + name 
= "unit_test", + srcs = [ + "flock_mutex_test.cpp", + ], + features = COMPILER_WARNING_FEATURES, + visibility = [ + "//score/memory:__pkg__", + ], + deps = [ + ":exclusive_flock_mutex", + ":shared_flock_mutex", + "@score_baselibs//score/mw/log:backend_stub_testutil", + "@score_baselibs//score/os:fcntl", + "@score_baselibs//score/os/mocklib:fcntl_mock", + "@score_baselibs//score/os/mocklib:stat_mock", + ], +) + +cc_unit_test_suites_for_host_and_qnx( + name = "unit_test_suite", + cc_unit_tests = [ + ":unit_test", + ], + visibility = [ + "//score/memory/shared:__pkg__", + ], +) diff --git a/score/memory/shared/flock/exclusive_flock_mutex.cpp b/score/memory/shared/flock/exclusive_flock_mutex.cpp new file mode 100644 index 000000000..e3e439952 --- /dev/null +++ b/score/memory/shared/flock/exclusive_flock_mutex.cpp @@ -0,0 +1,34 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/flock/exclusive_flock_mutex.h" + +#include "score/os/fcntl.h" + +namespace score::memory::shared +{ + +namespace +{ + +constexpr auto kLockExclusiveBlocking = os::Fcntl::Operation::kLockExclusive; +constexpr auto kLockExclusiveNonBlocking = + score::os::Fcntl::Operation::kLockExclusive | score::os::Fcntl::Operation::kLockNB; + +} // namespace + +ExclusiveFlockMutex::ExclusiveFlockMutex(const LockFile& lock_file) noexcept + : exclusive_flock_mutex_{lock_file, kLockExclusiveBlocking, kLockExclusiveNonBlocking} +{ +} + +} // namespace score::memory::shared diff --git a/score/memory/shared/flock/exclusive_flock_mutex.h b/score/memory/shared/flock/exclusive_flock_mutex.h new file mode 100644 index 000000000..663729d59 --- /dev/null +++ b/score/memory/shared/flock/exclusive_flock_mutex.h @@ -0,0 +1,46 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_FLOCK_EXCLUSIVE_FLOCK_MUTEX_H +#define SCORE_LIB_MEMORY_SHARED_FLOCK_EXCLUSIVE_FLOCK_MUTEX_H + +#include "score/memory/shared/flock/flock_mutex.h" +#include "score/memory/shared/lock_file.h" + +namespace score::memory::shared +{ + +class ExclusiveFlockMutex +{ + public: + explicit ExclusiveFlockMutex(const LockFile& lock_file) noexcept; + + void lock() noexcept + { + exclusive_flock_mutex_.lock(); + } + bool try_lock() noexcept + { + return exclusive_flock_mutex_.try_lock(); + } + void unlock() noexcept + { + exclusive_flock_mutex_.unlock(); + } + + private: + FlockMutex exclusive_flock_mutex_; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_FLOCK_EXCLUSIVE_FLOCK_MUTEX_H diff --git a/score/memory/shared/flock/flock_mutex.cpp b/score/memory/shared/flock/flock_mutex.cpp new file mode 100644 index 000000000..d03329408 --- /dev/null +++ b/score/memory/shared/flock/flock_mutex.cpp @@ -0,0 +1,68 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/flock/flock_mutex.h" + +#include "score/os/errno.h" +#include "score/mw/log/logging.h" + +#include + +namespace score::memory::shared +{ + +FlockMutex::FlockMutex(const LockFile& lock_file, + const score::os::Fcntl::Operation locking_operation, + const score::os::Fcntl::Operation try_locking_operation) noexcept + : file_descriptor_{lock_file.file_descriptor_}, + locking_operation_{locking_operation}, + try_locking_operation_{try_locking_operation} +{ +} + +void FlockMutex::lock() noexcept +{ + auto flock_result = score::os::Fcntl::instance().flock(file_descriptor_, locking_operation_); + if (!flock_result.has_value()) + { + score::mw::log::LogFatal("shm") << "Flock locking operation failed:" << flock_result.error().ToString(); + std::terminate(); + } +} + +bool FlockMutex::try_lock() noexcept +{ + auto flock_result = score::os::Fcntl::instance().flock(file_descriptor_, try_locking_operation_); + if (!flock_result.has_value()) + { + if (flock_result.error() == ::score::os::Error::createFromErrnoFlockSpecific(EWOULDBLOCK)) + { + return false; + } + score::mw::log::LogFatal("shm") << "Flock try locking operation failed:" << flock_result.error().ToString(); + std::terminate(); + } + return true; +} + +void FlockMutex::unlock() noexcept +{ + constexpr auto unlocking_operation = score::os::Fcntl::Operation::kUnLock; + auto flock_result = score::os::Fcntl::instance().flock(file_descriptor_, unlocking_operation); + if (!flock_result.has_value()) + { + score::mw::log::LogFatal("shm") << "Flock unlocking operation failed:" << flock_result.error().ToString(); + std::terminate(); + } +} + +} // namespace score::memory::shared 
diff --git a/score/memory/shared/flock/flock_mutex.h b/score/memory/shared/flock/flock_mutex.h new file mode 100644 index 000000000..8440ab168 --- /dev/null +++ b/score/memory/shared/flock/flock_mutex.h @@ -0,0 +1,42 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_FLOCK_FLOCK_MUTEX_H +#define SCORE_LIB_MEMORY_SHARED_FLOCK_FLOCK_MUTEX_H + +#include "score/memory/shared/lock_file.h" + +#include "score/os/fcntl.h" + +namespace score::memory::shared +{ + +class FlockMutex +{ + public: + FlockMutex(const LockFile& lock_file, + const score::os::Fcntl::Operation locking_operation, + const score::os::Fcntl::Operation try_locking_operation) noexcept; + + void lock() noexcept; + bool try_lock() noexcept; + void unlock() noexcept; + + private: + std::int32_t file_descriptor_; + score::os::Fcntl::Operation locking_operation_; + score::os::Fcntl::Operation try_locking_operation_; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_FLOCK_FLOCK_MUTEX_H diff --git a/score/memory/shared/flock/flock_mutex_and_lock.cpp b/score/memory/shared/flock/flock_mutex_and_lock.cpp new file mode 100644 index 000000000..1e9683840 --- /dev/null +++ b/score/memory/shared/flock/flock_mutex_and_lock.cpp @@ -0,0 +1,13 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed 
with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/flock/flock_mutex_and_lock.h" diff --git a/score/memory/shared/flock/flock_mutex_and_lock.h b/score/memory/shared/flock/flock_mutex_and_lock.h new file mode 100644 index 000000000..c139eee8d --- /dev/null +++ b/score/memory/shared/flock/flock_mutex_and_lock.h @@ -0,0 +1,52 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_FLOCK_FLOCK_MUTEX_AND_LOCK_H +#define SCORE_LIB_MEMORY_SHARED_FLOCK_FLOCK_MUTEX_AND_LOCK_H + +#include "score/memory/shared/lock_file.h" + +#include + +namespace score::memory::shared +{ + +template +class FlockMutexAndLock final +{ + public: + explicit FlockMutexAndLock(const memory::shared::LockFile& lock_file) noexcept + : mutex_{lock_file}, lock_{mutex_, std::defer_lock} + { + } + ~FlockMutexAndLock() noexcept = default; + + bool TryLock() noexcept + { + return lock_.try_lock(); + } + + // Since lock_ stores a reference to mutex_, we should not + // move the FlockMutexAndLock as it will invalidate this reference. 
+ FlockMutexAndLock(const FlockMutexAndLock&) = delete; + FlockMutexAndLock& operator=(const FlockMutexAndLock&) = delete; + FlockMutexAndLock(FlockMutexAndLock&&) = delete; + FlockMutexAndLock& operator=(FlockMutexAndLock&&) = delete; + + private: + T mutex_; + std::unique_lock lock_; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_FLOCK_FLOCK_MUTEX_AND_LOCK_H diff --git a/score/memory/shared/flock/flock_mutex_test.cpp b/score/memory/shared/flock/flock_mutex_test.cpp new file mode 100644 index 000000000..ad1c8ee39 --- /dev/null +++ b/score/memory/shared/flock/flock_mutex_test.cpp @@ -0,0 +1,193 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/flock/exclusive_flock_mutex.h" +#include "score/memory/shared/flock/shared_flock_mutex.h" +#include "score/memory/shared/lock_file.h" + +#include "score/os/fcntl.h" +#include "score/os/mocklib/fcntl_mock.h" +#include "score/os/mocklib/stat_mock.h" + +#include +#include +#include + +namespace score::memory::shared +{ +namespace +{ + +using Mode = ::score::os::Stat::Mode; +using Open = ::score::os::Fcntl::Open; + +using ::testing::_; +using ::testing::Return; + +#if defined(__QNX__) +// In QNX /tmp is just an "alias" for /dev/shmem/, but using the alias +// has a performance drawback! 
Therefore we use directly /dev/shmem/ (see Ticket-131757) +constexpr auto kLockFilePath{"/dev/shmem/flock_test_lock_file"}; +#else +constexpr auto kLockFilePath{"/tmp/flock_test_lock_file"}; +#endif + +score::os::Fcntl::Operation GetBlockingLockOperation(const ExclusiveFlockMutex&) noexcept +{ + return os::Fcntl::Operation::kLockExclusive; +} + +score::os::Fcntl::Operation GetBlockingLockOperation(const SharedFlockMutex&) noexcept +{ + return os::Fcntl::Operation::kLockShared; +} + +score::os::Fcntl::Operation GetNonBlockingLockOperation(const ExclusiveFlockMutex&) noexcept +{ + return os::Fcntl::Operation::kLockExclusive | score::os::Fcntl::Operation::kLockNB; +} + +score::os::Fcntl::Operation GetNonBlockingLockOperation(const SharedFlockMutex&) noexcept +{ + return os::Fcntl::Operation::kLockShared | score::os::Fcntl::Operation::kLockNB; +} + +template +class FlockTestFixture : public ::testing::Test +{ + public: + FlockTestFixture() + { + ON_CALL(*fcntl_mock_, open(testing::StrEq(kLockFilePath), _, _)).WillByDefault(Return(1)); + ON_CALL(*stat_mock_, chmod(testing::StrEq(kLockFilePath), _)).WillByDefault(Return(score::cpp::blank{})); + } + os::MockGuard fcntl_mock_{}; + os::MockGuard stat_mock_{}; + LockFile lock_file_{std::move(LockFile::Create(kLockFilePath).value())}; + T flock_mutex_{lock_file_}; +}; + +// Gtest will run all tests in the FlockLockTestFixture once for every type, t, in MyTypes, such that TypeParam +// == t for each run. 
+using MyTypes = ::testing::Types; +TYPED_TEST_SUITE(FlockTestFixture, MyTypes, ); + +TYPED_TEST(FlockTestFixture, LockWillReturnWhenFlockSucceeds) +{ + const auto blocking_lock_operation = GetBlockingLockOperation(this->flock_mutex_); + EXPECT_CALL(*this->fcntl_mock_, flock(_, blocking_lock_operation)).WillOnce(Return(score::cpp::blank{})); + this->flock_mutex_.lock(); +} + +TYPED_TEST(FlockTestFixture, LockWillTerminateWhenFlockFails) +{ + const auto blocking_lock_operation = GetBlockingLockOperation(this->flock_mutex_); + + auto lock_failure = [this, blocking_lock_operation] { + EXPECT_CALL(*this->fcntl_mock_, flock(_, blocking_lock_operation)) + .WillOnce(Return(score::cpp::unexpected<::score::os::Error>{::score::os::Error::createFromErrno(ENOENT)})); + this->flock_mutex_.lock(); + }; + EXPECT_DEATH(lock_failure(), ".*"); +} + +TYPED_TEST(FlockTestFixture, TryLockWillReturnTrueWhenFlockSucceeds) +{ + const auto non_blocking_lock_operation = GetNonBlockingLockOperation(this->flock_mutex_); + EXPECT_CALL(*this->fcntl_mock_, flock(_, non_blocking_lock_operation)).WillOnce(Return(score::cpp::blank{})); + EXPECT_TRUE(this->flock_mutex_.try_lock()); +} + +TYPED_TEST(FlockTestFixture, TryLockWillReturnFalseWhenFlockIsAlreadyBlocked) +{ + const auto non_blocking_lock_operation = GetNonBlockingLockOperation(this->flock_mutex_); + EXPECT_CALL(*this->fcntl_mock_, flock(_, non_blocking_lock_operation)) + .WillOnce(Return(score::cpp::unexpected<::score::os::Error>{::score::os::Error::createFromErrno(EWOULDBLOCK)})); + EXPECT_FALSE(this->flock_mutex_.try_lock()); +} + +TYPED_TEST(FlockTestFixture, TryLockWillTerminateWhenFlockFails) +{ + const auto non_blocking_lock_operation = GetNonBlockingLockOperation(this->flock_mutex_); + + auto try_lock_failure = [this, non_blocking_lock_operation] { + EXPECT_CALL(*this->fcntl_mock_, flock(_, non_blocking_lock_operation)) + .WillOnce(Return(score::cpp::unexpected<::score::os::Error>{::score::os::Error::createFromErrno(ENOENT)})); + 
this->flock_mutex_.try_lock(); + }; + EXPECT_DEATH(try_lock_failure(), ".*"); +} + +TYPED_TEST(FlockTestFixture, UnlockWillReturnWhenFlockSucceeds) +{ + const auto unlock_operation = score::os::Fcntl::Operation::kUnLock; + EXPECT_CALL(*this->fcntl_mock_, flock(_, unlock_operation)).WillOnce(Return(score::cpp::blank{})); + this->flock_mutex_.unlock(); +} + +TYPED_TEST(FlockTestFixture, UnlockWillTerminateWhenFlockFails) +{ + const auto unlock_operation = score::os::Fcntl::Operation::kUnLock; + + auto unlock_failure = [this] { + EXPECT_CALL(*this->fcntl_mock_, flock(_, unlock_operation)) + .WillOnce(Return(score::cpp::unexpected<::score::os::Error>{::score::os::Error::createFromErrno(ENOENT)})); + this->flock_mutex_.unlock(); + }; + EXPECT_DEATH(unlock_failure(), ".*"); +} + +TYPED_TEST(FlockTestFixture, CanCreateLockGuardFromFlockMutex) +{ + const auto blocking_lock_operation = GetBlockingLockOperation(this->flock_mutex_); + EXPECT_CALL(*this->fcntl_mock_, flock(_, blocking_lock_operation)).WillOnce(Return(score::cpp::blank{})); + + const auto unlock_operation = score::os::Fcntl::Operation::kUnLock; + EXPECT_CALL(*this->fcntl_mock_, flock(_, unlock_operation)).WillOnce(Return(score::cpp::blank{})); + + std::lock_guard lock{this->flock_mutex_}; +} + +TYPED_TEST(FlockTestFixture, CanCreateUniqueLockFromFlockMutex) +{ + const auto blocking_lock_operation = GetBlockingLockOperation(this->flock_mutex_); + EXPECT_CALL(*this->fcntl_mock_, flock(_, blocking_lock_operation)).WillOnce(Return(score::cpp::blank{})); + + const auto unlock_operation = score::os::Fcntl::Operation::kUnLock; + EXPECT_CALL(*this->fcntl_mock_, flock(_, unlock_operation)).WillOnce(Return(score::cpp::blank{})); + + std::unique_lock lock{this->flock_mutex_}; +} + +TYPED_TEST(FlockTestFixture, CanCreateUniqueLockFromFlockMutexAndTryLockWillReturnFlockTryLockSuccessfulResult) +{ + const auto non_blocking_lock_operation = GetNonBlockingLockOperation(this->flock_mutex_); + EXPECT_CALL(*this->fcntl_mock_, 
flock(_, non_blocking_lock_operation)).WillOnce(Return(score::cpp::blank{})); + + const auto unlock_operation = score::os::Fcntl::Operation::kUnLock; + EXPECT_CALL(*this->fcntl_mock_, flock(_, unlock_operation)).WillOnce(Return(score::cpp::blank{})); + + std::unique_lock lock{this->flock_mutex_, std::defer_lock}; + EXPECT_TRUE(lock.try_lock()); +} +TYPED_TEST(FlockTestFixture, CanCreateUniqueLockFromFlockMutexAndTryLockWillReturnFlockTryLockFailureResult) +{ + const auto non_blocking_lock_operation = GetNonBlockingLockOperation(this->flock_mutex_); + EXPECT_CALL(*this->fcntl_mock_, flock(_, non_blocking_lock_operation)) + .WillOnce(Return(score::cpp::unexpected<::score::os::Error>{::score::os::Error::createFromErrno(EWOULDBLOCK)})); + + std::unique_lock lock{this->flock_mutex_, std::defer_lock}; + EXPECT_FALSE(lock.try_lock()); +} + +} // namespace +} // namespace score::memory::shared diff --git a/score/memory/shared/flock/shared_flock_mutex.cpp b/score/memory/shared/flock/shared_flock_mutex.cpp new file mode 100644 index 000000000..dbdddcb2b --- /dev/null +++ b/score/memory/shared/flock/shared_flock_mutex.cpp @@ -0,0 +1,33 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/flock/shared_flock_mutex.h" + +#include "score/os/fcntl.h" + +namespace score::memory::shared +{ + +namespace +{ + +constexpr auto kLockSharedBlocking = os::Fcntl::Operation::kLockShared; +constexpr auto kLockSharedNonBlocking = score::os::Fcntl::Operation::kLockShared | score::os::Fcntl::Operation::kLockNB; + +} // namespace + +SharedFlockMutex::SharedFlockMutex(const LockFile& lock_file) noexcept + : shared_flock_mutex_{lock_file, kLockSharedBlocking, kLockSharedNonBlocking} +{ +} + +} // namespace score::memory::shared diff --git a/score/memory/shared/flock/shared_flock_mutex.h b/score/memory/shared/flock/shared_flock_mutex.h new file mode 100644 index 000000000..99ea95c32 --- /dev/null +++ b/score/memory/shared/flock/shared_flock_mutex.h @@ -0,0 +1,46 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_FLOCK_SHARED_FLOCK_MUTEX_H +#define SCORE_LIB_MEMORY_SHARED_FLOCK_SHARED_FLOCK_MUTEX_H + +#include "score/memory/shared/flock/flock_mutex.h" +#include "score/memory/shared/lock_file.h" + +namespace score::memory::shared +{ + +class SharedFlockMutex +{ + public: + explicit SharedFlockMutex(const LockFile& lock_file) noexcept; + + void lock() noexcept + { + shared_flock_mutex_.lock(); + } + bool try_lock() noexcept + { + return shared_flock_mutex_.try_lock(); + } + void unlock() noexcept + { + shared_flock_mutex_.unlock(); + } + + private: + FlockMutex shared_flock_mutex_; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_FLOCK_SHARED_FLOCK_MUTEX_H diff --git a/score/memory/shared/i_atomic.h b/score/memory/shared/i_atomic.h new file mode 100644 index 000000000..c422cdc3e --- /dev/null +++ b/score/memory/shared/i_atomic.h @@ -0,0 +1,21 @@ +// ******************************************************************************* +// Copyright (c) 2025 Contributors to the Eclipse Foundation +// +// See the NOTICE file(s) distributed with this work for additional +// information regarding copyright ownership. +// +// This program and the accompanying materials are made available under the +// terms of the Apache License Version 2.0 which is available at +// https://www.apache.org/licenses/LICENSE-2.0 +// +// SPDX-License-Identifier: Apache-2.0 +// ******************************************************************************* + +#ifndef SCORE_MEMORY_SHARED_I_ATOMIC_H +#define SCORE_MEMORY_SHARED_I_ATOMIC_H + +#warning "score/memory/shared/i_atomic.h is deprecated. 
Include score/concurrency/atomic/i_atomic.h instead." + +#include "score/concurrency/atomic/i_atomic.h" + +#endif // SCORE_MEMORY_SHARED_I_ATOMIC_H diff --git a/score/memory/shared/i_shared_memory_factory.cpp b/score/memory/shared/i_shared_memory_factory.cpp new file mode 100644 index 000000000..f5ab5f89b --- /dev/null +++ b/score/memory/shared/i_shared_memory_factory.cpp @@ -0,0 +1,13 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/i_shared_memory_factory.h" diff --git a/score/memory/shared/i_shared_memory_factory.h b/score/memory/shared/i_shared_memory_factory.h new file mode 100644 index 000000000..73c6e2ff5 --- /dev/null +++ b/score/memory/shared/i_shared_memory_factory.h @@ -0,0 +1,80 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_I_SHARED_MEMORY_FACTORY_H +#define SCORE_LIB_MEMORY_SHARED_I_SHARED_MEMORY_FACTORY_H + +#include "score/memory/shared/i_shared_memory_resource.h" +#include "score/memory/shared/typedshm/typedshm_wrapper/typed_memory.h" +#include "score/os/utils/acl/access_control_list.h" + +#include + +#include +#include + +namespace score::memory::shared +{ + +class ISharedMemoryFactory +{ + public: + using InitializeCallback = ISharedMemoryResource::InitializeCallback; + using UserPermissionsMap = ISharedMemoryResource::UserPermissionsMap; + using UserPermissions = ISharedMemoryResource::UserPermissions; + using AccessControl = ISharedMemoryResource::AccessControl; + + virtual std::shared_ptr Open(const std::string&, + const bool, + const std::optional>&) noexcept = 0; + + virtual std::shared_ptr Create(std::string, + InitializeCallback, + const std::size_t, + const UserPermissions&, + const bool) noexcept = 0; + + virtual std::shared_ptr CreateAnonymous(std::uint64_t, + InitializeCallback, + const std::size_t, + const UserPermissions&, + const bool) noexcept = 0; + + virtual std::shared_ptr CreateOrOpen(std::string, + InitializeCallback, + const std::size_t, + const ISharedMemoryFactory::AccessControl, + const bool) noexcept = 0; + + virtual void Remove(const std::string&) noexcept = 0; + + virtual void RemoveStaleArtefacts(const std::string& path) noexcept = 0; + + virtual void SetTypedMemoryProvider(std::shared_ptr) noexcept = 0; + + virtual std::size_t GetControlBlockSize() noexcept = 0; + + virtual void Clear() noexcept = 0; + + ISharedMemoryFactory() noexcept = default; + virtual ~ISharedMemoryFactory() = default; + + 
protected: + ISharedMemoryFactory(const ISharedMemoryFactory&) noexcept = default; + ISharedMemoryFactory(ISharedMemoryFactory&&) noexcept = default; + ISharedMemoryFactory& operator=(const ISharedMemoryFactory&) noexcept = default; + ISharedMemoryFactory& operator=(ISharedMemoryFactory&&) noexcept = default; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_I_SHARED_MEMORY_FACTORY_H diff --git a/score/memory/shared/i_shared_memory_resource.cpp b/score/memory/shared/i_shared_memory_resource.cpp new file mode 100644 index 000000000..b8feb1cb1 --- /dev/null +++ b/score/memory/shared/i_shared_memory_resource.cpp @@ -0,0 +1,13 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/i_shared_memory_resource.h" diff --git a/score/memory/shared/i_shared_memory_resource.h b/score/memory/shared/i_shared_memory_resource.h new file mode 100644 index 000000000..f691d8e17 --- /dev/null +++ b/score/memory/shared/i_shared_memory_resource.h @@ -0,0 +1,87 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_I_SHARED_MEMORY_RESOURCE_H +#define SCORE_LIB_MEMORY_SHARED_I_SHARED_MEMORY_RESOURCE_H + +#include "score/memory/shared/managed_memory_resource.h" +#include "score/memory/shared/user_permission.h" + +#include "score/os/acl.h" +#include "score/os/utils/acl/i_access_control_list.h" + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace score::memory::shared +{ + +class SharedMemoryResource; + +class ISharedMemoryResource : public ManagedMemoryResource +{ + public: + using WorldReadable = permission::WorldReadable; + using WorldWritable = permission::WorldWritable; + using UserPermissionsMap = permission::UserPermissionsMap; + using UserPermissions = permission::UserPermissions; + + // size of stored callback should be the base size of amp callback and a unique_ptr + // this way the user can pass any information to the callback through the pointer. + static constexpr std::size_t kCallbackSize{ + sizeof(score::cpp::callback) + + std::max(score::cpp::callback::alignment_t::value, sizeof(std::unique_ptr))}; + using InitializeCallback = score::cpp::callback), kCallbackSize>; + + using FileDescriptor = os::Acl::FileDescriptor; + + using AccessControlListFactory = score::cpp::callback(FileDescriptor)>; + + class AccessControl + { + public: + // Suppress "AUTOSAR C++14 M11-0-1" rule findings. This rule states: "Member data in non-POD class types shall + // be private.". + // Rationale: There are no class invariants to maintain which could be violated by directly accessing these + // member variables. 
+ // coverity[autosar_cpp14_m11_0_1_violation] + const UserPermissions& permissions_; + // coverity[autosar_cpp14_m11_0_1_violation] + const std::optional> allowedProviders_; + }; + + ISharedMemoryResource() noexcept = default; + ~ISharedMemoryResource() noexcept override = default; + + virtual const std::string* getPath() const noexcept = 0; + virtual void UnlinkFilesystemEntry() const noexcept = 0; + virtual FileDescriptor GetFileDescriptor() const noexcept = 0; + virtual bool IsShmInTypedMemory() const noexcept = 0; + virtual std::string_view GetIdentifier() const noexcept = 0; + + protected: + ISharedMemoryResource(const ISharedMemoryResource&) noexcept = default; + ISharedMemoryResource(ISharedMemoryResource&&) noexcept = default; + ISharedMemoryResource& operator=(const ISharedMemoryResource&) noexcept = default; + ISharedMemoryResource& operator=(ISharedMemoryResource&&) noexcept = default; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_I_SHARED_MEMORY_RESOURCE_H diff --git a/score/memory/shared/lock_file.cpp b/score/memory/shared/lock_file.cpp new file mode 100644 index 000000000..2f99f1ac7 --- /dev/null +++ b/score/memory/shared/lock_file.cpp @@ -0,0 +1,159 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/lock_file.h" + +#include "score/os/fcntl.h" +#include "score/os/stat.h" +#include "score/os/unistd.h" +#include "score/mw/log/logging.h" + +#include + +namespace score::memory::shared +{ + +namespace +{ + +using Mode = ::score::os::Stat::Mode; +constexpr auto kReadAccessForAll = Mode::kReadUser | Mode::kReadGroup | Mode::kReadOthers; + +} // namespace + +std::optional LockFile::Create(std::string path) noexcept +{ + using Open = ::score::os::Fcntl::Open; + constexpr auto opening_flags = Open::kCreate | Open::kExclusive | Open::kReadOnly; + // The reason for banning is, because it's error-prone to use. One should use abstractions e.g. provided by + // the C++ standard library. But these abstraction do not support exclusive access, which is why we created + // this abstraction library. + // NOLINTNEXTLINE(score-banned-function): See above. 
+ const auto create_result = ::score::os::Fcntl::instance().open(path.data(), opening_flags, kReadAccessForAll); + if (!create_result.has_value()) + { + score::mw::log::LogError("shm") << "LockFile::Create failed to open file: " << path + << " | Error: " << create_result.error().ToString(); + return {}; + } + auto result = score::os::Stat::instance().chmod(path.data(), kReadAccessForAll); + if (!result.has_value()) + { + score::mw::log::LogError("shm") << "LockFile::Create failed to chmod file: " << path + << " | Error: " << result.error().ToString(); + return {}; + } + constexpr bool owns_file{true}; + return LockFile{std::move(path), create_result.value(), owns_file}; +} + +std::optional LockFile::CreateOrOpen(std::string path, bool take_ownership) noexcept +{ + using Open = ::score::os::Fcntl::Open; + constexpr auto opening_flags = Open::kCreate | Open::kReadOnly; + // The reason for banning is, because it's error-prone to use. One should use abstractions e.g. provided by + // the C++ standard library. But these abstraction do not support exclusive access, which is why we created + // this abstraction library. + // NOLINTNEXTLINE(score-banned-function): See above. 
+ const auto create_result = ::score::os::Fcntl::instance().open(path.data(), opening_flags, kReadAccessForAll); + if (!create_result.has_value()) + { + score::mw::log::LogError("shm") << "LockFile::CreateOrOpen failed to open file: " << path + << " | Error: " << create_result.error().ToString(); + return {}; + } + auto result = score::os::Stat::instance().chmod(path.data(), kReadAccessForAll); + if (!result.has_value()) + { + score::mw::log::LogError("shm") << "LockFile::CreateOrOpen failed to chmod file: " << path + << " | Error: " << result.error().ToString(); + return {}; + } + return LockFile{std::move(path), create_result.value(), take_ownership}; +} + +std::optional LockFile::Open(std::string path) noexcept +{ + using Open = ::score::os::Fcntl::Open; + constexpr auto opening_flags = Open::kReadOnly; + // The reason for banning is, because it's error-prone to use. One should use abstractions e.g. provided by + // the C++ standard library. But these abstraction do not support exclusive access, which is why we created + // this abstraction library. + // NOLINTNEXTLINE(score-banned-function): See above. + const auto create_result = ::score::os::Fcntl::instance().open(path.data(), opening_flags, kReadAccessForAll); + if (!create_result.has_value()) + { + score::mw::log::LogError("shm") << "LockFile::Open failed to open file: " << path + << " | Error: " << create_result.error().ToString(); + return {}; + } + constexpr bool owns_file{false}; + return LockFile{std::move(path), create_result.value(), owns_file}; +} + +LockFile::LockFile(std::string path, const std::int32_t file_descriptor, bool owns_file) noexcept + : lock_file_path_{std::move(path)}, file_descriptor_{file_descriptor}, lock_file_owns_file_{owns_file} +{ +} + +void LockFile::CleanupFile() noexcept +{ + if (file_descriptor_ != -1) + { + // The reason for banning is, because it's error-prone to use. One should use abstractions e.g. provided by + // the C++ standard library. 
But these abstractions do not support exclusive access, which is why we created + // this abstraction library. + // NOLINTNEXTLINE(score-banned-function): See above. + score::cpp::ignore = ::score::os::Unistd::instance().close(file_descriptor_); + if (lock_file_owns_file_) + { + score::cpp::ignore = ::score::os::Unistd::instance().unlink(lock_file_path_.data()); + } + } +} + +LockFile::~LockFile() noexcept +{ + CleanupFile(); +} + +LockFile::LockFile(LockFile&& other) noexcept + : lock_file_path_{std::move(other.lock_file_path_)}, + file_descriptor_{other.file_descriptor_}, + lock_file_owns_file_{other.lock_file_owns_file_} +{ + // Prevent the moved-from LockFile from cleaning up the LockFile whose ownership was transferred + other.file_descriptor_ = -1; +} + +// Suppress "AUTOSAR C++14 A6-2-1" rule finding. The rule states "Move and copy assignment operators shall either move +// or respectively copy base classes and data members of a class, without any side effects.". +// Rationale: A LockFile is an RAII wrapper around a file. Therefore, it's required that a clean up of the moved-to +// LockFile is done during move assignment. 
+// coverity[autosar_cpp14_a6_2_1_violation] +LockFile& LockFile::operator=(LockFile&& other) & noexcept +{ + if (this != &other) + { + // Cleanup the moved-to LockFile + CleanupFile(); + lock_file_path_ = std::move(other.lock_file_path_); + file_descriptor_ = other.file_descriptor_; + lock_file_owns_file_ = other.lock_file_owns_file_; + + // Prevent the moved-from LockFile from cleaning up the LockFile whose ownership was transferred + other.file_descriptor_ = -1; + } + return *this; +} + +} // namespace score::memory::shared diff --git a/score/memory/shared/lock_file.h b/score/memory/shared/lock_file.h new file mode 100644 index 000000000..0c3e9370e --- /dev/null +++ b/score/memory/shared/lock_file.h @@ -0,0 +1,74 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_LOCK_FILE_H +#define SCORE_LIB_MEMORY_SHARED_LOCK_FILE_H + +#include +#include +#include + +namespace score::memory::shared +{ + +class FlockMutex; + +/// \brief RAII style class which manages a lock file in the file system +/// +/// The LockFile object can be created with Create, CreateOrOpen or Open. Creating a LockFile with Create will "own" the +/// file while CreateOrOpen or Open will not. If the LockFile "owns" the file, then the path will be closed and unlinked +/// on destruction of the LockFile object. 
If the LockFile does not "own" the file, then other LockFile objects can +/// still open the path until the LockFile which "owns" the file is destroyed. Ownership can also be explicitly taken by +/// calling TakeOwnership. +class LockFile +{ + // Suppress "AUTOSAR C++14 A11-3-1" rule finding: "Friend declarations shall not be used.". + // The 'friend' class is employed to encapsulate non-public members. + // This design choice protects end users from implementation details + // and prevents incorrect usage. Friend classes provide controlled + // access to private members, utilized internally, ensuring that + // end users cannot access implementation specifics. + // coverity[autosar_cpp14_a11_3_1_violation : FALSE] + friend FlockMutex; + + public: + static std::optional Create(std::string path) noexcept; + static std::optional CreateOrOpen(std::string path, bool take_ownership) noexcept; + static std::optional Open(std::string path) noexcept; + + void TakeOwnership() noexcept + { + lock_file_owns_file_ = true; + } + + ~LockFile() noexcept; + + LockFile(const LockFile&) = delete; + LockFile& operator=(const LockFile&) = delete; + + LockFile(LockFile&& other) noexcept; + LockFile& operator=(LockFile&& other) & noexcept; + + private: + LockFile(std::string path, const std::int32_t file_descriptor, bool owns_file) noexcept; + + void CleanupFile() noexcept; + + std::string lock_file_path_; + std::int32_t file_descriptor_; + + bool lock_file_owns_file_; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_LOCK_FILE_H diff --git a/score/memory/shared/lock_file_test.cpp b/score/memory/shared/lock_file_test.cpp new file mode 100644 index 000000000..a617c5eb2 --- /dev/null +++ b/score/memory/shared/lock_file_test.cpp @@ -0,0 +1,569 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional 
+ * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/lock_file.h" + +#include "score/os/errno.h" +#include "score/os/mocklib/fcntl_mock.h" +#include "score/os/mocklib/stat_mock.h" +#include "score/os/mocklib/unistdmock.h" +#include "score/os/stat.h" + +#include + +#include +#include +#include +#include +#include + +namespace score::memory::shared +{ +namespace +{ + +using ::testing::_; +using ::testing::InvokeWithoutArgs; +using ::testing::Return; +using ::testing::StrEq; + +using Mode = ::score::os::Stat::Mode; +using Open = ::score::os::Fcntl::Open; + +const std::string kLockFilePath{"/test_lock_file"}; +const std::int32_t kLockFileDescriptor{1234}; + +class LockFileTestFixture : public ::testing::Test +{ + protected: + os::MockGuard fcntl_mock_{}; + os::MockGuard stat_mock_{}; + os::MockGuard unistd_mock_{}; +}; + +using LockFileCreateFixture = LockFileTestFixture; +TEST_F(LockFileCreateFixture, LockFileShouldManageFileWithRaii) +{ + bool close_called{false}; + bool unlink_called{false}; + + // Expect that open is called on the file which returns a lock file descriptor + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)).WillOnce(Return(kLockFileDescriptor)); + + // Expect that chmod is called on the file to compensate for any umask-inflicted permission restrictions + EXPECT_CALL(*stat_mock_, chmod(StrEq(kLockFilePath.data()), _)) + .WillOnce(Return(score::cpp::expected_blank{})); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return {}; + })); + 
EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return {}; + })); + + { + // When we successfully Create a LockFile + const auto lock_file_result = LockFile::Create(kLockFilePath); + ASSERT_TRUE(lock_file_result.has_value()); + + // And the file won't be closed or unlinked + EXPECT_FALSE(close_called); + EXPECT_FALSE(unlink_called); + } + + // until the lock file is destroyed + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); +} + +TEST_F(LockFileCreateFixture, LockFileCreateShouldReturnEmptyIfOpenFails) +{ + // Expecting that open is called on the file and returns an error + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // When we Create a LockFile + const auto lock_file_result = LockFile::Create(kLockFilePath); + + // Then an empty result is returned + ASSERT_FALSE(lock_file_result.has_value()); +} + +TEST_F(LockFileCreateFixture, LockFileCreateDoesNotCloseAndUnlinkFileIfOpenFails) +{ + // Expecting open is called on the file and returns an error + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // Then close and unlink is never called + EXPECT_CALL(*unistd_mock_, close(_)).Times(0); + EXPECT_CALL(*unistd_mock_, unlink(_)).Times(0); + + // When we Create a LockFile + score::cpp::ignore = LockFile::Create(kLockFilePath); +} + +TEST_F(LockFileCreateFixture, LockFileCreateShouldReturnEmptyIfChmodFails) +{ + // Expect that open is called on the file which returns a lock file descriptor + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)).WillOnce(Return(kLockFileDescriptor)); + + // but expecting that chmod is called on the file which returns an error + EXPECT_CALL(*stat_mock_, 
chmod(StrEq(kLockFilePath.data()), _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // When we Create a LockFile + const auto lock_file_result = LockFile::Create(kLockFilePath); + + // Then an empty result is returned + ASSERT_FALSE(lock_file_result.has_value()); +} + +TEST_F(LockFileCreateFixture, LockFileCreateShouldCallOpenWithExclusiveFlag) +{ + const auto exclusive_opening_flags = Open::kExclusive | Open::kCreate | Open::kReadOnly; + + // Expect that open is called on the file with the exclusive flag set + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), exclusive_opening_flags, _)); + + // When we Create a LockFile + ASSERT_TRUE(LockFile::Create(kLockFilePath).has_value()); +} + +TEST_F(LockFileCreateFixture, IfLockFileCreateIsCalledThenLockFileShouldBeClosedAndUnlinkedOnDestruction) +{ + const auto exclusive_opening_flags = Open::kExclusive | Open::kCreate | Open::kReadOnly; + bool close_called{false}; + bool unlink_called{false}; + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), exclusive_opening_flags, _)) + .WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return score::cpp::blank{}; + })); + + // When we successfully Create a LockFile + auto lock_file_result = LockFile::Create(kLockFilePath); + ASSERT_TRUE(lock_file_result.has_value()); + } + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); +} + +using LockFileCreateOrOpenFixture = LockFileTestFixture; +TEST_F(LockFileCreateOrOpenFixture, 
LockFileCreateOrOpenShouldCallOpenWithoutExclusiveFlag) +{ + const auto opening_flags = Open::kCreate | Open::kReadOnly; + + // Expect that open is called on the file without the exclusive flag set + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), opening_flags, _)); + + // When we CreateOrOpen a LockFile + const bool take_ownership{false}; + ASSERT_TRUE(LockFile::CreateOrOpen(kLockFilePath, take_ownership).has_value()); +} + +TEST_F(LockFileCreateOrOpenFixture, LockFileCreateOrOpenShouldReturnEmptyIfOpenFails) +{ + // Expecting that open is called on the file and returns an error + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // When we CreateOrOpen a LockFile + const bool take_ownership{false}; + const auto lock_file_result = LockFile::CreateOrOpen(kLockFilePath, take_ownership); + + // Then an empty result is returned + ASSERT_FALSE(lock_file_result.has_value()); +} + +TEST_F(LockFileCreateOrOpenFixture, LockFileCreateOrOpenDoesNotCloseAndUnlinkFileIfOpenFails) +{ + // Expecting that open is called on the file and returns an error + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // Then close and unlink is never called + EXPECT_CALL(*unistd_mock_, close(_)).Times(0); + EXPECT_CALL(*unistd_mock_, unlink(_)).Times(0); + + // When we CreateOrOpen a LockFile + const bool take_ownership{false}; + score::cpp::ignore = LockFile::CreateOrOpen(kLockFilePath, take_ownership); +} + +TEST_F(LockFileCreateOrOpenFixture, LockFileCreateOrOpenShouldReturnEmptyIfChmodFails) +{ + // Expect that open is called on the file which returns a lock file descriptor + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)).WillOnce(Return(kLockFileDescriptor)); + + // but expecting that chmod is called on the file which returns an error + 
EXPECT_CALL(*stat_mock_, chmod(StrEq(kLockFilePath.data()), _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // When we CreateOrOpen a LockFile + const bool take_ownership{false}; + const auto lock_file_result = LockFile::CreateOrOpen(kLockFilePath, take_ownership); + + // Then an empty result is returned + ASSERT_FALSE(lock_file_result.has_value()); +} + +TEST_F(LockFileCreateOrOpenFixture, IfLockFileCreateOrOpenIsCalledThenLockFileShouldBeClosedButNotUnlinkedOnDestruction) +{ + const auto opening_flags = Open::kCreate | Open::kReadOnly; + bool close_called{false}; + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), opening_flags, _)) + .WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))).Times(0); + + // When we successfully CreateOrOpen a LockFile + const bool take_ownership{false}; + auto lock_file_result = LockFile::CreateOrOpen(kLockFilePath, take_ownership); + ASSERT_TRUE(lock_file_result.has_value()); + + EXPECT_FALSE(close_called); + } + EXPECT_TRUE(close_called); +} + +TEST_F(LockFileCreateOrOpenFixture, + IfLockFileCreateOrOpenWithOwnershipFlagSetIsCalledThenLockFileShouldBeClosedAndUnlinkedOnDestruction) +{ + const auto exclusive_opening_flags = Open::kCreate | Open::kReadOnly; + bool close_called{false}; + bool unlink_called{false}; + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), exclusive_opening_flags, _)) + .WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> 
score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return score::cpp::blank{}; + })); + + // When we successfully CreateOrOpen a LockFile with the take_ownership flag set + const bool take_ownership{true}; + auto lock_file_result = LockFile::CreateOrOpen(kLockFilePath, take_ownership); + ASSERT_TRUE(lock_file_result.has_value()); + + EXPECT_FALSE(close_called); + EXPECT_FALSE(unlink_called); + } + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); +} + +TEST_F(LockFileCreateOrOpenFixture, + IfLockFileCreateOrOpenAndTakeOwnershipIsCalledThenLockFileShouldBeClosedAndUnlinkedOnDestruction) +{ + const auto exclusive_opening_flags = Open::kCreate | Open::kReadOnly; + bool close_called{false}; + bool unlink_called{false}; + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), exclusive_opening_flags, _)) + .WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return score::cpp::blank{}; + })); + + // When we successfully CreateOrOpen a LockFile + const bool take_ownership{false}; + auto lock_file_result = LockFile::CreateOrOpen(kLockFilePath, take_ownership); + ASSERT_TRUE(lock_file_result.has_value()); + + // and TakeOwnerhips of the file + lock_file_result.value().TakeOwnership(); + + EXPECT_FALSE(close_called); + EXPECT_FALSE(unlink_called); + } + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); +} + 
+using LockFileOpenFixture = LockFileTestFixture; +TEST_F(LockFileOpenFixture, LockFileOpenShouldCallFcntlOpenWithReadFlagOnly) +{ + const auto opening_flags = Open::kReadOnly; + + // Expect that open is called on the file without the exclusive flag set + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), opening_flags, _)); + + // When we Open a LockFile + const auto lock_file_result = LockFile::Open(kLockFilePath); +} + +TEST_F(LockFileOpenFixture, LockFileOpenShouldReturnEmptyIfOpenFails) +{ + // Expecting that open is called on the file and returns an error + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // when we Open a LockFile + const auto lock_file_result = LockFile::Open(kLockFilePath); + + // Then an empty result is returned + ASSERT_FALSE(lock_file_result.has_value()); +} + +TEST_F(LockFileOpenFixture, LockFileOpenDoesNotCloseAndUnlinkFileIfOpenFails) +{ + // Expecting that open is called on the file and returns an error + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)) + .WillOnce(Return(score::cpp::make_unexpected(os::Error::createFromErrno(ENOENT)))); + + // Then close and unlink is never called + EXPECT_CALL(*unistd_mock_, close(_)).Times(0); + EXPECT_CALL(*unistd_mock_, unlink(_)).Times(0); + + // When we Open a LockFile + score::cpp::ignore = LockFile::Open(kLockFilePath); +} + +TEST_F(LockFileOpenFixture, IfLockFileOpenIsCalledThenLockFileShouldBeClosedButNotUnlinkedOnDestruction) +{ + const auto opening_flags = Open::kReadOnly; + bool close_called{false}; + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), opening_flags, _)) + .WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + 
close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))).Times(0); + + // When we successfully Open a LockFile + auto lock_file_result = LockFile::Open(kLockFilePath); + ASSERT_TRUE(lock_file_result.has_value()); + + EXPECT_FALSE(close_called); + } + EXPECT_TRUE(close_called); +} + +TEST_F(LockFileOpenFixture, IfLockFileOpenAndTakeOwnershipIsCalledThenLockFileShouldBeClosedAndUnlinkedOnDestruction) +{ + const auto exclusive_opening_flags = Open::kReadOnly; + bool close_called{false}; + bool unlink_called{false}; + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), exclusive_opening_flags, _)) + .WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return score::cpp::blank{}; + })); + + // When we successfully Open a LockFile + auto lock_file_result = LockFile::Open(kLockFilePath); + ASSERT_TRUE(lock_file_result.has_value()); + + // and TakeOwnerhips of the file + lock_file_result.value().TakeOwnership(); + + EXPECT_FALSE(close_called); + EXPECT_FALSE(unlink_called); + } + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); +} + +using LockFileMoveFixture = LockFileTestFixture; +TEST_F(LockFileMoveFixture, LockFileShouldNotRemoveFileOnMoveConstruction) +{ + bool close_called{false}; + bool unlink_called{false}; + + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)).WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, 
close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return score::cpp::blank{}; + })); + + // When we successfully Create a LockFile + auto lock_file_result = LockFile::Create(kLockFilePath); + ASSERT_TRUE(lock_file_result.has_value()); + + // and call the move constructor on a new LockFile + LockFile lock_file_2{std::move(lock_file_result.value())}; + + // Then the lock file won't be destroyed + EXPECT_FALSE(close_called); + EXPECT_FALSE(unlink_called); + } + // Until the new LockFile goes out of scope + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); +} + +TEST_F(LockFileMoveFixture, LockFileShouldRemoveFileOnMoveAssignment) +{ + const std::string lock_file_path_2{"/test_lock_file_2"}; + const std::int32_t lock_file_descriptor_2{5678}; + + int close_called{false}; + int close_called_2{false}; + int unlink_called{false}; + int unlink_called_2{false}; + + { + // Expect that open is called on both files + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)).WillOnce(Return(kLockFileDescriptor)); + EXPECT_CALL(*fcntl_mock_, open(StrEq(lock_file_path_2.data()), _, _)).WillOnce(Return(lock_file_descriptor_2)); + + // and both files are closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, close(lock_file_descriptor_2)) + .WillOnce(InvokeWithoutArgs([&close_called_2]() -> 
score::cpp::expected_blank { + close_called_2 = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(lock_file_path_2.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called_2]() -> score::cpp::expected_blank { + unlink_called_2 = true; + return score::cpp::blank{}; + })); + + // When we successfully Create 2 LockFiles + auto lock_file_result = LockFile::Create(kLockFilePath); + ASSERT_TRUE(lock_file_result.has_value()); + + auto lock_file_result_2 = LockFile::Create(lock_file_path_2); + ASSERT_TRUE(lock_file_result_2.has_value()); + + // And move assign the second to the first + lock_file_result.value() = std::move(lock_file_result_2.value()); + + // Then the first lock file should be immediately closed and unlinked + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); + } + // And the second should be closed and unlinked when it goes out of scope + EXPECT_TRUE(close_called_2); + EXPECT_TRUE(unlink_called_2); +} + +TEST_F(LockFileMoveFixture, LockFileShouldNotRemoveFileWhenMoveAssigningToItself) +{ + bool close_called{false}; + bool unlink_called{false}; + + { + // Expect that open is called on the file + EXPECT_CALL(*fcntl_mock_, open(StrEq(kLockFilePath.data()), _, _)).WillOnce(Return(kLockFileDescriptor)); + + // and the file is closed and unlinked + EXPECT_CALL(*unistd_mock_, close(kLockFileDescriptor)) + .WillOnce(InvokeWithoutArgs([&close_called]() -> score::cpp::expected_blank { + close_called = true; + return score::cpp::blank{}; + })); + EXPECT_CALL(*unistd_mock_, unlink(StrEq(kLockFilePath.data()))) + .WillOnce(InvokeWithoutArgs([&unlink_called]() -> score::cpp::expected_blank { + unlink_called = true; + return score::cpp::blank{}; + })); + + // Given a valid LockFile + auto lock_file_result = LockFile::Create(kLockFilePath); + ASSERT_TRUE(lock_file_result.has_value()); + + // When we move assign the lock file to itself + lock_file_result.value() = std::move(lock_file_result.value()); + + // Then the lock file won't 
be destroyed + EXPECT_FALSE(close_called); + EXPECT_FALSE(unlink_called); + } + // Until the new LockFile goes out of scope + EXPECT_TRUE(close_called); + EXPECT_TRUE(unlink_called); +} + +} // namespace +} // namespace score::memory::shared diff --git a/score/memory/shared/managed_memory_resource.cpp b/score/memory/shared/managed_memory_resource.cpp new file mode 100644 index 000000000..57665e97b --- /dev/null +++ b/score/memory/shared/managed_memory_resource.cpp @@ -0,0 +1,13 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "managed_memory_resource.h" diff --git a/score/memory/shared/managed_memory_resource.h b/score/memory/shared/managed_memory_resource.h new file mode 100644 index 000000000..5c682c652 --- /dev/null +++ b/score/memory/shared/managed_memory_resource.h @@ -0,0 +1,173 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_MANAGED_MEMORY_RESOURCE_H +#define SCORE_LIB_MEMORY_SHARED_MANAGED_MEMORY_RESOURCE_H + +#include +#include + +namespace score::memory::shared +{ +namespace test +{ +// Suppress "AUTOSAR C++14 M3-2-3" rule finding: "A type, object or function that is used in multiple translation units +// shall be declared in one and only one file.". +// The forward declaration of ManagedMemoryResourceTestAttorney is necessary to establish a friend relation with +// ManagedMemoryResource that facilitates easier testing. +// ManagedMemoryResourceTestAttorney is only declared once hence it is a false positive. +// coverity[autosar_cpp14_m3_2_3_violation : FALSE] +class ManagedMemoryResourceTestAttorney; +} // namespace test + +template +class PolymorphicOffsetPtrAllocator; + +// Suppress "AUTOSAR C++14 M3-2-3" rule finding: "A type, object or function that is used in multiple translation units +// shall be declared in one and only one file.". +// The forward declaration of MemoryResourceProxy is necessary to avoid cyclic dependencies and to ensure that the +// ManagedMemoryResource class can be declared without requiring the full definition of MemoryResourceProxy. This +// forward declaration does not violate the AUTOSAR C++14 M3-2-3 guideline as it is a common practice to handle +// dependencies in large codebases. The full definition of MemoryResourceProxy will be provided elsewhere in the +// codebase. 
+// coverity[autosar_cpp14_m3_2_3_violation] +class MemoryResourceProxy; + +/** + * \brief The ManagedMemoryResource extends the C++17 defined + * std::pmr::memory_resource by an interface to retrieve an so-called + * MemoryResourceProxy. + * This MemoryResourceProxy can then be shared (e.g. over shared memory) + * and be used to identify a ManagedMemoryResource to allocate memory. This is always then necessary + * when the underlying memory_resource cannot be shared. + * + * In our particular case the std::pmr::memory_resource cannot be shared over shared memory, + * since it include a v-table which is non-valid over process boundaries. + */ + +// Suppress "AUTOSAR C++14 M3-2-3" rule finding: "A type, object or function that is used in multiple translation units +// shall be declared in one and only one file.". +// this is false positive. ManagedMemoryResource is declared only once. +// coverity[autosar_cpp14_m3_2_3_violation : FALSE] +class ManagedMemoryResource : public ::score::cpp::pmr::memory_resource +{ + public: + ManagedMemoryResource() noexcept = default; + ~ManagedMemoryResource() noexcept override = default; + + /** + * @brief Construct T allocating underlying MemoryResource + * @tparam T The type that shall be constructed + * @tparam Args The argument types + * @param args The argument values to construct T + * @return T* The pointer to the constructed data type + */ + template + T* construct(Args&&... 
args) noexcept(std::is_nothrow_constructible_v) + { + void* const memory = this->allocate(sizeof(T), alignof(T)); + // Operator \c new doesn't allocate any new resources, instead of that preallocated buffer is + // NOLINTNEXTLINE(score-no-dynamic-raw-memory): used by placement new + return new (memory) T(std::forward(args)...); + } + + /** + * @brief Destruct and deallocate T* in underlying MemoryResource + * @tparam T The type that shall be destructed + * @param t The actual T instance that shall be destructed + */ + template >> + void destruct(T& t) + { + t.~T(); + this->deallocate(&t, sizeof(T)); + } + + /** + * @brief Get the start address of the memory region that this memory resource is managing + * @return void* start address of memory resource (e.g. mmap result) + */ + virtual void* getBaseAddress() const noexcept = 0; + + /** + * @brief Get the start address of the region available to a user of this memory resource. + * The memory resource may store some house keeping data (such as a control block) at the start of the memory + * region. This function will return the address after that to which the user can freely write. + * @return void* start address of memory resource after house keeping data + */ + virtual void* getUsableBaseAddress() const noexcept = 0; + + /** + * @brief brief Get the number of bytes allocated by the user in the memory region. + * Does not include any house keeping data (such as a control block) allocated by the memory resource. + * @return number of bytes already allocated by the user + */ + virtual std::size_t GetUserAllocatedBytes() const noexcept = 0; + + /** + * @brief Determines whether memory resource should bypass bounds checking when calling + * MemoryResourceRegistry::GetBoundsFromIdentifier with a memory identifier. + * @return false if bounds checking should be performed on the resource, otherwise, true. 
+ */ + virtual bool IsOffsetPtrBoundsCheckBypassingEnabled() const noexcept + { + return false; + } + + protected: + ManagedMemoryResource(const ManagedMemoryResource&) noexcept = default; + ManagedMemoryResource(ManagedMemoryResource&&) noexcept = default; + ManagedMemoryResource& operator=(const ManagedMemoryResource&) noexcept = default; + ManagedMemoryResource& operator=(ManagedMemoryResource&&) noexcept = default; + + private: + // Suppress "AUTOSAR C++14 A11-3-1" rule finding: "Friend declarations shall not be used.". + // The 'friend' class is employed to encapsulate non-public members. This design choice protects end users from + // implementation details and prevents incorrect usage. Friend classes provide controlled access to private members, + // utilized internally, ensuring that end users cannot access implementation specifics. + // This is for testing only + // coverity[autosar_cpp14_a11_3_1_violation] + friend class test::ManagedMemoryResourceTestAttorney; + + // PolymorphicOffsetPtrAllocator template is a friend to access getMemoryResourceProxy() in its constructor + // that accepts a ManagedMemoryResource reference. + // coverity[autosar_cpp14_a11_3_1_violation] + template + friend class PolymorphicOffsetPtrAllocator; + + // We make MemoryResourceRegistry a friend since it needs to access private internals of ManagedMemoryResource which + // we do not want to expose to the user via the public interface of ManagedMemoryResource. + // coverity[autosar_cpp14_a11_3_1_violation] + friend class MemoryResourceRegistry; + + /** + * @brief Get the end address of the memory region that this memory resource is managing + * @details Like with iterators, this returns a a past-the-end-address. E.g. the first byte, which + * isn't usable anymore/lies after the valid memory region of this resource. 
+ * @return void* past-the-end address of memory resource + */ + virtual const void* getEndAddress() const noexcept = 0; + + /** + * We need to return a raw pointer, since we need to convert this + * pointer into an OffsetPtr if it shall be stored in shared memory. + * @return MemoryResourceProxy* that identifies _this_ memory_resource. + */ + + /// \todo: getMemoryResourceProxy should not return a non const pointer and the method should also be marked const. + /// This issue will be investigated and fixed in Ticket-146625" + virtual const MemoryResourceProxy* getMemoryResourceProxy() noexcept = 0; +}; + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_MANAGED_MEMORY_RESOURCE_H diff --git a/score/memory/shared/managed_memory_resource_test.cpp b/score/memory/shared/managed_memory_resource_test.cpp new file mode 100644 index 000000000..179f8c2b7 --- /dev/null +++ b/score/memory/shared/managed_memory_resource_test.cpp @@ -0,0 +1,64 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/managed_memory_resource.h" + +#include "fake/my_memory_resource.h" +#include "shared_memory_test_resources.h" + +#include "gtest/gtest.h" + +#include + +namespace score::memory::shared::test +{ + +TEST(ManagedMemoryResource, offersToGetMemoryResourceManager) +{ + std::unique_ptr unit = std::make_unique(); + ManagedMemoryResourceTestAttorney attorney(*unit); + EXPECT_NE(attorney.getMemoryResourceProxy(), nullptr); +} + +TEST(ManagedMemoryResource, CanDestructImplByParentClass) +{ + std::unique_ptr unit = std::make_unique(); + unit.reset(); + EXPECT_EQ(unit.get(), nullptr); +} + +TEST(ManagedMemoryResource, CanConstructAndDestructSimpleType) +{ + std::unique_ptr unit = std::make_unique(); + auto* theAnswer = unit->construct(42U); + EXPECT_EQ(*theAnswer, 42U); + unit->destruct(*theAnswer); +} + +TEST(ManagedMemoryResource, CanConstructAndDestructComplexType) +{ + std::unique_ptr unit = std::make_unique(); + auto* theAnswer = unit->construct>(std::vector{1U, 2U, 3U, 4U}); + EXPECT_EQ(theAnswer->at(0), 1U); + EXPECT_EQ(theAnswer->at(3), 4U); + unit->destruct(*theAnswer); +} + +TEST(ManagedMemoryResource, CanConstructAndDestructComplexTypeWithMultipleConstructionParams) +{ + std::unique_ptr unit = std::make_unique(); + auto* theAnswer = unit->construct>(4U, std::allocator>{}); + EXPECT_EQ(theAnswer->size(), 4U); + unit->destruct(*theAnswer); +} + +} // namespace score::memory::shared::test diff --git a/score/memory/shared/map.cpp b/score/memory/shared/map.cpp new file mode 100644 index 000000000..fb9ecfc73 --- /dev/null +++ b/score/memory/shared/map.cpp @@ -0,0 +1,13 @@ 
+/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/map.h" diff --git a/score/memory/shared/map.h b/score/memory/shared/map.h new file mode 100644 index 000000000..6e5744a4b --- /dev/null +++ b/score/memory/shared/map.h @@ -0,0 +1,58 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_MAP_H +#define SCORE_LIB_MEMORY_SHARED_MAP_H + +#include "score/memory/shared/polymorphic_offset_ptr_allocator.h" + +#if defined(__linux__) +#include +#include +#endif // __linux__ + +#include +#include + +namespace score::memory::shared +{ + +/// \brief We provide our custom version of an std::map to ensure that it supports HEAP and SharedMemory usage with +/// our custom allocator. In addition we ensure nested container usage by using the scoped allocator adaptor. + +// Suppress "AUTOSAR C++14 A16-0-1" rule findings. 
This rule stated: "The pre-processor shall only be used for +// unconditional and conditional file inclusion and include guards, and using the following directives: (1) #ifndef, +// #ifdef, (3) #if, (4) #if defined, (5) #elif, (6) #else, (7) #define, (8) #endif, (9) #include.". +// Rationale: Pre-processor commands are used to allow different implementations for linux and QNX to exist in the same +// file. This keeps both implementations close (i.e. within the same functions) which makes the code easier to read and +// maintain. It also prevents compiler errors in linux code when compiling for QNX and vice versa. +// coverity[autosar_cpp14_a16_0_1_violation] +#if defined(__linux__) +template > +using Map = + boost::interprocess::map::value_type>>>; +// coverity[autosar_cpp14_a16_0_1_violation] Different implementation required for linux and QNX +#else +// on production with libc++ stl, we should use this! +template > +using Map = std:: + map::value_type>>>; +// coverity[autosar_cpp14_a16_0_1_violation] Different implementation required for linux and QNX +#endif // __linux__ + +} // namespace score::memory::shared + +#endif // SCORE_LIB_MEMORY_SHARED_MAP_H diff --git a/score/memory/shared/map_test.cpp b/score/memory/shared/map_test.cpp new file mode 100644 index 000000000..ef98c585e --- /dev/null +++ b/score/memory/shared/map_test.cpp @@ -0,0 +1,97 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/map.h" + +#include "score/memory/shared/fake/my_memory_resource.h" +#include "score/memory/shared/vector.h" + +#include "gtest/gtest.h" + +namespace score::memory::shared +{ +namespace +{ + +TEST(Map, AllocatesMemoryOnProvidedResource) +{ + // Given a Map of int to int, ensure that the Map uses the allocator to get its memory + test::MyMemoryResource memory{}; + Map unit{memory}; + auto before_allocating_vector = memory.getAllocatedMemory(); + + // When inserting an element + unit[1] = 1; + + // ...we expect memory to be allocated from our allocator, and more than "needed" since Map also needs memory for + // its bookkeeping + EXPECT_GT(memory.getAllocatedMemory(), before_allocating_vector + sizeof(int)); +} + +TEST(Map, InnerVectorAllocatesMemoryOnProvidedResource) +{ + // Given a Map of a Vector + test::MyMemoryResource memory{}; + Map> unit{memory}; + auto before_allocating_vector = memory.getAllocatedMemory(); + + // When constructing a Vector within the Map + unit.emplace(std::piecewise_construct, std::forward_as_tuple(1), std::forward_as_tuple(64, 42)); + + // Then the memory is allocated on the memory resource that was provided to the map (seamless passing of allocator) + EXPECT_GT(memory.getAllocatedMemory(), before_allocating_vector + (sizeof(std::uint8_t) * 64)); +} + +TEST(Map, InnerVectorAllocatesMemoryOnProvidedResourceByDefaultConstruction) +{ + // Given a Map of a Vector + test::MyMemoryResource memory{}; + Map> unit{memory}; + auto before_allocating_vector = memory.getAllocatedMemory(); + + // When default constructing the Vector + unit[1].resize(128); + + // Then the memory is allocated on the memory 
resource that was provided to the map (seamless passing of allocator) + EXPECT_GT(memory.getAllocatedMemory(), before_allocating_vector + (sizeof(std::uint8_t) * 128)); +} + +TEST(Map, UserConstructedVectorCanBeUsed) +{ + // Given a Map of a Vector + test::MyMemoryResource memory{}; + Map> unit{memory}; + auto before_allocating_vector = memory.getAllocatedMemory(); + + // When the user constructs a Vector by using the same allocator + unit[1] = Vector(128, 42, unit.get_allocator()); + + // Then the memory is allocated on the memory resource that was provided to the map (seamless passing of allocator) + EXPECT_GT(memory.getAllocatedMemory(), before_allocating_vector + (sizeof(std::uint8_t) * 128)); +} + +TEST(Map, MapInMapAllocatesMemoryFromCorrectRessource) +{ + // Given a Map of a Map + test::MyMemoryResource memory{}; + Map> unit{memory}; + auto before_allocating_vector = memory.getAllocatedMemory(); + + // When implicit constructing the inner map + unit[1][2] = 3; + + // Then the memory is allocated on the memory resource that was provided to the map (seamless passing of allocator) + EXPECT_GT(memory.getAllocatedMemory(), before_allocating_vector + (sizeof(std::uint8_t) * 2)); +} + +} // namespace +} // namespace score::memory::shared diff --git a/score/memory/shared/memory_region_bounds.cpp b/score/memory/shared/memory_region_bounds.cpp new file mode 100644 index 000000000..ebf920d82 --- /dev/null +++ b/score/memory/shared/memory_region_bounds.cpp @@ -0,0 +1,79 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/memory_region_bounds.h" + +#include + +namespace score::memory::shared +{ + +namespace +{ + +constexpr std::uintptr_t kInvalidAddress{0U}; + +bool AreBothValidOrBothInvalid(const std::uintptr_t start_address, const std::uintptr_t end_address) noexcept +{ + const bool both_valid = ((start_address == kInvalidAddress) && (end_address == kInvalidAddress)); + const bool both_invalid = ((start_address != kInvalidAddress) && (end_address != kInvalidAddress)); + return both_valid || both_invalid; +} + +} // namespace + +MemoryRegionBounds::MemoryRegionBounds() noexcept : MemoryRegionBounds{kInvalidAddress, kInvalidAddress} {} + +MemoryRegionBounds::MemoryRegionBounds(const std::uintptr_t start_address, const std::uintptr_t end_address) noexcept + : start_address_{start_address}, end_address_{end_address} +{ + SCORE_LANGUAGE_FUTURECPP_ASSERT_PRD(AreBothValidOrBothInvalid(start_address, end_address)); +} + +void MemoryRegionBounds::Set(const std::uintptr_t start_address, const std::uintptr_t end_address) noexcept +{ + SCORE_LANGUAGE_FUTURECPP_ASSERT_PRD(AreBothValidOrBothInvalid(start_address, end_address)); + start_address_ = start_address; + end_address_ = end_address; +} + +void MemoryRegionBounds::Reset() noexcept +{ + Set(kInvalidAddress, kInvalidAddress); +} + +bool MemoryRegionBounds::has_value() const noexcept +{ + return !((start_address_ == kInvalidAddress) || (end_address_ == kInvalidAddress)); +} + +std::uintptr_t MemoryRegionBounds::GetStartAddress() const noexcept +{ + return start_address_; +} + +std::uintptr_t MemoryRegionBounds::GetEndAddress() const noexcept +{ + return end_address_; +} + +bool 
operator==(const MemoryRegionBounds& lhs, const MemoryRegionBounds& rhs) noexcept +{ + return ((lhs.GetStartAddress() == rhs.GetStartAddress()) && (lhs.GetEndAddress() == rhs.GetEndAddress())); +} + +bool operator!=(const MemoryRegionBounds& lhs, const MemoryRegionBounds& rhs) noexcept +{ + return !(lhs == rhs); +} + +} // namespace score::memory::shared diff --git a/score/memory/shared/memory_region_bounds.h b/score/memory/shared/memory_region_bounds.h new file mode 100644 index 000000000..3dbaf668d --- /dev/null +++ b/score/memory/shared/memory_region_bounds.h @@ -0,0 +1,49 @@ +/******************************************************************************** + * Copyright (c) 2025 Contributors to the Eclipse Foundation + * + * See the NOTICE file(s) distributed with this work for additional + * information regarding copyright ownership. + * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#ifndef SCORE_LIB_MEMORY_SHARED_MEMORY_REGION_BOUNDS_H +#define SCORE_LIB_MEMORY_SHARED_MEMORY_REGION_BOUNDS_H + +#include + +namespace score::memory::shared +{ + +/// \brief Class which stores an optional start / end address of a memory region. +/// +/// It uses sentinel values to represent that it is empty i.e. that it doesn't store memory bounds. We do this instead +/// of using std::optional to be as performant as possible. The addresses are stored as std::uintptr_t and can be +/// coverted back to pointers using CastIntegerToPointer in pointer_arithmetic_util. 
class MemoryRegionBounds
{
  public:
    /// \brief Creates an empty bounds object (no addresses stored).
    MemoryRegionBounds() noexcept;

    /// \brief Creates a bounds object from a start / end address pair.
    /// Either both addresses are valid or both are the empty sentinel; a mixed
    /// pair terminates the program (asserted in the .cpp).
    MemoryRegionBounds(const std::uintptr_t start_address, const std::uintptr_t end_address) noexcept;

    /// \brief Overwrites both addresses; same invariant as the two-argument constructor.
    void Set(const std::uintptr_t start_address, const std::uintptr_t end_address) noexcept;

    /// \brief Clears the object back to the empty (sentinel) state.
    void Reset() noexcept;

    /// \brief Returns true iff bounds are currently stored (std::optional-like API).
    bool has_value() const noexcept;

    /// \brief Raw address accessors; return the sentinel when empty.
    std::uintptr_t GetStartAddress() const noexcept;
    std::uintptr_t GetEndAddress() const noexcept;

  private:
    // Both members hold the invalid-address sentinel when the object is empty.
    std::uintptr_t start_address_;
    std::uintptr_t end_address_;
};

/// \brief Two bounds are equal iff their start and end addresses both match.
bool operator==(const MemoryRegionBounds& lhs, const MemoryRegionBounds& rhs) noexcept;
bool operator!=(const MemoryRegionBounds& lhs, const MemoryRegionBounds& rhs) noexcept;

} // namespace score::memory::shared

#endif // SCORE_LIB_MEMORY_SHARED_MEMORY_REGION_BOUNDS_H
diff --git a/score/memory/shared/memory_region_bounds_test.cpp b/score/memory/shared/memory_region_bounds_test.cpp
new file mode 100644
index 000000000..b0d772be7
--- /dev/null
+++ b/score/memory/shared/memory_region_bounds_test.cpp
@@ -0,0 +1,180 @@
/********************************************************************************
 * Copyright (c) 2025 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
+ * + * This program and the accompanying materials are made available under the + * terms of the Apache License Version 2.0 which is available at + * https://www.apache.org/licenses/LICENSE-2.0 + * + * SPDX-License-Identifier: Apache-2.0 + ********************************************************************************/ +#include "score/memory/shared/memory_region_bounds.h" + +#include +#include + +namespace score::memory::shared::test +{ +namespace +{ + +using namespace ::testing; + +constexpr std::uintptr_t kValidStartAddress{10U}; +constexpr std::uintptr_t kValidEndAddress{20U}; + +constexpr std::uintptr_t kInvalidStartAddress{0U}; +constexpr std::uintptr_t kInvalidEndAddress{0U}; + +TEST(MemoryRegionBoundsTest, DefaultConstructingHasNoValue) +{ + // When default constructing a MemoryRegionBounds + const MemoryRegionBounds memory_region_bounds{}; + + // Then it has no value + EXPECT_FALSE(memory_region_bounds.has_value()); +} + +TEST(MemoryRegionBoundsTest, ConstructingWithValuesMarksHasValue) +{ + // When constructing a MemoryRegionBounds with valid bounds + const MemoryRegionBounds memory_region_bounds{kValidStartAddress, kValidEndAddress}; + + // Then it has a value + EXPECT_TRUE(memory_region_bounds.has_value()); +} + +TEST(MemoryRegionBoundsTest, ConstructingWithInvalidValuesMarksHasNoValue) +{ + // When constructing a MemoryRegionBounds with invalid bounds (representing nullptrs) + const MemoryRegionBounds memory_region_bounds{kInvalidStartAddress, kInvalidEndAddress}; + + // Then it has no value + EXPECT_FALSE(memory_region_bounds.has_value()); +} + +TEST(MemoryRegionBoundsTest, GettingAddressesReturnsValuesPassedToConstructor) +{ + // Given a MemoryRegionBounds constructed with valid bounds + const MemoryRegionBounds memory_region_bounds{kValidStartAddress, kValidEndAddress}; + + // When getting the start and end addresses + const auto actual_start_address = memory_region_bounds.GetStartAddress(); + const auto actual_end_address = 
memory_region_bounds.GetEndAddress(); + + // Then the addresses are the same as those passed to the constructor + EXPECT_EQ(actual_start_address, kValidStartAddress); + EXPECT_EQ(actual_end_address, kValidEndAddress); +} + +TEST(MemoryRegionBoundsTest, SettingAddressesUpdatesAddresses) +{ + // Given a default constructed MemoryRegionBounds + MemoryRegionBounds memory_region_bounds{}; + + // When setting the addresses with valid values + memory_region_bounds.Set(kValidStartAddress, kValidEndAddress); + + // Then the addresses are the same as those passed to the Set function + const auto actual_start_address = memory_region_bounds.GetStartAddress(); + const auto actual_end_address = memory_region_bounds.GetEndAddress(); + EXPECT_EQ(actual_start_address, kValidStartAddress); + EXPECT_EQ(actual_end_address, kValidEndAddress); +} + +TEST(MemoryRegionBoundsTest, SettingMarksHasValue) +{ + // Given a default constructed MemoryRegionBounds + MemoryRegionBounds memory_region_bounds{}; + + // When setting the addresses with valid values + memory_region_bounds.Set(kValidStartAddress, kValidEndAddress); + + // Then it has a value + EXPECT_TRUE(memory_region_bounds.has_value()); +} + +TEST(MemoryRegionBoundsTest, ResettingClearsAddresses) +{ + // Given a MemoryRegionBounds constructed with valid bounds + MemoryRegionBounds memory_region_bounds{kValidStartAddress, kValidEndAddress}; + + // When calling Reset + memory_region_bounds.Reset(); + + // Then the addresses are invalid + const auto actual_start_address = memory_region_bounds.GetStartAddress(); + const auto actual_end_address = memory_region_bounds.GetEndAddress(); + EXPECT_EQ(actual_start_address, kInvalidStartAddress); + EXPECT_EQ(actual_end_address, kInvalidEndAddress); +} + +TEST(MemoryRegionBoundsTest, ResettingMarksHasNoValue) +{ + // Given a MemoryRegionBounds constructed with valid bounds + MemoryRegionBounds memory_region_bounds{kValidStartAddress, kValidEndAddress}; + + // When calling Reset + 
memory_region_bounds.Reset(); + + // Then it has no value + EXPECT_FALSE(memory_region_bounds.has_value()); +} + +TEST(MemoryRegionBoundsDeathTest, ConstructingWithOneValidAndOneInvalidValueTerminates) +{ + // When constructing a MemoryRegionBounds with one valid and one invalid address + // Then we terminate + EXPECT_DEATH(MemoryRegionBounds(kValidStartAddress, kInvalidEndAddress), ".*"); +} + +TEST(MemoryRegionBoundsDeathTest, SettingOneValidAndOneInvalidValueTerminates) +{ + // Given a default constructed MemoryRegionBounds + MemoryRegionBounds memory_region_bounds{}; + + // When setting the addresses with one valid and one invalid address + // Then we terminate + EXPECT_DEATH(memory_region_bounds.Set(kValidStartAddress, kInvalidEndAddress), ".*"); +} + +TEST(MemoryRegionBoundsEqualToOperatorTest, ComparingTwoMemoryRegionBoundsWithSameValidAddressesReturnsTrue) +{ + // Given two MemoryRegionBounds constructed with the same valid bounds + const MemoryRegionBounds memory_region_bounds_1{kValidStartAddress, kValidEndAddress}; + const MemoryRegionBounds memory_region_bounds_2{kValidStartAddress, kValidEndAddress}; + + // When comparing the two + // Then the result is true + EXPECT_TRUE(memory_region_bounds_1 == memory_region_bounds_2); + EXPECT_FALSE(memory_region_bounds_1 != memory_region_bounds_2); +} + +TEST(MemoryRegionBoundsEqualToOperatorTest, ComparingTwoMemoryRegionBoundsWithDifferentStartAddressesReturnsFalse) +{ + // Given two MemoryRegionBounds constructed with the same valid end address but different start addresses + const MemoryRegionBounds memory_region_bounds_1{kValidStartAddress, kValidEndAddress}; + const MemoryRegionBounds memory_region_bounds_2{kValidStartAddress + 1U, kValidEndAddress}; + + // When comparing the two + // Then the result is false + EXPECT_FALSE(memory_region_bounds_1 == memory_region_bounds_2); + EXPECT_TRUE(memory_region_bounds_1 != memory_region_bounds_2); +} + +TEST(MemoryRegionBoundsEqualToOperatorTest, 
ComparingTwoMemoryRegionBoundsWithDifferentEndAddressesReturnsFalse)
{
    // Given two MemoryRegionBounds constructed with the same valid start address but different end addresses
    const MemoryRegionBounds memory_region_bounds_1{kValidStartAddress, kValidEndAddress};
    const MemoryRegionBounds memory_region_bounds_2{kValidStartAddress, kValidEndAddress + 1U};

    // When comparing the two
    // Then the result is false
    EXPECT_FALSE(memory_region_bounds_1 == memory_region_bounds_2);
    EXPECT_TRUE(memory_region_bounds_1 != memory_region_bounds_2);
}

} // namespace
} // namespace score::memory::shared::test
diff --git a/score/memory/shared/memory_region_map.cpp b/score/memory/shared/memory_region_map.cpp
new file mode 100644
index 000000000..f2750c43f
--- /dev/null
+++ b/score/memory/shared/memory_region_map.cpp
@@ -0,0 +1,382 @@
/********************************************************************************
 * Copyright (c) 2025 Contributors to the Eclipse Foundation
 *
 * See the NOTICE file(s) distributed with this work for additional
 * information regarding copyright ownership.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * SPDX-License-Identifier: Apache-2.0
 ********************************************************************************/
#include "score/memory/shared/memory_region_map.h"

#include "score/mw/log/logging.h"

// NOTE(review): the include targets of the eleven bare #include directives
// below were lost in this paste (angle-bracket contents stripped); restore
// them from the original diff.
#include
#include

#include
#include
#include
#include
#include
#include
#include
#include
#include

namespace score::memory::shared::detail
{

namespace
{

// Checks whether the region referenced by region_it overlaps either of its
// immediate neighbours in the address-ordered map (key = region start address,
// mapped value = region end address, as used below).
// NOTE(review): the std::map template arguments in this signature were
// stripped by the paste — presumably std::map<std::uintptr_t, std::uintptr_t>;
// confirm against the original diff.
bool DoesRegionIteratorOverlapWithExistingRegionInMap(
    const std::map::const_iterator region_it,
    const std::map& map) noexcept
{
    // A single-entry map holds only the region itself, so no neighbour can overlap.
    if (map.size() == 1U)
    {
        return false;
    }

    // If region isn't the first region (i.e. region with the smallest start / end addresses) in the map, ensure that
    // the start of the current region is larger than the end of the previous region.
    if (region_it != map.cbegin())
    {
        auto previous_region_it = std::prev(region_it);

        const auto previous_region_end_address = previous_region_it->second;
        const auto current_region_start_address = region_it->first;

        // Strict comparison: a region whose start equals the previous region's
        // end is not treated as overlapping.
        if (current_region_start_address < previous_region_end_address)
        {
            return true;
        }
    }

    // If region isn't the last region (i.e. region with the largest start / end addresses) in the map, ensure that the
    // end of the current region is smaller than the start of the next region.
    auto next_region_it = std::next(region_it);
    if (next_region_it != map.cend())
    {
        const auto next_region_start_address = next_region_it->first;
        const auto current_region_end_address = region_it->second;

        if (current_region_end_address > next_region_start_address)
        {
            return true;
        }
    }
    return false;
}

} // namespace

template