Skip to content

Commit 6e98c8a

Browse files
committed
ace: zephyr: alloc: Use virtual memory heap for buffers
Use virtual memory heap to allocate buffers for ace platforms. The new buffer allocation mechanism uses buffers of a predefined size. Each core has a dedicated region of the virtual address space from which buffers are allocated. Signed-off-by: Adrian Warecki <adrian.warecki@intel.com>
1 parent c4cb1fc commit 6e98c8a

3 files changed

Lines changed: 121 additions & 1 deletion

File tree

zephyr/Kconfig

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -82,4 +82,12 @@ config SOF_ZEPHYR_NO_SOF_CLOCK
8282
Do not use SOF clk.h interface to set the DSP clock frequency.
8383
Requires implementation of platform/lib/clk.h.
8484

85+
config VIRTUAL_HEAP
	bool "Use virtual memory heap to allocate buffers"
	default y if ACE
	default n
	depends on ACE
	help
	  Enabling this option will use the virtual memory heap allocator to
	  allocate buffers. It is based on a set of buffers whose sizes are
	  predetermined.
8593
endif

zephyr/include/sof/lib/regions_mm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
* either be spanned on specifically configured heap or have
3737
* individual configs with bigger block sizes.
3838
*/
39-
#define MAX_MEMORY_ALLOCATORS_COUNT 8
39+
#define MAX_MEMORY_ALLOCATORS_COUNT 10
4040

4141
/* vmh_get_default_heap_config() function will try to split the region
4242
* down the given count. Only applicable when API client did not

zephyr/lib/alloc.c

Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,18 @@
1818
#include <sof/trace/trace.h>
1919
#include <rtos/symbol.h>
2020
#include <rtos/wait.h>
21+
#if CONFIG_VIRTUAL_HEAP
#include <sof/lib/regions_mm.h>

/*
 * Per-core virtual memory heaps used for buffer allocation. An entry may be
 * NULL when heap initialization failed for that core; in that case buffer
 * allocations on the core fall back to the regular system heap.
 */
struct vmh_heap *virtual_buffers_heap[CONFIG_MP_MAX_NUM_CPUS];
struct k_spinlock vmh_lock;

#undef HEAPMEM_SIZE
/* Buffers are allocated from virtual space so we can safely reduce the heap size. */
#define HEAPMEM_SIZE 0x40000
#endif /* CONFIG_VIRTUAL_HEAP */
32+
2133

2234
/* Zephyr includes */
2335
#include <zephyr/init.h>
@@ -193,6 +205,89 @@ static void l3_heap_free(struct k_heap *h, void *mem)
193205

194206
#endif
195207

208+
#if CONFIG_VIRTUAL_HEAP
209+
static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
210+
uint32_t align)
211+
{
212+
void *mem = vmh_alloc(heap, bytes);
213+
214+
if (!mem)
215+
return NULL;
216+
217+
assert(IS_ALIGNED(mem, align));
218+
219+
if (flags & SOF_MEM_FLAG_COHERENT)
220+
return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);
221+
222+
return mem;
223+
}
224+
225+
/**
226+
* Checks whether pointer is from virtual memory range.
227+
* @param ptr Pointer to memory being checked.
228+
* @return True if pointer falls into virtual memory region, false otherwise.
229+
*/
230+
static bool is_virtual_heap_pointer(void *ptr)
231+
{
232+
uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
233+
HEAPMEM_SIZE;
234+
uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;
235+
236+
if (!is_cached(ptr))
237+
ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
238+
239+
return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
240+
(POINTER_TO_UINT(ptr) < virtual_heap_end));
241+
}
242+
243+
static void virtual_heap_free(void *ptr)
244+
{
245+
struct vmh_heap *const heap = virtual_buffers_heap[cpu_get_id()];
246+
int ret;
247+
248+
ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
249+
250+
ret = vmh_free(heap, ptr);
251+
if (ret)
252+
tr_err(&zephyr_tr, "Unable to free %p! %d", ptr, ret);
253+
}
254+
255+
static const struct vmh_heap_config static_hp_buffers = {
256+
{
257+
{ 128, 32},
258+
{ 512, 8},
259+
{ 1024, 44},
260+
{ 2048, 8},
261+
{ 4096, 11},
262+
{ 8192, 10},
263+
{ 65536, 3},
264+
{ 131072, 1},
265+
{ 524288, 1} /* buffer for kpb */
266+
},
267+
};
268+
269+
static int virtual_heap_init(void)
270+
{
271+
int core;
272+
273+
k_spinlock_init(&vmh_lock);
274+
275+
for (core = 0; core < CONFIG_MP_MAX_NUM_CPUS; core++) {
276+
struct vmh_heap *heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_CORE_HEAP,
277+
core, false);
278+
if (!heap)
279+
tr_err(&zephyr_tr, "Unable to init virtual heap for core %d!", core);
280+
281+
virtual_buffers_heap[core] = heap;
282+
}
283+
284+
return 0;
285+
}
286+
287+
SYS_INIT(virtual_heap_init, POST_KERNEL, 1);
288+
289+
#endif /* CONFIG_VIRTUAL_HEAP */
290+
196291
static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
197292
{
198293
k_spinlock_key_t key;
@@ -384,6 +479,9 @@ EXPORT_SYMBOL(rzalloc);
384479
void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
385480
uint32_t align)
386481
{
482+
#if CONFIG_VIRTUAL_HEAP
483+
struct vmh_heap *virtual_heap;
484+
#endif
387485
struct k_heap *heap;
388486

389487
/* choose a heap */
@@ -399,6 +497,13 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
399497
heap = &sof_heap;
400498
}
401499

500+
#if CONFIG_VIRTUAL_HEAP
501+
/* Use virtual heap if it is available */
502+
virtual_heap = virtual_buffers_heap[cpu_get_id()];
503+
if (virtual_heap)
504+
return virtual_heap_alloc(virtual_heap, flags, caps, bytes, align);
505+
#endif /* CONFIG_VIRTUAL_HEAP */
506+
402507
if (flags & SOF_MEM_FLAG_COHERENT)
403508
return heap_alloc_aligned(heap, align, bytes);
404509

@@ -421,6 +526,13 @@ void rfree(void *ptr)
421526
}
422527
#endif
423528

529+
#if CONFIG_VIRTUAL_HEAP
530+
if (is_virtual_heap_pointer(ptr)) {
531+
virtual_heap_free(ptr);
532+
return;
533+
}
534+
#endif
535+
424536
heap_free(&sof_heap, ptr);
425537
}
426538
EXPORT_SYMBOL(rfree);

0 commit comments

Comments
 (0)