Skip to content

Commit 50a8774

Browse files
committed
zephyr: lib: alloc: Use cached memory for L3 Heap
This patch implements the recommended hardware flow for Intel ACE platforms: the L3 heap should be accessed via cached pointers, including its management data. Signed-off-by: Jaroslaw Stelter <Jaroslaw.Stelter@intel.com>
1 parent 16b53b4 commit 50a8774

1 file changed

Lines changed: 72 additions & 6 deletions

File tree

zephyr/lib/alloc.c

Lines changed: 72 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -116,8 +116,8 @@ static inline uintptr_t get_l3_heap_start(void)
116116
* - main_fw_load_offset
117117
* - main fw size in manifest
118118
*/
119-
return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
120-
ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
119+
return (uintptr_t)((void __sparse_cache *)
120+
ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
121121
}
122122

123123
/**
@@ -145,14 +145,71 @@ static bool is_l3_heap_pointer(void *ptr)
145145
uintptr_t l3_heap_start = get_l3_heap_start();
146146
uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
147147

148-
if (is_cached(ptr))
149-
ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
150-
151148
if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
152149
return true;
153150

154151
return false;
155152
}
153+
154+
/*
 * Allocate @bytes from L3 heap @h, aligned to at least @min_align.
 * The L3 heap may only be used from the primary core: any other
 * core logs an error and panics. Returns the raw (uncast) pointer
 * from sys_heap, or NULL on allocation failure.
 */
static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
{
	if (!cpu_is_primary(arch_proc_id())) {
		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
		k_panic();
	}

	/* Serialize heap metadata access against concurrent alloc/free. */
	k_spinlock_key_t lock_key = k_spin_lock(&h->lock);
	void *mem = sys_heap_aligned_alloc(&h->heap, min_align, bytes);

	k_spin_unlock(&h->lock, lock_key);

#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
	/* Report heap occupancy after each allocation for diagnostics. */
	struct sys_memory_stats stats;

	sys_heap_runtime_stats_get(&h->heap, &stats);
	tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
		stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
#endif

	return mem;
}
178+
179+
/*
 * Cached allocation from the L3 heap.
 *
 * Zephyr sys_heap stores metadata at the start of each heap
 * allocation. To ensure no allocated cached buffer overlaps the
 * same cacheline with the metadata chunk, align both allocation
 * start and allocation size to a cacheline. Only cached
 * allocations are supported in l3_heap.
 */
static void __sparse_cache *l3_heap_alloc_aligned_cached(struct k_heap *h,
							 size_t min_align, size_t bytes)
{
	/* Never allow an alignment smaller than one cacheline. */
	size_t align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
	/* Pad the size so the following chunk starts on its own cacheline. */
	size_t padded_bytes = ALIGN_UP(bytes, align);

	return (void __sparse_cache *)l3_heap_alloc_aligned(h, align, padded_bytes);
}
199+
200+
static void l3_heap_free(struct k_heap *h, void *mem)
201+
{
202+
if (!cpu_is_primary(arch_proc_id())) {
203+
tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
204+
k_panic();
205+
}
206+
207+
k_spinlock_key_t key = k_spin_lock(&h->lock);
208+
209+
sys_heap_free(&h->heap, mem);
210+
k_spin_unlock(&h->lock, key);
211+
}
212+
156213
#endif
157214

158215
static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
@@ -251,6 +308,15 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
251308
if (caps & SOF_MEM_CAPS_L3) {
252309
#if CONFIG_L3_HEAP
253310
heap = &l3_heap;
311+
/* Uncached L3_HEAP must not be used */
312+
if (!zone_is_cached(zone))
313+
k_panic();
314+
ptr = (__sparse_force void *)l3_heap_alloc_aligned_cached(heap, 0, bytes);
315+
316+
if (!ptr && zone == SOF_MEM_ZONE_SYS)
317+
k_panic();
318+
319+
return ptr;
254320
#else
255321
k_panic();
256322
#endif
@@ -352,7 +418,7 @@ void rfree(void *ptr)
352418

353419
#if CONFIG_L3_HEAP
354420
if (is_l3_heap_pointer(ptr)) {
355-
heap_free(&l3_heap, ptr);
421+
l3_heap_free(&l3_heap, ptr);
356422
return;
357423
}
358424
#endif

0 commit comments

Comments
 (0)