
Commit 23dbee2

zephyr: lib: alloc: Use cached memory for L3 Heap
This patch implements the recommended hardware flow for Intel ACE platforms: the L3 heap should be accessed via cached pointers, including its management data.

Signed-off-by: Jaroslaw Stelter <Jaroslaw.Stelter@intel.com>
1 parent c4bfdc6 commit 23dbee2
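
For context, a minimal sketch of the access pattern the commit adopts: the heap is touched only through its cached alias, with an invalidate before reads and a flush after writes. The cache helpers are the ones used in the diff and z_soc_cached_ptr() is intel_adsp-specific; the include paths and the standalone wrapper are assumptions, and sparse address-space annotations are omitted here:

#include <zephyr/cache.h>
#include <soc.h>	/* assumed location of z_soc_cached_ptr() on intel_adsp */

/* Hypothetical helper: operate on a heap region through its cached alias. */
static void cached_heap_access(void *heap_base, size_t heap_size)
{
	void *cached = (void *)z_soc_cached_ptr(heap_base);

	/* Invalidate before reading, so stale cache lines are dropped and
	 * reads observe what is actually in memory.
	 */
	sys_cache_data_invd_range(cached, heap_size);

	/* ... read or modify heap contents, including sys_heap metadata ... */

	/* Flush after writing, so agents that do not snoop this core's
	 * data cache observe the update.
	 */
	sys_cache_data_flush_and_invd_range(cached, heap_size);
}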

1 file changed: zephyr/lib/alloc.c (94 additions, 6 deletions)
@@ -116,8 +116,8 @@ static inline uintptr_t get_l3_heap_start(void)
 	 * - main_fw_load_offset
 	 * - main fw size in manifest
 	 */
-	return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
-					     ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
+	return (uintptr_t)((__sparse_force void __sparse_cache *)
+			   ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
 }
 
 /**
@@ -145,14 +145,92 @@ static bool is_l3_heap_pointer(void *ptr)
 	uintptr_t l3_heap_start = get_l3_heap_start();
 	uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
 
-	if (is_cached(ptr))
-		ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
-
 	if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
 		return true;
 
 	return false;
 }
+
+/**
+ * Flush cached L3 heap management data.
+ */
+static inline void l3_heap_flush(struct k_heap *h)
+{
+	if (h == &l3_heap)
+		sys_cache_data_flush_and_invd_range((void *)get_l3_heap_start(),
+						    get_l3_heap_size());
+}
+
+/**
+ * Invalidate cached L3 heap management data.
+ */
+static inline void l3_heap_invalidate(struct k_heap *h)
+{
+	if (h == &l3_heap)
+		sys_cache_data_invd_range((void *)get_l3_heap_start(),
+					  get_l3_heap_size());
+}
+
+static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
+{
+	k_spinlock_key_t key;
+	void *ret;
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	struct sys_memory_stats stats;
+#endif
+
+	key = k_spin_lock(&h->lock);
+	l3_heap_invalidate(h);
+	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
+	l3_heap_flush(h);
+	k_spin_unlock(&h->lock, key);
+
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	sys_heap_runtime_stats_get(&h->heap, &stats);
+	tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
+		stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
+#endif
+
+	return ret;
+}
+
+static void __sparse_cache *l3_heap_alloc_aligned_cached(struct k_heap *h,
+							 size_t min_align, size_t bytes)
+{
+	void __sparse_cache *ptr;
+
+	/*
+	 * Zephyr sys_heap stores metadata at the start of each heap
+	 * allocation. To ensure that no allocated cached buffer shares
+	 * a cacheline with the metadata chunk, align both the start and
+	 * the size of the allocation to a cacheline. As cached and
+	 * non-cached allocations are mixed, the same rules need to be
+	 * followed for both types of allocation.
+	 */
+	min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
+	bytes = ALIGN_UP(bytes, min_align);
+
+	ptr = (__sparse_force void __sparse_cache *)l3_heap_alloc_aligned(h, min_align, bytes);
+
+	if (ptr)
+		ptr = z_soc_cached_ptr((__sparse_force void *)ptr);
+
+	return ptr;
+}
+
+static void l3_heap_free(struct k_heap *h, void *mem)
+{
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	sys_cache_data_flush_and_invd_range(mem,
+					    sys_heap_usable_size(&h->heap, mem));
+	l3_heap_invalidate(h);
+	sys_heap_free(&h->heap, mem);
+	l3_heap_flush(h);
+	k_spin_unlock(&h->lock, key);
+}
 #endif
 
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
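
The cacheline rounding done by l3_heap_alloc_aligned_cached() above can be checked in isolation. A standalone sketch, with the macros re-declared locally and PLATFORM_DCACHE_ALIGN assumed to be 64 bytes:

#include <stdio.h>
#include <stddef.h>

/* Local stand-ins for the SOF/Zephyr macros used by the patch. */
#define PLATFORM_DCACHE_ALIGN	64u	/* assumed cacheline size */
#define MAX(a, b)		((a) > (b) ? (a) : (b))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))	/* a must be a power of two */

int main(void)
{
	size_t min_align = 16;	/* caller-requested alignment */
	size_t bytes = 100;	/* caller-requested size */

	/* Same arithmetic as the patch: both the start and the size of the
	 * allocation become cacheline-aligned, so no cached buffer shares a
	 * cacheline with the metadata of the following chunk.
	 */
	min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
	bytes = ALIGN_UP(bytes, min_align);

	printf("align=%zu size=%zu\n", min_align, bytes);	/* align=64 size=128 */
	return 0;
}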
@@ -251,6 +329,15 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (caps & SOF_MEM_CAPS_L3) {
 #if CONFIG_L3_HEAP
 		heap = &l3_heap;
+		/* The uncached L3_HEAP should not be used */
+		if (!zone_is_cached(zone))
+			k_panic();
+		ptr = (__sparse_force void *)l3_heap_alloc_aligned_cached(heap, 0, bytes);
+
+		if (!ptr && zone == SOF_MEM_ZONE_SYS)
+			k_panic();
+
+		return ptr;
 #else
 		k_panic();
 #endif
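
A caller-side sketch of the resulting contract, following the rmalloc() signature shown above. The include path and the choice of SOF_MEM_ZONE_RUNTIME as a cached zone are assumptions; the patch itself only names SOF_MEM_ZONE_SYS:

#include <sof/lib/alloc.h>	/* assumed header for rmalloc()/rfree() */

static void l3_alloc_example(void)
{
	/* The zone must satisfy zone_is_cached(); combining SOF_MEM_CAPS_L3
	 * with an uncached zone now ends in k_panic().
	 */
	void *buf = rmalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_L3, 256);

	if (buf) {
		/* buf is a cached pointer into the L3 (IMR) heap. */
		rfree(buf);	/* routed to l3_heap_free() by rfree() */
	}
}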
@@ -352,7 +439,7 @@ void rfree(void *ptr)
 
 #if CONFIG_L3_HEAP
 	if (is_l3_heap_pointer(ptr)) {
-		heap_free(&l3_heap, ptr);
+		l3_heap_free(&l3_heap, ptr);
 		return;
 	}
 #endif
@@ -367,6 +454,7 @@ static int heap_init(void)
 
 #if CONFIG_L3_HEAP
 	sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
+	l3_heap_flush(&l3_heap);
 #endif
 
 	return 0;
