Skip to content

Commit d3433aa

Browse files
committed
zephyr: lib: alloc: Use cached memory for L3 Heap
This patch implements the recommended hardware flow for Intel ACE platforms. The L3 heap should be accessed via cached pointers, including for the heap management data. Signed-off-by: Jaroslaw Stelter <Jaroslaw.Stelter@intel.com>
1 parent 16b53b4 commit d3433aa

1 file changed

Lines changed: 91 additions & 6 deletions

File tree

zephyr/lib/alloc.c

Lines changed: 91 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -116,8 +116,8 @@ static inline uintptr_t get_l3_heap_start(void)
116116
* - main_fw_load_offset
117117
* - main fw size in manifest
118118
*/
119-
return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
120-
ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
119+
return (uintptr_t)((__sparse_force void __sparse_cache *)
120+
ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
121121
}
122122

123123
/**
@@ -145,14 +145,89 @@ static bool is_l3_heap_pointer(void *ptr)
145145
uintptr_t l3_heap_start = get_l3_heap_start();
146146
uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
147147

148-
if (is_cached(ptr))
149-
ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
150-
151148
if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
152149
return true;
153150

154151
return false;
155152
}
153+
154+
/**
155+
* Flush and invalidate cached L3 heap.
156+
*/
157+
static inline void l3_heap_flush_invalidate(struct k_heap *h)
158+
{
159+
if (h == &l3_heap)
160+
sys_cache_data_flush_and_invd_range((void *)get_l3_heap_start(),
161+
get_l3_heap_size());
162+
}
163+
164+
/**
165+
* Invalidate cached L3 heap management data.
166+
*/
167+
static inline void l3_heap_flush(struct k_heap *h)
168+
{
169+
if (h == &l3_heap)
170+
sys_cache_data_flush_range((void *)get_l3_heap_start(),
171+
get_l3_heap_size());
172+
}
173+
174+
static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
175+
{
176+
k_spinlock_key_t key;
177+
void *ret;
178+
#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
179+
struct sys_memory_stats stats;
180+
#endif
181+
182+
key = k_spin_lock(&h->lock);
183+
l3_heap_flush_invalidate(h);
184+
ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
185+
l3_heap_flush(h);
186+
k_spin_unlock(&h->lock, key);
187+
188+
#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
189+
sys_heap_runtime_stats_get(&h->heap, &stats);
190+
tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
191+
stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
192+
#endif
193+
194+
return ret;
195+
}
196+
197+
/*
 * Allocate from the L3 heap and return a cached alias of the buffer.
 *
 * Zephyr's sys_heap keeps chunk metadata immediately before each
 * allocation. Both the start and the size of every allocation are
 * rounded up to a full cache line so no cached buffer shares a cache
 * line with that metadata. Cached and uncached allocations are
 * interleaved in the same heap, so the rule is applied to both kinds.
 * Returns NULL on allocation failure.
 */
static void __sparse_cache *l3_heap_alloc_aligned_cached(struct k_heap *h,
							 size_t min_align, size_t bytes)
{
	void __sparse_cache *buf;

	min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
	bytes = ALIGN_UP(bytes, min_align);

	buf = (__sparse_force void __sparse_cache *)l3_heap_alloc_aligned(h, min_align, bytes);
	if (!buf)
		return buf;

	return z_soc_cached_ptr((__sparse_force void *)buf);
}
221+
222+
static void l3_heap_free(struct k_heap *h, void *mem)
223+
{
224+
k_spinlock_key_t key = k_spin_lock(&h->lock);
225+
226+
sys_heap_free(&h->heap, mem);
227+
l3_heap_flush(h);
228+
k_spin_unlock(&h->lock, key);
229+
}
230+
156231
#endif
157232

158233
static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
@@ -251,6 +326,15 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
251326
if (caps & SOF_MEM_CAPS_L3) {
252327
#if CONFIG_L3_HEAP
253328
heap = &l3_heap;
329+
/* Uncached L3_HEAP should not be used */
330+
if (!zone_is_cached(zone))
331+
k_panic();
332+
ptr = (__sparse_force void *)l3_heap_alloc_aligned_cached(heap, 0, bytes);
333+
334+
if (!ptr && zone == SOF_MEM_ZONE_SYS)
335+
k_panic();
336+
337+
return ptr;
254338
#else
255339
k_panic();
256340
#endif
@@ -352,7 +436,7 @@ void rfree(void *ptr)
352436

353437
#if CONFIG_L3_HEAP
354438
if (is_l3_heap_pointer(ptr)) {
355-
heap_free(&l3_heap, ptr);
439+
l3_heap_free(&l3_heap, ptr);
356440
return;
357441
}
358442
#endif
@@ -367,6 +451,7 @@ static int heap_init(void)
367451

368452
#if CONFIG_L3_HEAP
369453
sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
454+
l3_heap_flush_invalidate(&l3_heap);
370455
#endif
371456

372457
return 0;

0 commit comments

Comments
 (0)