@@ -7,7 +7,7 @@ use core::ffi::c_void;
77use core:: mem:: { align_of, size_of} ;
88use core:: panic:: PanicInfo ;
99use core:: ptr:: { self , NonNull } ;
10- use core:: sync:: atomic:: { AtomicPtr , Ordering } ;
10+ use core:: sync:: atomic:: { AtomicPtr , AtomicUsize , Ordering } ;
1111use talc:: { ClaimOnOom , Span , Talc , Talck } ;
1212
1313// Yes if panic just loops forever and you know died.
@@ -31,22 +31,50 @@ pub type ErrorHandler = unsafe extern "C" fn(error: HeapError, ptr: *mut c_void)
3131// Use AtomicPtr for safe, lock-free access (even if we only set it once).
3232static ERROR_HANDLER : AtomicPtr < c_void > = AtomicPtr :: new ( ptr:: null_mut ( ) ) ;
3333
// Salt mixed into every metadata checksum (see `calculate_checksum`).
// NOTE: despite being "constant-looking", this value is bumped on each
// `heap_init` so metadata written before a heap reset no longer verifies,
// which catches stale pointers from a previous heap generation.
static CHECKSUM_SALT: AtomicUsize = AtomicUsize::new(0x5A17_C0DE);
/// Per-allocation header written immediately *before* the aligned user
/// pointer, recording what the caller originally requested so that
/// `free`/`realloc` can reconstruct the full allocation layout.
#[repr(C)] // Predictable field layout for manual size/offset arithmetic.
#[derive(Debug, Clone, Copy)]
struct Metadata {
    /// Integrity checksum derived from `size`, `alignment`, and a global salt.
    checksum: usize,
    /// Byte count originally requested by the user for this allocation.
    size: usize,
    /// Alignment requested by the user (or the default alignment for `malloc`).
    alignment: usize,
}
4949
50+ // Calculates a simple checksum for metadata integrity verification.
51+ ///
52+ /// This function mixes the allocation size, alignment, and a global salt to produce
53+ /// a verification value.
54+ ///
55+ /// # Algorithm
56+ /// It uses a lightweight mixing step (similar to MurmurHash3's finalizer) to ensure
57+ /// that small bit changes in size/alignment result in large changes in the checksum.
58+ ///
59+ /// # Stale Pointer Protection
60+ /// The inclusion of `CHECKSUM_SALT` (which rotates on `heap_init`) ensures that
61+ /// pointers from a previous heap generation cannot be successfully freed or reallocated
62+ /// after the allocator has been reset, preventing use-after-reset UB.
63+ #[ inline( always) ]
64+ fn calculate_checksum ( size : usize , alignment : usize ) -> usize {
65+ let mut x = size;
66+ // Mix in the alignment and the global salt.
67+ // The salt changes every time the heap is re-initialized.
68+ x ^= alignment;
69+ x ^= CHECKSUM_SALT . load ( Ordering :: Relaxed ) ;
70+
71+ // Apply a fast avalanche function to spread the bits.
72+ // This helps detect partial corruption or linear overflows.
73+ x = ( x ^ ( x >> 15 ) ) . wrapping_mul ( 0xd168aaad ) ;
74+ x ^= x >> 15 ;
75+ x
76+ }
77+
5078// Use a spinlock mutex for thread safety in concurrent environments (if applicable)
5179static ALLOCATOR : Talck < spin:: Mutex < ( ) > , ClaimOnOom > =
5280 Talc :: new ( unsafe { ClaimOnOom :: new ( Span :: empty ( ) ) } ) . lock ( ) ;
@@ -121,8 +149,11 @@ unsafe fn get_metadata_ptr(user_ptr: NonNull<u8>) -> Result<NonNull<Metadata>, (
121149 let metadata_ptr = user_ptr. as_ptr ( ) . sub ( METADATA_SIZE ) ;
122150 let metadata = & * ( metadata_ptr. cast :: < Metadata > ( ) ) ;
123151
124- // Check the canary to verify integrity.
125- if metadata. canary != CANARY_VALUE {
152+ // Verify the checksum to ensure metadata integrity.
153+ let expected = calculate_checksum ( metadata. size , metadata. alignment ) ;
154+
155+ // Check the checksum to verify integrity.
156+ if metadata. checksum != expected {
126157 Err ( ( ) )
127158 } else {
128159 Ok ( NonNull :: new_unchecked ( metadata_ptr. cast :: < Metadata > ( ) ) )
@@ -168,15 +199,12 @@ unsafe fn recover_alloc_info(user_ptr: NonNull<u8>) -> Result<(NonNull<u8>, Layo
168199#[ no_mangle]
169200pub unsafe extern "C" fn heap_init ( address : * mut u8 , size : usize ) -> bool {
170201 // Basic sanity checks
171- if address. is_null ( ) || size < METADATA_SIZE * 8 {
202+ if address. is_null ( ) || size < METADATA_SIZE * 2 {
172203 return false ;
173204 }
174205
175- // Ensure size is somewhat reasonable. Talc itself has minimal overhead.
176- // We need enough space for at least one metadata block and minimal user data.
177- if size < METADATA_SIZE * 2 {
178- return false ;
179- }
206+ // Update checksum salt to help catch stale metadata across resets.
207+ CHECKSUM_SALT . fetch_add ( 1 , Ordering :: SeqCst ) ;
180208
181209 // Lock the allocator
182210 let mut allocator_guard = ALLOCATOR . lock ( ) ;
@@ -288,7 +316,7 @@ unsafe fn allocate_internal(size: usize, alignment: usize) -> *mut c_void {
288316 // Write the metadata (original requested size and alignment).
289317 // Safety: metadata_ptr points to valid, allocated space of METADATA_SIZE within the block.
290318 metadata_ptr. write ( Metadata {
291- canary : CANARY_VALUE ,
319+ checksum : calculate_checksum ( size , alignment ) ,
292320 size,
293321 alignment,
294322 } ) ;
@@ -333,6 +361,11 @@ pub unsafe extern "C" fn calloc(nmemb: usize, size: usize) -> *mut c_void {
333361 None => return ptr:: null_mut ( ) ,
334362 } ;
335363
364+ // Check for zero-sized allocation.
365+ if total_size == 0 {
366+ return ptr:: null_mut ( ) ;
367+ }
368+
336369 // Allocate using the internal allocator with default alignment.
337370 // Calloc typically uses the same alignment as malloc (align_of::<usize>).
338371 let ptr = allocate_internal ( total_size, align_of :: < usize > ( ) ) ;
@@ -391,10 +424,10 @@ pub unsafe extern "C" fn free(ptr: *mut c_void) {
391424 // Recover the original allocation pointer and layout using the metadata stored before user_ptr.
392425 match recover_alloc_info ( user_ptr) {
393426 Ok ( ( start_ptr, original_layout, _) ) => {
394- // Before freeing, corrupt the canary to help catch use-after-free
427+ // Before freeing, corrupt the checksum to help catch use-after-free
395428 // if the user tries to free it again.
396429 if let Ok ( metadata_ptr) = get_metadata_ptr ( user_ptr) {
397- ( * metadata_ptr. as_ptr ( ) ) . canary = 0 ; // Set to a non-canary value
430+ ( * metadata_ptr. as_ptr ( ) ) . checksum = 0 ; // Set to a non-checksum value
398431 }
399432
400433 // Safety: `start_ptr` and `original_layout` must correspond to a previous
@@ -423,7 +456,7 @@ pub unsafe extern "C" fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void {
423456 // Handle null pointer: standard requires this is equivalent to malloc(size).
424457 let Some ( user_ptr) = NonNull :: new ( ptr. cast :: < u8 > ( ) ) else {
425458 // Use default alignment consistent with malloc.
426- return allocate_internal ( size, align_of :: < usize > ( ) ) ;
459+ return malloc ( size) ;
427460 } ;
428461
429462 // Handle size == 0: standard requires this is equivalent to free(ptr) and returns null.
@@ -454,7 +487,6 @@ pub unsafe extern "C" fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void {
454487 }
455488
456489 core:: cmp:: Ordering :: Less => {
457- // --- Shrink ---
458490 // Calculate the new total layout for the smaller size.
459491 let ( new_total_layout, _) = match layout_for_allocation ( size, alignment) {
460492 Ok ( l) => l,
@@ -471,7 +503,7 @@ pub unsafe extern "C" fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void {
471503
472504 // Safety: metadata_ptr is valid and points to the correct location.
473505 metadata_ptr. write ( Metadata {
474- canary : CANARY_VALUE ,
506+ checksum : calculate_checksum ( size , alignment ) ,
475507 size,
476508 alignment,
477509 } ) ;
@@ -504,7 +536,7 @@ pub unsafe extern "C" fn realloc(ptr: *mut c_void, size: usize) -> *mut c_void {
504536
505537 // Safety: metadata_ptr is valid as pointer hasn't moved.
506538 metadata_ptr. write ( Metadata {
507- canary : CANARY_VALUE , // <-- THE FIX
539+ checksum : calculate_checksum ( size , alignment ) ,
508540 size,
509541 alignment,
510542 } ) ;
0 commit comments