diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs index 4732a85..9948e58 100644 --- a/src/mem/alloc/bestfit.rs +++ b/src/mem/alloc/bestfit.rs @@ -49,7 +49,7 @@ impl BestFitAllocator { // Check if the pointer is 128bit aligned. if !ptr.is_multiple_of(align_of::()) { - return Err(kerr!(InvalidArgument)); + return Err(kerr!(InvalidArgument)); } if range.end.diff(range.start) < Self::MIN_RANGE_SIZE { @@ -169,7 +169,11 @@ impl BestFitAllocator { } unsafe fn contains(meta: &BestFitMeta, target: PhysAddr, size: usize) -> bool { - let begin = unsafe { Self::user_ptr(NonNull::new_unchecked(meta as *const BestFitMeta as *mut u8)) }; + let begin = unsafe { + Self::user_ptr(NonNull::new_unchecked( + meta as *const BestFitMeta as *mut u8, + )) + }; debug_assert!(size > 0); if target >= begin.into() { @@ -194,7 +198,12 @@ impl super::Allocator for BestFitAllocator { /// `align` - The alignment of the block. /// /// Returns the user pointer to the block if successful, otherwise an error. - fn malloc(&mut self, size: usize, align: usize, request: Option) -> Result> { + fn malloc( + &mut self, + size: usize, + align: usize, + request: Option, + ) -> Result> { // Check if the alignment is valid. if align == 0 || align > align_of::() { return Err(kerr!(InvalidAlign)); @@ -204,7 +213,7 @@ impl super::Allocator for BestFitAllocator { if !request.is_multiple_of(align) { return Err(kerr!(InvalidAlign)); } - } + } // Check if the size is valid. if size == 0 { @@ -309,7 +318,9 @@ impl super::Allocator for BestFitAllocator { } if let Some(request) = request { - debug_assert!(unsafe { Self::contains(block.cast::().as_ref(), request, size) }); + debug_assert!(unsafe { + Self::contains(block.cast::().as_ref(), request, size) + }); } // Return the user pointer. @@ -328,10 +339,13 @@ impl super::Allocator for BestFitAllocator { meta.next = self.head; // Check if the size of the block is correct. 
- bug_on!(meta.size != super::super::align_up(size), "Invalid size in free()"); + bug_on!( + meta.size != super::super::align_up(size), + "Invalid size in free()" + ); - // Set the size of the block. - meta.size = size; + // Set the size of the block (must stay aligned, matching what malloc stored). + meta.size = super::super::align_up(size); // Set the block as the new head. self.head = Some(block); @@ -343,9 +357,10 @@ impl super::Allocator for BestFitAllocator { #[cfg(test)] mod tests { use crate::error::Kind; + use crate::mem::align_up; - use super::*; use super::super::*; + use super::*; fn verify_block(user_ptr: NonNull, size: usize, next: Option>) { let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) }; @@ -411,7 +426,11 @@ mod tests { let ptr = allocator.malloc::(128, 1, Some(request)).unwrap(); // Check that the returned pointer contains the requested address. - let meta = unsafe { BestFitAllocator::control_ptr(ptr).cast::().as_ref() }; + let meta = unsafe { + BestFitAllocator::control_ptr(ptr) + .cast::() + .as_ref() + }; assert!(unsafe { BestFitAllocator::contains(meta, request, 128) }); } @@ -626,6 +645,40 @@ mod tests { verify_ptrs_not_overlaping(ptrs.as_slice()); } + #[test] + fn free_corrupts_metadata() { + let mut allocator = BestFitAllocator::new(); + // Use a size NOT 16-byte aligned so align_up(size) > size. + const SIZE: usize = 17; + const ALIGNED: usize = 32; // align_up(17) on 64-bit: (17+15)&!15 = 32 + assert!(align_up(SIZE) == ALIGNED); + + // Allocate just enough space for one block. + let range = alloc_range(ALIGNED + size_of::() + BestFitAllocator::align_up()); + unsafe { + allocator.add_range(&range).unwrap(); + } + + // First alloc: meta.size = align_up(17) = 32. + let ptr1: core::ptr::NonNull = allocator.malloc(SIZE, 1, None).unwrap(); + + // First free: meta.size set to 17 (BUG: should stay 32). 
+ unsafe { + allocator.free(ptr1, SIZE); + } + + // Second alloc: select_block(32) fails (meta.size=17 < 32), fallback select_block(17) + // succeeds and returns the block with meta.size still = 17. + let ptr2: core::ptr::NonNull = allocator + .malloc(SIZE, 1, None) + .expect("second malloc should succeed via fallback path"); + + // Second free: bug_on!(meta.size(17) != align_up(17)(32)) → panics. + unsafe { + allocator.free(ptr2, SIZE); + } + } + #[test] fn multi_range_oom() { // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom. @@ -663,8 +716,8 @@ mod tests { // VERIFICATION ------------------------------------------------------------------------------------------------------- #[cfg(kani)] mod verification { + use super::super::*; use super::*; - use core::{alloc::Layout, ptr}; fn verify_block(user_ptr: NonNull, size: usize, next: Option>) { let control_ptr = unsafe { BestFitAllocator::control_ptr(user_ptr) }; @@ -674,14 +727,14 @@ mod verification { assert_eq!(meta.next, next); } - fn alloc_range(length: usize) -> Option> { + fn alloc_range(length: usize) -> Option> { let alloc_range = std::alloc::Layout::from_size_align(length, align_of::()).unwrap(); let ptr = unsafe { std::alloc::alloc(alloc_range) }; if ptr.is_null() || ((ptr as usize) >= isize::MAX as usize - length) { None } else { - Some(ptr as usize..ptr as usize + length) + Some(PhysAddr::new(ptr as usize)..PhysAddr::new(ptr as usize + length)) } } @@ -699,7 +752,7 @@ mod verification { if let Some(range) = alloc_range(larger_size) { unsafe { - assert_eq!(allocator.add_range(range), Ok(())); + assert!(matches!(allocator.add_range(&range), Ok(()))); } let ptr = allocator.malloc(size, 1, None).unwrap(); diff --git a/src/mem/pfa/bitset.rs b/src/mem/pfa/bitset.rs index 7dc5692..16137e6 100644 --- a/src/mem/pfa/bitset.rs +++ b/src/mem/pfa/bitset.rs @@ -137,14 +137,14 @@ impl super::Allocator for Allocator { // Mark all bits in the middle words as 
used. { let mid_cnt = len / Self::BITS_PER_WORD; - + for i in 0..mid_cnt { self.l1[idx + i] = 0; } idx += mid_cnt; } - + // Mark the remaining bits in the last word as used. self.l1[idx] &= !((!0usize).unbounded_shl((Self::BITS_PER_WORD - (len % Self::BITS_PER_WORD)) as u32)); return Some(self.begin + (start * super::PAGE_SIZE)); @@ -180,11 +180,22 @@ impl super::Allocator for Allocator { mod tests { use super::*; + #[test] + fn last_bit_underflow() { + // Only the last page in word 0 is free + let mut allocator = Allocator::<1>::new(PhysAddr::new(0)).unwrap(); + allocator.l1[0] = 1; + + let result = super::super::Allocator::alloc(&mut allocator, 1); + + assert!(result.is_some()); + } + #[test] fn test_random_pattern() { - const ITARATIONS: usize = 1000; + const ITERATIONS: usize = 1000; - for i in 0..ITARATIONS { + for _ in 0..ITERATIONS { const N: usize = 1024; const BITS: usize = Allocator::::BITS_PER_WORD; const ALLOC_SIZE: usize = 100; @@ -224,4 +235,26 @@ mod tests { } } } -} \ No newline at end of file +} + +#[cfg(kani)] +mod verification { + use super::*; + use hal::mem::PhysAddr; + + #[kani::proof] + #[kani::unwind(70)] + fn verify_alloc_no_rem_underflow_single_word() { + let mut allocator = Allocator::<1>::new(PhysAddr::new(0)).unwrap(); + + let l1_0: usize = kani::any(); + + allocator.l1[0] = l1_0; + + let page_count: usize = kani::any(); + + kani::assume(page_count >= 1 && page_count <= 64); + + let _ = super::super::Allocator::alloc(&mut allocator, page_count); + } +} diff --git a/src/sched/thread.rs b/src/sched/thread.rs index 80d01f1..b5ad71f 100644 --- a/src/sched/thread.rs +++ b/src/sched/thread.rs @@ -385,3 +385,26 @@ impl Project for Thread { self.waiter.as_mut() } } + +#[cfg(test)] +mod tests { + use super::RtServer; + + fn make_server(budget: u32, period: u32, deadline: u64) -> RtServer { + let tid = super::Id::new(1, super::task::KERNEL_TASK); + let uid = tid.get_uid(1); + RtServer::new(budget, period, deadline, uid) + } + + #[test] + fn 
replenish_budget_overflow() { + // 2 * budget = 4_294_967_296 > u32::MAX → overflows. + // In release: wraps to 0, which is less than budget. + let budget: u32 = u32::MAX / 2 + 1; + + let mut server = make_server(budget, 1, 0); + + server.replenish(); + server.budget_left(); + } +} diff --git a/src/types/array.rs b/src/types/array.rs index 15eae5b..1f08e56 100644 --- a/src/types/array.rs +++ b/src/types/array.rs @@ -262,7 +262,7 @@ impl Vec { } // If we don't have enough space, we need to grow the extra storage. - let grow = additional - N + len_extra; + let grow = self.len + additional - N; let mut new_extra = Box::new_slice_uninit(grow)?; // Check that the new extra storage has the requested length. @@ -449,7 +449,7 @@ impl Vec { /// Returns `Some(&T)` if the index is in-bounds, otherwise `None`. pub fn at(&self, index: usize) -> Option<&T> { // Check if the index is in-bounds. - if index > self.len - 1 { + if index >= self.len { return None; } @@ -597,8 +597,9 @@ impl Drop for Vec { } // Drop all elements in the extra storage. - for elem in &mut (*self.extra)[0..self.len - N] { - // Safety: the elements until self.len - N are initialized. + let extra_init = self.len.saturating_sub(N).min(self.extra.len()); + for elem in &mut (*self.extra)[0..extra_init] { + // Safety: the elements until extra_init are initialized. unsafe { elem.assume_init_drop(); } @@ -650,3 +651,107 @@ impl GetMut for Vec { self.at3_mut(*index1.borrow(), *index2.borrow(), *index3.borrow()) } } + + +#[cfg(test)] +mod tests { + use super::{Vec, IndexMap}; + + #[test] + fn no_length_underflow() { + let vec = Vec::::new(); + assert!(vec.len() == 0); + + // If the length check is wrong, at(0) would panic due to an underflow + assert_eq!(vec.at(0), None); + } + + #[test] + fn reserve_underflow() { + // N=8, fill 7 elements so len=7 and extra.len()=0. + // reserve(2): 7+2=9 > 8+0=8, needs grow. grow = 2 - 8 + 0 = underflow. 
+ let mut vec = Vec::::new(); + for i in 0..7usize { + vec.push(i).unwrap(); + } + assert_eq!(vec.len(), 7); + + // FIXME: Gives OOM error + vec.reserve(2).unwrap(); + } + + #[test] + fn drop_underflow() { + let mut vec = Vec::::new(); + for i in 0..7usize { vec.push(i).unwrap(); } + // Drop used to panic here + drop(vec); + } + + #[test] + fn index_map_iter_cycle_max() { + let m: IndexMap = IndexMap::new(); + let _ = m.iter_from_cycle(Some(&usize::MAX)).next(); + } +} + + + +#[cfg(kani)] +mod verification { + use super::IndexMap; + use crate::types::traits::Get; + + /// Verify that insert followed by get returns the inserted value. + #[kani::proof] + fn verify_insert_get_roundtrip() { + let mut m: IndexMap = IndexMap::new(); + let idx: usize = kani::any(); + kani::assume(idx < 8); + let val: u32 = kani::any(); + + m.insert(&idx, val).unwrap(); + assert_eq!(m.get(&idx), Some(&val)); + } + + /// Verify that removing an inserted value leaves the slot empty. + #[kani::proof] + fn verify_remove_clears_slot() { + let mut m: IndexMap = IndexMap::new(); + let idx: usize = kani::any(); + kani::assume(idx < 8); + let val: u32 = kani::any(); + + m.insert(&idx, val).unwrap(); + let removed = m.remove(&idx); + assert_eq!(removed, Some(val)); + assert_eq!(m.get(&idx), None); + } + + /// Verify iter_from_cycle does not overflow when idx is a valid in-bounds index. + #[kani::proof] + #[kani::unwind(16)] + fn verify_iter_from_cycle_no_overflow_valid_idx() { + let m: IndexMap = IndexMap::new(); + let idx: usize = kani::any(); + kani::assume(idx < 8); // only valid indices + + // iter_from_cycle computes to_index(Some(idx)) + 1 = idx + 1 (≤ 8, no overflow) + let _ = m.iter_from_cycle(Some(&idx)).next(); + } + + /// Verify next() does not overflow when idx is a valid in-bounds index. + #[kani::proof] + #[kani::unwind(16)] + fn verify_next_no_overflow_valid_idx() { + let mut m: IndexMap = IndexMap::new(); + // Fill all slots so next() iterates the maximum i times. 
+ for i in 0usize..8 { + m.insert(&i, i as u32).unwrap(); + } + let idx: usize = kani::any(); + kani::assume(idx < 8); + let result = m.next(Some(&idx)); + assert!(result.is_some()); // map is full, must find something + } +} diff --git a/src/types/heap.rs b/src/types/heap.rs index a25b1f8..62de196 100644 --- a/src/types/heap.rs +++ b/src/types/heap.rs @@ -106,3 +106,121 @@ impl BinaryHeap { self.vec.len() } } + + +#[cfg(kani)] +mod verification { + use super::BinaryHeap; + + /// Verify that pushing a single element and popping it returns the same element. + #[kani::proof] + #[kani::unwind(5)] + fn verify_push_pop_roundtrip() { + let mut heap = BinaryHeap::::new(); + let v: u32 = kani::any(); + heap.push(v).unwrap(); + let popped = heap.pop(); + assert_eq!(popped, Some(v)); + assert!(heap.is_empty()); + } + + /// Verify that pushing two elements and popping gives the smaller one first (min-heap). + #[kani::proof] + #[kani::unwind(5)] + fn verify_min_heap_two_elements() { + let mut heap = BinaryHeap::::new(); + let a: u32 = kani::any(); + let b: u32 = kani::any(); + heap.push(a).unwrap(); + heap.push(b).unwrap(); + let first = heap.pop().unwrap(); + let second = heap.pop().unwrap(); + // Min-heap: first <= second, and {first, second} == {a, b} + assert!(first <= second); + assert!((first == a && second == b) || (first == b && second == a)); + } + + /// Verify that pushing three elements pops them in non-decreasing order. + #[kani::proof] + #[kani::unwind(6)] + fn verify_min_heap_three_elements_sorted() { + let mut heap = BinaryHeap::::new(); + let a: u32 = kani::any(); + let b: u32 = kani::any(); + let c: u32 = kani::any(); + heap.push(a).unwrap(); + heap.push(b).unwrap(); + heap.push(c).unwrap(); + let x = heap.pop().unwrap(); + let y = heap.pop().unwrap(); + let z = heap.pop().unwrap(); + // Must come out in non-decreasing order. + assert!(x <= y); + assert!(y <= z); + } + + /// Verify that peek() always returns the minimum element after arbitrary pushes. 
+ #[kani::proof] + #[kani::unwind(6)] + fn verify_peek_is_minimum() { + let mut heap = BinaryHeap::::new(); + let a: u32 = kani::any(); + let b: u32 = kani::any(); + let c: u32 = kani::any(); + heap.push(a).unwrap(); + heap.push(b).unwrap(); + heap.push(c).unwrap(); + let peeked = *heap.peek().unwrap(); + // peeked must be <= all elements + assert!(peeked <= a); + assert!(peeked <= b); + assert!(peeked <= c); + } +} + + +#[cfg(test)] +mod tests { + use super::BinaryHeap; + + #[test] + fn test_heap_sorted_order() { + let mut heap = BinaryHeap::::new(); + for &v in &[5u32, 2, 8, 1, 9, 3] { + heap.push(v).unwrap(); + } + let mut prev = 0u32; + while let Some(v) = heap.pop() { + assert!(v >= prev, "heap pop out of order: {} after {}", v, prev); + prev = v; + } + } + + #[test] + fn test_heap_single_element() { + let mut heap = BinaryHeap::::new(); + heap.push(42).unwrap(); + assert_eq!(heap.peek(), Some(&42)); + assert_eq!(heap.pop(), Some(42)); + assert!(heap.is_empty()); + } + + #[test] + fn test_heap_empty_peek_pop() { + let mut heap = BinaryHeap::::new(); + assert!(heap.peek().is_none()); + assert!(heap.pop().is_none()); + } + + #[test] + fn test_heap_duplicate_values() { + let mut heap = BinaryHeap::::new(); + heap.push(3).unwrap(); + heap.push(3).unwrap(); + heap.push(1).unwrap(); + assert_eq!(heap.pop(), Some(1)); + assert_eq!(heap.pop(), Some(3)); + assert_eq!(heap.pop(), Some(3)); + } +} + diff --git a/src/types/list.rs b/src/types/list.rs index be84923..f8f3f1b 100644 --- a/src/types/list.rs +++ b/src/types/list.rs @@ -97,7 +97,7 @@ impl List { } /// Pushes `id` to the back of the list. If `id` is already in the list, it is moved to the back. - /// + /// /// Errors if `id` does not exist in `storage` or if the node corresponding to `id` is linked but not in the list. 
pub fn push_back + GetMut>(&mut self, id: T, storage: &mut S) -> Result<()> where @@ -360,4 +360,249 @@ mod tests { assert_eq!(list.pop_front(&mut s).unwrap(), None); assert!(list.is_empty()); } -} \ No newline at end of file +} + +#[cfg(kani)] +mod verification { + use core::borrow::Borrow; + + use super::{Linkable, Links, List}; + use crate::types::{array::IndexMap, traits::{Get, ToIndex}}; + + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] + struct Id(usize); + + impl ToIndex for Id { + fn to_index>(idx: Option) -> usize { + idx.as_ref().map_or(0, |k| k.borrow().0) + } + } + + #[derive(Clone, Copy)] + struct Tag; + + struct Node { + links: Links, + } + + impl Node { + fn new() -> Self { + Self { links: Links::new() } + } + } + + impl Linkable for Node { + fn links(&self) -> &Links { &self.links } + fn links_mut(&mut self) -> &mut Links { &mut self.links } + } + + fn make_storage() -> IndexMap { + let mut map = IndexMap::new(); + map.insert(&Id(0), Node::new()).unwrap(); + map.insert(&Id(1), Node::new()).unwrap(); + map.insert(&Id(2), Node::new()).unwrap(); + map.insert(&Id(3), Node::new()).unwrap(); + map + } + + /// Verifies the bug! in push_front (old_head not in storage) is unreachable + /// through correct API usage: all IDs we push exist in storage. + #[kani::proof] + fn verify_push_front_bug_unreachable() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_front(Id(0), &mut s).unwrap(); + list.push_front(Id(1), &mut s).unwrap(); + list.push_front(Id(2), &mut s).unwrap(); + + assert_eq!(list.len(), 3); + assert_eq!(list.head(), Some(Id(2))); + assert_eq!(list.tail(), Some(Id(0))); + } + + /// Verifies the bug! in push_back (old_tail not in storage) is unreachable + /// through correct API usage. 
+ #[kani::proof] + fn verify_push_back_bug_unreachable() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + + assert_eq!(list.len(), 3); + assert_eq!(list.head(), Some(Id(0))); + assert_eq!(list.tail(), Some(Id(2))); + } + + /// Verifies the bug! calls in remove (prev/next not in storage) are unreachable + /// when removing the middle element of a 3-item list. + #[kani::proof] + fn verify_remove_middle_bug_unreachable() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + + list.remove(Id(1), &mut s).unwrap(); + + assert_eq!(list.len(), 2); + assert_eq!(list.head(), Some(Id(0))); + assert_eq!(list.tail(), Some(Id(2))); + assert_eq!(s.get(Id(0)).unwrap().links().next, Some(Id(2))); + assert_eq!(s.get(Id(2)).unwrap().links().prev, Some(Id(0))); + } + + /// Verifies pop_front on empty list returns Ok(None) without panic. + #[kani::proof] + fn verify_pop_empty_no_panic() { + let mut s = make_storage(); + let mut list = List::::new(); + let result = list.pop_front(&mut s); + assert!(result.is_ok()); + assert!(result.unwrap().is_none()); + } + + /// Verifies length invariant: push N distinct items, len == N. + /// Uses symbolic ID ordering so kani explores all 3! = 6 permutations. 
+ #[kani::proof] + #[kani::unwind(4)] + fn verify_len_invariant_push_three() { + let mut s = make_storage(); + let mut list = List::::new(); + + let a: usize = kani::any(); + let b: usize = kani::any(); + let c: usize = kani::any(); + kani::assume(a < 4 && b < 4 && c < 4); + kani::assume(a != b && b != c && a != c); + + list.push_back(Id(a), &mut s).unwrap(); + list.push_back(Id(b), &mut s).unwrap(); + list.push_back(Id(c), &mut s).unwrap(); + + assert_eq!(list.len(), 3); + assert_eq!(list.head(), Some(Id(a))); + assert_eq!(list.tail(), Some(Id(c))); + } + + /// Verifies that reinserting an already-present item does not change len. + #[kani::proof] + fn verify_reinsert_preserves_len() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + assert_eq!(list.len(), 2); + + list.push_back(Id(1), &mut s).unwrap(); + assert_eq!(list.len(), 2); + } + + /// Verifies full push/pop cycle leaves list empty. + #[kani::proof] + fn verify_push_pop_cycle_empty() { + let mut s = make_storage(); + let mut list = List::::new(); + + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + + list.pop_front(&mut s).unwrap(); + list.pop_front(&mut s).unwrap(); + list.pop_front(&mut s).unwrap(); + + assert!(list.is_empty()); + assert_eq!(list.len(), 0); + assert!(list.head().is_none()); + assert!(list.tail().is_none()); + } + + // ----------------------------------------------------------------------- + // FIFO ordering and len ≤ N proofs (Task #12) + // ----------------------------------------------------------------------- + + /// Verify FIFO ordering: push_back(a, b, c) then pop_front yields a, b, c in order. + /// Uses symbolic distinct IDs so Kani explores all 24 permutations (4P3 = 24). 
+ #[kani::proof] + #[kani::unwind(8)] + fn verify_fifo_ordering_symbolic() { + let mut s = make_storage(); + let mut list = List::::new(); + + let a: usize = kani::any(); + let b: usize = kani::any(); + let c: usize = kani::any(); + kani::assume(a < 4 && b < 4 && c < 4); + kani::assume(a != b && b != c && a != c); + + list.push_back(Id(a), &mut s).unwrap(); + list.push_back(Id(b), &mut s).unwrap(); + list.push_back(Id(c), &mut s).unwrap(); + + // FIFO: pop order must match push order + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(a))); + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(b))); + assert_eq!(list.pop_front(&mut s).unwrap(), Some(Id(c))); + assert_eq!(list.pop_front(&mut s).unwrap(), None); + assert!(list.is_empty()); + } + + /// Verify len ≤ N: after any sequence of pushes with N distinct nodes in storage, + /// len never exceeds N. + /// With 4-slot storage, push all 4 distinct IDs → len == 4. + /// Re-inserting an existing ID is an in-place move, not an increase. + #[kani::proof] + #[kani::unwind(6)] + fn verify_len_never_exceeds_capacity() { + let mut s = make_storage(); // 4 slots + let mut list = List::::new(); + + // Fill the list. + list.push_back(Id(0), &mut s).unwrap(); + list.push_back(Id(1), &mut s).unwrap(); + list.push_back(Id(2), &mut s).unwrap(); + list.push_back(Id(3), &mut s).unwrap(); + assert_eq!(list.len(), 4); + assert!(list.len() <= 4); + + // Re-inserting an already-present ID must not increase len. + let x: usize = kani::any(); + kani::assume(x < 4); + list.push_back(Id(x), &mut s).unwrap(); + assert_eq!(list.len(), 4); + assert!(list.len() <= 4); + } + + /// Verify that head() is always the first-inserted element and tail() is always + /// the last, for any two symbolic distinct IDs. 
+ #[kani::proof] + #[kani::unwind(5)] + fn verify_head_tail_invariant() { + let mut s = make_storage(); + let mut list = List::::new(); + + let a: usize = kani::any(); + let b: usize = kani::any(); + kani::assume(a < 4 && b < 4 && a != b); + + list.push_back(Id(a), &mut s).unwrap(); + assert_eq!(list.head(), Some(Id(a))); + assert_eq!(list.tail(), Some(Id(a))); + + list.push_back(Id(b), &mut s).unwrap(); + assert_eq!(list.head(), Some(Id(a))); + assert_eq!(list.tail(), Some(Id(b))); + + // After popping the front, tail stays, new head is b. + list.pop_front(&mut s).unwrap(); + assert_eq!(list.head(), Some(Id(b))); + assert_eq!(list.tail(), Some(Id(b))); + } +} diff --git a/src/types/rbtree.rs b/src/types/rbtree.rs index 3a5b0f0..9870ae8 100644 --- a/src/types/rbtree.rs +++ b/src/types/rbtree.rs @@ -116,7 +116,7 @@ impl RbTree if node.cmp(last) == core::cmp::Ordering::Less { last.links_mut().left = Some(id); } else { - last.links_mut().right = Some(id); + last.links_mut().right = Some(id); } } } @@ -389,7 +389,7 @@ impl RbTree }; let is_black = |node_id: Option, storage: &S| -> bool { !is_red(node_id, storage) }; - + while id != self.root && is_black(id, storage) { let parent_id = parent.unwrap_or_else(|| { bug!("node linked from tree does not have a parent."); @@ -720,22 +720,19 @@ impl RbTree // TESTING ------------------------------------------------------------------------------------------------------------ -#[cfg(test)] -mod tests { - use super::*; - use super::{Get, GetMut}; - use std::borrow::Borrow; - use std::collections::HashSet; +#[cfg(any(test, kani))] +mod test_common { + use super::{Compare, Linkable, Links}; - struct Tree; + pub(super) struct Tree; - struct Node { - key: i32, - links: Links, + pub(super) struct Node { + pub(super) key: i32, + pub(super) links: Links, } impl Node { - fn new(key: i32) -> Self { + pub(super) fn new(key: i32) -> Self { Self { key, links: Links::new(), @@ -758,6 +755,15 @@ mod tests { &mut self.links } } +} + 
+#[cfg(test)] +mod tests { + use super::*; + use super::{Get, GetMut}; + use super::test_common::{Tree, Node}; + use std::borrow::Borrow; + use std::collections::HashSet; struct NodeStore { nodes: Vec, @@ -1148,3 +1154,480 @@ mod tests { } // END TESTING + +#[cfg(kani)] +mod verification { + use core::borrow::Borrow; + + use super::{Color, Compare, Linkable, Links, RbTree}; + use super::test_common::{Tree, Node}; + use crate::types::{array::IndexMap, traits::{Get, GetMut, ToIndex}}; + + + /// Assert the three RB colour invariants hold on every reachable node: + /// 1. Root is black. + /// 2. No red node has a red child. + /// 3. Every root-to-null path passes through the same number of black nodes. + /// + /// Also asserts child→parent back-pointers are consistent. + fn assert_rb_invariants( + root: Option, + storage: &IndexMap, + ) { + // 1. Root colour. + if let Some(rid) = root { + assert!( + matches!(storage.get(rid).unwrap().links().color, Color::Black), + "root must be black" + ); + } + + const SENTINEL: usize = usize::MAX; + // Stack entry: (raw_id [SENTINEL = null], accumulated_black_count) + let mut stack: [(usize, u32); 16] = [(SENTINEL, 0); 16]; + let mut sp = 0usize; + + // Push the root. + stack[sp] = (root.unwrap_or(SENTINEL), 0); + sp += 1; + + let mut expected_bh: i64 = -1; // not yet observed + + while sp > 0 { + sp -= 1; + let (raw_id, bh) = stack[sp]; + + if raw_id == SENTINEL { + // Null leaf: counts as one extra black level. + let path_bh = bh as i64 + 1; + if expected_bh < 0 { + expected_bh = path_bh; + } else { + assert_eq!(path_bh, expected_bh, "black-height invariant violated"); + } + continue; + } + + let node = storage.get(raw_id).unwrap(); + let links = node.links(); + let is_red = matches!(links.color, Color::Red); + let new_bh = if is_red { bh } else { bh + 1 }; + + // 2. No adjacent red nodes. 
+ if is_red { + if let Some(l) = links.left { + assert!( + !matches!(storage.get(l).unwrap().links().color, Color::Red), + "red node has red left child" + ); + } + if let Some(r) = links.right { + assert!( + !matches!(storage.get(r).unwrap().links().color, Color::Red), + "red node has red right child" + ); + } + } + + // Parent-pointer consistency. + if let Some(l) = links.left { + assert_eq!( + storage.get(l).unwrap().links().parent, + Some(raw_id), + "left child has wrong parent pointer" + ); + } + if let Some(r) = links.right { + assert_eq!( + storage.get(r).unwrap().links().parent, + Some(raw_id), + "right child has wrong parent pointer" + ); + } + + // Push children (right first so left is processed first). + assert!(sp + 2 <= 16, "DFS stack overflow — tree deeper than expected"); + stack[sp] = (links.right.unwrap_or(SENTINEL), new_bh); + sp += 1; + stack[sp] = (links.left.unwrap_or(SENTINEL), new_bh); + sp += 1; + } + } + + /// Assert the BST ordering property: an iterative in-order traversal + /// visits keys in strictly ascending order. + fn assert_bst_order( + root: Option, + storage: &IndexMap, + ) { + // Iterative in-order: push nodes going left, pop and visit, then go right. + let mut stack: [usize; 16] = [0; 16]; + let mut sp = 0usize; + let mut cur = root; + let mut prev_key: Option = None; + + loop { + // Descend left. + while let Some(id) = cur { + assert!(sp < 16, "in-order stack overflow"); + stack[sp] = id; + sp += 1; + cur = storage.get(id).unwrap().links().left; + } + if sp == 0 { + break; + } + sp -= 1; + let id = stack[sp]; + let node = storage.get(id).unwrap(); + // Strictly increasing. + if let Some(prev) = prev_key { + assert!(node.key > prev, "BST order violated"); + } + prev_key = Some(node.key); + cur = node.links().right; + } + } + + /// Verify inserting a single node doesn't panic and min() is correct. 
+ #[kani::proof] + fn verify_insert_single_no_bug() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(42)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + + assert_eq!(tree.min(), Some(0)); + } + + /// Verify inserting two nodes (both orderings via symbolic keys) doesn't panic, + /// and min() returns the node with the smaller key. + #[kani::proof] + #[kani::unwind(5)] + fn verify_insert_two_min_correct() { + let mut s: IndexMap = IndexMap::new(); + let key_a: i32 = kani::any(); + let key_b: i32 = kani::any(); + kani::assume(key_a != key_b); + + s.insert(&0, Node::new(key_a)).unwrap(); + s.insert(&1, Node::new(key_b)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + + let min_id = tree.min().unwrap(); + let min_key = s.get(min_id).unwrap().key; + assert!(min_key <= key_a); + assert!(min_key <= key_b); + } + + /// Verify insert of three concrete nodes exercises insert_fixup without bug!. + #[kani::proof] + #[kani::unwind(6)] + fn verify_insert_three_no_bug() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + assert_eq!(tree.min(), Some(1)); + } + + /// Verify remove of root from 3-node tree exercises delete_fixup without bug!. 
+ #[kani::proof] + #[kani::unwind(6)] + fn verify_remove_root_no_bug() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + tree.remove(0, &mut s).unwrap(); + + // After removing 10 (root), min must still be 5 (id=1) + assert_eq!(tree.min(), Some(1)); + } + + /// Verify removing the current minimum updates min() correctly. + #[kani::proof] + #[kani::unwind(6)] + fn verify_min_updates_after_remove_min() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + assert_eq!(tree.min(), Some(1)); + tree.remove(1, &mut s).unwrap(); // remove minimum (key=5) + assert_eq!(tree.min(), Some(0)); // new minimum is key=10 + } + + /// Verify that after removing all nodes the tree is empty. + #[kani::proof] + #[kani::unwind(6)] + fn verify_remove_all_empty() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + tree.remove(1, &mut s).unwrap(); + tree.remove(0, &mut s).unwrap(); + tree.remove(2, &mut s).unwrap(); + + assert!(tree.min().is_none()); + } + + /// Verify ascending-order insertion [1,2,3] — stress-tests right-leaning fixup. 
+ #[kani::proof] + #[kani::unwind(6)] + fn verify_insert_ascending_order() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(1)).unwrap(); + s.insert(&1, Node::new(2)).unwrap(); + s.insert(&2, Node::new(3)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + let min_id = tree.min().unwrap(); + assert_eq!(s.get(min_id).unwrap().key, 1); + } + + // ----------------------------------------------------------------------- + // Invariant-checking proofs + // ----------------------------------------------------------------------- + + /// After inserting 3 concrete nodes the RB colour invariants and BST order hold. + /// Baseline check that the helpers themselves are correct on a small tree. + #[kani::proof] + #[kani::unwind(20)] + fn verify_rb_invariants_three_nodes() { + let mut s: IndexMap = IndexMap::new(); + s.insert(&0, Node::new(10)).unwrap(); + s.insert(&1, Node::new(5)).unwrap(); + s.insert(&2, Node::new(15)).unwrap(); + + let mut tree: RbTree = RbTree::new(); + tree.insert(0, &mut s).unwrap(); + tree.insert(1, &mut s).unwrap(); + tree.insert(2, &mut s).unwrap(); + + assert_rb_invariants(tree.root, &s); + assert_bst_order(tree.root, &s); + } + + /// RB invariants hold after inserting 7 nodes in ascending order and after + /// removing all 7 nodes. Ascending insertion maximally stresses the fixup path. + /// Unwind budget: 7 inserts×3 fixup iters = 21; 7 removes×3 = 21; 2 DFS calls×15 = 30; + /// 2 in-order calls×7 = 14; total ≈ 86. Using unwind(100) for headroom. 
    #[kani::proof]
    #[kani::unwind(100)]
    fn verify_rb_invariants_seven_ascending() {
        // Keys 1..=7 stored at ids 0..=6.
        let mut s: IndexMap = IndexMap::new();
        for i in 0usize..7 {
            s.insert(&i, Node::new(i as i32 + 1)).unwrap();
        }

        let mut tree: RbTree = RbTree::new();
        for i in 0usize..7 {
            tree.insert(i, &mut s).unwrap();
        }

        assert_rb_invariants(tree.root, &s);
        assert_bst_order(tree.root, &s);

        // Remove all and verify the tree empties cleanly.
        for i in 0usize..7 {
            tree.remove(i, &mut s).unwrap();
        }
        assert!(tree.root.is_none());
        assert!(tree.min().is_none());
        assert_rb_invariants(tree.root, &s); // trivially true for empty tree
    }

    /// RB invariants hold after inserting 7 nodes in descending order.
    #[kani::proof]
    #[kani::unwind(100)]
    fn verify_rb_invariants_seven_descending() {
        // Keys 7,6,5,4,3,2,1 stored at ids 0..=6.
        let mut s: IndexMap = IndexMap::new();
        for i in 0usize..7 {
            s.insert(&i, Node::new(7 - i as i32)).unwrap();
        }

        let mut tree: RbTree = RbTree::new();
        for i in 0usize..7 {
            tree.insert(i, &mut s).unwrap();
        }

        assert_rb_invariants(tree.root, &s);
        assert_bst_order(tree.root, &s);
    }

    /// RB invariants hold after balanced insertion [4,2,6,1,3,5,7] and after
    /// removing the root and minimum.
    #[kani::proof]
    #[kani::unwind(100)]
    fn verify_rb_invariants_balanced_with_removes() {
        let keys: [i32; 7] = [4, 2, 6, 1, 3, 5, 7];
        let mut s: IndexMap = IndexMap::new();
        for (i, &k) in keys.iter().enumerate() {
            s.insert(&i, Node::new(k)).unwrap();
        }

        let mut tree: RbTree = RbTree::new();
        for i in 0usize..7 {
            tree.insert(i, &mut s).unwrap();
        }

        assert_rb_invariants(tree.root, &s);
        assert_bst_order(tree.root, &s);

        // Remove root (key=4, idx=0).
        tree.remove(0, &mut s).unwrap();
        assert_rb_invariants(tree.root, &s);
        assert_bst_order(tree.root, &s);

        // Remove current minimum (key=1, idx=3).
        tree.remove(3, &mut s).unwrap();
        assert_rb_invariants(tree.root, &s);
        assert_bst_order(tree.root, &s);
    }

    /// Verify descending-order insertion [3,2,1] — stress-tests left-leaning fixup.
    #[kani::proof]
    #[kani::unwind(6)]
    fn verify_insert_descending_order() {
        let mut s: IndexMap = IndexMap::new();
        s.insert(&0, Node::new(3)).unwrap();
        s.insert(&1, Node::new(2)).unwrap();
        s.insert(&2, Node::new(1)).unwrap();

        let mut tree: RbTree = RbTree::new();
        tree.insert(0, &mut s).unwrap();
        tree.insert(1, &mut s).unwrap();
        tree.insert(2, &mut s).unwrap();

        let min_id = tree.min().unwrap();
        assert_eq!(s.get(min_id).unwrap().key, 1);
    }

    /// 7-node ascending insertion [1..7] — maximally stresses right-rotation fixup.
    /// min() must equal 1 throughout; after removing all nodes tree is empty.
    #[kani::proof]
    #[kani::unwind(30)]
    fn verify_seven_ascending_insert_remove_all() {
        let mut s: IndexMap = IndexMap::new();
        for i in 0usize..7 {
            s.insert(&i, Node::new(i as i32 + 1)).unwrap(); // keys 1..=7
        }

        let mut tree: RbTree = RbTree::new();
        for i in 0usize..7 {
            tree.insert(i, &mut s).unwrap();
        }

        // Min must be key 1 (stored at index 0).
        assert_eq!(tree.min(), Some(0));
        assert_eq!(s.get(0usize).unwrap().key, 1);

        // Remove all nodes in insertion order; min must update correctly.
        for i in 0usize..7 {
            tree.remove(i, &mut s).unwrap();
        }
        assert!(tree.min().is_none());
    }

    /// 7-node descending insertion [7..1] — maximally stresses left-rotation fixup.
    #[kani::proof]
    #[kani::unwind(30)]
    fn verify_seven_descending_insert_remove_all() {
        let mut s: IndexMap = IndexMap::new();
        for i in 0usize..7 {
            s.insert(&i, Node::new(7 - i as i32)).unwrap(); // keys 7,6,5,4,3,2,1
        }

        let mut tree: RbTree = RbTree::new();
        for i in 0usize..7 {
            tree.insert(i, &mut s).unwrap();
        }

        // After inserting [7,6,5,4,3,2,1], min is key 1 (stored at index 6).
        assert_eq!(tree.min(), Some(6));
        assert_eq!(s.get(6usize).unwrap().key, 1);

        // Remove min repeatedly and verify it advances each time.
        let expected_mins: [i32; 7] = [1, 2, 3, 4, 5, 6, 7];
        for &expected_key in &expected_mins {
            let min_id = tree.min().unwrap();
            assert_eq!(s.get(min_id).unwrap().key, expected_key);
            tree.remove(min_id, &mut s).unwrap();
        }
        assert!(tree.min().is_none());
    }

    /// 7-node BFS-level insertion [4,2,6,1,3,5,7] — produces a perfectly balanced tree.
    /// Exercises the case where very few fixup rotations are needed.
    /// Then removes nodes in reverse BFS order and checks min after each.
    #[kani::proof]
    #[kani::unwind(30)]
    fn verify_seven_balanced_insert_remove() {
        // keys stored at their storage indices
        // idx: 0=4, 1=2, 2=6, 3=1, 4=3, 5=5, 6=7
        let keys: [i32; 7] = [4, 2, 6, 1, 3, 5, 7];
        let mut s: IndexMap = IndexMap::new();
        for (i, &k) in keys.iter().enumerate() {
            s.insert(&i, Node::new(k)).unwrap();
        }

        let mut tree: RbTree = RbTree::new();
        for i in 0usize..7 {
            tree.insert(i, &mut s).unwrap();
        }

        // Min is key 1 at index 3.
        assert_eq!(tree.min(), Some(3));
        assert_eq!(s.get(3usize).unwrap().key, 1);

        // Remove the root (key=4, idx=0).
        tree.remove(0, &mut s).unwrap();
        // New min is still key 1 (index 3).
        assert_eq!(tree.min(), Some(3));

        // Remove min (key=1, idx=3). New min = key 2 (idx=1).
        tree.remove(3, &mut s).unwrap();
        assert_eq!(tree.min(), Some(1));
        assert_eq!(s.get(1usize).unwrap().key, 2);

        // Remove remaining 5 nodes.
        for i in [1usize, 2, 4, 5, 6] {
            tree.remove(i, &mut s).unwrap();
        }
        assert!(tree.min().is_none());
    }
}