From 002cf0692aa08e5f78b6e82b483ceeb30d1ccf04 Mon Sep 17 00:00:00 2001 From: Bart Jacobs Date: Wed, 21 Jan 2026 21:22:05 +0100 Subject: [PATCH] Update RawVec proof to nightly-2025-11-25 --- .../linked_list.rs-negative/verify.sh | 2 +- .../collections/linked_list.rs/verify.sh | 2 +- .../alloc/raw_vec/mod.rs/original/raw_vec.rs | 95 +- .../alloc/raw_vec/mod.rs/verified/raw_vec.rs | 987 +++++++++++++----- .../alloc/raw_vec/mod.rs/verify.sh | 2 +- .../raw_vec/mod.rs/with-directives/raw_vec.rs | 99 +- verifast-proofs/setup-verifast-home | 15 + 7 files changed, 841 insertions(+), 361 deletions(-) diff --git a/verifast-proofs/alloc/collections/linked_list.rs-negative/verify.sh b/verifast-proofs/alloc/collections/linked_list.rs-negative/verify.sh index efefb67db8476..a415b018ad639 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs-negative/verify.sh +++ b/verifast-proofs/alloc/collections/linked_list.rs-negative/verify.sh @@ -1,6 +1,6 @@ set -e -x -export VFVERSION=25.11 +export VFVERSION=26.01 ! verifast -rustc_args "--edition 2021 --cfg test" -skip_specless_fns verified/lib.rs ! refinement-checker --rustc-args "--edition 2021 --cfg test" original/lib.rs verified/lib.rs diff --git a/verifast-proofs/alloc/collections/linked_list.rs/verify.sh b/verifast-proofs/alloc/collections/linked_list.rs/verify.sh index fb3a071de6b5a..46dbdb84057ce 100644 --- a/verifast-proofs/alloc/collections/linked_list.rs/verify.sh +++ b/verifast-proofs/alloc/collections/linked_list.rs/verify.sh @@ -1,6 +1,6 @@ set -e -x -export VFVERSION=25.11 +export VFVERSION=26.01 verifast -rustc_args "--edition 2021 --cfg test" -skip_specless_fns -ignore_unwind_paths -allow_assume verified/lib.rs refinement-checker --rustc-args "--edition 2021 --cfg test" original/lib.rs verified/lib.rs > /dev/null diff --git a/verifast-proofs/alloc/raw_vec/mod.rs/original/raw_vec.rs b/verifast-proofs/alloc/raw_vec/mod.rs/original/raw_vec.rs index bc9692f5b6c2f..236e33e2f450e 100644 --- a/verifast-proofs/alloc/raw_vec/mod.rs/original/raw_vec.rs +++ b/verifast-proofs/alloc/raw_vec/mod.rs/original/raw_vec.rs @@ -668,8 +668,7 @@ impl RawVecInner { /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to /// initially construct `self` /// - `elem_layout`'s size must be a multiple of its alignment - /// - The sum of `len` and `additional` must be greater than or equal to - /// `self.capacity(elem_layout.size())` + /// - The sum of `len` and `additional` must be greater than the current capacity unsafe fn grow_amortized( &mut self, len: usize, @@ -693,16 +692,12 @@ impl RawVecInner { let cap = cmp::max(self.cap.as_inner() * 2, required_cap); let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap); - let new_layout = layout_array(cap, elem_layout)?; - // SAFETY: - // - For the `current_memory` call: Precondition passed to caller - // - For the `finish_grow` call: Precondition passed to caller - // + `current_memory` does the right thing - let ptr = - unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? }; + // - cap >= len + additional + // - other preconditions passed to caller + let ptr = unsafe { self.finish_grow(cap, elem_layout)? }; - // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items + // SAFETY: `finish_grow` would have failed if `cap > isize::MAX` unsafe { self.set_ptr_and_cap(ptr, cap) }; Ok(()) } @@ -711,8 +706,7 @@ impl RawVecInner { /// - `elem_layout` must be valid for `self`, i.e. 
it must be the same `elem_layout` used to /// initially construct `self` /// - `elem_layout`'s size must be a multiple of its alignment - /// - The sum of `len` and `additional` must be greater than or equal to - /// `self.capacity(elem_layout.size())` + /// - The sum of `len` and `additional` must be greater than the current capacity unsafe fn grow_exact( &mut self, len: usize, @@ -726,21 +720,44 @@ impl RawVecInner { } let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - let new_layout = layout_array(cap, elem_layout)?; - // SAFETY: - // - For the `current_memory` call: Precondition passed to caller - // - For the `finish_grow` call: Precondition passed to caller - // + `current_memory` does the right thing - let ptr = - unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? }; - // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items - unsafe { - self.set_ptr_and_cap(ptr, cap); - } + // SAFETY: preconditions passed to caller + let ptr = unsafe { self.finish_grow(cap, elem_layout)? }; + + // SAFETY: `finish_grow` would have failed if `cap > isize::MAX` + unsafe { self.set_ptr_and_cap(ptr, cap) }; Ok(()) } + /// # Safety + /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to + /// initially construct `self` + /// - `elem_layout`'s size must be a multiple of its alignment + /// - `cap` must be greater than the current capacity + // not marked inline(never) since we want optimizers to be able to observe the specifics of this + // function, see tests/codegen-llvm/vec-reserve-extend.rs. + #[cold] + unsafe fn finish_grow( + &self, + cap: usize, + elem_layout: Layout, + ) -> Result, TryReserveError> { + let new_layout = layout_array(cap, elem_layout)?; + + let memory = if let Some((ptr, old_layout)) = unsafe { self.current_memory(elem_layout) } { + debug_assert_eq!(old_layout.align(), new_layout.align()); + unsafe { + // The allocator checks for alignment equality + hint::assert_unchecked(old_layout.align() == new_layout.align()); + self.alloc.grow(ptr, old_layout, new_layout) + } + } else { + self.alloc.allocate(new_layout) + }; + + memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into()) + } + /// # Safety /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to /// initially construct `self` @@ -820,38 +837,6 @@ impl RawVecInner { } } -/// # Safety -/// If `current_memory` matches `Some((ptr, old_layout))`: -/// - `ptr` must denote a block of memory *currently allocated* via `alloc` -/// - `old_layout` must *fit* that block of memory -/// - `new_layout` must have the same alignment as `old_layout` -/// - `new_layout.size()` must be greater than or equal to `old_layout.size()` -/// If `current_memory` is `None`, this function is safe. -// not marked inline(never) since we want optimizers to be able to observe the specifics of this -// function, see tests/codegen-llvm/vec-reserve-extend.rs. 
-#[cold] -unsafe fn finish_grow( - new_layout: Layout, - current_memory: Option<(NonNull, Layout)>, - alloc: &mut A, -) -> Result, TryReserveError> -where - A: Allocator, -{ - let memory = if let Some((ptr, old_layout)) = current_memory { - debug_assert_eq!(old_layout.align(), new_layout.align()); - unsafe { - // The allocator checks for alignment equality - hint::assert_unchecked(old_layout.align() == new_layout.align()); - alloc.grow(ptr, old_layout, new_layout) - } - } else { - alloc.allocate(new_layout) - }; - - memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into()) -} - // Central function for reserve error handling. #[cfg(not(no_global_oom_handling))] #[cold] diff --git a/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs b/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs index b41a2d34a16a8..5e3adfb1d4232 100644 --- a/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs +++ b/verifast-proofs/alloc/raw_vec/mod.rs/verified/raw_vec.rs @@ -21,6 +21,8 @@ use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind::*; +//@ use std::collections::{TryReserveError, TryReserveErrorKind}; + #[cfg(test)] mod tests; @@ -50,7 +52,7 @@ lem mul_zero(x: i32, y: i32) #[cfg(not(no_global_oom_handling))] #[cfg_attr(not(panic = "immediate-abort"), inline(never))] fn capacity_overflow() -> ! -//@ req true; +//@ req thread_token(?t); //@ ens false; { panic!("capacity overflow"); @@ -139,13 +141,19 @@ pred RawVecInner(t: thread_id_t, self: RawVecInner, elemLayout: Layout, al ptr == self.ptr.as_non_null_ptr().as_ptr() &*& ptr as usize % elemLayout.align() == 0 &*& pointer_within_limits(ptr) == true &*& + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& if capacity * elemLayout.size() == 0 { true } else { - elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& alloc_block_in(alloc_id, ptr, allocLayout) }; +pred_ctor RawVecInner_full_borrow_content_(t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize)() = + *l |-> ?self_ &*& RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); + +pred RawVecInner_full_borrow(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = + full_borrow(k, RawVecInner_full_borrow_content_(t, l, elemLayout, alloc_id, ptr, capacity)); + lem RawVecInner_send_(t1: thread_id_t) req type_interp::() &*& is_Send(typeid(A)) == true &*& RawVecInner::(?t0, ?self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); ens type_interp::() &*& RawVecInner::(t1, self_, elemLayout, alloc_id, ptr, capacity); @@ -160,11 +168,7 @@ pred RawVecInner0(self: RawVecInner, elemLayout: Layout, ptr: *u8, capacit ptr == self.ptr.as_non_null_ptr().as_ptr() &*& ptr as usize % elemLayout.align() == 0 &*& pointer_within_limits(ptr) == true &*& - if capacity * elemLayout.size() == 0 { - true - } else { - elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) - }; + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)); pred >.own(t, self_) = .own(t, self_.alloc) &*& @@ -199,11 +203,15 @@ lem RawVecInner_send(t1: thread_id_t) lem_auto RawVecInner_inv() req RawVecInner::(?t, ?self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); ens RawVecInner::(t, self_, elemLayout, alloc_id, ptr, capacity) &*& + lifetime_inclusion(lft_of_type::(), alloc_id.lft) == true &*& ptr != 0 &*& ptr as usize % elemLayout.align() == 0 &*& + elemLayout.repeat(capacity) != none &*& 0 <= capacity &*& 
capacity <= usize::MAX; { open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); std::num::niche_types::UsizeNoHighBit_inv(self_.cap); + std::alloc::Allocator_inv(); + std::alloc::Layout_inv(elemLayout); close RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); } @@ -227,19 +235,17 @@ pred_ctor RawVecInner_frac_borrow_content(l: *RawVecInner, elemLayout: Lay ptr == u.as_non_null_ptr().as_ptr() &*& ptr as usize % elemLayout.align() == 0 &*& pointer_within_limits(ptr) == true &*& - if capacity * elemLayout.size() == 0 { - true - } else { - elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) - }; + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)); pred RawVecInner_share_(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = + pointer_within_limits(&(*l).alloc) == true &*& [_]std::alloc::Allocator_share(k, t, &(*l).alloc, alloc_id) &*& + elemLayout.repeat(capacity) != none &*& capacity <= usize::MAX &*& [_]frac_borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)) &*& ptr != 0; lem RawVecInner_share__inv() req [_]RawVecInner_share_::(?k, ?t, ?l, ?elemLayout, ?alloc_id, ?ptr, ?capacity); - ens ptr != 0; + ens ptr != 0 &*& elemLayout.repeat(capacity) != none &*& capacity <= usize::MAX; { open RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); } @@ -268,13 +274,72 @@ lem RawVecInner_sync_(t1: thread_id_t) pred RawVecInner_share_end_token(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = borrow_end_token(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)) &*& borrow_end_token(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)) &*& + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& if capacity * elemLayout.size() == 0 { true } else { - elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& alloc_block_in(alloc_id, ptr, allocLayout) }; +pred RawVecInner_share0_end_token(k: lifetime_t, t: thread_id_t, l: *RawVecInner, elemLayout: Layout, alloc_id: alloc_id_t, ptr: *u8, capacity: usize) = + borrow_end_token(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)) &*& + borrow_end_token(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)) &*& + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)); + +lem RawVecInner_share_full_(k: lifetime_t, l: *RawVecInner) + req type_interp::() &*& atomic_mask(MaskTop) &*& [?q]lifetime_token(k) &*& + RawVecInner_full_borrow(k, ?t, l, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& atomic_mask(MaskTop) &*& [q]lifetime_token(k) &*& + [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); +{ + open RawVecInner_full_borrow(k, t, l, elemLayout, alloc_id, ptr, capacity); + let klong = open_full_borrow_strong_m(k, RawVecInner_full_borrow_content_(t, l, elemLayout, alloc_id, ptr, capacity), q); + open RawVecInner_full_borrow_content_::(t, l, elemLayout, alloc_id, ptr, capacity)(); + assert *l |-> ?self_; + open_points_to(l); + points_to_limits(&(*l).alloc); + open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); + close RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + std::alloc::close_Allocator_full_borrow_content_(t, &(*l).alloc); + close sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity))(); + { + pred 
Ctx() = + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + if capacity * elemLayout.size() == 0 { + true + } else { + alloc_block_in(alloc_id, ptr, allocLayout) + }; + close Ctx(); + produce_lem_ptr_chunk full_borrow_convert_strong( + Ctx, + sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)), + klong, + RawVecInner_full_borrow_content_(t, l, elemLayout, alloc_id, ptr, capacity) + )() { + open Ctx(); + open sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity))(); + std::alloc::open_Allocator_full_borrow_content_::(t, &(*l).alloc, alloc_id); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + let self1 = *l; + close RawVecInner(t, self1, elemLayout, alloc_id, ptr, capacity); + close RawVecInner_full_borrow_content_::(t, l, elemLayout, alloc_id, ptr, capacity)(); + } { + close_full_borrow_strong_m( + klong, + RawVecInner_full_borrow_content_(t, l, elemLayout, alloc_id, ptr, capacity), + sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)) + ); + full_borrow_mono(klong, k, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity))); + } + } + full_borrow_split_m(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)); + full_borrow_into_frac_m(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + std::alloc::share_Allocator_full_borrow_content_m(k, t, &(*l).alloc, alloc_id); + close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); +} + lem share_RawVecInner(k: lifetime_t, l: *RawVecInner) nonghost_callers_only req [?q]lifetime_token(k) &*& @@ -288,6 +353,7 @@ lem share_RawVecInner(k: lifetime_t, l: *RawVecInner) close RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); full_borrow_into_frac(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + points_to_limits(&(*l).alloc); std::alloc::close_Allocator_full_borrow_content_(t, &(*l).alloc); borrow(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)); std::alloc::share_Allocator_full_borrow_content_(k, t, &(*l).alloc, alloc_id); @@ -309,6 +375,51 @@ lem end_share_RawVecInner(l: *RawVecInner) close RawVecInner(t, *l, elemLayout, alloc_id, ptr, capacity); } +lem share_RawVecInner0(k: lifetime_t, l: *RawVecInner, elemLayout: Layout, ptr: *u8, capacity: usize) + nonghost_callers_only + req [?q]lifetime_token(k) &*& + *l |-> ?self_ &*& + Allocator(?t, self_.alloc, ?alloc_id) &*& + capacity == logical_capacity(self_.cap, elemLayout.size()) &*& + ptr == self_.ptr.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& + pointer_within_limits(ptr) == true &*& + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)); + ens [q]lifetime_token(k) &*& + [_]RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity) &*& + RawVecInner_share0_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); +{ + close RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); + borrow(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, 
capacity)); + full_borrow_into_frac(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + points_to_limits(&(*l).alloc); + std::alloc::close_Allocator_full_borrow_content_(t, &(*l).alloc); + borrow(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)); + std::alloc::share_Allocator_full_borrow_content_(k, t, &(*l).alloc, alloc_id); + std::num::niche_types::UsizeNoHighBit_inv(self_.cap); + close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); + close RawVecInner_share0_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); + leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); +} + +lem end_share_RawVecInner0(l: *RawVecInner) + nonghost_callers_only + req RawVecInner_share0_end_token(?k, ?t, l, ?elemLayout, ?alloc_id, ?ptr, ?capacity) &*& [_]lifetime_dead_token(k); + ens *l |-> ?self_ &*& + Allocator(t, self_.alloc, alloc_id) &*& + capacity == logical_capacity(self_.cap, elemLayout.size()) &*& + ptr == self_.ptr.as_non_null_ptr().as_ptr() &*& + ptr as usize % elemLayout.align() == 0 &*& + pointer_within_limits(ptr) == true &*& + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)); +{ + open RawVecInner_share0_end_token(k, t, l, elemLayout, alloc_id, ptr, capacity); + borrow_end(k, std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id)); + std::alloc::open_Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id); + borrow_end(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + open RawVecInner_frac_borrow_content::(l, elemLayout, ptr, capacity)(); +} + lem init_ref_RawVecInner_(l: *RawVecInner) nonghost_callers_only req ref_init_perm(l, ?l0) &*& @@ -378,6 +489,7 @@ lem init_ref_RawVecInner_(l: *RawVecInner) frac_borrow_split(k, scaledp(f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), scaledp(f, ref_initialized_(l))); frac_borrow_implies_scaled(k, f/2, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); frac_borrow_implies_scaled(k, f, ref_initialized_(l)); + assert pointer_within_limits(ref_origin(&(*l0).alloc)) == true; close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); } @@ -437,6 +549,7 @@ lem init_ref_RawVecInner_m(l: *RawVecInner) full_borrow_into_frac_m(k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))); frac_borrow_implies_scaled(k, f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))); frac_borrow_split(k, ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + assert pointer_within_limits(ref_origin(&(*l0).alloc)) == true; close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); } @@ -462,7 +575,7 @@ lem RawVecInner_share_full(k: lifetime_t, t: thread_id_t, l: *RawVecInner) open >.own(t, *l); std::alloc::open_Allocator_own((*l).alloc); assert Allocator(_, _, ?alloc_id); - open RawVecInner0(_, ?elemLayout, ?ptr, ?capacity); + open RawVecInner0(?self_, ?elemLayout, ?ptr, ?capacity); { pred Ctx() = true; produce_lem_ptr_chunk full_borrow_convert_strong(Ctx, sep(std::alloc::Allocator_full_borrow_content_(t, &(*l).alloc, alloc_id), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)), klong, RawVecInner_full_borrow_content(t, l))() { @@ -486,6 +599,7 @@ lem RawVecInner_share_full(k: lifetime_t, t: thread_id_t, l: *RawVecInner) } 
std::alloc::share_Allocator_full_borrow_content_m(k, t, &(*l).alloc, alloc_id); full_borrow_into_frac_m(k, RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + std::num::niche_types::UsizeNoHighBit_inv(self_.cap); close RawVecInner_share_::(k, t, l, elemLayout, alloc_id, ptr, capacity); leak RawVecInner_share_::(k, t, l, elemLayout, alloc_id, ptr, capacity); close RawVecInner_share::(k, t, l); @@ -548,6 +662,7 @@ lem init_ref_RawVecInner(l: *RawVecInner) full_borrow_into_frac_m(k, scaledp(f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)))); frac_borrow_implies_scaled(k, f/2, sep_(ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity))); frac_borrow_split(k, ref_initialized_(l), RawVecInner_frac_borrow_content(l, elemLayout, ptr, capacity)); + assert pointer_within_limits(ref_origin(&(*l0).alloc)) == true; close RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); leak RawVecInner_share_(k, t, l, elemLayout, alloc_id, ptr, capacity); close RawVecInner_share::(k, t, l); @@ -564,6 +679,21 @@ lem RawVecInner_sync(t1: thread_id_t) leak RawVecInner_share(k, t1, l); } +fix RawVecInner::alloc(self_: RawVecInner) -> A { self_.alloc } + +lem RawVecInner_into_raw_parts(self_: RawVecInner) + req RawVecInner(?t, self_, ?elemLayout, ?alloc_id, ?ptr, ?capacity); + ens Allocator(t, self_.alloc(), alloc_id) &*& + if capacity * elemLayout.size() == 0 { + true + } else { + elemLayout.repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr, allocLayout) + }; +{ + open RawVecInner(t, self_, elemLayout, alloc_id, ptr, capacity); +} + @*/ /*@ @@ -571,6 +701,39 @@ lem RawVecInner_sync(t1: thread_id_t) pred RawVec(t: thread_id_t, self: RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = RawVecInner(t, self.inner, Layout::new::, alloc_id, ?ptr_, capacity) &*& ptr == ptr_ as *T; +fix RawVec_full_borrow_content_(t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) -> pred() { + RawVecInner_full_borrow_content_(t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity) +} + +lem close_RawVec_full_borrow_content_(t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) + req *l |-> ?self_ &*& RawVec(t, self_, alloc_id, ptr, capacity); + ens RawVec_full_borrow_content_::(t, l, alloc_id, ptr, capacity)(); +{ + open RawVec(t, self_, alloc_id, ptr, capacity); + open_points_to(l); + close RawVecInner_full_borrow_content_::(t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity)(); +} + +lem open_RawVec_full_borrow_content_(t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) + req RawVec_full_borrow_content_::(t, l, alloc_id, ptr, capacity)(); + ens *l |-> ?self_ &*& RawVec(t, self_, alloc_id, ptr, capacity); +{ + open RawVecInner_full_borrow_content_::(t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity)(); + close RawVec(t, *l, alloc_id, ptr, capacity); + close_points_to(l); +} + +pred RawVec_full_borrow(k: lifetime_t, t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = + RawVecInner_full_borrow(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + +lem close_RawVec_full_borrow(k: lifetime_t, t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) + req full_borrow(k, RawVec_full_borrow_content_::(t, l, alloc_id, ptr, capacity)); + ens RawVec_full_borrow(k, t, l, alloc_id, ptr, capacity); +{ + close RawVecInner_full_borrow(k, t, 
&(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); + close RawVec_full_borrow(k, t, l, alloc_id, ptr, capacity); +} + pred >.own(t, self_) = RawVec(t, self_, ?alloc_id, ?ptr, ?capacity) &*& array_at_lft_(alloc_id.lft, ptr, capacity, _); lem RawVec_own_mono() @@ -580,20 +743,75 @@ lem RawVec_own_mono() assume(false); // https://github.com/verifast/verifast/issues/610 } +lem RawVec_send_(t1: thread_id_t) + req type_interp::() &*& is_Send(typeid(A)) == true &*& RawVec::(?t0, ?v, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& RawVec::(t1, v, alloc_id, ptr, capacity); +{ + open RawVec(t0, v, alloc_id, ptr, capacity); + RawVecInner_send_::(t1); + close RawVec(t1, v, alloc_id, ptr, capacity); +} + lem RawVec_send(t1: thread_id_t) req type_interp::() &*& type_interp::() &*& is_Send(typeid(RawVec)) == true &*& RawVec_own::(?t0, ?v); ens type_interp::() &*& type_interp::() &*& RawVec_own::(t1, v); { open >.own(t0, v); - open RawVec(t0, v, ?alloc_id, ?ptr, ?capacity); - RawVecInner_send_::(t1); - close RawVec(t1, v, alloc_id, ptr, capacity); + RawVec_send_(t1); close >.own(t1, v); } +lem RawVec_inv() + req RawVec::(?t, ?self_, ?alloc_id, ?ptr, ?capacity); + ens RawVec::(t, self_, alloc_id, ptr, capacity) &*& + lifetime_inclusion(lft_of_type::(), alloc_id.lft) == true &*& + ptr != 0 &*& ptr as usize % std::mem::align_of::() == 0 &*& + 0 <= capacity &*& capacity <= usize::MAX; +{ + open RawVec(t, self_, alloc_id, ptr, capacity); + RawVecInner_inv(); + close RawVec(t, self_, alloc_id, ptr, capacity); +} + +lem RawVec_inv2() + req RawVec::(?t, ?self_, ?alloc_id, ?ptr, ?capacity); + ens RawVec::(t, self_, alloc_id, ptr, capacity) &*& + lifetime_inclusion(lft_of_type::(), alloc_id.lft) == true &*& + ptr != 0 &*& ptr as usize % std::mem::align_of::() == 0 &*& + 0 <= capacity &*& + Layout::new::().repeat(capacity) != none &*& + if std::mem::size_of::() == 0 { capacity == usize::MAX } else { capacity <= isize::MAX }; +{ + open RawVec(t, self_, alloc_id, ptr, capacity); + RawVecInner_inv2(); + close RawVec(t, self_, alloc_id, ptr, capacity); +} + +lem RawVec_to_own(self_: RawVec) + req RawVec(?t, self_, ?alloc_id, ?ptr, ?capacity) &*& array_at_lft_(alloc_id.lft, ptr, capacity, _); + ens >.own(t, self_); +{ + close >.own(t, self_); +} + +lem open_RawVec_own(self_: RawVec) + req >.own(?t, self_); + ens RawVec(t, self_, ?alloc_id, ?ptr, ?capacity) &*& array_at_lft_(alloc_id.lft, ptr, capacity, _); +{ + open >.own(t, self_); +} + pred RawVec_share_(k: lifetime_t, t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = [_]RawVecInner_share_(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); +lem RawVec_share__inv() + req [_]RawVec_share_::(?k, ?t, ?l, ?alloc_id, ?ptr, ?capacity); + ens ptr != 0 &*& Layout::new::().repeat(capacity) != none &*& capacity <= usize::MAX; +{ + open RawVec_share_(k, t, l, alloc_id, ptr, capacity); + RawVecInner_share__inv(); +} + lem RawVec_share__mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVec) req type_interp::() &*& type_interp::() &*& lifetime_inclusion(k1, k) == true &*& [_]RawVec_share_::(k, t, l, ?alloc_id, ?ptr, ?capacity); ens type_interp::() &*& type_interp::() &*& [_]RawVec_share_::(k1, t, l, alloc_id, ptr, capacity); @@ -605,8 +823,8 @@ lem RawVec_share__mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: * } lem RawVec_sync_(t1: thread_id_t) - req type_interp::() &*& type_interp::() &*& is_Sync(typeid(RawVec)) == true &*& [_]RawVec_share_::(?k, ?t0, ?l, ?alloc_id, ?ptr, ?capacity); - ens 
type_interp::() &*& type_interp::() &*& [_]RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); + req type_interp::() &*& [_]RawVec_share_::(?k, ?t0, ?l, ?alloc_id, ?ptr, ?capacity) &*& is_Sync(typeid(RawVec)) == true; + ens type_interp::() &*& [_]RawVec_share_::(k, t1, l, alloc_id, ptr, capacity); { open RawVec_share_::(k, t0, l, alloc_id, ptr, capacity); RawVecInner_sync_::(t1); @@ -617,6 +835,18 @@ lem RawVec_sync_(t1: thread_id_t) pred RawVec_share_end_token(k: lifetime_t, t: thread_id_t, l: *RawVec, alloc_id: alloc_id_t, ptr: *T, capacity: usize) = RawVecInner_share_end_token(k, t, &(*l).inner, Layout::new::(), alloc_id, ptr as *u8, capacity); +lem RawVec_share_full_(k: lifetime_t, l: *RawVec) + req type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& [?q]lifetime_token(k) &*& + RawVec_full_borrow(k, ?t, l, ?alloc_id, ?ptr, ?capacity); + ens type_interp::() &*& type_interp::() &*& atomic_mask(MaskTop) &*& [q]lifetime_token(k) &*& + [_]RawVec_share_(k, t, l, alloc_id, ptr, capacity); +{ + open RawVec_full_borrow(k, t, l, alloc_id, ptr, capacity); + RawVecInner_share_full_(k, &(*l).inner); + close RawVec_share_(k, t, l, alloc_id, ptr, capacity); + leak RawVec_share_(k, t, l, alloc_id, ptr, capacity); +} + lem share_RawVec(k: lifetime_t, l: *RawVec) nonghost_callers_only req [?q]lifetime_token(k) &*& *l |-> ?self_ &*& RawVec(?t, self_, ?alloc_id, ?ptr, ?capacity); @@ -675,6 +905,39 @@ lem init_ref_RawVec_(l: *RawVec) } } +lem init_ref_RawVec_m(l: *RawVec) + req type_interp::() &*& atomic_mask(Nlft) &*& ref_init_perm(l, ?l0) &*& [_]RawVec_share_(?k, ?t, l0, ?alloc_id, ?ptr, ?capacity) &*& [?q]lifetime_token(k); + ens type_interp::() &*& atomic_mask(Nlft) &*& [q]lifetime_token(k) &*& [_]RawVec_share_(k, t, l, alloc_id, ptr, capacity) &*& [_]frac_borrow(k, ref_initialized_(l)); +{ + open RawVec_share_(k, t, l0, alloc_id, ptr, capacity); + open_ref_init_perm_RawVec(l); + init_ref_RawVecInner_m(&(*l).inner); + close RawVec_share_(k, t, l, alloc_id, ptr, capacity); + leak RawVec_share_(k, t, l, alloc_id, ptr, capacity); + + let klong = open_frac_borrow_strong_m(k, ref_initialized_(&(*l).inner), q); + open [?f]ref_initialized_::>(&(*l).inner)(); + close_ref_initialized_RawVec(l, f); + close [f]ref_initialized_::>(l)(); + { + pred Ctx() = true; + produce_lem_ptr_chunk frac_borrow_convert_strong(Ctx, scaledp(f, ref_initialized_(l)), klong, f, ref_initialized_(&(*l).inner))() { + open Ctx(); + open scaledp(f, ref_initialized_(l))(); + open ref_initialized_::>(l)(); + open_ref_initialized_RawVec(l); + close [f]ref_initialized_::>(&(*l).inner)(); + } { + close Ctx(); + close scaledp(f, ref_initialized_(l))(); + close_frac_borrow_strong_m(); + full_borrow_mono(klong, k, scaledp(f, ref_initialized_(l))); + full_borrow_into_frac_m(k, scaledp(f, ref_initialized_(l))); + frac_borrow_implies_scaled(k, f, ref_initialized_(l)); + } + } +} + pred >.share(k, t, l) = [_]RawVec_share_(k, t, l, ?alloc_id, ?ptr, ?capacity); lem RawVec_share_mono(k: lifetime_t, k1: lifetime_t, t: thread_id_t, l: *RawVec) @@ -694,6 +957,7 @@ lem RawVec_share_full(k: lifetime_t, t: thread_id_t, l: *RawVec) let klong = open_full_borrow_strong_m(k, RawVec_full_borrow_content::(t, l), q); open RawVec_full_borrow_content::(t, l)(); let self_ = *l; + points_to_limits(&(*l).inner.alloc); open >.own(t, self_); open RawVec(t, self_, ?alloc_id, ?ptr, ?capacity); open RawVecInner(t, self_.inner, Layout::new::(), alloc_id, ptr as *u8, capacity); @@ -781,6 +1045,22 @@ lem init_ref_RawVec(l: *RawVec) } } +fix RawVec::alloc(self_: 
RawVec) -> A { self_.inner.alloc() } + +lem RawVec_into_raw_parts(self_: RawVec) + req RawVec(?t, self_, ?alloc_id, ?ptr, ?capacity); + ens Allocator(t, self_.alloc(), alloc_id) &*& + if capacity * std::mem::size_of::() == 0 { + true + } else { + Layout::new::().repeat(capacity) == some(pair(?allocLayout, ?stride)) &*& + alloc_block_in(alloc_id, ptr as *u8, allocLayout) + }; +{ + open RawVec(t, self_, alloc_id, ptr, capacity); + RawVecInner_into_raw_parts(self_.inner); +} + @*/ impl RawVec { @@ -877,12 +1157,12 @@ impl RawVec { } @*/ { + // Check assumption made in `current_memory` + const { assert!(T::LAYOUT.size() % T::LAYOUT.align() == 0) }; //@ close exists(std::mem::size_of::()); //@ std::alloc::Layout_inv(Layout::new::()); //@ std::alloc::is_valid_layout_size_of_align_of::(); //@ std::ptr::Alignment_as_nonzero_new(std::mem::align_of::()); - // Check assumption made in `current_memory` - const { assert!(T::LAYOUT.size() % T::LAYOUT.align() == 0) }; let r = Self { inner: RawVecInner::new_in(alloc, Alignment::of::()), _marker: PhantomData }; //@ close RawVec::(t, r, alloc_id, ?ptr, ?capacity); //@ u8s_at_lft__to_array_at_lft_(ptr, capacity); @@ -952,11 +1232,13 @@ impl RawVec { /// /// Note, that the requested capacity and `self.capacity()` could differ, as /// an allocator could overallocate and return a greater memory block than requested. - pub(crate) unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit], A> - //@ req thread_token(?t) &*& RawVec(t, self, ?alloc_id, ?ptr, len) &*& array_at_lft_(alloc_id.lft, ptr as *T, len, ?vs); + pub(crate) unsafe fn into_box(mut self, len: usize) -> Box<[MaybeUninit], A> + //@ req thread_token(?t) &*& RawVec(t, self, ?alloc_id, ?ptr, ?capacity_) &*& array_at_lft_(alloc_id.lft, ptr as *T, len, ?vs) &*& if std::mem::size_of::() == 0 { true } else { len == capacity_ }; //@ ens thread_token(t) &*& std::boxed::Box_in::<[std::mem::MaybeUninit], A>(t, result, alloc_id, slice_of_elems(map(std::mem::MaybeUninit::new_maybe_uninit, vs))); //@ on_unwind_ens thread_token(t); { + //@ RawVec_inv2(); + // Sanity-check one half of the safety requirement (we cannot check the other half). if cfg!(debug_assertions) { //~allow_dead_code //@ let k = begin_lifetime(); @@ -973,7 +1255,7 @@ impl RawVec { //@ open_points_to(&self); if !(len <= capacity) { - core::panicking::panic("`len` must be smaller than or equal to `self.capacity()`"); //~allow_dead_code + unsafe { core::hint::unreachable_unchecked(); } } } @@ -1169,8 +1451,68 @@ impl RawVec { /// Returns a shared reference to the allocator backing this `RawVec`. 
#[inline] - pub(crate) fn allocator(&self) -> &A { - self.inner.allocator() + pub(crate) fn allocator(&self) -> &A + /*@ + req + [?q]lifetime_token(?k) &*& + exists(?readOnly) &*& + if readOnly { + [_]points_to_shared(k, self, ?self_) &*& + ens [q]lifetime_token(k) &*& + [_]points_to_shared(k, result, self_.alloc()) &*& + [_]frac_borrow(k, ref_initialized_(result)) + } else { + [_]RawVec_share_(k, ?t, self, ?alloc_id, ?ptr, ?capacity) &*& + ens [q]lifetime_token(k) &*& + [_]std::alloc::Allocator_share(k, t, result, alloc_id) &*& + [_]frac_borrow(k, ref_initialized_(result)) + }; + @*/ + //@ ens true; + /*@ + safety_proof { + open >.share(?k, _t, self); + close exists(false); + let result = call(); + std::alloc::close_Allocator_share(k, _t, result); + } + @*/ + { + //@ let inner_ref = precreate_ref(&(*self).inner); + /*@ + if readOnly { + open points_to_shared(k, self, ?self_); + open_frac_borrow_strong_(k, mk_points_to(self, self_), q); + open [?f]mk_points_to::>(self, self_)(); + open_points_to(self); + close [f]mk_points_to::>(&(*self).inner, self_.inner)(); + close scaledp(f, mk_points_to(&(*self).inner, self_.inner))(); + produce_lem_ptr_chunk restore_frac_borrow(True, scaledp(f, mk_points_to(&(*self).inner, self_.inner)), f, mk_points_to(self, self_))() { + open scaledp(f, mk_points_to(&(*self).inner, self_.inner))(); + open mk_points_to::>(&(*self).inner, self_.inner)(); + open_points_to(&(*self).inner); + close_points_to(self, f); + close [f]mk_points_to::>(self, self_)(); + } { + close_frac_borrow_strong_(); + } + full_borrow_into_frac(k, scaledp(f, mk_points_to(&(*self).inner, self_.inner))); + frac_borrow_implies_scaled(k, f, mk_points_to(&(*self).inner, self_.inner)); + close points_to_shared(k, &(*self).inner, self_.inner); + leak points_to_shared(k, &(*self).inner, self_.inner); + init_ref_readonly_points_to_shared(inner_ref); + } else { + open RawVec_share_(k, ?t, self, ?alloc_id, ?ptr, ?capacity); + init_ref_RawVecInner_(inner_ref); + } + @*/ + //@ open_frac_borrow(k, ref_initialized_(inner_ref), q/2); + //@ open [?f]ref_initialized_::>(inner_ref)(); + let r = self.inner.allocator(); + //@ assert [f]ref_initialized::>(inner_ref); + //@ close [f]ref_initialized_::>(inner_ref)(); + //@ close_frac_borrow(f, ref_initialized_(inner_ref)); + r } /// Ensures that the buffer contains at least enough space to hold `len + @@ -1228,7 +1570,7 @@ impl RawVec { len > capacity0 || len + additional <= capacity1, Result::Err(e) => RawVec(t, self1, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0, _) &*& - .own(t, e) + .own(t, e) }; @*/ /*@ @@ -1244,7 +1586,7 @@ impl RawVec { Result::Err(e) => { } } - close >.own(_t, result); + close >.own(_t, result); } @*/ { @@ -1316,7 +1658,7 @@ impl RawVec { len > capacity0 || len + additional <= capacity1, Result::Err(e) => RawVec(t, self1, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0, _) &*& - .own(t, e) + .own(t, e) }; @*/ /*@ @@ -1332,7 +1674,7 @@ impl RawVec { Result::Err(e) => { } } - close >.own(_t, result); + close >.own(_t, result); } @*/ { @@ -1373,9 +1715,45 @@ impl RawVec { /// Aborts on OOM. 
#[cfg(not(no_global_oom_handling))] #[inline] - pub(crate) fn shrink_to_fit(&mut self, cap: usize) { + pub(crate) fn shrink_to_fit(&mut self, cap: usize) + /*@ + req thread_token(?t) &*& t == currentThread &*& + *self |-> ?self0 &*& + RawVec(t, self0, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0, ?vs0); + @*/ + /*@ + ens thread_token(t) &*& + *self |-> ?self1 &*& + RawVec(t, self1, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1, take(capacity1, vs0)) &*& + cap <= capacity0 &*& + cap <= capacity1 &*& + capacity1 == if std::mem::size_of::() == 0 { usize::MAX } else { cap }; + @*/ + /*@ + safety_proof { + open >.own(_t, ?self0); + call(); + assert RawVec(_, ?self1, _, _, _); + close >.own(_t, self1); + } + @*/ + { + //@ size_align::(); + //@ open_points_to(self); + //@ open RawVec(t, self0, alloc_id, ptr0, capacity0); + //@ RawVecInner_inv2(); + //@ array_at_lft__to_u8s_at_lft_(ptr0, capacity0); + //@ assert array_at_lft_::(_, _, _, ?bs); + //@ array_at_lft__inv(); // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout - unsafe { self.inner.shrink_to_fit(cap, T::LAYOUT) } + let r = unsafe { self.inner.shrink_to_fit(cap, T::LAYOUT) }; + //@ close_points_to(self); + //@ close RawVec(t, *self, alloc_id, ?ptr1, ?capacity1); + //@ u8s_at_lft__to_array_at_lft_(ptr1, capacity1); + //@ vals__of_u8s__take::(capacity1, bs, capacity0); + r } } @@ -1407,7 +1785,7 @@ impl RawVecInner { /*@ ens thread_token(t) &*& RawVecInner(t, result, Layout::from_size_align(elemSize, align.as_nonzero().get()), alloc_id, ?ptr, ?capacity) &*& - array_at_lft_(alloc_id.lft, ptr, capacity * elemSize, _) &*& + array_at_lft_(alloc_id.lft, ptr, capacity * elemSize, []) &*& capacity * elemSize == 0; @*/ //@ on_unwind_ens false; @@ -1444,6 +1822,14 @@ impl RawVecInner { let r = Self { ptr, cap, alloc }; //@ div_rem_nonneg_unique(align.as_nonzero().get(), align.as_nonzero().get(), 1, 0); //@ let layout = Layout::from_size_align(elemSize, align.as_nonzero().get()); + /*@ + if layout.size() == 0 { + div_rem_nonneg_unique(layout.size(), layout.align(), 0, 0); + std::alloc::Layout_repeat_size_aligned_intro(layout, logical_capacity(cap, layout.size())); + } else { + std::alloc::Layout_repeat_0_intro(layout); + } + @*/ //@ close RawVecInner(t, r, layout, alloc_id, _, _); r } @@ -1525,7 +1911,7 @@ impl RawVecInner { fn try_allocate_in( capacity: usize, init: AllocInit, - alloc: A, + mut alloc: A, elem_layout: Layout, ) -> Result /*@ @@ -1548,7 +1934,7 @@ impl RawVecInner { elem_layout.size() % elem_layout.align() != 0 || n == capacity_ * elem_layout.size() &*& forall(bs, (eq)(0)) == true }, - Result::Err(e) => .own(t, e) + Result::Err(e) => .own(t, e) }; @*/ /*@ @@ -1572,7 +1958,7 @@ impl RawVecInner { } Result::Err(e) => { } } - close , std::collections::TryReserveError>>.own(_t, result); + close , TryReserveError>>.own(_t, result); } @*/ { @@ -1583,9 +1969,9 @@ impl RawVecInner { let layout = match layout_array(capacity, elem_layout) { Ok(layout) => layout, Err(_) => { - //@ leak .own(_, _); + //@ leak .own(_, _); //@ std::alloc::Allocator_to_own(alloc); - //@ close .own(currentThread, std::collections::TryReserveErrorKind::CapacityOverflow); + //@ close .own(currentThread, TryReserveErrorKind::CapacityOverflow); return Err(CapacityOverflow.into()) }, }; @@ -1612,7 +1998,7 @@ impl RawVecInner { @*/ return Ok(r); } - + let result = match init { AllocInit::Uninitialized => { let r; @@ -1708,6 +2094,19 @@ impl RawVecInner { //@ ens RawVecInner(t, result, 
elem_layout, alloc_id, ptr, logical_capacity(cap, elem_layout.size())); { let r = Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }; + //@ std::alloc::Layout_inv(elem_layout); + /*@ + if cap.as_inner() * elem_layout.size() == 0 { + std::num::niche_types::UsizeNoHighBit_inv(cap); + mul_zero(cap.as_inner(), elem_layout.size()); + if elem_layout.size() == 0 { + div_rem_nonneg_unique(elem_layout.size(), elem_layout.align(), 0, 0); + std::alloc::Layout_repeat_size_aligned_intro(elem_layout, logical_capacity(cap, elem_layout.size())); + } else { + std::alloc::Layout_repeat_0_intro(elem_layout); + } + } + @*/ //@ close RawVecInner(t, r, elem_layout, alloc_id, ptr, logical_capacity(cap, elem_layout.size())); r } @@ -1729,6 +2128,19 @@ impl RawVecInner { //@ ens RawVecInner(t, result, elem_layout, alloc_id, ptr.as_ptr(), logical_capacity(cap, elem_layout.size())); { let r = Self { ptr: Unique::from(ptr), cap, alloc }; + /*@ + if cap.as_inner() * elem_layout.size() == 0 { + std::num::niche_types::UsizeNoHighBit_inv(cap); + std::alloc::Layout_inv(elem_layout); + mul_zero(cap.as_inner(), elem_layout.size()); + if elem_layout.size() == 0 { + div_rem_nonneg_unique(elem_layout.size(), elem_layout.align(), 0, 0); + std::alloc::Layout_repeat_size_aligned_intro(elem_layout, usize::MAX); + } else { + std::alloc::Layout_repeat_0_intro(elem_layout); + } + } + @*/ //@ close RawVecInner(t, r, elem_layout, alloc_id, _, _); r } @@ -1804,8 +2216,69 @@ impl RawVecInner { } #[inline] - fn allocator(&self) -> &A { - &self.alloc + fn allocator(&self) -> &A + /*@ + req [?q]lifetime_token(?k) &*& + exists(?readOnly) &*& + if readOnly { + [_]points_to_shared(k, self, ?self_) &*& + ens [q]lifetime_token(k) &*& + [_]points_to_shared(k, result, self_.alloc()) &*& + [_]frac_borrow(k, ref_initialized_(result)) + } else { + [_]RawVecInner_share_(k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity) &*& + ens [q]lifetime_token(k) &*& + [_]std::alloc::Allocator_share(k, t, result, alloc_id) &*& + [_]frac_borrow(k, ref_initialized_(result)) + }; + @*/ + //@ ens true; + /*@ + safety_proof { + open >.share(?k, _t, self); + close exists(false); + let result = call(); + std::alloc::close_Allocator_share(k, _t, result); + } + @*/ + { + //@ let alloc_ref = precreate_ref(&(*self).alloc); + /*@ + if readOnly { + open points_to_shared(k, self, ?self_); + open_frac_borrow_strong_(k, mk_points_to(self, self_), q); + open [?f]mk_points_to::>(self, self_)(); + open_points_to(self); + close [f]mk_points_to::(&(*self).alloc, self_.alloc)(); + close scaledp(f, mk_points_to(&(*self).alloc, self_.alloc))(); + { + pred Ctx() = [f](*self).ptr |-> self_.ptr &*& [f](*self).cap |-> self_.cap &*& [f]struct_RawVecInner_padding(self); + close Ctx(); + produce_lem_ptr_chunk restore_frac_borrow(Ctx, scaledp(f, mk_points_to(&(*self).alloc, self_.alloc)), f, mk_points_to(self, self_))() { + open Ctx(); + open scaledp(f, mk_points_to(&(*self).alloc, self_.alloc))(); + open [f]mk_points_to::(&(*self).alloc, self_.alloc)(); + close [f]mk_points_to::>(self, self_)(); + } { + close_frac_borrow_strong_(); + full_borrow_into_frac(k, scaledp(f, mk_points_to(&(*self).alloc, self_.alloc))); + } + } + frac_borrow_implies_scaled(k, f, mk_points_to(&(*self).alloc, self_.alloc)); + close points_to_shared(k, &(*self).alloc, self_.alloc); + leak points_to_shared(k, &(*self).alloc, self_.alloc); + init_ref_readonly_points_to_shared(alloc_ref); + } else { + open RawVecInner_share_(k, ?t, self, ?elem_layout, ?alloc_id, ?ptr, ?capacity); + 
std::alloc::init_ref_Allocator_share(k, t, alloc_ref); + } + @*/ + //@ open_frac_borrow(k, ref_initialized_::(alloc_ref), q); + //@ open [?f]ref_initialized_::(alloc_ref)(); + let r = &self.alloc; + //@ close [f]ref_initialized_::(alloc_ref)(); + //@ close_frac_borrow(f, ref_initialized_::(alloc_ref)); + r } /// # Safety @@ -1935,7 +2408,7 @@ impl RawVecInner { Result::Err(e) => RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& - .own(t, e) + .own(t, e) }; @*/ { @@ -2017,7 +2490,7 @@ impl RawVecInner { Result::Err(e) => RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& - .own(t, e) + .own(t, e) }; @*/ { @@ -2066,7 +2539,24 @@ impl RawVecInner { /// - `cap` must be less than or equal to `self.capacity(elem_layout.size())` #[cfg(not(no_global_oom_handling))] #[inline] - unsafe fn shrink_to_fit(&mut self, cap: usize, elem_layout: Layout) { + unsafe fn shrink_to_fit(&mut self, cap: usize, elem_layout: Layout) + /*@ + req thread_token(?t) &*& t == currentThread &*& + elem_layout.size() % elem_layout.align() == 0 &*& + *self |-> ?self0 &*& + RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& + array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), ?bs0); + @*/ + /*@ + ens thread_token(t) &*& + *self |-> ?self1 &*& + RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*& + array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), take(capacity1 * elem_layout.size(), bs0)) &*& + cap <= capacity0 &*& + cap <= capacity1 &*& + capacity1 == if elem_layout.size() == 0 { usize::MAX } else { cap }; + @*/ + { if let Err(err) = unsafe { self.shrink(cap, elem_layout) } { handle_error(err); } @@ -2114,8 +2604,7 @@ impl RawVecInner { /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to /// initially construct `self` /// - `elem_layout`'s size must be a multiple of its alignment - /// - The sum of `len` and `additional` must be greater than or equal to - /// `self.capacity(elem_layout.size())` + /// - The sum of `len` and `additional` must be greater than the current capacity unsafe fn grow_amortized( &mut self, len: usize, @@ -2141,7 +2630,7 @@ impl RawVecInner { Result::Err(e) => RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& - .own(t, e) + .own(t, e) }; @*/ { @@ -2153,14 +2642,14 @@ impl RawVecInner { if elem_layout.size() == 0 { // Since we return a capacity of `usize::MAX` when `elem_size` is // 0, getting to here necessarily means the `RawVec` is overfull. - //@ close .own(t, std::collections::TryReserveErrorKind::CapacityOverflow); + //@ close .own(t, TryReserveErrorKind::CapacityOverflow); return Err(CapacityOverflow.into()); } // Nothing we can really do about these checks, sadly. 
- //@ close .own(t, std::collections::TryReserveErrorKind::CapacityOverflow); + //@ close .own(t, TryReserveErrorKind::CapacityOverflow); let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - //@ leak .own(t, std::collections::TryReserveErrorKind::CapacityOverflow); + //@ leak .own(t, TryReserveErrorKind::CapacityOverflow); //@ open_points_to(self); //@ std::num::niche_types::UsizeNoHighBit_inv(self0.cap); @@ -2169,63 +2658,37 @@ impl RawVecInner { let cap0 = cmp::max(self.cap.as_inner() * 2, required_cap); let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap0); - let new_layout = layout_array(cap, elem_layout)?; - //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, cap); - //@ let k = begin_lifetime(); - //@ share_RawVecInner(k, self); + //@ open RawVecInner(t, self0, elem_layout, alloc_id, ptr0, capacity0); + //@ share_RawVecInner0(k, self, elem_layout, ptr0, capacity0); //@ let self_ref = precreate_ref(self); //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); - let current_memory = self.current_memory(elem_layout); + let finish_grow_result; + { + //@ let_lft 'a = k; + finish_grow_result = unsafe { self.finish_grow/*@::@*/(cap, elem_layout) }; + } //@ close [f]ref_initialized_::>(self_ref)(); //@ close_frac_borrow(f, ref_initialized_(self_ref)); //@ end_lifetime(k); - //@ end_share_RawVecInner(self); - - //@ open RawVecInner(t, ?self01, elem_layout, alloc_id, ptr0, capacity0); - //@ let ptr0_ = self01.ptr; - //@ let cap0_ = self01.cap; - //@ assert ptr0_.as_non_null_ptr().as_ptr() == ptr0; - //@ std::alloc::Layout_inv(elem_layout); - /*@ - if capacity0 * elem_layout.size() != 0 { - let elemLayout = elem_layout; - assert elemLayout.repeat(capacity0) == some(pair(?allocLayout, ?stride)); - std::alloc::Layout_repeat_some_size_aligned(elemLayout, capacity0); - std::alloc::Layout_inv(allocLayout); - } - @*/ - - //@ std::alloc::Layout_size_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); - //@ std::alloc::Layout_align_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); - - //@ if capacity0 * elem_layout.size() == 0 { leak ptr0[..0] |-> _; } - - //@ std::num::niche_types::UsizeNoHighBit_inv(self01.cap); - //@ assert cap >= len + additional; - //@ mul_mono_l(1, elem_layout.size(), self01.cap.as_inner()); - //@ mul_mono_l(1, elem_layout.size(), cap); - //@ assert new_layout.size() == cap * elem_layout.size(); - //@ assert cap <= new_layout.size(); - //@ mul_mono_l(self01.cap.as_inner(), cap, elem_layout.size()); + //@ end_share_RawVecInner0(self); //@ open_points_to(self); - // SAFETY: - // - For the `current_memory` call: Precondition passed to caller - // - For the `finish_grow` call: Precondition passed to caller - // + `current_memory` does the right thing - match core::ops::Try::branch(unsafe { finish_grow(new_layout, current_memory, &mut self.alloc) }) { + //@ mul_mono_l(1, elem_layout.size(), cap); + + // SAFETY: Precondition passed to caller + `current_memory` does the right thing + match core::ops::Try::branch(finish_grow_result) { core::ops::ControlFlow::Break(residual) => { //@ let self1 = *self; //@ close RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0); core::ops::FromResidual::from_residual(residual) } core::ops::ControlFlow::Continue(ptr) => { - // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items unsafe { + // SAFETY: 
layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items self.set_ptr_and_cap(ptr, cap); //@ let self1 = *self; //@ std::alloc::alloc_block_in_aligned(ptr.as_ptr() as *u8); @@ -2234,8 +2697,13 @@ impl RawVecInner { //@ assert 0 <= self0.cap.as_inner(); //@ assert 0 <= logical_capacity(self0.cap, elem_layout.size()); //@ assert cap != 0; - //@ std::alloc::Layout_inv(new_layout); - //@ close RawVecInner::(t, self1, elem_layout, alloc_id, _, _); + //@ std::alloc::Layout_inv(elem_layout); + //@ assert 0 <= cap * elem_layout.size(); + //@ assert cap * elem_layout.size() <= isize::MAX - isize::MAX % elem_layout.align(); + //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, cap); + //@ assert ptr.as_ptr() as usize % Layout::from_size_align(cap * elem_layout.size(), elem_layout.align()).align() == 0; + //@ std::alloc::Layout_align_Layout_from_size_align(cap * elem_layout.size(), elem_layout.align()); + //@ close RawVecInner::(t, self1, elem_layout, alloc_id, ptr.as_ptr() as *u8, cap); } Ok(()) } @@ -2246,8 +2714,7 @@ impl RawVecInner { /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to /// initially construct `self` /// - `elem_layout`'s size must be a multiple of its alignment - /// - The sum of `len` and `additional` must be greater than or equal to - /// `self.capacity(elem_layout.size())` + /// - The sum of `len` and `additional` must be greater than the current capacity unsafe fn grow_exact( &mut self, len: usize, @@ -2273,7 +2740,7 @@ impl RawVecInner { Result::Err(e) => RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*& array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*& - .own(t, e) + .own(t, e) }; @*/ { @@ -2285,55 +2752,33 @@ impl RawVecInner { return Err(e.into()); } - //@ close .own(t, std::collections::TryReserveErrorKind::CapacityOverflow); + //@ close .own(t, TryReserveErrorKind::CapacityOverflow); let cap = len.checked_add(additional).ok_or(CapacityOverflow)?; - //@ leak .own(t, std::collections::TryReserveErrorKind::CapacityOverflow); - let new_layout = layout_array(cap, elem_layout)?; - //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, cap); + //@ leak .own(t, TryReserveErrorKind::CapacityOverflow); //@ let k = begin_lifetime(); - //@ share_RawVecInner(k, self); + //@ open RawVecInner(t, self0, elem_layout, alloc_id, ptr0, capacity0); + //@ share_RawVecInner0(k, self, elem_layout, ptr0, capacity0); //@ let self_ref = precreate_ref(self); //@ init_ref_RawVecInner_(self_ref); //@ open_frac_borrow(k, ref_initialized_(self_ref), 1/2); //@ open [?f]ref_initialized_::>(self_ref)(); - let current_memory = self.current_memory(elem_layout); + let finish_grow_result; + { + //@ let_lft 'a = k; + finish_grow_result = unsafe { self.finish_grow/*@::@*/(cap, elem_layout) }; + } //@ close [f]ref_initialized_::>(self_ref)(); //@ close_frac_borrow(f, ref_initialized_(self_ref)); //@ end_lifetime(k); - //@ end_share_RawVecInner(self); + //@ end_share_RawVecInner0(self); //@ open_points_to(self); - //@ open RawVecInner(t, *self, elem_layout, alloc_id, ptr0, capacity0); - //@ let ptr0_ = (*self).ptr; - //@ let cap0_ = (*self).cap; - //@ assert ptr0_.as_non_null_ptr().as_ptr() == ptr0; - //@ std::alloc::Layout_inv(elem_layout); - /*@ - if capacity0 * elem_layout.size() != 0 { - let elemLayout = elem_layout; - assert elemLayout.repeat(capacity0) == some(pair(?allocLayout, ?stride)); - std::alloc::Layout_repeat_some_size_aligned(elemLayout, capacity0); - 
std::alloc::Layout_inv(allocLayout); - } - @*/ - //@ std::alloc::Layout_size_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); - //@ std::alloc::Layout_align_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align()); - - //@ if capacity0 * elem_layout.size() == 0 { leak ptr0[..0] |-> _; } - - //@ std::num::niche_types::UsizeNoHighBit_inv(self0.cap); - //@ assert cap == len + additional; - //@ mul_mono_l(1, elem_layout.size(), self0.cap.as_inner()); //@ mul_mono_l(1, elem_layout.size(), cap); - //@ mul_mono_l(capacity0, cap, elem_layout.size()); - - // SAFETY: - // - For the `current_memory` call: Precondition passed to caller - // - For the `finish_grow` call: Precondition passed to caller - // + `current_memory` does the right thing - match core::ops::Try::branch(unsafe { finish_grow(new_layout, current_memory, &mut self.alloc) }) { + + // SAFETY: Precondition passed to caller + `current_memory` does the right thing + match core::ops::Try::branch(finish_grow_result) { core::ops::ControlFlow::Break(residual) => { //@ let self1 = *self; //@ close RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0); @@ -2342,6 +2787,9 @@ impl RawVecInner { core::ops::ControlFlow::Continue(ptr) => { // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items unsafe { + //@ let elemLayout = elem_layout; + //@ assert elemLayout.repeat(cap) == some(pair(?new_layout, ?stride)); + //@ std::alloc::Layout_repeat_some_size_aligned(elemLayout, cap); //@ assert new_layout.size() == elem_layout.size() * cap; //@ mul_mono_l(1, elem_layout.size(), cap); self.set_ptr_and_cap(ptr, cap); @@ -2360,6 +2808,149 @@ impl RawVecInner { } } + /// # Safety + /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to + /// initially construct `self` + /// - `elem_layout`'s size must be a multiple of its alignment + /// - `cap` must be greater than the current capacity + // not marked inline(never) since we want optimizers to be able to observe the specifics of this + // function, see tests/codegen-llvm/vec-reserve-extend.rs. 
+    #[cold]
+    unsafe fn finish_grow<'a>(
+        &'a self,
+        cap: usize,
+        elem_layout: Layout,
+    ) -> Result<NonNull<[u8]>, TryReserveError>
+    /*@
+    req thread_token(?t) &*& t == currentThread &*&
+        1 <= elem_layout.size() &*&
+        elem_layout.size() % elem_layout.align() == 0 &*&
+        [_]RawVecInner_share_('a, t, self, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*& [?q]lifetime_token('a) &*&
+        if capacity0 * elem_layout.size() == 0 {
+            true
+        } else {
+            elem_layout.repeat(capacity0) == some(pair(?allocLayout, ?stride)) &*&
+            std::alloc::alloc_block_in(alloc_id, ptr0, allocLayout)
+        } &*&
+        array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*&
+        capacity0 <= cap;
+    @*/
+    /*@
+    ens thread_token(t) &*& [q]lifetime_token('a) &*&
+        match result {
+            Result::Ok(new_ptr) =>
+                elem_layout.repeat(cap) == some(pair(?allocLayout, ?stride)) &*&
+                alloc_block_in(alloc_id, new_ptr.as_ptr() as *u8, allocLayout) &*&
+                array_at_lft_(alloc_id.lft, new_ptr.as_ptr() as *u8, cap * elem_layout.size(), _) &*&
+                cap * elem_layout.size() <= isize::MAX &*&
+                std::alloc::is_valid_layout(cap * elem_layout.size(), elem_layout.align()) == true,
+            Result::Err(e) =>
+                if capacity0 * elem_layout.size() == 0 {
+                    true
+                } else {
+                    elem_layout.repeat(capacity0) == some(pair(?allocLayout, ?stride)) &*&
+                    std::alloc::alloc_block_in(alloc_id, ptr0, allocLayout)
+                } &*&
+                array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*&
+                .own(currentThread, e)
+        };
+    @*/
+    {
+        //@ std::alloc::Layout_inv(elem_layout);
+
+        let new_layout = layout_array(cap, elem_layout)?;
+        //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, cap);
+
+        //@ let self_ref = precreate_ref(self);
+        //@ init_ref_RawVecInner_(self_ref);
+        //@ open_frac_borrow('a, ref_initialized_(self_ref), q/2);
+        //@ open [?f]ref_initialized_::>(self_ref)();
+        // SAFETY: Precondition passed to caller
+        let current_memory = unsafe { (&*(self as *const RawVecInner)).current_memory(elem_layout) };
+        //@ close [f]ref_initialized_::>(self_ref)();
+        //@ close_frac_borrow(f, ref_initialized_(self_ref));
+
+        //@ open RawVecInner_share_('a, t, self, elem_layout, alloc_id, ptr0, capacity0);
+        //@ std::alloc::Layout_inv(elem_layout);
+        /*@
+        if capacity0 * elem_layout.size() != 0 {
+            let elemLayout = elem_layout;
+            assert elemLayout.repeat(capacity0) == some(pair(?allocLayout, ?stride));
+            std::alloc::Layout_repeat_some_size_aligned(elemLayout, capacity0);
+            std::alloc::Layout_inv(allocLayout);
+        }
+        @*/
+        //@ std::alloc::Layout_size_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align());
+        //@ std::alloc::Layout_align_Layout_from_size_align(capacity0 * elem_layout.size(), elem_layout.align());
+
+        //@ open_frac_borrow('a, RawVecInner_frac_borrow_content(self, elem_layout, ptr0, capacity0), q/2);
+        //@ open [?f1]RawVecInner_frac_borrow_content::(self, elem_layout, ptr0, capacity0)();
+        //@ let cap0 = (*self).cap;
+        //@ std::num::niche_types::UsizeNoHighBit_inv(cap0);
+        //@ close [f1]RawVecInner_frac_borrow_content::(self, elem_layout, ptr0, capacity0)();
+        //@ close_frac_borrow(f1, RawVecInner_frac_borrow_content(self, elem_layout, ptr0, capacity0));
+        //@ mul_mono_l(1, elem_layout.size(), cap0.as_inner());
+        //@ mul_mono_l(1, elem_layout.size(), cap);
+        //@ mul_mono_l(capacity0, cap, elem_layout.size());
+
+        let memory = if let Some((ptr, old_layout)) = current_memory {
+            // debug_assert_eq!(old_layout.align(), new_layout.align());
+            if cfg!(debug_assertions) { //~allow_dead_code // FIXME: The source location associated
+                                        //with a dead `else` branch is the entire `if` statement :-(
+                match (&old_layout.align(), &new_layout.align()) {
+                    (left_val, right_val) =>
+                        if !(*left_val == *right_val) {
+                            let kind = core::panicking::AssertKind::Eq; //~allow_dead_code
+                            core::panicking::assert_failed(kind, &*left_val, &*right_val, None); //~allow_dead_code
+                        }
+                }
+            }
+            unsafe {
+                // The allocator checks for alignment equality
+                hint::assert_unchecked(old_layout.align() == new_layout.align());
+                //@ std::alloc::Layout_repeat_some_size_aligned(elem_layout, capacity0);
+                //@ assert elem_layout.repeat(capacity0) == some(pair(?allocLayout, ?stride));
+                //@ assert allocLayout == old_layout;
+                //@ assert ptr.as_ptr() as *u8 == ptr0;
+                //@ assert std::alloc::alloc_block_in(alloc_id, ptr0, allocLayout);
+                //@ let alloc_ref = precreate_ref(&(*self).alloc);
+                //@ std::alloc::init_ref_Allocator_share::('a, t, alloc_ref);
+                //@ open_frac_borrow('a, ref_initialized_::(alloc_ref), q/2);
+                //@ open [?f2]ref_initialized_::(alloc_ref)();
+                //@ std::alloc::close_Allocator_ref::<'a, A>(t, alloc_ref);
+                let r = self.alloc.grow/*@::@*/(ptr, old_layout, new_layout);
+                //@ close [f2]ref_initialized_::(alloc_ref)();
+                //@ close_frac_borrow(f2, ref_initialized_::(alloc_ref));
+                //@ leak Allocator(_, _, _);
+                r
+            }
+        } else {
+            //@ let alloc_ref = precreate_ref(&(*self).alloc);
+            //@ std::alloc::init_ref_Allocator_share::('a, t, alloc_ref);
+            //@ open_frac_borrow('a, ref_initialized_::(alloc_ref), q/2);
+            //@ open [?f2]ref_initialized_::(alloc_ref)();
+            //@ std::alloc::close_Allocator_ref::<'a, A>(t, alloc_ref);
+            let r = self.alloc.allocate/*@::@*/(new_layout);
+            //@ close [f2]ref_initialized_::(alloc_ref)();
+            //@ close_frac_borrow(f2, ref_initialized_::(alloc_ref));
+            //@ leak Allocator(_, _, _);
+            r
+        };
+
+        let new_layout_ref = &new_layout;
+        match memory {
+            Ok(ptr) => Ok(ptr),
+            Err(err) => {
+                let e = AllocError { layout: *new_layout_ref, non_exhaustive: () };
+                //@ std::alloc::close_Layout_own(t, new_layout);
+                //@ close_tuple_0_own(t);
+                //@ close .own(t, e);
+                Err(e.into())
+            }
+        }
+    }
+
     /// # Safety
     /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
     ///   initially construct `self`
@@ -2373,7 +2964,7 @@ impl RawVecInner {
         elem_layout.size() % elem_layout.align() == 0 &*&
         *self |-> ?self0 &*& RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*&
-        array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _);
+        array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), ?bs0);
     @*/
     /*@
     ens thread_token(t) &*&
@@ -2381,12 +2972,14 @@ impl RawVecInner {
         match result {
             Result::Ok(u) =>
                 RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*&
-                array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*&
-                cap <= capacity1,
+                array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), take(capacity1 * elem_layout.size(), bs0)) &*&
+                cap <= capacity0 &*&
+                cap <= capacity1 &*&
+                capacity1 == if elem_layout.size() == 0 { usize::MAX } else { cap },
             Result::Err(e) =>
                 RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*&
-                array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*&
-                .own(t, e)
+                array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), bs0) &*&
+                .own(t, e)
         };
     @*/
     {
@@ -2427,7 +3020,7 @@ impl RawVecInner {
         elem_layout.size() % elem_layout.align() == 0 &*&
         *self |-> ?self0 &*& RawVecInner(t, self0, elem_layout, ?alloc_id, ?ptr0, ?capacity0) &*&
-        array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*&
+        array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), ?bs0) &*&
         cap <= capacity0;
     @*/
     /*@
@@ -2436,12 +3029,13 @@ impl RawVecInner {
         match result {
             Result::Ok(u) =>
                 RawVecInner(t, self1, elem_layout, alloc_id, ?ptr1, ?capacity1) &*&
-                array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), _) &*&
-                cap <= capacity1,
+                array_at_lft_(alloc_id.lft, ptr1, capacity1 * elem_layout.size(), take(capacity1 * elem_layout.size(), bs0)) &*&
+                cap <= capacity1 &*&
+                capacity1 == if elem_layout.size() == 0 { usize::MAX } else { cap },
             Result::Err(e) =>
                 RawVecInner(t, self1, elem_layout, alloc_id, ptr0, capacity0) &*&
-                array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), _) &*&
-                .own(t, e)
+                array_at_lft_(alloc_id.lft, ptr0, capacity0 * elem_layout.size(), bs0) &*&
+                .own(t, e)
         };
     @*/
     {
@@ -2460,6 +3054,9 @@ impl RawVecInner {
         let (ptr, layout) = if let Some(mem) = current_memory {
             mem
         } else {
+            //@ std::alloc::Layout_inv(elem_layout);
+            //@ mul_zero(capacity0, elem_layout.size());
+            //@ RawVecInner_inv2();
             return Ok(())
         };
@@ -2499,6 +3096,7 @@ impl RawVecInner {
             //@ let ptr1_ = (*self).ptr;
             //@ assert ptr1_.as_non_null_ptr().as_ptr() as usize == elem_layout.align();
             //@ div_rem_nonneg_unique(elem_layout.align(), elem_layout.align(), 1, 0);
+            //@ std::alloc::Layout_repeat_0_intro(elem_layout);
             //@ close RawVecInner(t, *self, elem_layout, alloc_id, _, _);
         } else {
             let ptr = unsafe {
@@ -2596,115 +3194,12 @@ impl RawVecInner {
         }
     }
 
-/// # Safety
-/// If `current_memory` matches `Some((ptr, old_layout))`:
-/// - `ptr` must denote a block of memory *currently allocated* via `alloc`
-/// - `old_layout` must *fit* that block of memory
-/// - `new_layout` must have the same alignment as `old_layout`
-/// - `new_layout.size()` must be greater than or equal to `old_layout.size()`
-/// If `current_memory` is `None`, this function is safe.
-// not marked inline(never) since we want optimizers to be able to observe the specifics of this
-// function, see tests/codegen-llvm/vec-reserve-extend.rs.
-#[cold]
-unsafe fn finish_grow<A>(
-    new_layout: Layout,
-    current_memory: Option<(NonNull<u8>, Layout)>,
-    alloc: &mut A,
-) -> Result<NonNull<[u8]>, TryReserveError>
-where
-    A: Allocator,
-/*@
-req thread_token(?t) &*& t == currentThread &*&
-    *alloc |-> ?alloc0 &*& Allocator(t, alloc0, ?alloc_id) &*&
-    match current_memory {
-        Option::None => true,
-        Option::Some(memory) =>
-            alloc_block_in(alloc_id, memory.0.as_ptr(), memory.1) &*&
-            array_at_lft_(alloc_id.lft, memory.0.as_ptr(), memory.1.size(), _) &*&
-            memory.1.size() <= new_layout.size() &*&
-            memory.1.align() == new_layout.align()
-    };
-@*/
-/*@
-ens thread_token(t) &*& *alloc |-> ?alloc1 &*& Allocator(t, alloc1, alloc_id) &*&
-    match result {
-        Result::Ok(ptr) =>
-            alloc_block_in(alloc_id, ptr.as_ptr() as *u8, new_layout) &*&
-            array_at_lft_(alloc_id.lft, ptr.as_ptr() as *u8, new_layout.size(), _) &*&
-            new_layout.size() <= isize::MAX,
-        Result::Err(e) =>
-            match current_memory {
-                Option::None => true,
-                Option::Some(memory) =>
-                    alloc_block_in(alloc_id, memory.0.as_ptr(), memory.1) &*&
-                    array_at_lft_(alloc_id.lft, memory.0.as_ptr(), memory.1.size(), _)
-            } &*&
-            .own(currentThread, e)
-    };
-@*/
-{
-    let memory = if let Some((ptr, old_layout)) = current_memory {
-        // debug_assert_eq!(old_layout.align(), new_layout.align());
-        if cfg!(debug_assertions) { //~allow_dead_code // FIXME: The source location associated
-                                    //with a dead `else` branch is the entire `if` statement :-(
-            match (&old_layout.align(), &new_layout.align()) {
-                (left_val, right_val) =>
-                    if !(*left_val == *right_val) {
-                        let kind = core::panicking::AssertKind::Eq; //~allow_dead_code
-                        core::panicking::assert_failed(kind, &*left_val, &*right_val, None); //~allow_dead_code
-                    }
-            }
-        }
-        unsafe {
-            // The allocator checks for alignment equality
-            hint::assert_unchecked(old_layout.align() == new_layout.align());
-            let r;
-            //@ let alloc_ref = precreate_ref(alloc);
-            //@ let k1 = begin_lifetime();
-            {
-                //@ let_lft 'a = k1;
-                //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref);
-                r = alloc.grow/*@::@*/(ptr, old_layout, new_layout);
-                //@ leak Allocator(_, _, _);
-            }
-            //@ end_lifetime(k1);
-            //@ std::alloc::end_ref_Allocator_at_lifetime::();
-            r
-        }
-    } else {
-        let r;
-        //@ let alloc_ref = precreate_ref(alloc);
-        //@ let k1 = begin_lifetime();
-        {
-            //@ let_lft 'a = k1;
-            //@ std::alloc::init_ref_Allocator_at_lifetime::<'a, A>(alloc_ref);
-            r = alloc.allocate/*@::@*/(new_layout);
-            //@ leak Allocator(_, _, _);
-        }
-        //@ end_lifetime(k1);
-        //@ std::alloc::end_ref_Allocator_at_lifetime::();
-        r
-    };
-
-    let new_layout_ref = &new_layout;
-    match memory {
-        Ok(ptr) => Ok(ptr),
-        Err(err) => {
-            let e = AllocError { layout: *new_layout_ref, non_exhaustive: () };
-            //@ std::alloc::close_Layout_own(t, new_layout);
-            //@ close_tuple_0_own(t);
-            //@ close .own(t, e);
-            Err(e.into())
-        }
-    }
-}
-
 // Central function for reserve error handling.
 #[cfg(not(no_global_oom_handling))]
 #[cold]
 #[optimize(size)]
 fn handle_error(e: TryReserveError) -> !
-//@ req true;
+//@ req thread_token(?t);
 //@ ens false;
 {
     match e.kind() {
@@ -2720,7 +3215,7 @@ fn layout_array(cap: usize, elem_layout: Layout) -> Result
         elem_layout.repeat(cap) == some(pair(layout, ?stride)),
-        Result::Err(err) => .own(currentThread, err)
+        Result::Err(err) => .own(currentThread, err)
     };
 @*/
 /*@
@@ -2731,7 +3226,7 @@ safety_proof {
     Result::Ok(layout) => { std::alloc::close_Layout_own(_t, layout); }
     Result::Err(e) => {}
 }
-    close >.own(_t, result);
+    close >.own(_t, result);
 }
 @*/
 {
diff --git a/verifast-proofs/alloc/raw_vec/mod.rs/verify.sh b/verifast-proofs/alloc/raw_vec/mod.rs/verify.sh
index a48b3866a89dc..4fd168572c26a 100644
--- a/verifast-proofs/alloc/raw_vec/mod.rs/verify.sh
+++ b/verifast-proofs/alloc/raw_vec/mod.rs/verify.sh
@@ -1,6 +1,6 @@
 set -e -x
-export VFVERSION=25.11
+export VFVERSION=26.01
 verifast -skip_specless_fns -ignore_unwind_paths -allow_assume verified/lib.rs
 refinement-checker with-directives/lib.rs verified/lib.rs > /dev/null
diff --git a/verifast-proofs/alloc/raw_vec/mod.rs/with-directives/raw_vec.rs b/verifast-proofs/alloc/raw_vec/mod.rs/with-directives/raw_vec.rs
index 9fd200414f9d8..c9df11a0ef467 100644
--- a/verifast-proofs/alloc/raw_vec/mod.rs/with-directives/raw_vec.rs
+++ b/verifast-proofs/alloc/raw_vec/mod.rs/with-directives/raw_vec.rs
@@ -222,7 +222,7 @@ impl RawVec {
     ///
     /// Note, that the requested capacity and `self.capacity()` could differ, as
     /// an allocator could overallocate and return a greater memory block than requested.
-    pub(crate) unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
+    pub(crate) unsafe fn into_box(/*@~mut@*/ self, len: usize) -> Box<[MaybeUninit<T>], A> {
         // Sanity-check one half of the safety requirement (we cannot check the other half).
         debug_assert!(
             len <= self.capacity(),
@@ -449,7 +449,7 @@ impl RawVecInner {
     fn try_allocate_in(
         capacity: usize,
         init: AllocInit,
-        alloc: A,
+        /*@~mut@*/ alloc: A,
         elem_layout: Layout,
     ) -> Result<Self, TryReserveError> {
         // We avoid `unwrap_or_else` here because it bloats the amount of
@@ -668,8 +668,7 @@ impl RawVecInner {
     /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
     ///   initially construct `self`
     /// - `elem_layout`'s size must be a multiple of its alignment
-    /// - The sum of `len` and `additional` must be greater than or equal to
-    ///   `self.capacity(elem_layout.size())`
+    /// - The sum of `len` and `additional` must be greater than the current capacity
     unsafe fn grow_amortized(
         &mut self,
         len: usize,
@@ -693,16 +692,12 @@ impl RawVecInner {
         let cap = cmp::max(self.cap.as_inner() * 2, required_cap);
         let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);
-        let new_layout = layout_array(cap, elem_layout)?;
-        // SAFETY:
-        // - For the `current_memory` call: Precondition passed to caller
-        // - For the `finish_grow` call: Precondition passed to caller
-        //   + `current_memory` does the right thing
-        let ptr =
-            unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? };
+        // - cap >= len + additional
+        // - other preconditions passed to caller
+        let ptr = unsafe { self.finish_grow(cap, elem_layout)? };
-        // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
+        // SAFETY: `finish_grow` would have failed if `cap > isize::MAX`
         unsafe { self.set_ptr_and_cap(ptr, cap) };
         Ok(())
     }
@@ -711,8 +706,7 @@ impl RawVecInner {
     /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
     ///   initially construct `self`
     /// - `elem_layout`'s size must be a multiple of its alignment
-    /// - The sum of `len` and `additional` must be greater than or equal to
-    ///   `self.capacity(elem_layout.size())`
+    /// - The sum of `len` and `additional` must be greater than the current capacity
     unsafe fn grow_exact(
         &mut self,
         len: usize,
@@ -726,21 +720,44 @@ impl RawVecInner {
         }
         let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
-        let new_layout = layout_array(cap, elem_layout)?;
-        // SAFETY:
-        // - For the `current_memory` call: Precondition passed to caller
-        // - For the `finish_grow` call: Precondition passed to caller
-        //   + `current_memory` does the right thing
-        let ptr =
-            unsafe { finish_grow(new_layout, self.current_memory(elem_layout), &mut self.alloc)? };
-        // SAFETY: layout_array would have resulted in a capacity overflow if we tried to allocate more than `isize::MAX` items
-        unsafe {
-            self.set_ptr_and_cap(ptr, cap);
-        }
+        // SAFETY: preconditions passed to caller
+        let ptr = unsafe { self.finish_grow(cap, elem_layout)? };
+
+        // SAFETY: `finish_grow` would have failed if `cap > isize::MAX`
+        unsafe { self.set_ptr_and_cap(ptr, cap) };
         Ok(())
     }
+    /// # Safety
+    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
+    ///   initially construct `self`
+    /// - `elem_layout`'s size must be a multiple of its alignment
+    /// - `cap` must be greater than the current capacity
+    // not marked inline(never) since we want optimizers to be able to observe the specifics of this
+    // function, see tests/codegen-llvm/vec-reserve-extend.rs.
+    #[cold]
+    unsafe fn finish_grow(
+        &self,
+        cap: usize,
+        elem_layout: Layout,
+    ) -> Result<NonNull<[u8]>, TryReserveError> {
+        let new_layout = layout_array(cap, elem_layout)?;
+
+        let memory = if let Some((ptr, old_layout)) = unsafe { self.current_memory(elem_layout) } {
+            debug_assert_eq!(old_layout.align(), new_layout.align());
+            unsafe {
+                // The allocator checks for alignment equality
+                hint::assert_unchecked(old_layout.align() == new_layout.align());
+                self.alloc.grow(ptr, old_layout, new_layout)
+            }
+        } else {
+            self.alloc.allocate(new_layout)
+        };
+
+        memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
+    }
+
     /// # Safety
     /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
     ///   initially construct `self`
@@ -820,38 +837,6 @@ impl RawVecInner {
         }
     }
 
-/// # Safety
-/// If `current_memory` matches `Some((ptr, old_layout))`:
-/// - `ptr` must denote a block of memory *currently allocated* via `alloc`
-/// - `old_layout` must *fit* that block of memory
-/// - `new_layout` must have the same alignment as `old_layout`
-/// - `new_layout.size()` must be greater than or equal to `old_layout.size()`
-/// If `current_memory` is `None`, this function is safe.
-// not marked inline(never) since we want optimizers to be able to observe the specifics of this
-// function, see tests/codegen-llvm/vec-reserve-extend.rs.
-#[cold]
-unsafe fn finish_grow<A>(
-    new_layout: Layout,
-    current_memory: Option<(NonNull<u8>, Layout)>,
-    alloc: &mut A,
-) -> Result<NonNull<[u8]>, TryReserveError>
-where
-    A: Allocator,
-{
-    let memory = if let Some((ptr, old_layout)) = current_memory {
-        debug_assert_eq!(old_layout.align(), new_layout.align());
-        unsafe {
-            // The allocator checks for alignment equality
-            hint::assert_unchecked(old_layout.align() == new_layout.align());
-            alloc.grow(ptr, old_layout, new_layout)
-        }
-    } else {
-        alloc.allocate(new_layout)
-    };
-
-    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
-}
-
 // Central function for reserve error handling.
 #[cfg(not(no_global_oom_handling))]
 #[cold]
diff --git a/verifast-proofs/setup-verifast-home b/verifast-proofs/setup-verifast-home
index 069fe16a761b4..bb59ba4d016f6 100644
--- a/verifast-proofs/setup-verifast-home
+++ b/verifast-proofs/setup-verifast-home
@@ -18,11 +18,26 @@ if [[ ! -d "$VERIFAST_HOME" ]]; then
 fi
 case "$VFVERSION,$VFPLATFORM" in
+    26.01,linux)
+        # https://github.com/verifast/verifast/attestations/17117165
+        VFHASH=f7cb316b482c5113fe65801d44ff2d2db1490be07c2d05f1dee3547c4ada8390
+        RUST_VERSION=nightly-2025-11-25
+        ;;
+    26.01,macos-aarch)
+        # https://github.com/verifast/verifast/attestations/17117186
+        VFHASH=f316062f224b51f0956bf7375f34089558f4847671ef60e13899da6e079caf00
+        RUST_VERSION=nightly-2025-11-25
+        ;;
     25.11,linux)
         # https://github.com/verifast/verifast/attestations/14103492
         VFHASH=990c3cadba7cfc9ef9c19d5f1ff039fd746155164fe4a5ec365c625182400f3e
         RUST_VERSION=nightly-2025-10-09
         ;;
+    25.11,macos-aarch)
+        # https://github.com/verifast/verifast/attestations/14103502
+        VFHASH=f047f44a5884f57a4ad5177ed7eb6d8681fd9631e59681ae32ed8a3d75378bd5
+        RUST_VERSION=nightly-2025-10-09
+        ;;
     25.08,macos-aarch)
         # https://github.com/verifast/verifast/attestations/10123874
         VFHASH=fd489c4e5038945464dce06072c892f18b431a66fb47b77acde54b668e3d1d5f