From a5902c7e9955de2285a1a694a07674e8e9b53b0f Mon Sep 17 00:00:00 2001 From: Jay Bosamiya Date: Wed, 28 Feb 2024 17:49:43 -0500 Subject: [PATCH] Update snapshot examples, and add new test --- examples/atomic.rs | 574 + examples/ironfleet.rs | 16 +- examples/mimalloc.rs | 27906 +++++++++++++++++------------------ examples/nr.rs | 28 +- examples/pagetable.rs | 24 +- examples/storage.rs | 7154 ++++----- examples/vstd.rs | 74 +- tests/snapshot-examples.rs | 5 + 8 files changed, 18401 insertions(+), 17380 deletions(-) create mode 100644 examples/atomic.rs diff --git a/examples/atomic.rs b/examples/atomic.rs new file mode 100644 index 0000000..561c6df --- /dev/null +++ b/examples/atomic.rs @@ -0,0 +1,574 @@ +#![allow(unused_imports)] + +use core::sync::atomic::{ + AtomicBool, AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, + AtomicU64, AtomicU8, AtomicUsize, Ordering, +}; + +use crate::modes::*; +use crate::pervasive::*; +use builtin::*; +use builtin_macros::*; + +macro_rules! make_unsigned_integer_atomic { + ($at_ident:ident, $p_ident:ident, $p_data_ident:ident, $rust_ty: ty, $value_ty: ty, $wrap_add:ident, $wrap_sub:ident) => { + // TODO we could support `std::intrinsics::wrapping_add` + // and use that instead. + + verus! { + + pub open spec fn $wrap_add(a: int, b: int) -> int { + if a + b > (<$value_ty>::MAX as int) { + a + b - ((<$value_ty>::MAX as int) - (<$value_ty>::MIN as int) + 1) + } else { + a + b + } + } + + pub open spec fn $wrap_sub(a: int, b: int) -> int { + if a - b < (<$value_ty>::MIN as int) { + a - b + ((<$value_ty>::MAX as int) - (<$value_ty>::MIN as int) + 1) + } else { + a - b + } + } + + } // verus! + atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + #[cfg_attr(verus_keep_ghost, verus::internal(verus_macro))] + impl $at_ident { + atomic_common_methods!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + atomic_integer_methods!($at_ident, $p_ident, $rust_ty, $value_ty, $wrap_add, $wrap_sub); + } + }; +} + +macro_rules! make_signed_integer_atomic { + ($at_ident:ident, $p_ident:ident, $p_data_ident:ident, $rust_ty: ty, $value_ty: ty, $wrap_add:ident, $wrap_sub:ident) => { + verus! { + + pub open spec fn $wrap_add(a: int, b: int) -> int { + if a + b > (<$value_ty>::MAX as int) { + a + b - ((<$value_ty>::MAX as int) - (<$value_ty>::MIN as int) + 1) + } else if a + b < (<$value_ty>::MIN as int) { + a + b + ((<$value_ty>::MAX as int) - (<$value_ty>::MIN as int) + 1) + } else { + a + b + } + } + + pub open spec fn $wrap_sub(a: int, b: int) -> int { + if a - b > (<$value_ty>::MAX as int) { + a - b - ((<$value_ty>::MAX as int) - (<$value_ty>::MIN as int) + 1) + } else if a - b < (<$value_ty>::MIN as int) { + a - b + ((<$value_ty>::MAX as int) - (<$value_ty>::MIN as int) + 1) + } else { + a - b + } + } + + } // verus! + atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + #[cfg_attr(verus_keep_ghost, verus::internal(verus_macro))] + impl $at_ident { + atomic_common_methods!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + atomic_integer_methods!($at_ident, $p_ident, $rust_ty, $value_ty, $wrap_add, $wrap_sub); + } + }; +} + +macro_rules! 
make_bool_atomic { + ($at_ident:ident, $p_ident:ident, $p_data_ident:ident, $rust_ty: ty, $value_ty: ty) => { + atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + #[cfg_attr(verus_keep_ghost, verus::internal(verus_macro))] + impl $at_ident { + atomic_common_methods!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + atomic_bool_methods!($at_ident, $p_ident, $rust_ty, $value_ty); + } + }; +} + +macro_rules! atomic_types { + ($at_ident:ident, $p_ident:ident, $p_data_ident:ident, $rust_ty: ty, $value_ty: ty) => { + verus! { + + #[verifier::external_body] /* vattr */ + pub struct $at_ident { + ato: $rust_ty, + } + + #[verifier::external_body] /* vattr */ + pub tracked struct $p_ident { + no_copy: NoCopy, + } + + pub ghost struct $p_data_ident { + pub patomic: int, + pub value: $value_ty, + } + + impl $p_ident { + #[verifier::external_body] /* vattr */ + pub spec fn view(self) -> $p_data_ident; + + pub open spec fn is_for(&self, patomic: $at_ident) -> bool { + self.view().patomic == patomic.id() + } + + pub open spec fn points_to(&self, v: $value_ty) -> bool { + self.view().value == v + } + } + + } + }; +} + +macro_rules! atomic_common_methods { + ($at_ident:ident, $p_ident:ident, $p_data_ident:ident, $rust_ty: ty, $value_ty: ty) => { + verus!{ + + pub spec fn id(&self) -> int; + + #[inline(always)] + #[verifier::external_body] /* vattr */ + pub const fn new(i: $value_ty) -> (res: ($at_ident, Tracked<$p_ident>)) + ensures + equal(res.1@.view(), $p_data_ident{ patomic: res.0.id(), value: i }), + { + let p = $at_ident { ato: <$rust_ty>::new(i) }; + (p, Tracked::assume_new()) + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn load(&self, Tracked(perm): Tracked<&$p_ident>) -> (ret: $value_ty) + requires + equal(self.id(), perm.view().patomic), + ensures equal(perm.view().value, ret), + opens_invariants none + { + return self.ato.load(Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn store(&self, Tracked(perm): Tracked<&mut $p_ident>, v: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + ensures equal(perm.view().value, v) && equal(self.id(), perm.view().patomic), + opens_invariants none + { + self.ato.store(v, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn compare_exchange(&self, Tracked(perm): Tracked<&mut $p_ident>, current: $value_ty, new: $value_ty) -> (ret: Result<$value_ty, $value_ty>) + requires + equal(self.id(), old(perm).view().patomic), + ensures + equal(self.id(), perm.view().patomic) + && match ret { + Result::Ok(r) => + current == old(perm).view().value + && equal(perm.view().value, new) + && equal(r, old(perm).view().value), + Result::Err(r) => + current != old(perm).view().value + && equal(perm.view().value, old(perm).view().value) + && equal(r, old(perm).view().value), + }, + opens_invariants none + { + match self.ato.compare_exchange(current, new, Ordering::SeqCst, Ordering::SeqCst) { + Ok(x) => Result::Ok(x), + Err(x) => Result::Err(x), + } + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn compare_exchange_weak(&self, Tracked(perm): Tracked<&mut $p_ident>, current: $value_ty, new: $value_ty) -> (ret: Result<$value_ty, $value_ty>) + requires + equal(self.id(), old(perm).view().patomic), + ensures + equal(self.id(), perm.view().patomic) + && match ret { + 
Result::Ok(r) => + current == old(perm).view().value + && equal(perm.view().value, new) + && equal(r, old(perm).view().value), + Result::Err(r) => + equal(perm.view().value, old(perm).view().value) + && equal(r, old(perm).view().value), + }, + opens_invariants none + { + match self.ato.compare_exchange_weak(current, new, Ordering::SeqCst, Ordering::SeqCst) { + Ok(x) => Result::Ok(x), + Err(x) => Result::Err(x), + } + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn swap(&self, Tracked(perm): Tracked<&mut $p_ident>, v: $value_ty) -> (ret: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + ensures + equal(perm.view().value, v) + && equal(old(perm).view().value, ret) + && equal(self.id(), perm.view().patomic), + opens_invariants none + { + return self.ato.swap(v, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + pub fn into_inner(self, Tracked(perm): Tracked<$p_ident>) -> (ret: $value_ty) + requires + equal(self.id(), perm.view().patomic), + ensures equal(perm.view().value, ret), + opens_invariants none + { + return self.ato.into_inner(); + } + + } + }; +} + +macro_rules! atomic_integer_methods { + ($at_ident:ident, $p_ident:ident, $rust_ty: ty, $value_ty: ty, $wrap_add:ident, $wrap_sub:ident) => { + verus!{ + + // Note that wrapping-on-overflow is the defined behavior for fetch_add and fetch_sub + // for Rust's atomics (in contrast to ordinary arithmetic) + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_add_wrapping(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value as int == $wrap_add(old(perm).view().value as int, n as int), + opens_invariants none + { + return self.ato.fetch_add(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_sub_wrapping(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value as int == $wrap_sub(old(perm).view().value as int, n as int), + opens_invariants none + { + return self.ato.fetch_sub(n, Ordering::SeqCst); + } + + // fetch_add and fetch_sub are more natural in the common case that you + // don't expect wrapping + + #[inline(always)] + #[verifier::atomic] /* vattr */ + pub fn fetch_add(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + (<$value_ty>::MIN as int) <= old(perm).view().value + n, + old(perm).view().value + n <= (<$value_ty>::MAX as int), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == old(perm).view().value + n, + opens_invariants none + { + self.fetch_add_wrapping(Tracked(&mut *perm), n) + } + + #[inline(always)] + #[verifier::atomic] /* vattr */ + pub fn fetch_sub(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + (<$value_ty>::MIN as int) <= old(perm).view().value - n, + old(perm).view().value - n <= <$value_ty>::MAX as int, + ensures + 
equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == old(perm).view().value - n, + opens_invariants none + { + self.fetch_sub_wrapping(Tracked(&mut *perm), n) + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_and(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == (old(perm).view().value & n), + opens_invariants none + { + return self.ato.fetch_and(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_or(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == (old(perm).view().value | n), + opens_invariants none + { + return self.ato.fetch_or(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_xor(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == (old(perm).view().value ^ n), + opens_invariants none + { + return self.ato.fetch_xor(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_nand(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == !(old(perm).view().value & n), + opens_invariants none + { + return self.ato.fetch_nand(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_max(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == (if old(perm).view().value > n { old(perm).view().value } else { n }), + opens_invariants none + { + return self.ato.fetch_max(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_min(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret), + perm.view().patomic == old(perm).view().patomic, + perm.view().value == (if old(perm).view().value < n { old(perm).view().value } else { n }), + opens_invariants none + { + return self.ato.fetch_min(n, Ordering::SeqCst); + } + + } + }; +} + +macro_rules! 
atomic_bool_methods { + ($at_ident:ident, $p_ident:ident, $rust_ty: ty, $value_ty: ty) => { + verus!{ + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_and(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret) + && perm.view().patomic == old(perm).view().patomic + && perm.view().value == (old(perm).view().value && n), + opens_invariants none + { + return self.ato.fetch_and(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_or(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret) + && perm.view().patomic == old(perm).view().patomic + && perm.view().value == (old(perm).view().value || n), + opens_invariants none + { + return self.ato.fetch_or(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_xor(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret) + && perm.view().patomic == old(perm).view().patomic + && perm.view().value == ((old(perm).view().value && !n) || (!old(perm).view().value && n)), + opens_invariants none + { + return self.ato.fetch_xor(n, Ordering::SeqCst); + } + + #[inline(always)] + #[verifier::external_body] /* vattr */ + #[verifier::atomic] /* vattr */ + pub fn fetch_nand(&self, Tracked(perm): Tracked<&mut $p_ident>, n: $value_ty) -> (ret: $value_ty) + requires + equal(self.id(), old(perm).view().patomic), + ensures + equal(old(perm).view().value, ret) + && perm.view().patomic == old(perm).view().patomic + && perm.view().value == !(old(perm).view().value && n), + opens_invariants none + { + return self.ato.fetch_nand(n, Ordering::SeqCst); + } + + } + }; +} + +make_bool_atomic!( + PAtomicBool, + PermissionBool, + PermissionDataBool, + AtomicBool, + bool +); + +make_unsigned_integer_atomic!( + PAtomicU8, + PermissionU8, + PermissionDataU8, + AtomicU8, + u8, + wrapping_add_u8, + wrapping_sub_u8 +); +make_unsigned_integer_atomic!( + PAtomicU16, + PermissionU16, + PermissionDataU16, + AtomicU16, + u16, + wrapping_add_u16, + wrapping_sub_u16 +); +make_unsigned_integer_atomic!( + PAtomicU32, + PermissionU32, + PermissionDataU32, + AtomicU32, + u32, + wrapping_add_u32, + wrapping_sub_u32 +); +make_unsigned_integer_atomic!( + PAtomicU64, + PermissionU64, + PermissionDataU64, + AtomicU64, + u64, + wrapping_add_u64, + wrapping_sub_u64 +); +make_unsigned_integer_atomic!( + PAtomicUsize, + PermissionUsize, + PermissionDataUsize, + AtomicUsize, + usize, + wrapping_add_usize, + wrapping_sub_usize +); + +make_signed_integer_atomic!( + PAtomicI8, + PermissionI8, + PermissionDataI8, + AtomicI8, + i8, + wrapping_add_i8, + wrapping_sub_i8 +); +make_signed_integer_atomic!( + PAtomicI16, + PermissionI16, + PermissionDataI16, + AtomicI16, + i16, + wrapping_add_i16, + wrapping_sub_i16 +); +make_signed_integer_atomic!( + PAtomicI32, + PermissionI32, + PermissionDataI32, + AtomicI32, + i32, + wrapping_add_i32, + wrapping_sub_i32 +); +make_signed_integer_atomic!( + PAtomicI64, + PermissionI64, + PermissionDataI64, + AtomicI64, + i64, + wrapping_add_i64, + wrapping_sub_i64 +); 
+make_signed_integer_atomic!( + PAtomicIsize, + PermissionIsize, + PermissionDataIsize, + AtomicIsize, + isize, + wrapping_add_isize, + wrapping_sub_isize +); + +// TODO Support AtomicPtr diff --git a/examples/ironfleet.rs b/examples/ironfleet.rs index 86cedd2..3113349 100644 --- a/examples/ironfleet.rs +++ b/examples/ironfleet.rs @@ -91,7 +91,7 @@ pub enum AppReply { } } // verus! -// verus + // verus } mod app_interface_t { @@ -2682,7 +2682,7 @@ impl DelegationMap { } } // verus! -// end verus! + // end verus! } mod endpoint_hashmap_t { @@ -2901,7 +2901,7 @@ pub open spec fn is_valid_lio_op( // LIoOpSeqCompatibleWithReduction } // verus! -// verus + // verus } mod hashmap_t { @@ -7113,7 +7113,7 @@ pub fn sht_main(netc: NetClient, args: Args) -> Result<(), IronError> } } // verus! -// verus + // verus } mod marshal_ironsht_specific_v { @@ -11651,7 +11651,7 @@ pub proof fn choose_smallest(low: int, high: int, p: FnSpec(int) -> bool) -> (re } } // verus! -} + } pub mod clone_v { use vstd::prelude::*; @@ -11667,7 +11667,7 @@ pub trait VerusClone: Sized { } } // verus! -} + } pub mod seq_lib_v { use builtin::*; @@ -12002,7 +12002,7 @@ pub proof fn some_differing_index_for_unequal_seqs(s1: Seq, s2: Seq) -> } } // verus! -} + } pub mod set_lib_ext_v { use builtin::*; @@ -12343,7 +12343,7 @@ pub proof fn flatten_sets_singleton_auto() // lemmas somewhere, but it's not easy to see from the profiler yet. } // verus! -} + } } // TODO: maybe move into Verus? diff --git a/examples/mimalloc.rs b/examples/mimalloc.rs index a668e06..40efc7d 100644 --- a/examples/mimalloc.rs +++ b/examples/mimalloc.rs @@ -16,293 +16,290 @@ mod os_mem { verus! { - #[verus::trusted] - pub open spec fn page_size() -> int { - 4096 - } - - #[verus::trusted] - pub fn get_page_size() -> (u: usize) - ensures - u == page_size(), - { - 4096 - } - - #[verus::trusted] - #[verifier(external_body)] - pub tracked struct OsMem { - no_copy: NoCopy, - } - - #[verus::trusted] - pub ghost struct MemProtect { - pub read: bool, - pub write: bool, - } - - #[verus::trusted] - pub ghost struct OsMemData { - pub byte_addr: int, - pub mem_protect: MemProtect, - } - - #[verus::trusted] - pub tracked struct MemChunk { - pub os: Map, - pub points_to: PointsToRaw, - } - - #[verus::trusted] - impl MemChunk { - pub open spec fn wf(&self) -> bool { - self.wf_os() - } - - pub open spec fn wf_os(&self) -> bool { - forall|addr: int| - #[trigger] - self.os.dom().contains(addr) ==> self.os[addr]@.byte_addr == addr - } - - #[verifier::inline] - pub open spec fn range_os(&self) -> Set { - self.os.dom() - } +#[verus::trusted] +pub open spec fn page_size() -> int { + 4096 +} - pub open spec fn range_os_rw(&self) -> Set { - Set::::new( - |addr| - self.os.dom().contains(addr) && self.os[addr]@.mem_protect == MemProtect { - read: true, - write: true, - }, - ) - } +#[verus::trusted] +pub fn get_page_size() -> (u: usize) + ensures + u == page_size(), +{ + 4096 +} - pub open spec fn range_os_none(&self) -> Set { - Set::::new( - |addr| - self.os.dom().contains(addr) && self.os[addr]@.mem_protect == MemProtect { - read: false, - write: false, - }, - ) - } +#[verus::trusted] +#[verifier(external_body)] +pub tracked struct OsMem { + no_copy: NoCopy, +} - #[verifier::inline] - pub open spec fn range_points_to(&self) -> Set { - self.points_to@.dom() - } +#[verus::trusted] +pub ghost struct MemProtect { + pub read: bool, + pub write: bool, +} - pub open spec fn has_pointsto_for_all_read_write(&self) -> bool { - self.range_os_rw() <= self.range_points_to() - } 
+#[verus::trusted] +pub ghost struct OsMemData { + pub byte_addr: int, + pub mem_protect: MemProtect, +} - pub open spec fn os_has_range(&self, start: int, len: int) -> bool { - set_int_range(start, start + len) <= self.range_os() - } +#[verus::trusted] +pub tracked struct MemChunk { + pub os: Map, + pub points_to: PointsToRaw, +} - pub open spec fn os_exact_range(&self, start: int, len: int) -> bool { - set_int_range(start, start + len) =~= self.range_os() - } +#[verus::trusted] +impl MemChunk { + pub open spec fn wf(&self) -> bool { + self.wf_os() + } - pub open spec fn os_has_range_read_write(&self, start: int, len: int) -> bool { - set_int_range(start, start + len) <= self.range_os_rw() - } + pub open spec fn wf_os(&self) -> bool { + forall|addr: int| #[trigger] + self.os.dom().contains(addr) ==> self.os[addr]@.byte_addr == addr + } - pub open spec fn os_has_range_no_read_write(&self, start: int, len: int) -> bool { - set_int_range(start, start + len) <= self.range_os_none() - } + #[verifier::inline] + pub open spec fn range_os(&self) -> Set { + self.os.dom() + } - pub open spec fn has_new_pointsto(&self, the_old: &MemChunk) -> bool { - // Same domain for OS permissions knowledge - self.os.dom() == the_old.os.dom()// points_to grew monotonically - && the_old.points_to@.dom().subset_of( - self.points_to@.dom(), - )// stuff with rw permission grew monotonically - && (forall|addr: int| - #[trigger] - the_old.os.dom().contains(addr) ==> the_old.os[addr]@.mem_protect == MemProtect { - read: true, - write: true, - } ==> self.os[addr]@.mem_protect == MemProtect { - read: true, - write: true, - })// Anything that became rw, we now have the points_to for it - && (forall|addr: int| + pub open spec fn range_os_rw(&self) -> Set { + Set::::new( + |addr| self.os.dom().contains(addr) && self.os[addr]@.mem_protect == MemProtect { read: true, write: true, - } && the_old.os[addr]@.mem_protect != MemProtect { read: true, write: true } - ==> #[trigger] - self.points_to@.dom().contains(addr)) - } + }, + ) } - #[verus::trusted] - impl OsMem { - pub spec fn view(self) -> OsMemData; + pub open spec fn range_os_none(&self) -> Set { + Set::::new( + |addr| + self.os.dom().contains(addr) && self.os[addr]@.mem_protect == MemProtect { + read: false, + write: false, + }, + ) } - #[verus::trusted] - pub const MAP_FAILED: usize = usize::MAX; + #[verifier::inline] + pub open spec fn range_points_to(&self) -> Set { + self.points_to@.dom() + } - //// Wrappers - // TODO should allow these to return 0 for error case - #[verus::trusted] - #[verifier::external_body] - pub fn mmap_prot_none(hint: usize, len: usize) -> (pt: (usize, Tracked)) - requires - hint as int % page_size() == 0, - len as int % page_size() == 0, - ensures - pt.0 != MAP_FAILED ==> pt.1@.wf(), - pt.0 != MAP_FAILED ==> pt.1@.os_exact_range(pt.0 as int, len as int), - pt.0 != MAP_FAILED ==> pt.1@.os_has_range_no_read_write(pt.0 as int, len as int), - pt.0 != MAP_FAILED ==> pt.0 + len < usize::MAX, - { - let p = _mmap_prot_none(hint as *mut libc::c_void, len); - let p = if p == libc::MAP_FAILED { - MAP_FAILED - } else { - p as usize - }; - (p, Tracked::assume_new()) + pub open spec fn has_pointsto_for_all_read_write(&self) -> bool { + self.range_os_rw() <= self.range_points_to() } - #[verus::trusted] - #[verifier::external_body] - pub fn mmap_prot_read_write(hint: usize, len: usize) -> (pt: (usize, Tracked)) - requires - hint as int % page_size() == 0, - len as int % page_size() == 0, - ensures - pt.0 != MAP_FAILED ==> pt.1@.wf(), - pt.0 != MAP_FAILED ==> 
pt.1@.os_exact_range(pt.0 as int, len as int), - pt.0 != MAP_FAILED ==> pt.1@.os_has_range_read_write(pt.0 as int, len as int), - pt.0 != MAP_FAILED ==> pt.1@.has_pointsto_for_all_read_write(), - pt.0 != MAP_FAILED ==> pt.0 + len < usize::MAX, - pt.0 != MAP_FAILED ==> pt.0 as int % page_size() == 0, - { - let p = _mmap_prot_read_write(hint as *mut libc::c_void, len); - let p = if p == libc::MAP_FAILED { - MAP_FAILED - } else { - p as usize - }; - (p, Tracked::assume_new()) + pub open spec fn os_has_range(&self, start: int, len: int) -> bool { + set_int_range(start, start + len) <= self.range_os() } - #[verus::trusted] - #[verifier::external_body] - pub fn mprotect_prot_none(addr: PPtr, len: usize, Tracked(mem): Tracked<&mut MemChunk>) - requires - addr.id() as int % page_size() == 0, - len as int % page_size() == 0, - old(mem).wf(), - old(mem).os_exact_range(addr.id(), len as int), - old(mem).has_pointsto_for_all_read_write(), - ensures - mem.wf(), - mem.os_exact_range(addr.id(), len as int), - mem.os_has_range_no_read_write(addr.id(), len as int), - mem.points_to@ === Map::empty(), - { - _mprotect_prot_none(addr.uptr as *mut libc::c_void, len); + pub open spec fn os_exact_range(&self, start: int, len: int) -> bool { + set_int_range(start, start + len) =~= self.range_os() } - #[verus::trusted] - #[verifier::external_body] - pub fn mprotect_prot_read_write(addr: PPtr, len: usize, Tracked(mem): Tracked<&mut MemChunk>) - requires - addr.id() as int % page_size() == 0, - len as int % page_size() == 0, - old(mem).wf(), - old(mem).os_exact_range(addr.id(), len as int), - ensures - mem.wf(), - mem.os_exact_range(addr.id(), len as int), - mem.os_has_range_read_write(addr.id(), len as int), - mem.has_new_pointsto(&*old(mem)), - old(mem).has_pointsto_for_all_read_write() ==> mem.has_pointsto_for_all_read_write(), - { - _mprotect_prot_read_write(addr.uptr as *mut libc::c_void, len); + pub open spec fn os_has_range_read_write(&self, start: int, len: int) -> bool { + set_int_range(start, start + len) <= self.range_os_rw() } - //// Tested for macOS / Linux - #[verus::trusted] - #[verifier::external] - fn _mmap_prot_read_write(hint_addr: *mut libc::c_void, len: usize) -> *mut libc::c_void { - unsafe { - libc::mmap( - hint_addr, - len, - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - // The fd argument is ignored [if MAP_ANONYMOUS is specified]; however, - // some implementations require fd to be -1 - -1, - 0, - ) - } + pub open spec fn os_has_range_no_read_write(&self, start: int, len: int) -> bool { + set_int_range(start, start + len) <= self.range_os_none() } - #[verifier::external] - fn _mmap_prot_none(hint_addr: *mut libc::c_void, len: usize) -> *mut libc::c_void { - unsafe { - libc::mmap( - hint_addr, - len, - PROT_NONE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, - // The fd argument is ignored [if MAP_ANONYMOUS is specified]; however, - // some implementations require fd to be -1 - -1, - 0, - ) - } + pub open spec fn has_new_pointsto(&self, the_old: &MemChunk) -> bool { + // Same domain for OS permissions knowledge + self.os.dom() == the_old.os.dom() // points_to grew monotonically + && the_old.points_to@.dom().subset_of( + self.points_to@.dom(), + ) // stuff with rw permission grew monotonically + && (forall|addr: int| #[trigger] + the_old.os.dom().contains(addr) ==> the_old.os[addr]@.mem_protect == MemProtect { + read: true, + write: true, + } ==> self.os[addr]@.mem_protect == MemProtect { + read: true, + write: true, + }) // Anything that became rw, we now have the 
points_to for it + && (forall|addr: int| + self.os.dom().contains(addr) && self.os[addr]@.mem_protect == MemProtect { + read: true, + write: true, + } && the_old.os[addr]@.mem_protect != MemProtect { read: true, write: true } + ==> #[trigger] self.points_to@.dom().contains(addr)) } +} - #[verus::trusted] - #[verifier::external] - fn _mprotect_prot_read_write(addr: *mut libc::c_void, len: usize) { - unsafe { - let res = libc::mprotect(addr as *mut libc::c_void, len, PROT_READ | PROT_WRITE); - if res != 0 { - panic!("mprotect failed"); - } +#[verus::trusted] +impl OsMem { + pub spec fn view(self) -> OsMemData; +} + +#[verus::trusted] +pub const MAP_FAILED: usize = usize::MAX; + +//// Wrappers +// TODO should allow these to return 0 for error case +#[verus::trusted] +#[verifier::external_body] +pub fn mmap_prot_none(hint: usize, len: usize) -> (pt: (usize, Tracked)) + requires + hint as int % page_size() == 0, + len as int % page_size() == 0, + ensures + pt.0 != MAP_FAILED ==> pt.1@.wf(), + pt.0 != MAP_FAILED ==> pt.1@.os_exact_range(pt.0 as int, len as int), + pt.0 != MAP_FAILED ==> pt.1@.os_has_range_no_read_write(pt.0 as int, len as int), + pt.0 != MAP_FAILED ==> pt.0 + len < usize::MAX, +{ + let p = _mmap_prot_none(hint as *mut libc::c_void, len); + let p = if p == libc::MAP_FAILED { + MAP_FAILED + } else { + p as usize + }; + (p, Tracked::assume_new()) +} + +#[verus::trusted] +#[verifier::external_body] +pub fn mmap_prot_read_write(hint: usize, len: usize) -> (pt: (usize, Tracked)) + requires + hint as int % page_size() == 0, + len as int % page_size() == 0, + ensures + pt.0 != MAP_FAILED ==> pt.1@.wf(), + pt.0 != MAP_FAILED ==> pt.1@.os_exact_range(pt.0 as int, len as int), + pt.0 != MAP_FAILED ==> pt.1@.os_has_range_read_write(pt.0 as int, len as int), + pt.0 != MAP_FAILED ==> pt.1@.has_pointsto_for_all_read_write(), + pt.0 != MAP_FAILED ==> pt.0 + len < usize::MAX, + pt.0 != MAP_FAILED ==> pt.0 as int % page_size() == 0, +{ + let p = _mmap_prot_read_write(hint as *mut libc::c_void, len); + let p = if p == libc::MAP_FAILED { + MAP_FAILED + } else { + p as usize + }; + (p, Tracked::assume_new()) +} + +#[verus::trusted] +#[verifier::external_body] +pub fn mprotect_prot_none(addr: PPtr, len: usize, Tracked(mem): Tracked<&mut MemChunk>) + requires + addr.id() as int % page_size() == 0, + len as int % page_size() == 0, + old(mem).wf(), + old(mem).os_exact_range(addr.id(), len as int), + old(mem).has_pointsto_for_all_read_write(), + ensures + mem.wf(), + mem.os_exact_range(addr.id(), len as int), + mem.os_has_range_no_read_write(addr.id(), len as int), + mem.points_to@ === Map::empty(), +{ + _mprotect_prot_none(addr.uptr as *mut libc::c_void, len); +} + +#[verus::trusted] +#[verifier::external_body] +pub fn mprotect_prot_read_write(addr: PPtr, len: usize, Tracked(mem): Tracked<&mut MemChunk>) + requires + addr.id() as int % page_size() == 0, + len as int % page_size() == 0, + old(mem).wf(), + old(mem).os_exact_range(addr.id(), len as int), + ensures + mem.wf(), + mem.os_exact_range(addr.id(), len as int), + mem.os_has_range_read_write(addr.id(), len as int), + mem.has_new_pointsto(&*old(mem)), + old(mem).has_pointsto_for_all_read_write() ==> mem.has_pointsto_for_all_read_write(), +{ + _mprotect_prot_read_write(addr.uptr as *mut libc::c_void, len); +} + +//// Tested for macOS / Linux +#[verus::trusted] +#[verifier::external] +fn _mmap_prot_read_write(hint_addr: *mut libc::c_void, len: usize) -> *mut libc::c_void { + unsafe { + libc::mmap( + hint_addr, + len, + PROT_READ | PROT_WRITE, + 
MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + // The fd argument is ignored [if MAP_ANONYMOUS is specified]; however, + // some implementations require fd to be -1 + -1, + 0, + ) + } +} + +#[verifier::external] +fn _mmap_prot_none(hint_addr: *mut libc::c_void, len: usize) -> *mut libc::c_void { + unsafe { + libc::mmap( + hint_addr, + len, + PROT_NONE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, + // The fd argument is ignored [if MAP_ANONYMOUS is specified]; however, + // some implementations require fd to be -1 + -1, + 0, + ) + } +} + +#[verus::trusted] +#[verifier::external] +fn _mprotect_prot_read_write(addr: *mut libc::c_void, len: usize) { + unsafe { + let res = libc::mprotect(addr as *mut libc::c_void, len, PROT_READ | PROT_WRITE); + if res != 0 { + panic!("mprotect failed"); } } +} - #[verus::trusted] - #[verifier::external] - fn _mprotect_prot_none(addr: *mut libc::c_void, len: usize) { - unsafe { - let res = libc::mprotect(addr as *mut libc::c_void, len, PROT_NONE); - if res != 0 { - panic!("mprotect failed"); - } +#[verus::trusted] +#[verifier::external] +fn _mprotect_prot_none(addr: *mut libc::c_void, len: usize) { + unsafe { + let res = libc::mprotect(addr as *mut libc::c_void, len, PROT_NONE); + if res != 0 { + panic!("mprotect failed"); } } +} - //// Misc utilities - #[verus::trusted] - #[verifier::external_type_specification] - pub struct ExTimespec(libc::timespec); +//// Misc utilities +#[verus::trusted] +#[verifier::external_type_specification] +pub struct ExTimespec(libc::timespec); - #[verus::trusted] - #[inline(always)] - #[verifier::external_body] - pub fn clock_gettime_monotonic() -> libc::timespec { - let mut ts = libc::timespec { tv_sec: 0, tv_nsec: 0 }; - unsafe { - libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut ts); - } - ts +#[verus::trusted] +#[inline(always)] +#[verifier::external_body] +pub fn clock_gettime_monotonic() -> libc::timespec { + let mut ts = libc::timespec { tv_sec: 0, tv_nsec: 0 }; + unsafe { + libc::clock_gettime(libc::CLOCK_MONOTONIC, &mut ts); } + ts +} - } // verus! +} // verus! } mod thread { @@ -312,122 +309,122 @@ mod thread { verus! { - // Note that ThreadIds may be re-used since we use the u64 version - #[verus::trusted] - pub struct ThreadId { - pub thread_id: u64, - } - - /// Proof object that guarantees the owning thread has the given ThreadId. - #[verus::trusted] - #[cfg(verus_keep_ghost)] - #[verifier(external_body)] - pub tracked struct IsThread {} - - #[verus::trusted] - #[cfg(verus_keep_ghost)] - impl !Sync for IsThread { - - } - - #[verus::trusted] - #[cfg(verus_keep_ghost)] - impl !Send for IsThread { - - } - - // TODO: remove this when !Sync, !Send are supported by stable Rust - #[cfg(not(verus_keep_ghost))] - #[verifier(external_body)] - #[verus::trusted] - pub tracked struct IsThread { - _no_send_sync: core::marker::PhantomData<*const ()>, - } +// Note that ThreadIds may be re-used since we use the u64 version +#[verus::trusted] +pub struct ThreadId { + pub thread_id: u64, +} - #[verus::trusted] - impl IsThread { - pub spec fn view(&self) -> ThreadId; +/// Proof object that guarantees the owning thread has the given ThreadId. +#[verus::trusted] +#[cfg(verus_keep_ghost)] +#[verifier(external_body)] +pub tracked struct IsThread {} - /// Guarantees that any two `IsThread` objects on the same thread - /// will have the same ID. 
- #[verifier(external_body)] - pub proof fn agrees(tracked self, tracked other: IsThread) - ensures - self@ == other@, - { - unimplemented!(); - } +#[verus::trusted] +#[cfg(verus_keep_ghost)] +impl !Sync for IsThread { - #[verifier(external_body)] - pub proof fn nonzero(tracked self) - ensures - self@.thread_id != 0, - { - unimplemented!(); - } - } +} - #[verus::trusted] - #[verifier(external)] - impl Clone for IsThread { - #[cfg(verus_keep_ghost)] - fn clone(&self) -> Self { - IsThread { } - } +#[verus::trusted] +#[cfg(verus_keep_ghost)] +impl !Send for IsThread { - #[cfg(not(verus_keep_ghost))] - fn clone(&self) -> Self { - IsThread { _no_send_sync: Default::default() } - } - } +} - #[verus::trusted] - impl Copy for IsThread { +// TODO: remove this when !Sync, !Send are supported by stable Rust +#[cfg(not(verus_keep_ghost))] +#[verifier(external_body)] +#[verus::trusted] +pub tracked struct IsThread { + _no_send_sync: core::marker::PhantomData<*const ()>, +} - } +#[verus::trusted] +impl IsThread { + pub spec fn view(&self) -> ThreadId; - // Note: NO guarantee that a thread is not re-used - #[verus::trusted] - #[cfg(feature = "override_system_allocator")] - #[cfg(target_os = "linux")] - #[verifier::external_body] - pub fn thread_id() -> (res: (ThreadId, Tracked)) + /// Guarantees that any two `IsThread` objects on the same thread + /// will have the same ID. + #[verifier(external_body)] + pub proof fn agrees(tracked self, tracked other: IsThread) ensures - res.1@@ == res.0, + self@ == other@, { - let id: i32 = unsafe { libc::gettid() }; - let id_u64: u64 = ((id as u64) << 1) | 1; // make sure it's nonzero - let id = ThreadId { thread_id: id_u64 }; - (id, Tracked::assume_new()) + unimplemented!(); } - // NOTE: std::thread recursively calls malloc, so this can't be used when doing override - #[verus::trusted] - #[cfg(not(feature = "override_system_allocator"))] - #[verifier::external_body] - pub fn thread_id() -> (res: (ThreadId, Tracked)) + #[verifier(external_body)] + pub proof fn nonzero(tracked self) ensures - res.1@@ == res.0, + self@.thread_id != 0, { - let id: std::thread::ThreadId = std::thread::current().id(); - let id = ThreadId { thread_id: id.as_u64().into() }; - (id, Tracked::assume_new()) + unimplemented!(); } +} - /// Returns _just_ the ghost object, without physically obtaining the thread ID. 
- #[verus::trusted] - #[verifier::external_body] - pub proof fn ghost_thread_id() -> (tracked res: IsThread) { - unimplemented!(); +#[verus::trusted] +#[verifier(external)] +impl Clone for IsThread { + #[cfg(verus_keep_ghost)] + fn clone(&self) -> Self { + IsThread { } } - #[verus::trusted] - #[verifier::external_fn_specification] - pub fn ex_yield_now() { - std::thread::yield_now() + #[cfg(not(verus_keep_ghost))] + fn clone(&self) -> Self { + IsThread { _no_send_sync: Default::default() } } +} + +#[verus::trusted] +impl Copy for IsThread { + +} + +// Note: NO guarantee that a thread is not re-used +#[verus::trusted] +#[cfg(feature = "override_system_allocator")] +#[cfg(target_os = "linux")] +#[verifier::external_body] +pub fn thread_id() -> (res: (ThreadId, Tracked)) + ensures + res.1@@ == res.0, +{ + let id: i32 = unsafe { libc::gettid() }; + let id_u64: u64 = ((id as u64) << 1) | 1; // make sure it's nonzero + let id = ThreadId { thread_id: id_u64 }; + (id, Tracked::assume_new()) +} + +// NOTE: std::thread recursively calls malloc, so this can't be used when doing override +#[verus::trusted] +#[cfg(not(feature = "override_system_allocator"))] +#[verifier::external_body] +pub fn thread_id() -> (res: (ThreadId, Tracked)) + ensures + res.1@@ == res.0, +{ + let id: std::thread::ThreadId = std::thread::current().id(); + let id = ThreadId { thread_id: id.as_u64().into() }; + (id, Tracked::assume_new()) +} - } // verus! +/// Returns _just_ the ghost object, without physically obtaining the thread ID. +#[verus::trusted] +#[verifier::external_body] +pub proof fn ghost_thread_id() -> (tracked res: IsThread) { + unimplemented!(); +} + +#[verus::trusted] +#[verifier::external_fn_specification] +pub fn ex_yield_now() { + std::thread::yield_now() +} + +} // verus! } // fundamentals and definitions @@ -451,116 +448,116 @@ mod tokens { verus! { - pub ghost struct HeapId { - pub id: nat, - pub uniq: int, - } +pub ghost struct HeapId { + pub id: nat, + pub uniq: int, +} - pub ghost struct TldId { - pub id: nat, - } +pub ghost struct TldId { + pub id: nat, +} - pub ghost struct SegmentId { - pub id: nat, - pub uniq: int, - } +pub ghost struct SegmentId { + pub id: nat, + pub uniq: int, +} - pub ghost struct PageId { - pub segment_id: SegmentId, - pub idx: nat, - } +pub ghost struct PageId { + pub segment_id: SegmentId, + pub idx: nat, +} - pub ghost struct BlockId { - pub page_id: PageId, - // Index of the block within the *page* - pub idx: nat, - // Recall that a page may be multiple slices. - // The `slice_idx` is the index of the *specific* slice that this block is in. - // (Relative to the segment, so the slice's "offset" is going to be - // slice_idx - page_id.idx) - pub slice_idx: nat, - pub block_size: nat, - } +pub ghost struct BlockId { + pub page_id: PageId, + // Index of the block within the *page* + pub idx: nat, + // Recall that a page may be multiple slices. + // The `slice_idx` is the index of the *specific* slice that this block is in. 
+ // (Relative to the segment, so the slice's "offset" is going to be + // slice_idx - page_id.idx) + pub slice_idx: nat, + pub block_size: nat, +} - impl PageId { - pub open spec fn range_from(&self, lo: int, hi: int) -> Set { - Set::new( - |page_id: PageId| - page_id.segment_id == self.segment_id && self.idx + lo <= page_id.idx < self.idx - + hi, - ) - } +impl PageId { + pub open spec fn range_from(&self, lo: int, hi: int) -> Set { + Set::new( + |page_id: PageId| + page_id.segment_id == self.segment_id && self.idx + lo <= page_id.idx < self.idx + + hi, + ) } +} - impl BlockId { - pub open spec fn wf(&self) -> bool { - self.slice_idx >= self.page_id.idx - } - - pub open spec fn page_id_for_slice(&self) -> PageId { - PageId { segment_id: self.page_id.segment_id, idx: self.slice_idx } - } +impl BlockId { + pub open spec fn wf(&self) -> bool { + self.slice_idx >= self.page_id.idx + } - pub open spec fn get_slice_idx(page_id: PageId, idx: nat, block_size: nat) -> nat { - (page_id.idx + (crate::layout::start_offset(block_size as int) + idx * block_size) / ( - SLICE_SIZE as int)) as nat - } + pub open spec fn page_id_for_slice(&self) -> PageId { + PageId { segment_id: self.page_id.segment_id, idx: self.slice_idx } + } - pub open spec fn slice_idx_is_right(&self) -> bool { - self.slice_idx == BlockId::get_slice_idx(self.page_id, self.idx, self.block_size) - } + pub open spec fn get_slice_idx(page_id: PageId, idx: nat, block_size: nat) -> nat { + (page_id.idx + (crate::layout::start_offset(block_size as int) + idx * block_size) / ( + SLICE_SIZE as int)) as nat } - // States - pub ghost enum DelayState { - UseDelayedFree, - Freeing, - NoDelayedFree, - NeverDelayedFree, + pub open spec fn slice_idx_is_right(&self) -> bool { + self.slice_idx == BlockId::get_slice_idx(self.page_id, self.idx, self.block_size) } +} - impl DelayState { - pub open spec fn to_int(&self) -> int { - match self { - DelayState::UseDelayedFree => 0, - DelayState::Freeing => 1, - DelayState::NoDelayedFree => 2, - DelayState::NeverDelayedFree => 3, - } +// States +pub ghost enum DelayState { + UseDelayedFree, + Freeing, + NoDelayedFree, + NeverDelayedFree, +} + +impl DelayState { + pub open spec fn to_int(&self) -> int { + match self { + DelayState::UseDelayedFree => 0, + DelayState::Freeing => 1, + DelayState::NoDelayedFree => 2, + DelayState::NeverDelayedFree => 3, } } +} - /*pub struct PageThreadListState { +/*pub struct PageThreadListState { pub delayed: DelayedState, pub len: nat, }*/ - pub ghost struct PageState { - pub offset: int, - //pub prev: Option, - //pub next: Option, - pub block_size: nat, - pub num_blocks: nat, - pub shared_access: PageSharedAccess, - pub is_enabled: bool, - } +pub ghost struct PageState { + pub offset: int, + //pub prev: Option, + //pub next: Option, + pub block_size: nat, + pub num_blocks: nat, + pub shared_access: PageSharedAccess, + pub is_enabled: bool, +} - pub ghost struct SegmentState { - // TODO what do we need here? - //pub has_extra_slice: bool, - pub shared_access: SegmentSharedAccess, - pub is_enabled: bool, - } +pub ghost struct SegmentState { + // TODO what do we need here? 
+ //pub has_extra_slice: bool, + pub shared_access: SegmentSharedAccess, + pub is_enabled: bool, +} - pub ghost struct BlockState { - // Shared access this element can 'guard' - pub segment_shared_access: SegmentSharedAccess, - pub page_shared_access: PageSharedAccess, - pub page_slice_shared_access: PageSharedAccess, - pub heap_id: Option, - } +pub ghost struct BlockState { + // Shared access this element can 'guard' + pub segment_shared_access: SegmentSharedAccess, + pub page_shared_access: PageSharedAccess, + pub page_slice_shared_access: PageSharedAccess, + pub heap_id: Option, +} - /* +/* pub ghost struct PageQueueIds { // TODO are these supposed to be options? pub first: Option, @@ -568,144 +565,144 @@ mod tokens { } */ - pub ghost struct HeapState { - // For the doubly-linked list of Pages - //pub pages: Map, - //pub full_list: PageQueueIds, - pub shared_access: HeapSharedAccess, - } +pub ghost struct HeapState { + // For the doubly-linked list of Pages + //pub pages: Map, + //pub full_list: PageQueueIds, + pub shared_access: HeapSharedAccess, +} - pub ghost struct ThreadState { - pub heap_id: HeapId, - pub heap: HeapState, - pub segments: Map, - pub pages: Map, - } +pub ghost struct ThreadState { + pub heap_id: HeapId, + pub heap: HeapState, + pub segments: Map, + pub pages: Map, +} - pub ghost struct ThreadCheckedState { - pub pages: Set, - } +pub ghost struct ThreadCheckedState { + pub pages: Set, +} - // Shared States - use crate::types::SegmentSharedAccess; - use crate::types::SegmentLocalAccess; +// Shared States +use crate::types::SegmentSharedAccess; +use crate::types::SegmentLocalAccess; + +//pub struct PageSharedAccess { i: int } +//pub struct PageLocalAccess { i: int } +use crate::types::PageSharedAccess; +use crate::types::PageLocalAccess; +use crate::types::HeapSharedAccess; +use crate::types::HeapLocalAccess; + +// TODO this is currently unused, and I don't remember exactly why I made it. +// Is it going to be necessary when we do more cleanup stuff? +// +// Actor lets us track what a single thread is doing. +// The idea is that when the thread checks the 'thread id' of a page, +// it needs to then be sure that the page will remain valid for the duration +// of the period the thread is accessing it. +// That means we need to prevent the thread from modifying the page state +// while the 'AccessingMySegment' is in progress. +pub ghost enum Actor { + Idle, + //AccessingMySegment(SegmentId, SegmentLocalAccess), + Abandoned, +} - //pub struct PageSharedAccess { i: int } - //pub struct PageLocalAccess { i: int } - use crate::types::PageSharedAccess; - use crate::types::PageLocalAccess; - use crate::types::HeapSharedAccess; - use crate::types::HeapLocalAccess; +pub ghost enum DelayFreeingActor { + HeapUnknown, + Heap(HeapId, HeapSharedAccess, PageSharedAccess), +} - // TODO this is currently unused, and I don't remember exactly why I made it. - // Is it going to be necessary when we do more cleanup stuff? - // - // Actor lets us track what a single thread is doing. - // The idea is that when the thread checks the 'thread id' of a page, - // it needs to then be sure that the page will remain valid for the duration - // of the period the thread is accessing it. - // That means we need to prevent the thread from modifying the page state - // while the 'AccessingMySegment' is in progress. 
- pub ghost enum Actor { - Idle, - //AccessingMySegment(SegmentId, SegmentLocalAccess), - Abandoned, - } - - pub ghost enum DelayFreeingActor { - HeapUnknown, - Heap(HeapId, HeapSharedAccess, PageSharedAccess), - } - - pub type ThreadId = crate::thread::ThreadId; - - // PAPER CUT: doing this more than once, no generic finite condition for map, - // having to do the maximum thing - pub open spec fn segment_u_max(s: Set) -> int - decreases s.len(), - when s.finite() - { - if s.len() == 0 { - 0 - } else { - let x = s.choose(); - vstd::math::max(segment_u_max(s.remove(x)), x.uniq) - } - } +pub type ThreadId = crate::thread::ThreadId; - proof fn segment_u_max_not_in(s: Set) - requires - s.finite(), - ensures - forall|id: SegmentId| s.contains(id) ==> id.uniq < segment_u_max(s) + 1, - decreases s.len(), - { - vstd::set_lib::lemma_set_empty_equivalency_len(s); - if s.len() == 0 { - assert(s === Set::empty()); - } else { - let x = s.choose(); - let t = s.remove(x); - segment_u_max_not_in(t); - } +// PAPER CUT: doing this more than once, no generic finite condition for map, +// having to do the maximum thing +pub open spec fn segment_u_max(s: Set) -> int + decreases s.len(), + when s.finite() +{ + if s.len() == 0 { + 0 + } else { + let x = s.choose(); + vstd::math::max(segment_u_max(s.remove(x)), x.uniq) } +} - pub open spec fn segment_get_unused_uniq_field(s: Set) -> int { - segment_u_max(s) + 1 +proof fn segment_u_max_not_in(s: Set) + requires + s.finite(), + ensures + forall|id: SegmentId| s.contains(id) ==> id.uniq < segment_u_max(s) + 1, + decreases s.len(), +{ + vstd::set_lib::lemma_set_empty_equivalency_len(s); + if s.len() == 0 { + assert(s === Set::empty()); + } else { + let x = s.choose(); + let t = s.remove(x); + segment_u_max_not_in(t); } +} - pub proof fn lemma_segment_get_unused_uniq_field(s: Set) - requires - s.finite(), - ensures - forall|id: SegmentId| s.contains(id) ==> id.uniq != segment_get_unused_uniq_field(s), - { - segment_u_max_not_in(s); - } +pub open spec fn segment_get_unused_uniq_field(s: Set) -> int { + segment_u_max(s) + 1 +} - pub open spec fn heap_u_max(s: Set) -> int - decreases s.len(), - when s.finite() - { - if s.len() == 0 { - 0 - } else { - let x = s.choose(); - vstd::math::max(heap_u_max(s.remove(x)), x.uniq) - } - } +pub proof fn lemma_segment_get_unused_uniq_field(s: Set) + requires + s.finite(), + ensures + forall|id: SegmentId| s.contains(id) ==> id.uniq != segment_get_unused_uniq_field(s), +{ + segment_u_max_not_in(s); +} - proof fn heap_u_max_not_in(s: Set) - requires - s.finite(), - ensures - forall|id: HeapId| s.contains(id) ==> id.uniq < heap_u_max(s) + 1, - decreases s.len(), - { - vstd::set_lib::lemma_set_empty_equivalency_len(s); - if s.len() == 0 { - assert(s === Set::empty()); - } else { - let x = s.choose(); - let t = s.remove(x); - heap_u_max_not_in(t); - } +pub open spec fn heap_u_max(s: Set) -> int + decreases s.len(), + when s.finite() +{ + if s.len() == 0 { + 0 + } else { + let x = s.choose(); + vstd::math::max(heap_u_max(s.remove(x)), x.uniq) } +} - pub open spec fn heap_get_unused_uniq_field(s: Set) -> int { - heap_u_max(s) + 1 +proof fn heap_u_max_not_in(s: Set) + requires + s.finite(), + ensures + forall|id: HeapId| s.contains(id) ==> id.uniq < heap_u_max(s) + 1, + decreases s.len(), +{ + vstd::set_lib::lemma_set_empty_equivalency_len(s); + if s.len() == 0 { + assert(s === Set::empty()); + } else { + let x = s.choose(); + let t = s.remove(x); + heap_u_max_not_in(t); } +} - pub proof fn lemma_heap_get_unused_uniq_field(s: Set) - requires 
- s.finite(), - ensures - forall|id: HeapId| s.contains(id) ==> id.uniq != heap_get_unused_uniq_field(s), - { - heap_u_max_not_in(s); - } +pub open spec fn heap_get_unused_uniq_field(s: Set) -> int { + heap_u_max(s) + 1 +} - } // verus! +pub proof fn lemma_heap_get_unused_uniq_field(s: Set) + requires + s.finite(), + ensures + forall|id: HeapId| s.contains(id) ==> id.uniq != heap_get_unused_uniq_field(s), +{ + heap_u_max_not_in(s); +} + +} // verus! tokenized_state_machine! { Mim { fields { // Thread-local state to each entity @@ -2139,72 +2136,72 @@ mod types { verus! { - //// Page header data - #[repr(C)] - pub struct PageInner { - pub flags0: u8, // is_reset, is_committed, is_zero_init, - pub capacity: u16, - pub reserved: u16, - pub flags1: u8, // in_full, has_aligned - pub flags2: u8, // is_zero, retire_expire - pub free: LL, - // number of blocks that are allocated, or in `xthread_free` - // In other words, this is the "complement" of the number - // of blocks in `free` and `local_free`. - pub used: u32, - pub xblock_size: u32, - pub local_free: LL, - } - - impl PageInner { - pub open spec fn wf( - &self, - page_id: PageId, - page_state: PageState, - mim_instance: Mim::Instance, - ) -> bool { - &&& page_state.block_size == self.xblock_size as nat - &&& self.free.wf() - &&& self.free.fixed_page() - &&& self.free.page_id() == page_id - &&& self.free.block_size() == page_state.block_size - &&& self.free.instance() == mim_instance - &&& self.free.heap_id().is_none() - &&& self.local_free.wf() - &&& self.local_free.fixed_page() - &&& self.local_free.page_id() == page_id - &&& self.local_free.block_size() == page_state.block_size - &&& self.local_free.instance() == mim_instance - &&& self.local_free.heap_id().is_none() - &&& self.used + self.free.len() + self.local_free.len() == page_state.num_blocks - &&& self.local_free.fixed_page() - &&& self.free.fixed_page() - &&& self.local_free.block_size() == page_state.block_size - &&& self.free.block_size() == page_state.block_size - &&& self.capacity <= self.reserved - &&& self.capacity == page_state.num_blocks - &&& self.xblock_size > 0 - } - - pub open spec fn zeroed(&self) -> bool { - &&& self.capacity == 0 - &&& self.reserved == 0 - &&& self.free.wf() && self.free.len() == 0 - &&& self.used == 0 - &&& self.xblock_size == 0 - &&& self.local_free.wf() && self.local_free.len() == 0 - } - - pub open spec fn zeroed_except_block_size(&self) -> bool { - &&& self.capacity == 0 - &&& self.reserved == 0 - &&& self.free.wf() && self.free.len() == 0 - &&& self.used == 0 - &&& self.local_free.wf() && self.local_free.len() == 0 - } - } - - tokenized_state_machine!{ BoolAgree { +//// Page header data +#[repr(C)] +pub struct PageInner { + pub flags0: u8, // is_reset, is_committed, is_zero_init, + pub capacity: u16, + pub reserved: u16, + pub flags1: u8, // in_full, has_aligned + pub flags2: u8, // is_zero, retire_expire + pub free: LL, + // number of blocks that are allocated, or in `xthread_free` + // In other words, this is the "complement" of the number + // of blocks in `free` and `local_free`. 
+ pub used: u32, + pub xblock_size: u32, + pub local_free: LL, +} + +impl PageInner { + pub open spec fn wf( + &self, + page_id: PageId, + page_state: PageState, + mim_instance: Mim::Instance, + ) -> bool { + &&& page_state.block_size == self.xblock_size as nat + &&& self.free.wf() + &&& self.free.fixed_page() + &&& self.free.page_id() == page_id + &&& self.free.block_size() == page_state.block_size + &&& self.free.instance() == mim_instance + &&& self.free.heap_id().is_none() + &&& self.local_free.wf() + &&& self.local_free.fixed_page() + &&& self.local_free.page_id() == page_id + &&& self.local_free.block_size() == page_state.block_size + &&& self.local_free.instance() == mim_instance + &&& self.local_free.heap_id().is_none() + &&& self.used + self.free.len() + self.local_free.len() == page_state.num_blocks + &&& self.local_free.fixed_page() + &&& self.free.fixed_page() + &&& self.local_free.block_size() == page_state.block_size + &&& self.free.block_size() == page_state.block_size + &&& self.capacity <= self.reserved + &&& self.capacity == page_state.num_blocks + &&& self.xblock_size > 0 + } + + pub open spec fn zeroed(&self) -> bool { + &&& self.capacity == 0 + &&& self.reserved == 0 + &&& self.free.wf() && self.free.len() == 0 + &&& self.used == 0 + &&& self.xblock_size == 0 + &&& self.local_free.wf() && self.local_free.len() == 0 + } + + pub open spec fn zeroed_except_block_size(&self) -> bool { + &&& self.capacity == 0 + &&& self.reserved == 0 + &&& self.free.wf() && self.free.len() == 0 + &&& self.used == 0 + &&& self.local_free.wf() && self.local_free.len() == 0 + } +} + +tokenized_state_machine!{ BoolAgree { fields { #[sharding(variable)] pub x: bool, #[sharding(variable)] pub y: bool, @@ -2235,7 +2232,7 @@ mod types { #[inductive(set)] fn set_inductive(pre: Self, post: Self, b: bool) { } }} - struct_with_invariants!{ +struct_with_invariants!{ pub struct AtomicHeapPtr { pub atomic: AtomicUsize<_, (BoolAgree::y, Option), _>, @@ -2271,39 +2268,39 @@ mod types { } } - impl AtomicHeapPtr { - pub open spec fn is_empty(&self) -> bool { - self.emp@@.value - } +impl AtomicHeapPtr { + pub open spec fn is_empty(&self) -> bool { + self.emp@@.value + } - pub fn empty() -> (ahp: AtomicHeapPtr) - ensures - ahp.is_empty(), - { - let tracked (Tracked(emp_inst), Tracked(emp_x), Tracked(emp_y)) = - BoolAgree::Instance::initialize(true); - let ghost g = (Ghost(arbitrary()), Ghost(arbitrary()), Tracked(emp_x), Tracked(emp_inst)); - AtomicHeapPtr { - page_id: Ghost(arbitrary()), - instance: Ghost(arbitrary()), - emp: Tracked(emp_x), - emp_inst: Tracked(emp_inst), - atomic: AtomicUsize::new(Ghost(g), 0, Tracked((emp_y, None))), - } + pub fn empty() -> (ahp: AtomicHeapPtr) + ensures + ahp.is_empty(), + { + let tracked (Tracked(emp_inst), Tracked(emp_x), Tracked(emp_y)) = + BoolAgree::Instance::initialize(true); + let ghost g = (Ghost(arbitrary()), Ghost(arbitrary()), Tracked(emp_x), Tracked(emp_inst)); + AtomicHeapPtr { + page_id: Ghost(arbitrary()), + instance: Ghost(arbitrary()), + emp: Tracked(emp_x), + emp_inst: Tracked(emp_inst), + atomic: AtomicUsize::new(Ghost(g), 0, Tracked((emp_y, None))), } + } - #[inline(always)] - pub fn disable(&mut self) -> (hop: Tracked) - requires - old(self).wf(old(self).instance@, old(self).page_id@), - !old(self).is_empty(), - ensures - self.is_empty(), - hop@@.instance == old(self).instance@, - hop@@.key == old(self).page_id@, - { - let tracked mut heap_of_page; - my_atomic_with_ghost!( + #[inline(always)] + pub fn disable(&mut self) -> (hop: Tracked) + requires + 
old(self).wf(old(self).instance@, old(self).page_id@), + !old(self).is_empty(), + ensures + self.is_empty(), + hop@@.instance == old(self).instance@, + hop@@.key == old(self).page_id@, + { + let tracked mut heap_of_page; + my_atomic_with_ghost!( &self.atomic => no_op(); ghost g => { let tracked (mut y, heap_of_page_opt) = g; @@ -2312,233 +2309,233 @@ mod types { g = (y, None); } ); - Tracked(heap_of_page) - } + Tracked(heap_of_page) } +} - #[repr(C)] - pub struct Page { - pub count: PCell, - pub offset: u32, // this value is read-only while the Page is shared - pub inner: PCell, - pub xthread_free: ThreadLLWithDelayBits, - pub xheap: AtomicHeapPtr, - pub prev: PCell>, - pub next: PCell>, - pub padding: usize, - } +#[repr(C)] +pub struct Page { + pub count: PCell, + pub offset: u32, // this value is read-only while the Page is shared + pub inner: PCell, + pub xthread_free: ThreadLLWithDelayBits, + pub xheap: AtomicHeapPtr, + pub prev: PCell>, + pub next: PCell>, + pub padding: usize, +} + +pub tracked struct PageSharedAccess { + pub tracked points_to: ptr::PointsTo, +} + +pub tracked struct PageLocalAccess { + pub tracked count: cell::PointsTo, + pub tracked inner: cell::PointsTo, + pub tracked prev: cell::PointsTo>, + pub tracked next: cell::PointsTo>, +} - pub tracked struct PageSharedAccess { - pub tracked points_to: ptr::PointsTo, +pub tracked struct PageFullAccess { + pub tracked s: PageSharedAccess, + pub tracked l: PageLocalAccess, +} + +impl Page { + pub open spec fn wf( + &self, + page_id: PageId, + block_size: nat, + mim_instance: Mim::Instance, + ) -> bool { + self.xthread_free.wf() && !self.xthread_free.is_empty() && self.xthread_free.instance + == mim_instance && self.xthread_free.page_id() == page_id + && self.xthread_free.block_size() == block_size && self.xheap.wf(mim_instance, page_id) + && !self.xheap.is_empty() } - pub tracked struct PageLocalAccess { - pub tracked count: cell::PointsTo, - pub tracked inner: cell::PointsTo, - pub tracked prev: cell::PointsTo>, - pub tracked next: cell::PointsTo>, + pub open spec fn wf_secondary(&self, mim_instance: Mim::Instance) -> bool { + self.xthread_free.wf() && self.xthread_free.is_empty() && self.xthread_free.instance + == mim_instance } - pub tracked struct PageFullAccess { - pub tracked s: PageSharedAccess, - pub tracked l: PageLocalAccess, + pub open spec fn wf_unused(&self, mim_instance: Mim::Instance) -> bool { + self.xthread_free.wf() && self.xthread_free.is_empty() && self.xthread_free.instance + == mim_instance } +} - impl Page { - pub open spec fn wf( - &self, - page_id: PageId, - block_size: nat, - mim_instance: Mim::Instance, - ) -> bool { - self.xthread_free.wf() && !self.xthread_free.is_empty() && self.xthread_free.instance - == mim_instance && self.xthread_free.page_id() == page_id - && self.xthread_free.block_size() == block_size && self.xheap.wf(mim_instance, page_id) - && !self.xheap.is_empty() - } +pub open spec fn page_differ_only_in_offset(page1: Page, page2: Page) -> bool { + page2 == Page { offset: page2.offset, ..page1 } +} - pub open spec fn wf_secondary(&self, mim_instance: Mim::Instance) -> bool { - self.xthread_free.wf() && self.xthread_free.is_empty() && self.xthread_free.instance - == mim_instance - } +pub open spec fn psa_differ_only_in_offset(psa1: PageSharedAccess, psa2: PageSharedAccess) -> bool { + psa1.points_to@.value.is_some() && psa2.points_to@.value.is_some() + && page_differ_only_in_offset( + psa1.points_to@.value.unwrap(), + psa2.points_to@.value.unwrap(), + ) && psa1.points_to@.pptr == 
psa2.points_to@.pptr +} - pub open spec fn wf_unused(&self, mim_instance: Mim::Instance) -> bool { - self.xthread_free.wf() && self.xthread_free.is_empty() && self.xthread_free.instance - == mim_instance - } +impl PageSharedAccess { + pub open spec fn wf( + &self, + page_id: PageId, + block_size: nat, + mim_instance: Mim::Instance, + ) -> bool { + &&& is_page_ptr(self.points_to@.pptr, page_id) + &&& self.points_to@.value.is_Some() + &&& self.points_to@.value.get_Some_0().wf(page_id, block_size, mim_instance) } - pub open spec fn page_differ_only_in_offset(page1: Page, page2: Page) -> bool { - page2 == Page { offset: page2.offset, ..page1 } + pub open spec fn wf_secondary( + &self, + page_id: PageId, + block_size: nat, + mim_instance: Mim::Instance, + ) -> bool { + &&& is_page_ptr(self.points_to@.pptr, page_id) + &&& self.points_to@.value.is_Some() + &&& self.points_to@.value.get_Some_0().wf_secondary(mim_instance) } - pub open spec fn psa_differ_only_in_offset(psa1: PageSharedAccess, psa2: PageSharedAccess) -> bool { - psa1.points_to@.value.is_some() && psa2.points_to@.value.is_some() - && page_differ_only_in_offset( - psa1.points_to@.value.unwrap(), - psa2.points_to@.value.unwrap(), - ) && psa1.points_to@.pptr == psa2.points_to@.pptr + pub open spec fn wf_unused(&self, page_id: PageId, mim_instance: Mim::Instance) -> bool { + &&& is_page_ptr(self.points_to@.pptr, page_id) + &&& self.points_to@.value.is_Some() + &&& self.points_to@.value.get_Some_0().wf_unused(mim_instance) } +} - impl PageSharedAccess { - pub open spec fn wf( - &self, - page_id: PageId, - block_size: nat, - mim_instance: Mim::Instance, - ) -> bool { - &&& is_page_ptr(self.points_to@.pptr, page_id) - &&& self.points_to@.value.is_Some() - &&& self.points_to@.value.get_Some_0().wf(page_id, block_size, mim_instance) - } - - pub open spec fn wf_secondary( - &self, - page_id: PageId, - block_size: nat, - mim_instance: Mim::Instance, - ) -> bool { - &&& is_page_ptr(self.points_to@.pptr, page_id) - &&& self.points_to@.value.is_Some() - &&& self.points_to@.value.get_Some_0().wf_secondary(mim_instance) - } +pub open spec fn wf_reserved(block_size: int, reserved: int, count: int) -> bool { + reserved * block_size + crate::layout::start_offset(block_size) <= count * SLICE_SIZE +} - pub open spec fn wf_unused(&self, page_id: PageId, mim_instance: Mim::Instance) -> bool { - &&& is_page_ptr(self.points_to@.pptr, page_id) - &&& self.points_to@.value.is_Some() - &&& self.points_to@.value.get_Some_0().wf_unused(mim_instance) +impl PageLocalAccess { + pub open spec fn wf( + &self, + page_id: PageId, + page_state: PageState, + mim_instance: Mim::Instance, + ) -> bool { + (page_state.offset == 0 ==> page_state.shared_access.wf( + page_id, + page_state.block_size, + mim_instance, + )) && (page_state.offset != 0 ==> page_state.shared_access.wf_secondary( + page_id, + page_state.block_size, + mim_instance, + )) && page_state.is_enabled && match page_state.shared_access.points_to@.value { + Some(page) => { + &&& self.inner@.pcell == page.inner.id() + &&& self.count@.pcell == page.count.id() + &&& self.prev@.pcell == page.prev.id() + &&& self.next@.pcell == page.next.id() + &&& match ( + self.count@.value, + self.inner@.value, + self.prev@.value, + self.next@.value, + ) { + (Some(count), Some(page_inner), Some(prev), Some(next)) => { + //&&& is_page_ptr_opt(prev, page_state.prev) + //&&& is_page_ptr_opt(next, page_state.next) + &&& (page_state.offset == 0 ==> page_inner.wf( + page_id, + page_state, + mim_instance, + ) && wf_reserved( + 
page_state.block_size as int, + page_inner.reserved as int, + count as int, + )) + &&& (page_state.offset != 0 ==> page_inner.zeroed_except_block_size()) + }, + _ => false, + } + }, + None => false, } } - pub open spec fn wf_reserved(block_size: int, reserved: int, count: int) -> bool { - reserved * block_size + crate::layout::start_offset(block_size) <= count * SLICE_SIZE - } - - impl PageLocalAccess { - pub open spec fn wf( - &self, - page_id: PageId, - page_state: PageState, - mim_instance: Mim::Instance, - ) -> bool { - (page_state.offset == 0 ==> page_state.shared_access.wf( - page_id, - page_state.block_size, - mim_instance, - )) && (page_state.offset != 0 ==> page_state.shared_access.wf_secondary( - page_id, - page_state.block_size, - mim_instance, - )) && page_state.is_enabled && match page_state.shared_access.points_to@.value { - Some(page) => { - &&& self.inner@.pcell == page.inner.id() - &&& self.count@.pcell == page.count.id() - &&& self.prev@.pcell == page.prev.id() - &&& self.next@.pcell == page.next.id() - &&& match ( - self.count@.value, - self.inner@.value, - self.prev@.value, - self.next@.value, - ) { - (Some(count), Some(page_inner), Some(prev), Some(next)) => { - //&&& is_page_ptr_opt(prev, page_state.prev) - //&&& is_page_ptr_opt(next, page_state.next) - &&& (page_state.offset == 0 ==> page_inner.wf( - page_id, - page_state, - mim_instance, - ) && wf_reserved( - page_state.block_size as int, - page_inner.reserved as int, - count as int, - )) - &&& (page_state.offset != 0 ==> page_inner.zeroed_except_block_size()) - }, - _ => false, - } - }, - None => false, - } - } - - pub open spec fn wf_unused( - &self, - page_id: PageId, - shared_access: PageSharedAccess, - popped: Popped, - mim_instance: Mim::Instance, - ) -> bool { - shared_access.wf_unused(page_id, mim_instance) && match shared_access.points_to@.value { - Some(page) => { - &&& self.count@.pcell == page.count.id() - &&& self.inner@.pcell == page.inner.id() - &&& self.prev@.pcell == page.prev.id() - &&& self.next@.pcell == page.next.id() - &&& match self.inner@.value { - Some(page_inner) => { - page_inner.zeroed_except_block_size()/*&& ( + pub open spec fn wf_unused( + &self, + page_id: PageId, + shared_access: PageSharedAccess, + popped: Popped, + mim_instance: Mim::Instance, + ) -> bool { + shared_access.wf_unused(page_id, mim_instance) && match shared_access.points_to@.value { + Some(page) => { + &&& self.count@.pcell == page.count.id() + &&& self.inner@.pcell == page.inner.id() + &&& self.prev@.pcell == page.prev.id() + &&& self.next@.pcell == page.next.id() + &&& match self.inner@.value { + Some(page_inner) => { + page_inner.zeroed_except_block_size()/*&& ( && page_id.idx != 0 && (popped != Popped::Ready(page_id) && !(popped.is_VeryUnready() && popped.get_VeryUnready_0() == page_id.segment_id && popped.get_VeryUnready_1() == page_id.idx)) ==> page_inner.xblock_size == 0 )*/ - }, - _ => false, - }// TODO move PageData comparison in here? + }, + _ => false, + } // TODO move PageData comparison in here? 
- }, - None => false, - } + }, + None => false, } } +} - impl PageFullAccess { - pub open spec fn wf_empty_page_global(&self) -> bool { - &&& self.s.points_to@.value.is_some() - &&& self.s.points_to@.value.unwrap().inner.id() == self.l.inner@.pcell - &&& self.l.inner@.value.is_some() - &&& self.l.inner@.value.unwrap().zeroed() - } +impl PageFullAccess { + pub open spec fn wf_empty_page_global(&self) -> bool { + &&& self.s.points_to@.value.is_some() + &&& self.s.points_to@.value.unwrap().inner.id() == self.l.inner@.pcell + &&& self.l.inner@.value.is_some() + &&& self.l.inner@.value.unwrap().zeroed() } +} - ///////////////////////////////////////////// - ///////////////////////////////////////////// Segments - ///////////////////////////////////////////// - #[derive(Clone, Copy)] - pub enum SegmentKind { - Normal, - Huge, - } +///////////////////////////////////////////// +///////////////////////////////////////////// Segments +///////////////////////////////////////////// +#[derive(Clone, Copy)] +pub enum SegmentKind { + Normal, + Huge, +} - #[repr(C)] - pub struct SegmentHeaderMain { - pub memid: usize, - pub mem_is_pinned: bool, - pub mem_is_large: bool, - pub mem_is_committed: bool, - pub mem_alignment: usize, - pub mem_align_offset: usize, - pub allow_decommit: bool, - pub decommit_expire: i64, - pub decommit_mask: CommitMask, - pub commit_mask: CommitMask, - } +#[repr(C)] +pub struct SegmentHeaderMain { + pub memid: usize, + pub mem_is_pinned: bool, + pub mem_is_large: bool, + pub mem_is_committed: bool, + pub mem_alignment: usize, + pub mem_align_offset: usize, + pub allow_decommit: bool, + pub decommit_expire: i64, + pub decommit_mask: CommitMask, + pub commit_mask: CommitMask, +} - #[repr(C)] - pub struct SegmentHeaderMain2 { - pub next: PPtr, - pub abandoned: usize, - pub abandoned_visits: usize, - pub used: usize, - pub cookie: usize, - pub segment_slices: usize, - pub segment_info_slices: usize, - pub kind: SegmentKind, - pub slice_entries: usize, - } +#[repr(C)] +pub struct SegmentHeaderMain2 { + pub next: PPtr, + pub abandoned: usize, + pub abandoned_visits: usize, + pub used: usize, + pub cookie: usize, + pub segment_slices: usize, + pub segment_info_slices: usize, + pub kind: SegmentKind, + pub slice_entries: usize, +} - struct_with_invariants!{ +struct_with_invariants!{ #[repr(C)] pub struct SegmentHeader { pub main: PCell, @@ -2569,1528 +2566,1512 @@ mod types { } } - pub tracked struct SegmentSharedAccess { - pub points_to: ptr::PointsTo, - } +pub tracked struct SegmentSharedAccess { + pub points_to: ptr::PointsTo, +} - impl SegmentSharedAccess { - pub open spec fn wf(&self, segment_id: SegmentId, mim_instance: Mim::Instance) -> bool { - &&& is_segment_ptr(self.points_to@.pptr, segment_id) - &&& (match self.points_to@.value { - Some(segment_header) => segment_header.wf(mim_instance, segment_id), - None => false, - }) - } +impl SegmentSharedAccess { + pub open spec fn wf(&self, segment_id: SegmentId, mim_instance: Mim::Instance) -> bool { + &&& is_segment_ptr(self.points_to@.pptr, segment_id) + &&& (match self.points_to@.value { + Some(segment_header) => segment_header.wf(mim_instance, segment_id), + None => false, + }) } +} - pub tracked struct SegmentLocalAccess { - pub mem: MemChunk, - pub main: cell::PointsTo, - pub main2: cell::PointsTo, - } +pub tracked struct SegmentLocalAccess { + pub mem: MemChunk, + pub main: cell::PointsTo, + pub main2: cell::PointsTo, +} - impl SegmentLocalAccess { - pub open spec fn wf( - &self, - segment_id: SegmentId, - segment_state: 
SegmentState, - mim_instance: Mim::Instance, - ) -> bool { - &&& segment_state.shared_access.wf(segment_id, mim_instance) - &&& segment_state.shared_access.points_to@.value.unwrap().main.id() == self.main@.pcell - &&& self.main@.value.is_some() - &&& segment_state.shared_access.points_to@.value.unwrap().main2.id() == self.main2@.pcell - &&& self.main2@.value.is_some() - &&& segment_state.is_enabled - } - } - - ///////////////////////////////////////////// - ///////////////////////////////////////////// Heaps - ///////////////////////////////////////////// - pub struct PageQueue { - pub first: PPtr, - pub last: PPtr, - pub block_size: usize, - } - - impl Clone for PageQueue { - fn clone(&self) -> (s: Self) - ensures - s == *self, - { - PageQueue { first: self.first, last: self.last, block_size: self.block_size } - } +impl SegmentLocalAccess { + pub open spec fn wf( + &self, + segment_id: SegmentId, + segment_state: SegmentState, + mim_instance: Mim::Instance, + ) -> bool { + &&& segment_state.shared_access.wf(segment_id, mim_instance) + &&& segment_state.shared_access.points_to@.value.unwrap().main.id() == self.main@.pcell + &&& self.main@.value.is_some() + &&& segment_state.shared_access.points_to@.value.unwrap().main2.id() == self.main2@.pcell + &&& self.main2@.value.is_some() + &&& segment_state.is_enabled } +} - impl Copy for PageQueue { +///////////////////////////////////////////// +///////////////////////////////////////////// Heaps +///////////////////////////////////////////// +pub struct PageQueue { + pub first: PPtr, + pub last: PPtr, + pub block_size: usize, +} +impl Clone for PageQueue { + fn clone(&self) -> (s: Self) + ensures + s == *self, + { + PageQueue { first: self.first, last: self.last, block_size: self.block_size } } +} - #[repr(C)] - pub struct Heap { - pub tld_ptr: TldPtr, - pub pages_free_direct: PCell<[PPtr; 129]>, // length PAGES_DIRECT - pub pages: PCell<[PageQueue; 75]>, // length BIN_FULL + 1 - pub thread_delayed_free: ThreadLLSimple, - pub thread_id: ThreadId, - pub arena_id: ArenaId, - //pub cookie: usize, - //pub keys: usize, - //pub random: - pub page_count: PCell, - pub page_retired_min: PCell, - pub page_retired_max: PCell, - //pub next: HeapPtr, - pub no_reclaim: bool, - // TODO should be a global, but right now we don't support pointers to globals - pub page_empty_ptr: PPtr, - } +impl Copy for PageQueue { + +} + +#[repr(C)] +pub struct Heap { + pub tld_ptr: TldPtr, + pub pages_free_direct: PCell<[PPtr; 129]>, // length PAGES_DIRECT + pub pages: PCell<[PageQueue; 75]>, // length BIN_FULL + 1 + pub thread_delayed_free: ThreadLLSimple, + pub thread_id: ThreadId, + pub arena_id: ArenaId, + //pub cookie: usize, + //pub keys: usize, + //pub random: + pub page_count: PCell, + pub page_retired_min: PCell, + pub page_retired_max: PCell, + //pub next: HeapPtr, + pub no_reclaim: bool, + // TODO should be a global, but right now we don't support pointers to globals + pub page_empty_ptr: PPtr, +} - pub struct HeapSharedAccess { - pub points_to: ptr::PointsTo, +pub struct HeapSharedAccess { + pub points_to: ptr::PointsTo, +} + +pub struct HeapLocalAccess { + pub pages_free_direct: cell::PointsTo<[PPtr; 129]>, + pub pages: cell::PointsTo<[PageQueue; 75]>, + pub page_count: cell::PointsTo, + pub page_retired_min: cell::PointsTo, + pub page_retired_max: cell::PointsTo, +} + +impl Heap { + pub open spec fn wf( + &self, + heap_id: HeapId, + tld_id: TldId, + mim_instance: Mim::Instance, + ) -> bool { + &&& self.thread_delayed_free.wf() + &&& 
self.thread_delayed_free.instance == mim_instance + &&& self.thread_delayed_free.heap_id == heap_id + &&& self.tld_ptr.wf() + &&& self.tld_ptr.tld_id == tld_id } +} - pub struct HeapLocalAccess { - pub pages_free_direct: cell::PointsTo<[PPtr; 129]>, - pub pages: cell::PointsTo<[PageQueue; 75]>, - pub page_count: cell::PointsTo, - pub page_retired_min: cell::PointsTo, - pub page_retired_max: cell::PointsTo, +impl HeapSharedAccess { + pub open spec fn wf( + &self, + heap_id: HeapId, + tld_id: TldId, + mim_instance: Mim::Instance, + ) -> bool { + is_heap_ptr(self.points_to@.pptr, heap_id) && self.points_to@.value.is_Some() + && self.points_to@.value.get_Some_0().wf(heap_id, tld_id, mim_instance) } - impl Heap { - pub open spec fn wf( - &self, - heap_id: HeapId, - tld_id: TldId, - mim_instance: Mim::Instance, - ) -> bool { - &&& self.thread_delayed_free.wf() - &&& self.thread_delayed_free.instance == mim_instance - &&& self.thread_delayed_free.heap_id == heap_id - &&& self.tld_ptr.wf() - &&& self.tld_ptr.tld_id == tld_id - } + pub open spec fn wf2(&self, heap_id: HeapId, mim_instance: Mim::Instance) -> bool { + self.wf(heap_id, self.points_to@.value.unwrap().tld_ptr.tld_id@, mim_instance) } +} - impl HeapSharedAccess { - pub open spec fn wf( - &self, - heap_id: HeapId, - tld_id: TldId, - mim_instance: Mim::Instance, - ) -> bool { - is_heap_ptr(self.points_to@.pptr, heap_id) && self.points_to@.value.is_Some() - && self.points_to@.value.get_Some_0().wf(heap_id, tld_id, mim_instance) - } +pub open spec fn pages_free_direct_match(pfd_val: int, p_val: int, emp: int) -> bool { + (p_val == 0 ==> pfd_val == emp) && (p_val != 0 ==> pfd_val == p_val) +} + +pub open spec fn pages_free_direct_is_correct( + pfd: Seq>, + pages: Seq, + emp: int, +) -> bool { + &&& pfd.len() == PAGES_DIRECT + &&& pages.len() == BIN_FULL + 1 + &&& (forall|wsize| + 0 <= wsize < pfd.len() ==> pages_free_direct_match( + (#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp, + )) +} - pub open spec fn wf2(&self, heap_id: HeapId, mim_instance: Mim::Instance) -> bool { - self.wf(heap_id, self.points_to@.value.unwrap().tld_ptr.tld_id@, mim_instance) +impl HeapLocalAccess { + pub open spec fn wf( + &self, + heap_id: HeapId, + heap_state: HeapState, + tld_id: TldId, + mim_instance: Mim::Instance, + emp: int, + ) -> bool { + self.wf_basic(heap_id, heap_state, tld_id, mim_instance) && pages_free_direct_is_correct( + self.pages_free_direct@.value.unwrap()@, + self.pages@.value.unwrap()@, + emp, + ) && heap_state.shared_access.points_to@.value.unwrap().page_empty_ptr.id() == emp + } + + pub open spec fn wf_basic( + &self, + heap_id: HeapId, + heap_state: HeapState, + tld_id: TldId, + mim_instance: Mim::Instance, + ) -> bool { + heap_state.shared_access.wf(heap_id, tld_id, mim_instance) && { + let heap = heap_state.shared_access.points_to@.value.unwrap(); + heap.pages_free_direct.id() == self.pages_free_direct@.pcell && heap.pages.id() + == self.pages@.pcell && heap.page_count.id() == self.page_count@.pcell + && heap.page_retired_min.id() == self.page_retired_min@.pcell + && heap.page_retired_max.id() == self.page_retired_max@.pcell + && self.pages_free_direct@.value.is_some() && self.pages@.value.is_some() + && self.page_count@.value.is_some() && self.page_retired_min@.value.is_some() + && self.page_retired_max@.value.is_some() && (forall|i: int| #[trigger] + valid_bin_idx(i) ==> self.pages@.value.unwrap()[i].block_size == size_of_bin( + i, + )) // 0 isn't a valid_bin_idx + && 
self.pages@.value.unwrap()[0].block_size == 8 + && self.pages@.value.unwrap()[BIN_FULL as int].block_size == 8 * (524288 + + 2) //MEDIUM_OBJ_WSIZE_MAX + 2 + && self.pages_free_direct@.value.unwrap()@.len() == PAGES_DIRECT + && self.pages@.value.unwrap()@.len() == BIN_FULL + 1 } } +} + +///////////////////////////////////////////// +///////////////////////////////////////////// Thread local data +///////////////////////////////////////////// +//pub struct OsTld { +// pub region_idx: usize, +//} +pub struct SegmentsTld { + pub span_queue_headers: [SpanQueueHeader; 32], // len = SEGMENT_BIN_MAX + 1 + pub count: usize, + pub peak_count: usize, + pub current_size: usize, + pub peak_size: usize, +} + +pub struct SpanQueueHeader { + pub first: PPtr, + pub last: PPtr, +} - pub open spec fn pages_free_direct_match(pfd_val: int, p_val: int, emp: int) -> bool { - (p_val == 0 ==> pfd_val == emp) && (p_val != 0 ==> pfd_val == p_val) +impl Clone for SpanQueueHeader { + fn clone(&self) -> (s: Self) + ensures + s == *self, + { + SpanQueueHeader { first: self.first, last: self.last } } +} - pub open spec fn pages_free_direct_is_correct( - pfd: Seq>, - pages: Seq, - emp: int, - ) -> bool { - &&& pfd.len() == PAGES_DIRECT - &&& pages.len() == BIN_FULL + 1 - &&& (forall|wsize| - 0 <= wsize < pfd.len() ==> pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp, +impl Copy for SpanQueueHeader { + +} + +pub struct Tld { + // TODO mimalloc allows multiple heaps per thread + pub heap_backing: PPtr, + pub segments: SegmentsTld, +} + +pub tracked struct Local { + pub ghost thread_id: ThreadId, + pub tracked my_inst: Mim::my_inst, + pub tracked instance: Mim::Instance, + pub tracked thread_token: Mim::thread_local_state, + pub tracked checked_token: Mim::thread_checked_state, + pub tracked is_thread: crate::thread::IsThread, + pub ghost heap_id: HeapId, + pub tracked heap: HeapLocalAccess, + pub ghost tld_id: TldId, + pub tracked tld: ptr::PointsTo, + pub tracked segments: Map, + // All pages, used and unused + pub tracked pages: Map, + pub ghost psa: Map, + // All unused pages + // (used pages are in the token system) + pub tracked unused_pages: Map, + pub ghost page_organization: PageOrg::State, + pub tracked page_empty_global: Duplicable, +} + +pub open spec fn common_preserves(l1: Local, l2: Local) -> bool { + l1.heap_id == l2.heap_id && l1.tld_id == l2.tld_id && l1.instance == l2.instance +} + +impl Local { + pub open spec fn wf(&self) -> bool { + self.wf_main() && self.page_organization.popped == Popped::No + } + + pub open spec fn wf_basic(&self) -> bool { + &&& is_tld_ptr(self.tld@.pptr, self.tld_id) + &&& self.thread_token@.instance == self.instance + &&& self.thread_token@.key == self.thread_id + &&& self.thread_token@.value.segments.dom() == self.segments.dom() + &&& self.thread_token@.value.heap_id == self.heap_id + &&& self.heap.wf_basic( + self.heap_id, + self.thread_token@.value.heap, + self.tld_id, + self.instance, + ) + &&& self.thread_token@.value.heap.shared_access.points_to@.value.unwrap().page_empty_ptr.id() + == self.page_empty_global@.s.points_to@.pptr + &&& self.page_empty_global@.wf_empty_page_global() + } + + pub open spec fn wf_main(&self) -> bool { + &&& is_tld_ptr(self.tld@.pptr, self.tld_id) + &&& self.thread_token@.instance == self.instance + &&& self.thread_token@.key == self.thread_id + &&& self.thread_id == self.is_thread@ + &&& self.checked_token@.instance == self.instance + &&& self.checked_token@.key == 
self.thread_id + &&& self.my_inst@.instance == self.instance + &&& self.my_inst@.value == self.instance //&&& (forall |page_id| + // self.thread_token@.value.pages.dom().contains(page_id) <==> + // self.pages.dom().contains(page_id)) + //&&& self.thread_token@.value.pages.dom() == self.pages.dom() + + &&& self.thread_token@.value.segments.dom() == self.segments.dom() + &&& self.thread_token@.value.heap_id == self.heap_id + &&& self.heap.wf( + self.heap_id, + self.thread_token@.value.heap, + self.tld_id, + self.instance, + self.page_empty_global@.s.points_to@.pptr, + ) + &&& (forall|page_id| #[trigger] + self.pages.dom().contains(page_id) + ==> // Page is either 'used' or 'unused' + (self.unused_pages.dom().contains(page_id) + <==> !self.thread_token@.value.pages.dom().contains(page_id))) + &&& self.thread_token@.value.pages.dom().subset_of(self.pages.dom()) + &&& (forall|page_id| #[trigger] + self.pages.dom().contains(page_id) ==> self.thread_token@.value.pages.dom().contains( + page_id, + ) ==> self.pages.index(page_id).wf( + page_id, + self.thread_token@.value.pages.index(page_id), + self.instance, + )) + &&& (forall|page_id| #[trigger] + self.pages.dom().contains(page_id) ==> self.unused_pages.dom().contains(page_id) + ==> self.pages.index(page_id).wf_unused( + page_id, + self.unused_pages[page_id], + self.page_organization.popped, + self.instance, + )) + &&& (forall|segment_id| #[trigger] + self.segments.dom().contains(segment_id) ==> self.segments[segment_id].wf( + segment_id, + self.thread_token@.value.segments.index(segment_id), + self.instance, + )) + &&& (forall|segment_id| #[trigger] + self.segments.dom().contains(segment_id) ==> self.mem_chunk_good(segment_id)) + &&& self.tld@.value.is_Some() + &&& self.page_organization_valid() + &&& self.page_empty_global@.wf_empty_page_global() + } + + pub open spec fn page_organization_valid(&self) -> bool { + &&& self.page_organization.invariant() + &&& self.tld@.value.is_Some() + &&& page_organization_queues_match( + self.page_organization.unused_dlist_headers, + self.tld@.value.get_Some_0().segments.span_queue_headers@, + ) + &&& page_organization_used_queues_match( + self.page_organization.used_dlist_headers, + self.heap.pages@.value.unwrap()@, + ) + &&& page_organization_pages_match( + self.page_organization.pages, + self.pages, + self.psa, + self.page_organization.popped, + ) + &&& page_organization_segments_match(self.page_organization.segments, self.segments) + &&& (forall|page_id: PageId| #[trigger] + self.page_organization.pages.dom().contains(page_id) ==> ( + !self.page_organization.pages[page_id].is_used <==> self.unused_pages.dom().contains( + page_id, + ))) //&&& (forall |page_id: PageId| + // #[trigger] self.page_organization.pages.dom().contains(page_id) + // ==> self.page_organization.pages[page_id].is_used + // ==> self.page_organization.pages[page_id].offset == Some(0nat) + // ==> self.thread_token@.value.pages[page_id].offset == 0) + + &&& (forall|page_id| #[trigger] + self.page_organization.pages.dom().contains(page_id) + ==> self.page_organization.pages[page_id].is_used + ==> page_organization_matches_token_page( + self.page_organization.pages[page_id], + self.thread_token@.value.pages[page_id], )) + &&& (forall|page_id: PageId| + (#[trigger] self.unused_pages.dom().contains(page_id)) + ==> self.page_organization.pages.dom().contains(page_id)) + &&& (forall|page_id: PageId| #[trigger] + self.unused_pages.dom().contains(page_id) ==> self.unused_pages[page_id] + == self.psa[page_id]) + &&& (forall|page_id: PageId| 
#[trigger] + self.thread_token@.value.pages.dom().contains(page_id) + ==> self.thread_token@.value.pages[page_id].shared_access == self.psa[page_id]) + } + + pub open spec fn page_state(&self, page_id: PageId) -> PageState + recommends + self.thread_token@.value.pages.dom().contains(page_id), + { + self.thread_token@.value.pages.index(page_id) } - impl HeapLocalAccess { - pub open spec fn wf( - &self, - heap_id: HeapId, - heap_state: HeapState, - tld_id: TldId, - mim_instance: Mim::Instance, - emp: int, - ) -> bool { - self.wf_basic(heap_id, heap_state, tld_id, mim_instance) && pages_free_direct_is_correct( - self.pages_free_direct@.value.unwrap()@, - self.pages@.value.unwrap()@, - emp, - ) && heap_state.shared_access.points_to@.value.unwrap().page_empty_ptr.id() == emp - } + pub open spec fn page_inner(&self, page_id: PageId) -> PageInner + recommends + self.pages.dom().contains(page_id), + self.pages.index(page_id).inner@.value.is_Some(), + { + self.pages.index(page_id).inner@.value.get_Some_0() + } - pub open spec fn wf_basic( - &self, - heap_id: HeapId, - heap_state: HeapState, - tld_id: TldId, - mim_instance: Mim::Instance, - ) -> bool { - heap_state.shared_access.wf(heap_id, tld_id, mim_instance) && { - let heap = heap_state.shared_access.points_to@.value.unwrap(); - heap.pages_free_direct.id() == self.pages_free_direct@.pcell && heap.pages.id() - == self.pages@.pcell && heap.page_count.id() == self.page_count@.pcell - && heap.page_retired_min.id() == self.page_retired_min@.pcell - && heap.page_retired_max.id() == self.page_retired_max@.pcell - && self.pages_free_direct@.value.is_some() && self.pages@.value.is_some() - && self.page_count@.value.is_some() && self.page_retired_min@.value.is_some() - && self.page_retired_max@.value.is_some() && (forall|i: int| - #[trigger] - valid_bin_idx(i) ==> self.pages@.value.unwrap()[i].block_size == size_of_bin( - i, - ))// 0 isn't a valid_bin_idx - && self.pages@.value.unwrap()[0].block_size == 8 - && self.pages@.value.unwrap()[BIN_FULL as int].block_size == 8 * (524288 - + 2) //MEDIUM_OBJ_WSIZE_MAX + 2 - && self.pages_free_direct@.value.unwrap()@.len() == PAGES_DIRECT - && self.pages@.value.unwrap()@.len() == BIN_FULL + 1 - } - } - } - - ///////////////////////////////////////////// - ///////////////////////////////////////////// Thread local data - ///////////////////////////////////////////// - //pub struct OsTld { - // pub region_idx: usize, - //} - pub struct SegmentsTld { - pub span_queue_headers: [SpanQueueHeader; 32], // len = SEGMENT_BIN_MAX + 1 - pub count: usize, - pub peak_count: usize, - pub current_size: usize, - pub peak_size: usize, + // This is for when we need to obtain ownership of the ThreadToken + // but when we have a &mut reference to the Local + pub proof fn take_thread_token(tracked &mut self) -> (tracked tt: Mim::thread_local_state) + ensures // All fields remain the same except thread_token which is set to an + // arbitrary value + + *self == (Local { thread_token: self.thread_token, ..*old(self) }), + tt == old(self).thread_token, + { + let tracked mut t = Mim::thread_local_state::arbitrary(); + tracked_swap(&mut t, &mut self.thread_token); + t } - pub struct SpanQueueHeader { - pub first: PPtr, - pub last: PPtr, + pub proof fn take_checked_token(tracked &mut self) -> (tracked tt: Mim::thread_checked_state) + ensures // All fields remain the same except thread_token which is set to an + // arbitrary value + + *self == (Local { checked_token: self.checked_token, ..*old(self) }), + tt == old(self).checked_token, + { + 
let tracked mut t = Mim::thread_checked_state::arbitrary(); + tracked_swap(&mut t, &mut self.checked_token); + t } - impl Clone for SpanQueueHeader { - fn clone(&self) -> (s: Self) - ensures - s == *self, - { - SpanQueueHeader { first: self.first, last: self.last } - } + pub open spec fn commit_mask(&self, segment_id: SegmentId) -> CommitMask { + self.segments[segment_id].main@.value.unwrap().commit_mask } - impl Copy for SpanQueueHeader { + pub open spec fn decommit_mask(&self, segment_id: SegmentId) -> CommitMask { + self.segments[segment_id].main@.value.unwrap().decommit_mask + } + pub open spec fn is_used_primary(&self, page_id: PageId) -> bool { + self.page_organization.pages.dom().contains(page_id) + && self.page_organization.pages[page_id].is_used + && self.page_organization.pages[page_id].offset == Some(0nat) } - pub struct Tld { - // TODO mimalloc allows multiple heaps per thread - pub heap_backing: PPtr, - pub segments: SegmentsTld, + pub open spec fn page_reserved(&self, page_id: PageId) -> int { + self.pages[page_id].inner@.value.unwrap().reserved as int } - pub tracked struct Local { - pub ghost thread_id: ThreadId, - pub tracked my_inst: Mim::my_inst, - pub tracked instance: Mim::Instance, - pub tracked thread_token: Mim::thread_local_state, - pub tracked checked_token: Mim::thread_checked_state, - pub tracked is_thread: crate::thread::IsThread, - pub ghost heap_id: HeapId, - pub tracked heap: HeapLocalAccess, - pub ghost tld_id: TldId, - pub tracked tld: ptr::PointsTo, - pub tracked segments: Map, - // All pages, used and unused - pub tracked pages: Map, - pub ghost psa: Map, - // All unused pages - // (used pages are in the token system) - pub tracked unused_pages: Map, - pub ghost page_organization: PageOrg::State, - pub tracked page_empty_global: Duplicable, + pub open spec fn page_count(&self, page_id: PageId) -> int { + self.pages[page_id].count@.value.unwrap() as int } - pub open spec fn common_preserves(l1: Local, l2: Local) -> bool { - l1.heap_id == l2.heap_id && l1.tld_id == l2.tld_id && l1.instance == l2.instance + pub open spec fn page_capacity(&self, page_id: PageId) -> int { + self.pages[page_id].inner@.value.unwrap().capacity as int } - impl Local { - pub open spec fn wf(&self) -> bool { - self.wf_main() && self.page_organization.popped == Popped::No - } - - pub open spec fn wf_basic(&self) -> bool { - &&& is_tld_ptr(self.tld@.pptr, self.tld_id) - &&& self.thread_token@.instance == self.instance - &&& self.thread_token@.key == self.thread_id - &&& self.thread_token@.value.segments.dom() == self.segments.dom() - &&& self.thread_token@.value.heap_id == self.heap_id - &&& self.heap.wf_basic( - self.heap_id, - self.thread_token@.value.heap, - self.tld_id, - self.instance, - ) - &&& self.thread_token@.value.heap.shared_access.points_to@.value.unwrap().page_empty_ptr.id() - == self.page_empty_global@.s.points_to@.pptr - &&& self.page_empty_global@.wf_empty_page_global() - } - - pub open spec fn wf_main(&self) -> bool { - &&& is_tld_ptr(self.tld@.pptr, self.tld_id) - &&& self.thread_token@.instance == self.instance - &&& self.thread_token@.key == self.thread_id - &&& self.thread_id == self.is_thread@ - &&& self.checked_token@.instance == self.instance - &&& self.checked_token@.key == self.thread_id - &&& self.my_inst@.instance == self.instance - &&& self.my_inst@.value == self.instance//&&& (forall |page_id| - // self.thread_token@.value.pages.dom().contains(page_id) <==> - // self.pages.dom().contains(page_id)) - //&&& self.thread_token@.value.pages.dom() == 
self.pages.dom() - - &&& self.thread_token@.value.segments.dom() == self.segments.dom() - &&& self.thread_token@.value.heap_id == self.heap_id - &&& self.heap.wf( - self.heap_id, - self.thread_token@.value.heap, - self.tld_id, - self.instance, - self.page_empty_global@.s.points_to@.pptr, - ) - &&& (forall|page_id| - #[trigger] - self.pages.dom().contains(page_id) ==> // Page is either 'used' or 'unused' - (self.unused_pages.dom().contains(page_id) - <==> !self.thread_token@.value.pages.dom().contains(page_id))) - &&& self.thread_token@.value.pages.dom().subset_of(self.pages.dom()) - &&& (forall|page_id| - #[trigger] - self.pages.dom().contains(page_id) ==> self.thread_token@.value.pages.dom().contains( - page_id, - ) ==> self.pages.index(page_id).wf( - page_id, - self.thread_token@.value.pages.index(page_id), - self.instance, - )) - &&& (forall|page_id| - #[trigger] - self.pages.dom().contains(page_id) ==> self.unused_pages.dom().contains(page_id) - ==> self.pages.index(page_id).wf_unused( - page_id, - self.unused_pages[page_id], - self.page_organization.popped, - self.instance, - )) - &&& (forall|segment_id| - #[trigger] - self.segments.dom().contains(segment_id) ==> self.segments[segment_id].wf( - segment_id, - self.thread_token@.value.segments.index(segment_id), - self.instance, - )) - &&& (forall|segment_id| - #[trigger] - self.segments.dom().contains(segment_id) ==> self.mem_chunk_good(segment_id)) - &&& self.tld@.value.is_Some() - &&& self.page_organization_valid() - &&& self.page_empty_global@.wf_empty_page_global() - } - - pub open spec fn page_organization_valid(&self) -> bool { - &&& self.page_organization.invariant() - &&& self.tld@.value.is_Some() - &&& page_organization_queues_match( - self.page_organization.unused_dlist_headers, - self.tld@.value.get_Some_0().segments.span_queue_headers@, - ) - &&& page_organization_used_queues_match( - self.page_organization.used_dlist_headers, - self.heap.pages@.value.unwrap()@, - ) - &&& page_organization_pages_match( - self.page_organization.pages, - self.pages, - self.psa, - self.page_organization.popped, - ) - &&& page_organization_segments_match(self.page_organization.segments, self.segments) - &&& (forall|page_id: PageId| - #[trigger] - self.page_organization.pages.dom().contains(page_id) ==> ( - !self.page_organization.pages[page_id].is_used <==> self.unused_pages.dom().contains( - page_id, - )))//&&& (forall |page_id: PageId| - // #[trigger] self.page_organization.pages.dom().contains(page_id) - // ==> self.page_organization.pages[page_id].is_used - // ==> self.page_organization.pages[page_id].offset == Some(0nat) - // ==> self.thread_token@.value.pages[page_id].offset == 0) - - &&& (forall|page_id| - #[trigger] - self.page_organization.pages.dom().contains(page_id) - ==> self.page_organization.pages[page_id].is_used - ==> page_organization_matches_token_page( - self.page_organization.pages[page_id], - self.thread_token@.value.pages[page_id], - )) - &&& (forall|page_id: PageId| - (#[trigger] - self.unused_pages.dom().contains(page_id)) - ==> self.page_organization.pages.dom().contains(page_id)) - &&& (forall|page_id: PageId| - #[trigger] - self.unused_pages.dom().contains(page_id) ==> self.unused_pages[page_id] - == self.psa[page_id]) - &&& (forall|page_id: PageId| - #[trigger] - self.thread_token@.value.pages.dom().contains(page_id) - ==> self.thread_token@.value.pages[page_id].shared_access == self.psa[page_id]) - } - - pub open spec fn page_state(&self, page_id: PageId) -> PageState - recommends - 
self.thread_token@.value.pages.dom().contains(page_id), - { - self.thread_token@.value.pages.index(page_id) - } + pub open spec fn block_size(&self, page_id: PageId) -> int { + self.pages[page_id].inner@.value.unwrap().xblock_size as int + } +} - pub open spec fn page_inner(&self, page_id: PageId) -> PageInner - recommends - self.pages.dom().contains(page_id), - self.pages.index(page_id).inner@.value.is_Some(), - { - self.pages.index(page_id).inner@.value.get_Some_0() - } +pub open spec fn page_organization_queues_match( + org_queues: Seq, + queues: Seq, +) -> bool { + org_queues.len() == queues.len() && (forall|i: int| + 0 <= i < org_queues.len() ==> is_page_ptr_opt( + (#[trigger] queues[i]).first, + org_queues[i].first, + )) && (forall|i: int| + 0 <= i < org_queues.len() ==> is_page_ptr_opt( + (#[trigger] queues[i]).last, + org_queues[i].last, + )) +} - // This is for when we need to obtain ownership of the ThreadToken - // but when we have a &mut reference to the Local - pub proof fn take_thread_token(tracked &mut self) -> (tracked tt: Mim::thread_local_state) - ensures// All fields remain the same except thread_token which is set to an - // arbitrary value +pub open spec fn page_organization_used_queues_match( + org_queues: Seq, + queues: Seq, +) -> bool { + org_queues.len() == queues.len() && (forall|i: int| + 0 <= i < org_queues.len() ==> is_page_ptr_opt( + (#[trigger] queues[i]).first, + org_queues[i].first, + )) && (forall|i: int| + 0 <= i < org_queues.len() ==> is_page_ptr_opt( + (#[trigger] queues[i]).last, + org_queues[i].last, + )) +} - *self == (Local { thread_token: self.thread_token, ..*old(self) }), - tt == old(self).thread_token, - { - let tracked mut t = Mim::thread_local_state::arbitrary(); - tracked_swap(&mut t, &mut self.thread_token); - t - } +pub open spec fn page_organization_pages_match( + org_pages: Map, + pages: Map, + psa: Map, + popped: Popped, +) -> bool { + &&& org_pages.dom() =~= pages.dom() + &&& org_pages.dom() + =~= psa.dom() //&&& (forall |page_id| #[trigger] org_pages.dom().contains(page_id) + // && !org_pages[page_id].is_used ==> unused_pages.dom().contains(page_id)) + // + //&&& (forall |page_id| #[trigger] org_pages.dom().contains(page_id) + // && !org_pages[page_id].is_used ==> unused_pages[page_id].wf_unused(page_id)) + + &&& (forall|page_id| #[trigger] + org_pages.dom().contains(page_id) ==> page_organization_pages_match_data( + org_pages[page_id], + pages[page_id], + psa[page_id], + page_id, + popped, + )) +} - pub proof fn take_checked_token(tracked &mut self) -> (tracked tt: Mim::thread_checked_state) - ensures// All fields remain the same except thread_token which is set to an - // arbitrary value +pub open spec fn page_organization_pages_match_data( + page_data: PageData, + pla: PageLocalAccess, + psa: PageSharedAccess, + page_id: PageId, + popped: Popped, +) -> bool { + psa.points_to@.value.is_Some() && (match ( + pla.count@.value, + pla.inner@.value, + pla.prev@.value, + pla.next@.value, + ) { + (Some(count), Some(inner), Some(prev), Some(next)) => { + &&& (match page_data.count { + None => true, + Some(c) => count as int == c, + }) + &&& (match page_data.full { + None => true, + Some(b) => inner.in_full() == b, + }) + &&& (match page_data.offset { + None => true, + Some(o) => psa.points_to@.value.get_Some_0().offset as int == o + * SIZEOF_PAGE_HEADER, + }) + &&& (match page_data.dlist_entry { + None => true, + Some(page_queue_data) => { + &&& is_page_ptr_opt(prev, page_queue_data.prev) + &&& is_page_ptr_opt(next, page_queue_data.next) + }, 
+ }) + &&& (match page_data.page_header_kind { + None => { + (page_id.idx == 0 ==> { + &&& !page_data.is_used + &&& (match popped { + Popped::SegmentCreating(sid) if sid == page_id.segment_id => true, + _ => inner.xblock_size != 0, + }) + &&& (!popped.is_SegmentCreating() ==> inner.xblock_size != 0) + }) && (page_id.idx != 0 ==> page_data.offset == Some(0nat) ==> ((!( + popped.is_Ready() && popped.get_Ready_0() == page_id) && !( + popped.is_VeryUnready() && popped.get_VeryUnready_0() == page_id.segment_id + && popped.get_VeryUnready_1() == page_id.idx)) ==> (page_data.is_used + <==> inner.xblock_size != 0))) + }, + Some(PageHeaderKind::Normal(_, bsize)) => { + &&& page_id.idx != 0 + &&& page_data.is_used + &&& inner.xblock_size != 0 + &&& inner.xblock_size == bsize + &&& page_data.is_used + &&& page_data.offset == Some(0nat) + }, + }) + }, + _ => false, + }) +} - *self == (Local { checked_token: self.checked_token, ..*old(self) }), - tt == old(self).checked_token, - { - let tracked mut t = Mim::thread_checked_state::arbitrary(); - tracked_swap(&mut t, &mut self.checked_token); - t - } +pub open spec fn page_organization_segments_match( + org_segments: Map, + segments: Map, +) -> bool { + org_segments.dom() =~= segments.dom() && (forall|segment_id: SegmentId| + segments.dom().contains(segment_id) ==> org_segments[segment_id].used + == segments[segment_id].main2@.value.unwrap().used) +} - pub open spec fn commit_mask(&self, segment_id: SegmentId) -> CommitMask { - self.segments[segment_id].main@.value.unwrap().commit_mask - } +pub open spec fn page_organization_matches_token_page( + page_data: PageData, + page_state: PageState, +) -> bool { + page_data.offset.is_some() && page_data.offset.unwrap() + == page_state.offset/*&& (match page_data.page_header_kind { + Some(PageHeaderKind::Normal(bsize)) => bsize == page_state.block_size, + _ => true, + })*/ - pub open spec fn decommit_mask(&self, segment_id: SegmentId) -> CommitMask { - self.segments[segment_id].main@.value.unwrap().decommit_mask - } +} - pub open spec fn is_used_primary(&self, page_id: PageId) -> bool { - self.page_organization.pages.dom().contains(page_id) - && self.page_organization.pages[page_id].is_used - && self.page_organization.pages[page_id].offset == Some(0nat) - } +///////////////////////////////////////////// +///////////////////////////////////////////// +///////////////////////////////////////////// +///////////////////////////////////////////// +///////////////////////////////////////////// +///////////////////////////////////////////// +///////////////////////////////////////////// +////// Utilities for local access +pub struct HeapPtr { + pub heap_ptr: PPtr, + pub heap_id: Ghost, +} - pub open spec fn page_reserved(&self, page_id: PageId) -> int { - self.pages[page_id].inner@.value.unwrap().reserved as int - } +impl Clone for HeapPtr { + #[inline(always)] + fn clone(&self) -> (s: Self) + ensures + *self == s, + { + HeapPtr { heap_ptr: self.heap_ptr, heap_id: Ghost(self.heap_id@) } + } +} - pub open spec fn page_count(&self, page_id: PageId) -> int { - self.pages[page_id].count@.value.unwrap() as int - } +impl Copy for HeapPtr { - pub open spec fn page_capacity(&self, page_id: PageId) -> int { - self.pages[page_id].inner@.value.unwrap().capacity as int - } +} - pub open spec fn block_size(&self, page_id: PageId) -> int { - self.pages[page_id].inner@.value.unwrap().xblock_size as int - } +impl HeapPtr { + #[verifier(inline)] + pub open spec fn wf(&self) -> bool { + is_heap_ptr(self.heap_ptr.id(), self.heap_id@) } - 
pub open spec fn page_organization_queues_match( - org_queues: Seq, - queues: Seq, - ) -> bool { - org_queues.len() == queues.len() && (forall|i: int| - 0 <= i < org_queues.len() ==> is_page_ptr_opt( - (#[trigger] - queues[i]).first, - org_queues[i].first, - )) && (forall|i: int| - 0 <= i < org_queues.len() ==> is_page_ptr_opt( - (#[trigger] - queues[i]).last, - org_queues[i].last, - )) + #[verifier(inline)] + pub open spec fn is_in(&self, local: Local) -> bool { + local.heap_id == self.heap_id@ } - pub open spec fn page_organization_used_queues_match( - org_queues: Seq, - queues: Seq, - ) -> bool { - org_queues.len() == queues.len() && (forall|i: int| - 0 <= i < org_queues.len() ==> is_page_ptr_opt( - (#[trigger] - queues[i]).first, - org_queues[i].first, - )) && (forall|i: int| - 0 <= i < org_queues.len() ==> is_page_ptr_opt( - (#[trigger] - queues[i]).last, - org_queues[i].last, - )) + #[inline(always)] + pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (heap: &'a Heap) + requires + local.wf_basic(), + self.wf(), + self.is_in(*local), + ensures + Some(*heap) == local.thread_token@.value.heap.shared_access.points_to@.value, + { + let tracked perm = &local.instance.thread_local_state_guards_heap( + local.thread_id, + &local.thread_token, + ).points_to; + self.heap_ptr.borrow(Tracked(perm)) } - pub open spec fn page_organization_pages_match( - org_pages: Map, - pages: Map, - psa: Map, - popped: Popped, - ) -> bool { - &&& org_pages.dom() =~= pages.dom() - &&& org_pages.dom() - =~= psa.dom()//&&& (forall |page_id| #[trigger] org_pages.dom().contains(page_id) - // && !org_pages[page_id].is_used ==> unused_pages.dom().contains(page_id)) - // - //&&& (forall |page_id| #[trigger] org_pages.dom().contains(page_id) - // && !org_pages[page_id].is_used ==> unused_pages[page_id].wf_unused(page_id)) - - &&& (forall|page_id| - #[trigger] - org_pages.dom().contains(page_id) ==> page_organization_pages_match_data( - org_pages[page_id], - pages[page_id], - psa[page_id], - page_id, - popped, - )) + #[inline(always)] + pub fn get_pages<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (pages: &'a [PageQueue; 75]) + requires + local.wf_basic(), + self.wf(), + self.is_in(*local), + ensures + Some(*pages) == local.heap.pages@.value, + { + self.get_ref(Tracked(local)).pages.borrow(Tracked(&local.heap.pages)) } - pub open spec fn page_organization_pages_match_data( - page_data: PageData, - pla: PageLocalAccess, - psa: PageSharedAccess, - page_id: PageId, - popped: Popped, - ) -> bool { - psa.points_to@.value.is_Some() && (match ( - pla.count@.value, - pla.inner@.value, - pla.prev@.value, - pla.next@.value, - ) { - (Some(count), Some(inner), Some(prev), Some(next)) => { - &&& (match page_data.count { - None => true, - Some(c) => count as int == c, - }) - &&& (match page_data.full { - None => true, - Some(b) => inner.in_full() == b, - }) - &&& (match page_data.offset { - None => true, - Some(o) => psa.points_to@.value.get_Some_0().offset as int == o - * SIZEOF_PAGE_HEADER, - }) - &&& (match page_data.dlist_entry { - None => true, - Some(page_queue_data) => { - &&& is_page_ptr_opt(prev, page_queue_data.prev) - &&& is_page_ptr_opt(next, page_queue_data.next) - }, - }) - &&& (match page_data.page_header_kind { - None => { - (page_id.idx == 0 ==> { - &&& !page_data.is_used - &&& (match popped { - Popped::SegmentCreating(sid) if sid == page_id.segment_id => true, - _ => inner.xblock_size != 0, - }) - &&& (!popped.is_SegmentCreating() ==> inner.xblock_size != 0) - }) && (page_id.idx != 0 ==> 
page_data.offset == Some(0nat) ==> ((!( - popped.is_Ready() && popped.get_Ready_0() == page_id) && !( - popped.is_VeryUnready() && popped.get_VeryUnready_0() == page_id.segment_id - && popped.get_VeryUnready_1() == page_id.idx)) ==> (page_data.is_used - <==> inner.xblock_size != 0))) - }, - Some(PageHeaderKind::Normal(_, bsize)) => { - &&& page_id.idx != 0 - &&& page_data.is_used - &&& inner.xblock_size != 0 - &&& inner.xblock_size == bsize - &&& page_data.is_used - &&& page_data.offset == Some(0nat) - }, - }) - }, - _ => false, - }) + #[inline(always)] + pub fn get_page_count<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_count: usize) + requires + local.wf_basic(), + self.wf(), + self.is_in(*local), + ensures + Some(page_count) == local.heap.page_count@.value, + { + *self.get_ref(Tracked(local)).page_count.borrow(Tracked(&local.heap.page_count)) } - pub open spec fn page_organization_segments_match( - org_segments: Map, - segments: Map, - ) -> bool { - org_segments.dom() =~= segments.dom() && (forall|segment_id: SegmentId| - segments.dom().contains(segment_id) ==> org_segments[segment_id].used - == segments[segment_id].main2@.value.unwrap().used) + #[inline(always)] + pub fn set_page_count<'a>(&self, Tracked(local): Tracked<&mut Local>, page_count: usize) + requires + old(local).wf_basic(), + self.wf(), + self.is_in(*old(local)), + ensures + local_page_count_update(*old(local), *local), + { + let tracked perm = &local.instance.thread_local_state_guards_heap( + local.thread_id, + &local.thread_token, + ).points_to; + let heap = self.heap_ptr.borrow(Tracked(perm)); + let _ = heap.page_count.take(Tracked(&mut local.heap.page_count)); + heap.page_count.put(Tracked(&mut local.heap.page_count), page_count); } - pub open spec fn page_organization_matches_token_page( - page_data: PageData, - page_state: PageState, - ) -> bool { - page_data.offset.is_some() && page_data.offset.unwrap() - == page_state.offset/*&& (match page_data.page_header_kind { - Some(PageHeaderKind::Normal(bsize)) => bsize == page_state.block_size, - _ => true, - })*/ + #[inline(always)] + pub fn get_page_retired_min<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_retired_min: + usize) + requires + local.wf_basic(), + self.wf(), + self.is_in(*local), + ensures + Some(page_retired_min) == local.heap.page_retired_min@.value, + { + *self.get_ref(Tracked(local)).page_retired_min.borrow(Tracked(&local.heap.page_retired_min)) + } + #[inline(always)] + pub fn set_page_retired_min<'a>( + &self, + Tracked(local): Tracked<&mut Local>, + page_retired_min: usize, + ) + requires + old(local).wf_basic(), + self.wf(), + self.is_in(*old(local)), + ensures + local_page_retired_min_update(*old(local), *local), + { + let tracked perm = &local.instance.thread_local_state_guards_heap( + local.thread_id, + &local.thread_token, + ).points_to; + let heap = self.heap_ptr.borrow(Tracked(perm)); + let _ = heap.page_retired_min.take(Tracked(&mut local.heap.page_retired_min)); + heap.page_retired_min.put(Tracked(&mut local.heap.page_retired_min), page_retired_min); } - ///////////////////////////////////////////// - ///////////////////////////////////////////// - ///////////////////////////////////////////// - ///////////////////////////////////////////// - ///////////////////////////////////////////// - ///////////////////////////////////////////// - ///////////////////////////////////////////// - ////// Utilities for local access - pub struct HeapPtr { - pub heap_ptr: PPtr, - pub heap_id: Ghost, + #[inline(always)] + pub fn 
get_page_retired_max<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_retired_max: + usize) + requires + local.wf_basic(), + self.wf(), + self.is_in(*local), + ensures + Some(page_retired_max) == local.heap.page_retired_max@.value, + { + *self.get_ref(Tracked(local)).page_retired_max.borrow(Tracked(&local.heap.page_retired_max)) } - impl Clone for HeapPtr { - #[inline(always)] - fn clone(&self) -> (s: Self) - ensures - *self == s, - { - HeapPtr { heap_ptr: self.heap_ptr, heap_id: Ghost(self.heap_id@) } - } + #[inline(always)] + pub fn set_page_retired_max<'a>( + &self, + Tracked(local): Tracked<&mut Local>, + page_retired_max: usize, + ) + requires + old(local).wf_basic(), + self.wf(), + self.is_in(*old(local)), + ensures + local_page_retired_max_update(*old(local), *local), + { + let tracked perm = &local.instance.thread_local_state_guards_heap( + local.thread_id, + &local.thread_token, + ).points_to; + let heap = self.heap_ptr.borrow(Tracked(perm)); + let _ = heap.page_retired_max.take(Tracked(&mut local.heap.page_retired_max)); + heap.page_retired_max.put(Tracked(&mut local.heap.page_retired_max), page_retired_max); } - impl Copy for HeapPtr { + #[inline(always)] + pub fn get_pages_free_direct<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (pages: + &'a [PPtr; 129]) + requires + local.wf_basic(), + self.wf(), + self.is_in(*local), + ensures + Some(*pages) == local.heap.pages_free_direct@.value, + { + self.get_ref(Tracked(local)).pages_free_direct.borrow( + Tracked(&local.heap.pages_free_direct), + ) + } + #[inline(always)] + pub fn get_arena_id<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (arena_id: ArenaId) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + arena_id + == local.thread_token@.value.heap.shared_access.points_to@.value.unwrap().arena_id, + { + self.get_ref(Tracked(local)).arena_id } - impl HeapPtr { - #[verifier(inline)] - pub open spec fn wf(&self) -> bool { - is_heap_ptr(self.heap_ptr.id(), self.heap_id@) + #[inline(always)] + pub fn get_page_empty(&self, Tracked(local): Tracked<&Local>) -> (res: ( + PPtr, + Tracked>, + )) + requires + local.wf_basic(), + self.wf(), + self.is_in(*local), + ensures + ({ + let (page_ptr, pfa) = res; + { + pfa@@.wf_empty_page_global() && pfa@@.s.points_to@.pptr == page_ptr.id() + && page_ptr.id() != 0 && page_ptr.id() + == local.page_empty_global@.s.points_to@.pptr + } + }), + { + let page_ptr = self.get_ref(Tracked(local)).page_empty_ptr; + let tracked pfa = local.page_empty_global.clone(); + proof { + pfa.borrow().s.points_to.is_nonnull(); } + (page_ptr, Tracked(pfa)) + } +} - #[verifier(inline)] - pub open spec fn is_in(&self, local: Local) -> bool { - local.heap_id == self.heap_id@ - } +pub open spec fn local_page_count_update(loc1: Local, loc2: Local) -> bool { + &&& loc2 == Local { heap: loc2.heap, ..loc1 } + &&& loc2.heap == HeapLocalAccess { page_count: loc2.heap.page_count, ..loc1.heap } + &&& loc1.heap.page_count@.pcell == loc2.heap.page_count@.pcell + &&& loc1.heap.page_count@.value.is_some() + &&& loc2.heap.page_count@.value.is_some() +} - #[inline(always)] - pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (heap: &'a Heap) - requires - local.wf_basic(), - self.wf(), - self.is_in(*local), - ensures - Some(*heap) == local.thread_token@.value.heap.shared_access.points_to@.value, - { - let tracked perm = &local.instance.thread_local_state_guards_heap( - local.thread_id, - &local.thread_token, - ).points_to; - self.heap_ptr.borrow(Tracked(perm)) - } +pub open spec fn 
local_page_retired_min_update(loc1: Local, loc2: Local) -> bool { + &&& loc2 == Local { heap: loc2.heap, ..loc1 } + &&& loc2.heap == HeapLocalAccess { page_retired_min: loc2.heap.page_retired_min, ..loc1.heap } + &&& loc1.heap.page_retired_min@.pcell == loc2.heap.page_retired_min@.pcell + &&& loc1.heap.page_retired_min@.value.is_some() + &&& loc2.heap.page_retired_min@.value.is_some() +} - #[inline(always)] - pub fn get_pages<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (pages: &'a [PageQueue; 75]) - requires - local.wf_basic(), - self.wf(), - self.is_in(*local), - ensures - Some(*pages) == local.heap.pages@.value, - { - self.get_ref(Tracked(local)).pages.borrow(Tracked(&local.heap.pages)) - } +pub open spec fn local_page_retired_max_update(loc1: Local, loc2: Local) -> bool { + &&& loc2 == Local { heap: loc2.heap, ..loc1 } + &&& loc2.heap == HeapLocalAccess { page_retired_max: loc2.heap.page_retired_max, ..loc1.heap } + &&& loc1.heap.page_retired_max@.pcell == loc2.heap.page_retired_max@.pcell + &&& loc1.heap.page_retired_max@.value.is_some() + &&& loc2.heap.page_retired_max@.value.is_some() +} - #[inline(always)] - pub fn get_page_count<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_count: usize) - requires - local.wf_basic(), - self.wf(), - self.is_in(*local), - ensures - Some(page_count) == local.heap.page_count@.value, - { - *self.get_ref(Tracked(local)).page_count.borrow(Tracked(&local.heap.page_count)) - } +pub struct TldPtr { + pub tld_ptr: PPtr, + pub tld_id: Ghost, +} - #[inline(always)] - pub fn set_page_count<'a>(&self, Tracked(local): Tracked<&mut Local>, page_count: usize) - requires - old(local).wf_basic(), - self.wf(), - self.is_in(*old(local)), - ensures - local_page_count_update(*old(local), *local), - { - let tracked perm = &local.instance.thread_local_state_guards_heap( - local.thread_id, - &local.thread_token, - ).points_to; - let heap = self.heap_ptr.borrow(Tracked(perm)); - let _ = heap.page_count.take(Tracked(&mut local.heap.page_count)); - heap.page_count.put(Tracked(&mut local.heap.page_count), page_count); - } +impl Clone for TldPtr { + #[inline(always)] + fn clone(&self) -> (s: Self) + ensures + *self == s, + { + TldPtr { tld_ptr: self.tld_ptr, tld_id: Ghost(self.tld_id@) } + } +} - #[inline(always)] - pub fn get_page_retired_min<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_retired_min: - usize) - requires - local.wf_basic(), - self.wf(), - self.is_in(*local), - ensures - Some(page_retired_min) == local.heap.page_retired_min@.value, - { - *self.get_ref(Tracked(local)).page_retired_min.borrow(Tracked(&local.heap.page_retired_min)) - } +impl Copy for TldPtr { - #[inline(always)] - pub fn set_page_retired_min<'a>( - &self, - Tracked(local): Tracked<&mut Local>, - page_retired_min: usize, - ) - requires - old(local).wf_basic(), - self.wf(), - self.is_in(*old(local)), - ensures - local_page_retired_min_update(*old(local), *local), - { - let tracked perm = &local.instance.thread_local_state_guards_heap( - local.thread_id, - &local.thread_token, - ).points_to; - let heap = self.heap_ptr.borrow(Tracked(perm)); - let _ = heap.page_retired_min.take(Tracked(&mut local.heap.page_retired_min)); - heap.page_retired_min.put(Tracked(&mut local.heap.page_retired_min), page_retired_min); - } +} - #[inline(always)] - pub fn get_page_retired_max<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_retired_max: - usize) - requires - local.wf_basic(), - self.wf(), - self.is_in(*local), - ensures - Some(page_retired_max) == 
local.heap.page_retired_max@.value, - { - *self.get_ref(Tracked(local)).page_retired_max.borrow(Tracked(&local.heap.page_retired_max)) - } +impl TldPtr { + #[verifier(inline)] + pub open spec fn wf(&self) -> bool { + is_tld_ptr(self.tld_ptr.id(), self.tld_id@) + } - #[inline(always)] - pub fn set_page_retired_max<'a>( - &self, - Tracked(local): Tracked<&mut Local>, - page_retired_max: usize, - ) - requires - old(local).wf_basic(), - self.wf(), - self.is_in(*old(local)), - ensures - local_page_retired_max_update(*old(local), *local), - { - let tracked perm = &local.instance.thread_local_state_guards_heap( - local.thread_id, - &local.thread_token, - ).points_to; - let heap = self.heap_ptr.borrow(Tracked(perm)); - let _ = heap.page_retired_max.take(Tracked(&mut local.heap.page_retired_max)); - heap.page_retired_max.put(Tracked(&mut local.heap.page_retired_max), page_retired_max); - } + #[verifier(inline)] + pub open spec fn is_in(&self, local: Local) -> bool { + local.tld_id == self.tld_id@ + } - #[inline(always)] - pub fn get_pages_free_direct<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (pages: - &'a [PPtr; 129]) - requires - local.wf_basic(), - self.wf(), - self.is_in(*local), - ensures - Some(*pages) == local.heap.pages_free_direct@.value, - { - self.get_ref(Tracked(local)).pages_free_direct.borrow( - Tracked(&local.heap.pages_free_direct), - ) - } + #[inline(always)] + pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (tld: &'a Tld) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + Some(*tld) == local.tld@.value, + { + self.tld_ptr.borrow(Tracked(&local.tld)) + } - #[inline(always)] - pub fn get_arena_id<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (arena_id: ArenaId) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - arena_id - == local.thread_token@.value.heap.shared_access.points_to@.value.unwrap().arena_id, - { - self.get_ref(Tracked(local)).arena_id - } + #[inline(always)] + pub fn get_segments_count(&self, Tracked(local): Tracked<&Local>) -> (count: usize) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + count == local.tld@.value.unwrap().segments.count, + { + self.get_ref(Tracked(local)).segments.count + } +} - #[inline(always)] - pub fn get_page_empty(&self, Tracked(local): Tracked<&Local>) -> (res: ( - PPtr, - Tracked>, - )) - requires - local.wf_basic(), - self.wf(), - self.is_in(*local), - ensures - ({ - let (page_ptr, pfa) = res; - { - pfa@@.wf_empty_page_global() && pfa@@.s.points_to@.pptr == page_ptr.id() - && page_ptr.id() != 0 && page_ptr.id() - == local.page_empty_global@.s.points_to@.pptr - } - }), - { - let page_ptr = self.get_ref(Tracked(local)).page_empty_ptr; - let tracked pfa = local.page_empty_global.clone(); - proof { - pfa.borrow().s.points_to.is_nonnull(); - } - (page_ptr, Tracked(pfa)) - } +pub struct SegmentPtr { + pub segment_ptr: PPtr, + pub segment_id: Ghost, +} + +impl Clone for SegmentPtr { + #[inline(always)] + fn clone(&self) -> (s: Self) + ensures + *self == s, + { + SegmentPtr { segment_ptr: self.segment_ptr, segment_id: Ghost(self.segment_id@) } } +} + +impl Copy for SegmentPtr { + +} - pub open spec fn local_page_count_update(loc1: Local, loc2: Local) -> bool { - &&& loc2 == Local { heap: loc2.heap, ..loc1 } - &&& loc2.heap == HeapLocalAccess { page_count: loc2.heap.page_count, ..loc1.heap } - &&& loc1.heap.page_count@.pcell == loc2.heap.page_count@.pcell - &&& loc1.heap.page_count@.value.is_some() - &&& loc2.heap.page_count@.value.is_some() 
+impl SegmentPtr { + #[verifier(inline)] + pub open spec fn wf(&self) -> bool { + is_segment_ptr(self.segment_ptr.id(), self.segment_id@) } - pub open spec fn local_page_retired_min_update(loc1: Local, loc2: Local) -> bool { - &&& loc2 == Local { heap: loc2.heap, ..loc1 } - &&& loc2.heap == HeapLocalAccess { page_retired_min: loc2.heap.page_retired_min, ..loc1.heap } - &&& loc1.heap.page_retired_min@.pcell == loc2.heap.page_retired_min@.pcell - &&& loc1.heap.page_retired_min@.value.is_some() - &&& loc2.heap.page_retired_min@.value.is_some() + #[verifier(inline)] + pub open spec fn is_in(&self, local: Local) -> bool { + local.segments.dom().contains(self.segment_id@) } - pub open spec fn local_page_retired_max_update(loc1: Local, loc2: Local) -> bool { - &&& loc2 == Local { heap: loc2.heap, ..loc1 } - &&& loc2.heap == HeapLocalAccess { page_retired_max: loc2.heap.page_retired_max, ..loc1.heap } - &&& loc1.heap.page_retired_max@.pcell == loc2.heap.page_retired_max@.pcell - &&& loc1.heap.page_retired_max@.value.is_some() - &&& loc2.heap.page_retired_max@.value.is_some() + #[inline(always)] + pub fn is_null(&self) -> (b: bool) + ensures + b == (self.segment_ptr.id() == 0), + { + self.segment_ptr.to_usize() == 0 } - pub struct TldPtr { - pub tld_ptr: PPtr, - pub tld_id: Ghost, + #[inline(always)] + pub fn null() -> (s: Self) + ensures + s.segment_ptr.id() == 0, + { + SegmentPtr { segment_ptr: PPtr::from_usize(0), segment_id: Ghost(arbitrary()) } } - impl Clone for TldPtr { - #[inline(always)] - fn clone(&self) -> (s: Self) - ensures - *self == s, - { - TldPtr { tld_ptr: self.tld_ptr, tld_id: Ghost(self.tld_id@) } + #[inline(always)] + pub fn get_page_header_ptr(&self, idx: usize) -> (page_ptr: PagePtr) + requires + self.wf(), + 0 <= idx <= SLICES_PER_SEGMENT, + ensures + page_ptr.wf(), + page_ptr.page_id@.segment_id == self.segment_id@, + page_ptr.page_id@.idx == idx, + { + proof { + const_facts(); } + let j = self.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER + idx * SIZEOF_PAGE_HEADER; + return PagePtr { + page_ptr: PPtr::from_usize(j), + page_id: Ghost(PageId { segment_id: self.segment_id@, idx: idx as nat }), + }; } - impl Copy for TldPtr { + #[inline] + pub fn get_page_after_end(&self) -> (page_ptr: PPtr) + requires + self.wf(), + ensures + page_ptr.id() == crate::layout::page_header_start( + PageId { segment_id: self.segment_id@, idx: SLICES_PER_SEGMENT as nat }, + ), + { + proof { + const_facts(); + } + let j = self.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER + SLICES_PER_SEGMENT as usize + * SIZEOF_PAGE_HEADER; + PPtr::from_usize(j) + } + #[inline(always)] + pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (segment: &'a SegmentHeader) + requires //local.wf_main(), + + local.thread_token@.value.segments.dom().contains(self.segment_id@), + local.thread_token@.value.segments[self.segment_id@].shared_access.points_to@.pptr + == self.segment_ptr.id(), + local.thread_token@.value.segments[self.segment_id@].shared_access.points_to@.value.is_some(), + local.thread_token@.value.segments[self.segment_id@].is_enabled, + local.thread_token@.key == local.thread_id, + local.thread_token@.instance == local.instance, + self.wf(), + self.is_in(*local), + ensures + Some(*segment) == local.thread_token@.value.segments.index( + self.segment_id@, + ).shared_access.points_to@.value, + { + let tracked perm = &local.instance.thread_local_state_guards_segment( + local.thread_id, + self.segment_id@, + &local.thread_token, + ).points_to; + self.segment_ptr.borrow(Tracked(perm)) } - impl TldPtr 
{ - #[verifier(inline)] - pub open spec fn wf(&self) -> bool { - is_tld_ptr(self.tld_ptr.id(), self.tld_id@) - } + #[inline(always)] + pub fn get_main_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (segment_header_main: + &'a SegmentHeaderMain) + requires + self.wf(), + self.is_in(*local), + //local.wf_main(), + local.thread_token@.value.segments.dom().contains(self.segment_id@), + local.thread_token@.value.segments[self.segment_id@].shared_access.points_to@.pptr + == self.segment_ptr.id(), + local.thread_token@.value.segments.index( + self.segment_id@, + ).shared_access.points_to@.value.is_some(), + local.thread_token@.value.segments[self.segment_id@].is_enabled, + local.thread_token@.key == local.thread_id, + local.thread_token@.instance == local.instance, + local.thread_token@.value.segments.index( + self.segment_id@, + ).shared_access.points_to@.value.unwrap().main.id() + == local.segments[self.segment_id@].main@.pcell, + local.segments.dom().contains(self.segment_id@), + local.segments[self.segment_id@].main@.value.is_some(), + ensures + Some(*segment_header_main) == local.segments.index(self.segment_id@).main@.value, + { + let segment = self.get_ref(Tracked(local)); + segment.main.borrow(Tracked(&local.segments.tracked_borrow(self.segment_id@).main)) + } - #[verifier(inline)] - pub open spec fn is_in(&self, local: Local) -> bool { - local.tld_id == self.tld_id@ - } + #[inline(always)] + pub fn get_main2_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (segment_header_main2: + &'a SegmentHeaderMain2) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + Some(*segment_header_main2) == local.segments.index(self.segment_id@).main2@.value, + { + let segment = self.get_ref(Tracked(local)); + segment.main2.borrow(Tracked(&local.segments.tracked_borrow(self.segment_id@).main2)) + } - #[inline(always)] - pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (tld: &'a Tld) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - Some(*tld) == local.tld@.value, - { - self.tld_ptr.borrow(Tracked(&local.tld)) - } + #[inline(always)] + pub fn get_commit_mask<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (cm: &'a CommitMask) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + cm == local.segments[self.segment_id@].main@.value.unwrap().commit_mask, + { + &self.get_main_ref(Tracked(local)).commit_mask + } - #[inline(always)] - pub fn get_segments_count(&self, Tracked(local): Tracked<&Local>) -> (count: usize) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - count == local.tld@.value.unwrap().segments.count, - { - self.get_ref(Tracked(local)).segments.count - } + #[inline(always)] + pub fn get_decommit_mask<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (cm: &'a CommitMask) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + cm == local.segments[self.segment_id@].main@.value.unwrap().decommit_mask, + { + &self.get_main_ref(Tracked(local)).decommit_mask } - pub struct SegmentPtr { - pub segment_ptr: PPtr, - pub segment_id: Ghost, + #[inline(always)] + pub fn get_decommit_expire(&self, Tracked(local): Tracked<&Local>) -> (i: i64) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + i == local.segments[self.segment_id@].main@.value.unwrap().decommit_expire, + { + self.get_main_ref(Tracked(local)).decommit_expire } - impl Clone for SegmentPtr { - #[inline(always)] - fn clone(&self) -> (s: Self) - ensures - *self == s, - { - 
SegmentPtr { segment_ptr: self.segment_ptr, segment_id: Ghost(self.segment_id@) } - } + #[inline(always)] + pub fn get_allow_decommit(&self, Tracked(local): Tracked<&Local>) -> (b: bool) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + b == local.segments[self.segment_id@].main@.value.unwrap().allow_decommit, + { + self.get_main_ref(Tracked(local)).allow_decommit } - impl Copy for SegmentPtr { + #[inline(always)] + pub fn get_used(&self, Tracked(local): Tracked<&Local>) -> (used: usize) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + used == local.segments[self.segment_id@].main2@.value.unwrap().used, + { + self.get_main2_ref(Tracked(local)).used + } + #[inline(always)] + pub fn get_abandoned(&self, Tracked(local): Tracked<&Local>) -> (abandoned: usize) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + abandoned == local.segments[self.segment_id@].main2@.value.unwrap().abandoned, + { + self.get_main2_ref(Tracked(local)).abandoned } - impl SegmentPtr { - #[verifier(inline)] - pub open spec fn wf(&self) -> bool { - is_segment_ptr(self.segment_ptr.id(), self.segment_id@) - } + #[inline(always)] + pub fn get_mem_is_pinned(&self, Tracked(local): Tracked<&Local>) -> (mem_is_pinned: bool) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + mem_is_pinned == local.segments[self.segment_id@].main@.value.unwrap().mem_is_pinned, + { + self.get_main_ref(Tracked(local)).mem_is_pinned + } - #[verifier(inline)] - pub open spec fn is_in(&self, local: Local) -> bool { - local.segments.dom().contains(self.segment_id@) - } + #[inline(always)] + pub fn is_abandoned(&self, Tracked(local): Tracked<&Local>) -> (is_ab: bool) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + { + self.get_ref(Tracked(local)).thread_id.load() == 0 + } - #[inline(always)] - pub fn is_null(&self) -> (b: bool) - ensures - b == (self.segment_ptr.id() == 0), - { - self.segment_ptr.to_usize() == 0 - } + #[inline(always)] + pub fn get_segment_kind(&self, Tracked(local): Tracked<&Local>) -> (kind: SegmentKind) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + kind == local.segments[self.segment_id@].main2@.value.unwrap().kind, + { + self.get_main2_ref(Tracked(local)).kind + } - #[inline(always)] - pub fn null() -> (s: Self) - ensures - s.segment_ptr.id() == 0, - { - SegmentPtr { segment_ptr: PPtr::from_usize(0), segment_id: Ghost(arbitrary()) } - } + #[inline(always)] + pub fn is_kind_huge(&self, Tracked(local): Tracked<&Local>) -> (b: bool) + requires + self.wf(), + self.is_in(*local), + local.wf_main(), + ensures + b == (local.segments[self.segment_id@].main2@.value.unwrap().kind == SegmentKind::Huge), + { + let kind = self.get_main2_ref(Tracked(local)).kind; + matches!(kind, SegmentKind::Huge) + } +} - #[inline(always)] - pub fn get_page_header_ptr(&self, idx: usize) -> (page_ptr: PagePtr) - requires - self.wf(), - 0 <= idx <= SLICES_PER_SEGMENT, - ensures - page_ptr.wf(), - page_ptr.page_id@.segment_id == self.segment_id@, - page_ptr.page_id@.idx == idx, - { - proof { - const_facts(); - } - let j = self.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER + idx * SIZEOF_PAGE_HEADER; - return PagePtr { - page_ptr: PPtr::from_usize(j), - page_id: Ghost(PageId { segment_id: self.segment_id@, idx: idx as nat }), - }; - } +pub struct PagePtr { + pub page_ptr: PPtr, + pub page_id: Ghost, +} - #[inline] - pub fn get_page_after_end(&self) -> (page_ptr: PPtr) - requires - self.wf(), - ensures - 
page_ptr.id() == crate::layout::page_header_start( - PageId { segment_id: self.segment_id@, idx: SLICES_PER_SEGMENT as nat }, - ), - { - proof { - const_facts(); - } - let j = self.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER + SLICES_PER_SEGMENT as usize - * SIZEOF_PAGE_HEADER; - PPtr::from_usize(j) - } +impl Clone for PagePtr { + #[inline(always)] + fn clone(&self) -> (s: Self) + ensures + *self == s, + { + PagePtr { page_ptr: self.page_ptr, page_id: Ghost(self.page_id@) } + } +} - #[inline(always)] - pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (segment: &'a SegmentHeader) - requires//local.wf_main(), - - local.thread_token@.value.segments.dom().contains(self.segment_id@), - local.thread_token@.value.segments[self.segment_id@].shared_access.points_to@.pptr - == self.segment_ptr.id(), - local.thread_token@.value.segments[self.segment_id@].shared_access.points_to@.value.is_some(), - local.thread_token@.value.segments[self.segment_id@].is_enabled, - local.thread_token@.key == local.thread_id, - local.thread_token@.instance == local.instance, - self.wf(), - self.is_in(*local), - ensures - Some(*segment) == local.thread_token@.value.segments.index( - self.segment_id@, - ).shared_access.points_to@.value, - { - let tracked perm = &local.instance.thread_local_state_guards_segment( - local.thread_id, - self.segment_id@, - &local.thread_token, - ).points_to; - self.segment_ptr.borrow(Tracked(perm)) - } +impl Copy for PagePtr { - #[inline(always)] - pub fn get_main_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (segment_header_main: - &'a SegmentHeaderMain) - requires - self.wf(), - self.is_in(*local), - //local.wf_main(), - local.thread_token@.value.segments.dom().contains(self.segment_id@), - local.thread_token@.value.segments[self.segment_id@].shared_access.points_to@.pptr - == self.segment_ptr.id(), - local.thread_token@.value.segments.index( - self.segment_id@, - ).shared_access.points_to@.value.is_some(), - local.thread_token@.value.segments[self.segment_id@].is_enabled, - local.thread_token@.key == local.thread_id, - local.thread_token@.instance == local.instance, - local.thread_token@.value.segments.index( - self.segment_id@, - ).shared_access.points_to@.value.unwrap().main.id() - == local.segments[self.segment_id@].main@.pcell, - local.segments.dom().contains(self.segment_id@), - local.segments[self.segment_id@].main@.value.is_some(), - ensures - Some(*segment_header_main) == local.segments.index(self.segment_id@).main@.value, - { - let segment = self.get_ref(Tracked(local)); - segment.main.borrow(Tracked(&local.segments.tracked_borrow(self.segment_id@).main)) - } +} - #[inline(always)] - pub fn get_main2_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (segment_header_main2: - &'a SegmentHeaderMain2) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - Some(*segment_header_main2) == local.segments.index(self.segment_id@).main2@.value, - { - let segment = self.get_ref(Tracked(local)); - segment.main2.borrow(Tracked(&local.segments.tracked_borrow(self.segment_id@).main2)) - } +impl PagePtr { + #[verifier(inline)] + pub open spec fn wf(&self) -> bool { + is_page_ptr(self.page_ptr.id(), self.page_id@) && self.page_ptr.id() != 0 + } - #[inline(always)] - pub fn get_commit_mask<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (cm: &'a CommitMask) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - cm == local.segments[self.segment_id@].main@.value.unwrap().commit_mask, - { - 
&self.get_main_ref(Tracked(local)).commit_mask - } + #[verifier(inline)] + pub open spec fn is_in(&self, local: Local) -> bool { + local.pages.dom().contains(self.page_id@) + } - #[inline(always)] - pub fn get_decommit_mask<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (cm: &'a CommitMask) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - cm == local.segments[self.segment_id@].main@.value.unwrap().decommit_mask, - { - &self.get_main_ref(Tracked(local)).decommit_mask - } - - #[inline(always)] - pub fn get_decommit_expire(&self, Tracked(local): Tracked<&Local>) -> (i: i64) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - i == local.segments[self.segment_id@].main@.value.unwrap().decommit_expire, - { - self.get_main_ref(Tracked(local)).decommit_expire - } - - #[inline(always)] - pub fn get_allow_decommit(&self, Tracked(local): Tracked<&Local>) -> (b: bool) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - b == local.segments[self.segment_id@].main@.value.unwrap().allow_decommit, - { - self.get_main_ref(Tracked(local)).allow_decommit - } - - #[inline(always)] - pub fn get_used(&self, Tracked(local): Tracked<&Local>) -> (used: usize) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - used == local.segments[self.segment_id@].main2@.value.unwrap().used, - { - self.get_main2_ref(Tracked(local)).used - } - - #[inline(always)] - pub fn get_abandoned(&self, Tracked(local): Tracked<&Local>) -> (abandoned: usize) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - abandoned == local.segments[self.segment_id@].main2@.value.unwrap().abandoned, - { - self.get_main2_ref(Tracked(local)).abandoned - } - - #[inline(always)] - pub fn get_mem_is_pinned(&self, Tracked(local): Tracked<&Local>) -> (mem_is_pinned: bool) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - mem_is_pinned == local.segments[self.segment_id@].main@.value.unwrap().mem_is_pinned, - { - self.get_main_ref(Tracked(local)).mem_is_pinned - } - - #[inline(always)] - pub fn is_abandoned(&self, Tracked(local): Tracked<&Local>) -> (is_ab: bool) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - { - self.get_ref(Tracked(local)).thread_id.load() == 0 - } - - #[inline(always)] - pub fn get_segment_kind(&self, Tracked(local): Tracked<&Local>) -> (kind: SegmentKind) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - kind == local.segments[self.segment_id@].main2@.value.unwrap().kind, - { - self.get_main2_ref(Tracked(local)).kind - } - - #[inline(always)] - pub fn is_kind_huge(&self, Tracked(local): Tracked<&Local>) -> (b: bool) - requires - self.wf(), - self.is_in(*local), - local.wf_main(), - ensures - b == (local.segments[self.segment_id@].main2@.value.unwrap().kind == SegmentKind::Huge), - { - let kind = self.get_main2_ref(Tracked(local)).kind; - matches!(kind, SegmentKind::Huge) - } + pub open spec fn is_empty_global(&self, local: Local) -> bool { + self.page_ptr.id() == local.page_empty_global@.s.points_to@.pptr } - pub struct PagePtr { - pub page_ptr: PPtr, - pub page_id: Ghost, + #[verifier(inline)] + pub open spec fn is_used_and_primary(&self, local: Local) -> bool { + local.pages.dom().contains(self.page_id@) && local.thread_token@.value.pages.dom().contains( + self.page_id@, + ) && local.thread_token@.value.pages[self.page_id@].offset == 0 } - impl Clone for PagePtr { - #[inline(always)] - fn clone(&self) -> (s: Self) - ensures - *self == s, - 
{ - PagePtr { page_ptr: self.page_ptr, page_id: Ghost(self.page_id@) } - } + #[verifier(inline)] + pub open spec fn is_in_unused(&self, local: Local) -> bool { + local.unused_pages.dom().contains(self.page_id@) } - impl Copy for PagePtr { - + #[verifier(inline)] + pub open spec fn is_used(&self, local: Local) -> bool { + local.pages.dom().contains(self.page_id@) && local.thread_token@.value.pages.dom().contains( + self.page_id@, + ) } - impl PagePtr { - #[verifier(inline)] - pub open spec fn wf(&self) -> bool { - is_page_ptr(self.page_ptr.id(), self.page_id@) && self.page_ptr.id() != 0 - } - - #[verifier(inline)] - pub open spec fn is_in(&self, local: Local) -> bool { - local.pages.dom().contains(self.page_id@) - } + #[inline(always)] + pub fn null() -> (s: Self) + ensures + s.page_ptr.id() == 0, + { + PagePtr { page_ptr: PPtr::from_usize(0), page_id: Ghost(arbitrary()) } + } - pub open spec fn is_empty_global(&self, local: Local) -> bool { - self.page_ptr.id() == local.page_empty_global@.s.points_to@.pptr - } + #[inline(always)] + pub fn is_null(&self) -> (b: bool) + ensures + b == (self.page_ptr.id() == 0), + { + self.page_ptr.to_usize() == 0 + } - #[verifier(inline)] - pub open spec fn is_used_and_primary(&self, local: Local) -> bool { - local.pages.dom().contains(self.page_id@) && local.thread_token@.value.pages.dom().contains( + #[inline(always)] + pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page: &'a Page) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + !self.is_in_unused(*local) ==> Some(*page) == local.thread_token@.value.pages.index( self.page_id@, - ) && local.thread_token@.value.pages[self.page_id@].offset == 0 - } - - #[verifier(inline)] - pub open spec fn is_in_unused(&self, local: Local) -> bool { - local.unused_pages.dom().contains(self.page_id@) - } - - #[verifier(inline)] - pub open spec fn is_used(&self, local: Local) -> bool { - local.pages.dom().contains(self.page_id@) && local.thread_token@.value.pages.dom().contains( + ).shared_access.points_to@.value, + self.is_in_unused(*local) ==> Some(*page) + == local.unused_pages[self.page_id@].points_to@.value, + { + let tracked perm = if self.is_in_unused(*local) { + &local.unused_pages.tracked_borrow(self.page_id@).points_to + } else { + &local.instance.thread_local_state_guards_page( + local.thread_id, self.page_id@, - ) - } - - #[inline(always)] - pub fn null() -> (s: Self) - ensures - s.page_ptr.id() == 0, - { - PagePtr { page_ptr: PPtr::from_usize(0), page_id: Ghost(arbitrary()) } - } - - #[inline(always)] - pub fn is_null(&self) -> (b: bool) - ensures - b == (self.page_ptr.id() == 0), - { - self.page_ptr.to_usize() == 0 - } - - #[inline(always)] - pub fn get_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page: &'a Page) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - !self.is_in_unused(*local) ==> Some(*page) == local.thread_token@.value.pages.index( - self.page_id@, - ).shared_access.points_to@.value, - self.is_in_unused(*local) ==> Some(*page) - == local.unused_pages[self.page_id@].points_to@.value, - { - let tracked perm = if self.is_in_unused(*local) { - &local.unused_pages.tracked_borrow(self.page_id@).points_to - } else { - &local.instance.thread_local_state_guards_page( - local.thread_id, - self.page_id@, - &local.thread_token, - ).points_to - }; - self.page_ptr.borrow(Tracked(perm)) - } + &local.thread_token, + ).points_to + }; + self.page_ptr.borrow(Tracked(perm)) + } - #[inline(always)] - pub fn get_inner_ref<'a>(&self, 
Tracked(local): Tracked<&'a Local>) -> (page_inner: - &'a PageInner) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - Some(*page_inner) == local.pages.index(self.page_id@).inner@.value, - { - let page = self.get_ref(Tracked(local)); - page.inner.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).inner)) - } + #[inline(always)] + pub fn get_inner_ref<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_inner: + &'a PageInner) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + Some(*page_inner) == local.pages.index(self.page_id@).inner@.value, + { + let page = self.get_ref(Tracked(local)); + page.inner.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).inner)) + } - #[inline(always)] - pub fn get_inner_ref_maybe_empty<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_inner: - &'a PageInner) - requires - local.wf_main(), - !self.is_empty_global(*local) ==> (self.wf() && self.is_in(*local)), - ensures - !self.is_empty_global(*local) ==> (Some(*page_inner) == local.pages.index( - self.page_id@, - ).inner@.value), - self.is_empty_global(*local) ==> (Some(*page_inner) - == local.page_empty_global@.l.inner@.value), - { - let tracked perm = if self.is_empty_global(*local) { - &local.page_empty_global.borrow().s.points_to - } else if self.is_in_unused(*local) { - &local.unused_pages.tracked_borrow(self.page_id@).points_to - } else { - &local.instance.thread_local_state_guards_page( - local.thread_id, - self.page_id@, - &local.thread_token, - ).points_to - }; - let page = self.page_ptr.borrow(Tracked(perm)); - page.inner.borrow( - Tracked( - if self.is_empty_global(*local) { - &local.page_empty_global.borrow().l.inner - } else { - &local.pages.tracked_borrow(self.page_id@).inner - }, - ), - ) - } + #[inline(always)] + pub fn get_inner_ref_maybe_empty<'a>(&self, Tracked(local): Tracked<&'a Local>) -> (page_inner: + &'a PageInner) + requires + local.wf_main(), + !self.is_empty_global(*local) ==> (self.wf() && self.is_in(*local)), + ensures + !self.is_empty_global(*local) ==> (Some(*page_inner) == local.pages.index( + self.page_id@, + ).inner@.value), + self.is_empty_global(*local) ==> (Some(*page_inner) + == local.page_empty_global@.l.inner@.value), + { + let tracked perm = if self.is_empty_global(*local) { + &local.page_empty_global.borrow().s.points_to + } else if self.is_in_unused(*local) { + &local.unused_pages.tracked_borrow(self.page_id@).points_to + } else { + &local.instance.thread_local_state_guards_page( + local.thread_id, + self.page_id@, + &local.thread_token, + ).points_to + }; + let page = self.page_ptr.borrow(Tracked(perm)); + page.inner.borrow( + Tracked( + if self.is_empty_global(*local) { + &local.page_empty_global.borrow().l.inner + } else { + &local.pages.tracked_borrow(self.page_id@).inner + }, + ), + ) + } - #[inline(always)] - pub fn get_count<'a>(&self, Tracked(local): Tracked<&Local>) -> (count: u32) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - Some(count) == local.pages.index(self.page_id@).count@.value, - { - let page = self.get_ref(Tracked(local)); - *page.count.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).count)) - } + #[inline(always)] + pub fn get_count<'a>(&self, Tracked(local): Tracked<&Local>) -> (count: u32) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + Some(count) == local.pages.index(self.page_id@).count@.value, + { + let page = self.get_ref(Tracked(local)); + 
*page.count.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).count)) + } - #[inline(always)] - pub fn get_next<'a>(&self, Tracked(local): Tracked<&Local>) -> (next: PPtr) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - Some(next) == local.pages.index(self.page_id@).next@.value, - { - let page = self.get_ref(Tracked(local)); - *page.next.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).next)) - } + #[inline(always)] + pub fn get_next<'a>(&self, Tracked(local): Tracked<&Local>) -> (next: PPtr) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + Some(next) == local.pages.index(self.page_id@).next@.value, + { + let page = self.get_ref(Tracked(local)); + *page.next.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).next)) + } - #[inline(always)] - pub fn get_prev<'a>(&self, Tracked(local): Tracked<&Local>) -> (prev: PPtr) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - Some(prev) == local.pages.index(self.page_id@).prev@.value, - { - let page = self.get_ref(Tracked(local)); - *page.prev.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).prev)) - } + #[inline(always)] + pub fn get_prev<'a>(&self, Tracked(local): Tracked<&Local>) -> (prev: PPtr) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + Some(prev) == local.pages.index(self.page_id@).prev@.value, + { + let page = self.get_ref(Tracked(local)); + *page.prev.borrow(Tracked(&local.pages.tracked_borrow(self.page_id@).prev)) + } - #[inline(always)] - pub fn add_offset(&self, count: usize) -> (p: Self) - requires - self.wf(), - self.page_id@.idx + count <= SLICES_PER_SEGMENT, - ensures - p.wf(), - p.page_id@.segment_id == self.page_id@.segment_id, - p.page_id@.idx == self.page_id@.idx + count as int, - p.page_ptr.id() != 0, - { - proof { - const_facts(); - assert(SIZEOF_PAGE_HEADER == 80); - } - let p = self.page_ptr.to_usize(); - let q = p + count * SIZEOF_PAGE_HEADER; - PagePtr { - page_ptr: PPtr::from_usize(q), - page_id: Ghost( - PageId { - segment_id: self.page_id@.segment_id, - idx: (self.page_id@.idx + count) as nat, - }, - ), - } + #[inline(always)] + pub fn add_offset(&self, count: usize) -> (p: Self) + requires + self.wf(), + self.page_id@.idx + count <= SLICES_PER_SEGMENT, + ensures + p.wf(), + p.page_id@.segment_id == self.page_id@.segment_id, + p.page_id@.idx == self.page_id@.idx + count as int, + p.page_ptr.id() != 0, + { + proof { + const_facts(); + assert(SIZEOF_PAGE_HEADER == 80); + } + let p = self.page_ptr.to_usize(); + let q = p + count * SIZEOF_PAGE_HEADER; + PagePtr { + page_ptr: PPtr::from_usize(q), + page_id: Ghost( + PageId { + segment_id: self.page_id@.segment_id, + idx: (self.page_id@.idx + count) as nat, + }, + ), } + } - #[inline(always)] - pub fn sub_offset(&self, count: usize) -> (p: Self) - requires - self.wf(), - self.page_id@.idx >= count, - ensures - p.wf(), - p.page_id@.segment_id == self.page_id@.segment_id, - p.page_id@.idx == self.page_id@.idx - count as int, - p.page_ptr.id() != 0, - { - proof { - const_facts(); - assert(SIZEOF_PAGE_HEADER == 80); - crate::layout::segment_start_ge0(self.page_id@.segment_id); - } - let p = self.page_ptr.to_usize(); - let q = p - count * SIZEOF_PAGE_HEADER; - let ghost page_id = PageId { - segment_id: self.page_id@.segment_id, - idx: (self.page_id@.idx - count) as nat, - }; - proof { - crate::layout::is_page_ptr_nonzero(q as int, page_id); - } - PagePtr { page_ptr: PPtr::from_usize(q), page_id: Ghost(page_id) } + #[inline(always)] + pub 
fn sub_offset(&self, count: usize) -> (p: Self) + requires + self.wf(), + self.page_id@.idx >= count, + ensures + p.wf(), + p.page_id@.segment_id == self.page_id@.segment_id, + p.page_id@.idx == self.page_id@.idx - count as int, + p.page_ptr.id() != 0, + { + proof { + const_facts(); + assert(SIZEOF_PAGE_HEADER == 80); + crate::layout::segment_start_ge0(self.page_id@.segment_id); + } + let p = self.page_ptr.to_usize(); + let q = p - count * SIZEOF_PAGE_HEADER; + let ghost page_id = PageId { + segment_id: self.page_id@.segment_id, + idx: (self.page_id@.idx - count) as nat, + }; + proof { + crate::layout::is_page_ptr_nonzero(q as int, page_id); } + PagePtr { page_ptr: PPtr::from_usize(q), page_id: Ghost(page_id) } + } - #[inline(always)] - pub fn is_gt_0th_slice(&self, segment: SegmentPtr) -> (res: bool) - requires - self.wf(), - segment.wf(), - segment.segment_id@ == self.page_id@.segment_id, - ensures - res == (self.page_id@.idx > 0), - { - proof { - const_facts(); - } - self.page_ptr.to_usize() > segment.get_page_header_ptr(0).page_ptr.to_usize() + #[inline(always)] + pub fn is_gt_0th_slice(&self, segment: SegmentPtr) -> (res: bool) + requires + self.wf(), + segment.wf(), + segment.segment_id@ == self.page_id@.segment_id, + ensures + res == (self.page_id@.idx > 0), + { + proof { + const_facts(); } + self.page_ptr.to_usize() > segment.get_page_header_ptr(0).page_ptr.to_usize() + } - #[inline(always)] - pub fn get_index(&self) -> (idx: usize) - requires - self.wf(), - ensures - idx == self.page_id@.idx, - { - proof { - const_facts(); - } - let segment = SegmentPtr::ptr_segment(*self); - (self.page_ptr.to_usize() - segment.segment_ptr.to_usize() - SIZEOF_SEGMENT_HEADER) - / SIZEOF_PAGE_HEADER + #[inline(always)] + pub fn get_index(&self) -> (idx: usize) + requires + self.wf(), + ensures + idx == self.page_id@.idx, + { + proof { + const_facts(); } + let segment = SegmentPtr::ptr_segment(*self); + (self.page_ptr.to_usize() - segment.segment_ptr.to_usize() - SIZEOF_SEGMENT_HEADER) + / SIZEOF_PAGE_HEADER + } - pub fn slice_start(&self) -> (p: usize) - requires - self.wf(), - ensures - p == crate::layout::page_start(self.page_id@), - { - proof { - const_facts(); - assert(SLICE_SIZE as usize == 65536); - } - let segment = SegmentPtr::ptr_segment(*self); - let s = segment.segment_ptr.to_usize(); - s + ((self.page_ptr.to_usize() - s - SIZEOF_SEGMENT_HEADER) / SIZEOF_PAGE_HEADER) - * SLICE_SIZE as usize + pub fn slice_start(&self) -> (p: usize) + requires + self.wf(), + ensures + p == crate::layout::page_start(self.page_id@), + { + proof { + const_facts(); + assert(SLICE_SIZE as usize == 65536); } + let segment = SegmentPtr::ptr_segment(*self); + let s = segment.segment_ptr.to_usize(); + s + ((self.page_ptr.to_usize() - s - SIZEOF_SEGMENT_HEADER) / SIZEOF_PAGE_HEADER) + * SLICE_SIZE as usize + } - #[inline(always)] - pub fn add_offset_and_check(&self, count: usize, segment: SegmentPtr) -> (res: (Self, bool)) - requires - self.wf(), - self.page_id@.idx + count <= SLICES_PER_SEGMENT, - segment.wf(), - self.page_id@.segment_id == segment.segment_id@, - ensures - ({ - let (p, b) = res; - { - b ==> ({ - &&& p.wf() - &&& p.page_id@.segment_id == self.page_id@.segment_id - &&& p.page_id@.idx == self.page_id@.idx + count as int - &&& p.page_ptr.id() != 0 - }) && (b <==> self.page_id@.idx + count < SLICES_PER_SEGMENT) - } - }), - { - proof { - const_facts(); - assert(SIZEOF_PAGE_HEADER == 80); - } - let p = self.page_ptr.to_usize(); - let q = p + count * SIZEOF_PAGE_HEADER; - let page_ptr = PagePtr { - 
page_ptr: PPtr::from_usize(q), - page_id: Ghost( - PageId { - segment_id: self.page_id@.segment_id, - idx: (self.page_id@.idx + count) as nat, - }, - ), - }; - let last = segment.get_page_after_end(); - (page_ptr, page_ptr.page_ptr.to_usize() < last.to_usize()) - } + #[inline(always)] + pub fn add_offset_and_check(&self, count: usize, segment: SegmentPtr) -> (res: (Self, bool)) + requires + self.wf(), + self.page_id@.idx + count <= SLICES_PER_SEGMENT, + segment.wf(), + self.page_id@.segment_id == segment.segment_id@, + ensures + ({ + let (p, b) = res; + { + b ==> ({ + &&& p.wf() + &&& p.page_id@.segment_id == self.page_id@.segment_id + &&& p.page_id@.idx == self.page_id@.idx + count as int + &&& p.page_ptr.id() != 0 + }) && (b <==> self.page_id@.idx + count < SLICES_PER_SEGMENT) + } + }), + { + proof { + const_facts(); + assert(SIZEOF_PAGE_HEADER == 80); + } + let p = self.page_ptr.to_usize(); + let q = p + count * SIZEOF_PAGE_HEADER; + let page_ptr = PagePtr { + page_ptr: PPtr::from_usize(q), + page_id: Ghost( + PageId { + segment_id: self.page_id@.segment_id, + idx: (self.page_id@.idx + count) as nat, + }, + ), + }; + let last = segment.get_page_after_end(); + (page_ptr, page_ptr.page_ptr.to_usize() < last.to_usize()) + } - #[inline(always)] - pub fn get_block_size(&self, Tracked(local): Tracked<&Local>) -> (bsize: u32) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - ensures - bsize == local.pages.index(self.page_id@).inner@.value.unwrap().xblock_size, - { - self.get_inner_ref(Tracked(local)).xblock_size - } + #[inline(always)] + pub fn get_block_size(&self, Tracked(local): Tracked<&Local>) -> (bsize: u32) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + ensures + bsize == local.pages.index(self.page_id@).inner@.value.unwrap().xblock_size, + { + self.get_inner_ref(Tracked(local)).xblock_size + } - #[inline(always)] - pub fn get_heap(&self, Tracked(local): Tracked<&Local>) -> (heap: HeapPtr) - requires - local.wf_main(), - self.wf(), - self.is_in(*local), - self.is_used_and_primary(*local), - ensures - heap.wf(), - heap.is_in(*local), - { - let page_ref = self.get_ref(Tracked(&*local)); - let h = - my_atomic_with_ghost!( + #[inline(always)] + pub fn get_heap(&self, Tracked(local): Tracked<&Local>) -> (heap: HeapPtr) + requires + local.wf_main(), + self.wf(), + self.is_in(*local), + self.is_used_and_primary(*local), + ensures + heap.wf(), + heap.is_in(*local), + { + let page_ref = self.get_ref(Tracked(&*local)); + let h = + my_atomic_with_ghost!( &page_ref.xheap.atomic => load(); ghost g => { page_ref.xheap.emp_inst.borrow().agree(page_ref.xheap.emp.borrow(), &g.0); @@ -4102,12 +4083,12 @@ mod types { heap_of_page); } ); - HeapPtr { heap_ptr: PPtr::from_usize(h), heap_id: Ghost(local.heap_id) } - } + HeapPtr { heap_ptr: PPtr::from_usize(h), heap_id: Ghost(local.heap_id) } } +} - // Use macro as a work-arounds for not supporting functions that return &mut - #[macro_export] +// Use macro as a work-arounds for not supporting functions that return &mut +#[macro_export] macro_rules! tld_get_mut { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4115,7 +4096,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! tld_get_mut_internal { ($ptr:expr, $local:ident, $tld:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4130,10 +4111,10 @@ mod types { } } - pub use tld_get_mut; - pub use tld_get_mut_internal; +pub use tld_get_mut; +pub use tld_get_mut_internal; - #[macro_export] +#[macro_export] macro_rules! 
page_get_mut_inner { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4141,7 +4122,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! page_get_mut_inner_internal { ($ptr:expr, $local:ident, $page_inner:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4167,10 +4148,10 @@ mod types { } } - pub use page_get_mut_inner; - pub use page_get_mut_inner_internal; +pub use page_get_mut_inner; +pub use page_get_mut_inner_internal; - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_prev { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4178,7 +4159,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_prev_internal { ($ptr:expr, $local:ident, $page_prev:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4204,10 +4185,10 @@ mod types { } } - pub use unused_page_get_mut_prev; - pub use unused_page_get_mut_prev_internal; +pub use unused_page_get_mut_prev; +pub use unused_page_get_mut_prev_internal; - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_inner { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4215,7 +4196,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_inner_internal { ($ptr:expr, $local:ident, $page_inner:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4240,10 +4221,10 @@ mod types { } } - pub use unused_page_get_mut_inner; - pub use unused_page_get_mut_inner_internal; +pub use unused_page_get_mut_inner; +pub use unused_page_get_mut_inner_internal; - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_next { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4251,7 +4232,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_next_internal { ($ptr:expr, $local:ident, $page_next:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4276,10 +4257,10 @@ mod types { } } - pub use unused_page_get_mut_next; - pub use unused_page_get_mut_next_internal; +pub use unused_page_get_mut_next; +pub use unused_page_get_mut_next_internal; - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_count { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4287,7 +4268,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_count_internal { ($ptr:expr, $local:ident, $page_count:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4312,10 +4293,10 @@ mod types { } } - pub use unused_page_get_mut_count; - pub use unused_page_get_mut_count_internal; +pub use unused_page_get_mut_count; +pub use unused_page_get_mut_count_internal; - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4323,7 +4304,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! unused_page_get_mut_internal { ($ptr:expr, $local:ident, $page:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4344,10 +4325,10 @@ mod types { } } - pub use unused_page_get_mut; - pub use unused_page_get_mut_internal; +pub use unused_page_get_mut; +pub use unused_page_get_mut_internal; - #[macro_export] +#[macro_export] macro_rules! used_page_get_mut_prev { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4355,7 +4336,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! 
used_page_get_mut_prev_internal { ($ptr:expr, $local:ident, $page_prev:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4382,10 +4363,10 @@ mod types { } } - pub use used_page_get_mut_prev; - pub use used_page_get_mut_prev_internal; +pub use used_page_get_mut_prev; +pub use used_page_get_mut_prev_internal; - #[macro_export] +#[macro_export] macro_rules! heap_get_pages { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4393,7 +4374,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! heap_get_pages_internal { ($ptr:expr, $local:ident, $pages:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4411,10 +4392,10 @@ mod types { } } - pub use heap_get_pages; - pub use heap_get_pages_internal; +pub use heap_get_pages; +pub use heap_get_pages_internal; - #[macro_export] +#[macro_export] macro_rules! heap_get_pages_free_direct { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4422,7 +4403,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! heap_get_pages_free_direct_internal { ($ptr:expr, $local:ident, $pages_free_direct:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4440,10 +4421,10 @@ mod types { } } - pub use heap_get_pages_free_direct; - pub use heap_get_pages_free_direct_internal; +pub use heap_get_pages_free_direct; +pub use heap_get_pages_free_direct_internal; - #[macro_export] +#[macro_export] macro_rules! used_page_get_mut_next { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4451,7 +4432,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! used_page_get_mut_next_internal { ($ptr:expr, $local:ident, $page_next:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4478,36 +4459,36 @@ mod types { } } - pub use used_page_get_mut_next; - pub use used_page_get_mut_next_internal; +pub use used_page_get_mut_next; +pub use used_page_get_mut_next_internal; - #[verus::trusted] - #[verifier::external_body] - pub fn print_hex(s: StrSlice<'static>, u: usize) { - println!("{:} {:x}", s.into_rust_str(), u); - } +#[verus::trusted] +#[verifier::external_body] +pub fn print_hex(s: StrSlice<'static>, u: usize) { + println!("{:} {:x}", s.into_rust_str(), u); +} - #[verus::trusted] - #[cfg(feature = "override_system_allocator")] - #[verifier::external_body] - pub fn todo() - ensures - false, - { - std::process::abort(); - } +#[verus::trusted] +#[cfg(feature = "override_system_allocator")] +#[verifier::external_body] +pub fn todo() + ensures + false, +{ + std::process::abort(); +} - #[verus::trusted] - #[cfg(not(feature = "override_system_allocator"))] - #[verifier::external_body] - pub fn todo() - ensures - false, - { - panic!("todo"); - } +#[verus::trusted] +#[cfg(not(feature = "override_system_allocator"))] +#[verifier::external_body] +pub fn todo() + ensures + false, +{ + panic!("todo"); +} - #[macro_export] +#[macro_export] macro_rules! segment_get_mut_main { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4515,7 +4496,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! segment_get_mut_main_internal { ($ptr:expr, $local:ident, $segment_main:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4541,10 +4522,10 @@ mod types { } } - pub use segment_get_mut_main; - pub use segment_get_mut_main_internal; +pub use segment_get_mut_main; +pub use segment_get_mut_main_internal; - #[macro_export] +#[macro_export] macro_rules! 
segment_get_mut_main2 { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4552,7 +4533,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! segment_get_mut_main2_internal { ($ptr:expr, $local:ident, $segment_main2:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4578,10 +4559,10 @@ mod types { } } - pub use segment_get_mut_main2; - pub use segment_get_mut_main2_internal; +pub use segment_get_mut_main2; +pub use segment_get_mut_main2_internal; - #[macro_export] +#[macro_export] macro_rules! segment_get_mut_local { [$($tail:tt)*] => { ::builtin_macros::verus_exec_macro_exprs!( @@ -4589,7 +4570,7 @@ mod types { }; } - #[macro_export] +#[macro_export] macro_rules! segment_get_mut_local_internal { ($ptr:expr, $local:ident, $segment_local:ident => $body:expr) => { ::builtin_macros::verus_exec_expr!{ { @@ -4611,10 +4592,10 @@ mod types { } } - pub use segment_get_mut_local; - pub use segment_get_mut_local_internal; +pub use segment_get_mut_local; +pub use segment_get_mut_local_internal; - } // verus! +} // verus! } mod flags { @@ -4630,291 +4611,291 @@ mod flags { verus! { - pub closed spec fn flags0_is_reset(u: u8) -> bool { - u & 1 != 0 - } +pub closed spec fn flags0_is_reset(u: u8) -> bool { + u & 1 != 0 +} - pub closed spec fn flags0_is_committed(u: u8) -> bool { - u & 2 != 0 - } +pub closed spec fn flags0_is_committed(u: u8) -> bool { + u & 2 != 0 +} - pub closed spec fn flags0_is_zero_init(u: u8) -> bool { - u & 4 != 0 - } +pub closed spec fn flags0_is_zero_init(u: u8) -> bool { + u & 4 != 0 +} - pub closed spec fn flags1_in_full(u: u8) -> bool { - u & 1 != 0 - } +pub closed spec fn flags1_in_full(u: u8) -> bool { + u & 1 != 0 +} - pub closed spec fn flags1_has_aligned(u: u8) -> bool { - u & 2 != 0 - } +pub closed spec fn flags1_has_aligned(u: u8) -> bool { + u & 2 != 0 +} - pub closed spec fn flags2_is_zero(u: u8) -> bool { - u & 1 != 0 - } +pub closed spec fn flags2_is_zero(u: u8) -> bool { + u & 1 != 0 +} - pub closed spec fn flags2_retire_expire(u: u8) -> int { - (u >> 1u8) as int - } +pub closed spec fn flags2_retire_expire(u: u8) -> int { + (u >> 1u8) as int +} - impl PageInner { - pub open spec fn is_reset(&self) -> bool { - flags0_is_reset(self.flags0) - } +impl PageInner { + pub open spec fn is_reset(&self) -> bool { + flags0_is_reset(self.flags0) + } - pub open spec fn is_committed(&self) -> bool { - flags0_is_committed(self.flags0) - } + pub open spec fn is_committed(&self) -> bool { + flags0_is_committed(self.flags0) + } - pub open spec fn is_zero_init(&self) -> bool { - flags0_is_zero_init(self.flags0) - } + pub open spec fn is_zero_init(&self) -> bool { + flags0_is_zero_init(self.flags0) + } - pub open spec fn in_full(&self) -> bool { - flags1_in_full(self.flags1) - } + pub open spec fn in_full(&self) -> bool { + flags1_in_full(self.flags1) + } - pub open spec fn has_aligned(&self) -> bool { - flags1_has_aligned(self.flags1) - } + pub open spec fn has_aligned(&self) -> bool { + flags1_has_aligned(self.flags1) + } - pub open spec fn is_zero(&self) -> bool { - flags2_is_zero(self.flags2) - } + pub open spec fn is_zero(&self) -> bool { + flags2_is_zero(self.flags2) + } - pub open spec fn retire_expire(&self) -> int { - flags2_retire_expire(self.flags2) - } + pub open spec fn retire_expire(&self) -> int { + flags2_retire_expire(self.flags2) + } - // getters - #[inline(always)] - pub fn get_is_reset(&self) -> (b: bool) - ensures - b == self.is_reset(), - { - (self.flags0 & 1) != 0 - } + // getters + #[inline(always)] + pub fn 
get_is_reset(&self) -> (b: bool) + ensures + b == self.is_reset(), + { + (self.flags0 & 1) != 0 + } - #[inline(always)] - pub fn get_is_committed(&self) -> (b: bool) - ensures - b == self.is_committed(), - { - (self.flags0 & 2) != 0 - } + #[inline(always)] + pub fn get_is_committed(&self) -> (b: bool) + ensures + b == self.is_committed(), + { + (self.flags0 & 2) != 0 + } - #[inline(always)] - pub fn get_is_zero_init(&self) -> (b: bool) - ensures - b == self.is_zero_init(), - { - (self.flags0 & 4) != 0 - } + #[inline(always)] + pub fn get_is_zero_init(&self) -> (b: bool) + ensures + b == self.is_zero_init(), + { + (self.flags0 & 4) != 0 + } - #[inline(always)] - pub fn get_in_full(&self) -> (b: bool) - ensures - b == self.in_full(), - { - (self.flags1 & 1) != 0 - } + #[inline(always)] + pub fn get_in_full(&self) -> (b: bool) + ensures + b == self.in_full(), + { + (self.flags1 & 1) != 0 + } - #[inline(always)] - pub fn get_has_aligned(&self) -> (b: bool) - ensures - b == self.has_aligned(), - { - (self.flags1 & 2) != 0 - } + #[inline(always)] + pub fn get_has_aligned(&self) -> (b: bool) + ensures + b == self.has_aligned(), + { + (self.flags1 & 2) != 0 + } - #[inline(always)] - pub fn get_is_zero(&self) -> (b: bool) - ensures - b == self.is_zero(), - { - (self.flags2 & 1) != 0 - } + #[inline(always)] + pub fn get_is_zero(&self) -> (b: bool) + ensures + b == self.is_zero(), + { + (self.flags2 & 1) != 0 + } - #[inline(always)] - pub fn get_retire_expire(&self) -> (u: u8) - ensures - u == self.retire_expire(), - u <= 127, - { - let x = self.flags2 >> 1u8; - proof { - assert(x == (self.flags2 >> 1u8)); - let y = self.flags2; - assert((y >> 1u8) <= 127) by (bit_vector); - } - x + #[inline(always)] + pub fn get_retire_expire(&self) -> (u: u8) + ensures + u == self.retire_expire(), + u <= 127, + { + let x = self.flags2 >> 1u8; + proof { + assert(x == (self.flags2 >> 1u8)); + let y = self.flags2; + assert((y >> 1u8) <= 127) by (bit_vector); } + x + } - #[inline(always)] - pub fn not_full_nor_aligned(&self) -> (b: bool) - ensures - b ==> (!self.in_full() && !self.has_aligned()), - { - proof { - let x = self.flags1; - assert(x == 0 ==> (x & 1u8 == 0u8) && (x & 2u8 == 0u8)) by (bit_vector); - } - self.flags1 == 0 + #[inline(always)] + pub fn not_full_nor_aligned(&self) -> (b: bool) + ensures + b ==> (!self.in_full() && !self.has_aligned()), + { + proof { + let x = self.flags1; + assert(x == 0 ==> (x & 1u8 == 0u8) && (x & 2u8 == 0u8)) by (bit_vector); } + self.flags1 == 0 + } - // setters - #[inline(always)] - pub fn set_retire_expire(&mut self, u: u8) - requires - u <= 127, - ensures - *self == (PageInner { flags2: self.flags2, ..*old(self) }), - self.is_zero() == old(self).is_zero(), - self.retire_expire() == u, - { - proof { - let x = self.flags2; - assert(((x & 1) | (u << 1)) & 1 == (x & 1)) by (bit_vector); - assert(((x & 1) | (u << 1)) >> 1 == u) by (bit_vector) - requires - u <= 127, - ; - } - self.flags2 = (self.flags2 & 1) | (u << 1u8); + // setters + #[inline(always)] + pub fn set_retire_expire(&mut self, u: u8) + requires + u <= 127, + ensures + *self == (PageInner { flags2: self.flags2, ..*old(self) }), + self.is_zero() == old(self).is_zero(), + self.retire_expire() == u, + { + proof { + let x = self.flags2; + assert(((x & 1) | (u << 1)) & 1 == (x & 1)) by (bit_vector); + assert(((x & 1) | (u << 1)) >> 1 == u) by (bit_vector) + requires + u <= 127, + ; } + self.flags2 = (self.flags2 & 1) | (u << 1u8); + } - #[inline(always)] - pub fn set_is_reset(&mut self, b: bool) - ensures - *self == 
(PageInner { flags0: self.flags0, ..*old(self) }), - self.is_reset() == b, - self.is_committed() == old(self).is_committed(), - self.is_zero_init() == old(self).is_zero_init(), - { - proof { - let y = (if b { - 1 - } else { - 0 - }); - let x = self.flags0; - assert(y == 1 || y == 0 ==> ((x & !1) | y) & 2 == x & 2) by (bit_vector); - assert(y == 1 || y == 0 ==> ((x & !1) | y) & 4 == x & 4) by (bit_vector); - assert(y == 1 || y == 0 ==> ((x & !1) | y) & 1 == y) by (bit_vector); - } - self.flags0 = (self.flags0 & !1) | (if b { + #[inline(always)] + pub fn set_is_reset(&mut self, b: bool) + ensures + *self == (PageInner { flags0: self.flags0, ..*old(self) }), + self.is_reset() == b, + self.is_committed() == old(self).is_committed(), + self.is_zero_init() == old(self).is_zero_init(), + { + proof { + let y = (if b { 1 } else { 0 - }) + }); + let x = self.flags0; + assert(y == 1 || y == 0 ==> ((x & !1) | y) & 2 == x & 2) by (bit_vector); + assert(y == 1 || y == 0 ==> ((x & !1) | y) & 4 == x & 4) by (bit_vector); + assert(y == 1 || y == 0 ==> ((x & !1) | y) & 1 == y) by (bit_vector); } + self.flags0 = (self.flags0 & !1) | (if b { + 1 + } else { + 0 + }) + } - #[inline(always)] - pub fn set_is_committed(&mut self, b: bool) - ensures - *self == (PageInner { flags0: self.flags0, ..*old(self) }), - self.is_reset() == old(self).is_reset(), - self.is_committed() == b, - self.is_zero_init() == old(self).is_zero_init(), - { - proof { - let y: u8 = (if b { - 1 - } else { - 0 - }); - let x = self.flags0; - assert(y == 1 || y == 0 ==> ((x & !2) | (y << 1)) & 1 == x & 1) by (bit_vector); - assert(y == 1 || y == 0 ==> ((x & !2) | (y << 1)) & 4 == x & 4) by (bit_vector); - assert(y == 1 || y == 0 ==> (((x & !2) | (y << 1)) & 2 == 0 <==> y == 0)) - by (bit_vector); - } - self.flags0 = (self.flags0 & !2) | ((if b { + #[inline(always)] + pub fn set_is_committed(&mut self, b: bool) + ensures + *self == (PageInner { flags0: self.flags0, ..*old(self) }), + self.is_reset() == old(self).is_reset(), + self.is_committed() == b, + self.is_zero_init() == old(self).is_zero_init(), + { + proof { + let y: u8 = (if b { 1 } else { 0 - }) << 1u8) + }); + let x = self.flags0; + assert(y == 1 || y == 0 ==> ((x & !2) | (y << 1)) & 1 == x & 1) by (bit_vector); + assert(y == 1 || y == 0 ==> ((x & !2) | (y << 1)) & 4 == x & 4) by (bit_vector); + assert(y == 1 || y == 0 ==> (((x & !2) | (y << 1)) & 2 == 0 <==> y == 0)) + by (bit_vector); } + self.flags0 = (self.flags0 & !2) | ((if b { + 1 + } else { + 0 + }) << 1u8) + } - #[inline(always)] - pub fn set_is_zero_init(&mut self, b: bool) - ensures - *self == (PageInner { flags0: self.flags0, ..*old(self) }), - self.is_reset() == old(self).is_reset(), - self.is_committed() == old(self).is_committed(), - self.is_zero_init() == b, - { - proof { - let y: u8 = (if b { - 1 - } else { - 0 - }); - let x = self.flags0; - assert(y == 1 || y == 0 ==> ((x & !4) | (y << 2)) & 1 == x & 1) by (bit_vector); - assert(y == 1 || y == 0 ==> ((x & !4) | (y << 2)) & 2 == x & 2) by (bit_vector); - assert(y == 1 || y == 0 ==> (((x & !4) | (y << 2)) & 4 == 0 <==> y == 0)) - by (bit_vector); - } - self.flags0 = (self.flags0 & !4) | ((if b { + #[inline(always)] + pub fn set_is_zero_init(&mut self, b: bool) + ensures + *self == (PageInner { flags0: self.flags0, ..*old(self) }), + self.is_reset() == old(self).is_reset(), + self.is_committed() == old(self).is_committed(), + self.is_zero_init() == b, + { + proof { + let y: u8 = (if b { 1 } else { 0 - }) << 2u8) + }); + let x = self.flags0; + assert(y == 1 || y == 0 ==> 
((x & !4) | (y << 2)) & 1 == x & 1) by (bit_vector); + assert(y == 1 || y == 0 ==> ((x & !4) | (y << 2)) & 2 == x & 2) by (bit_vector); + assert(y == 1 || y == 0 ==> (((x & !4) | (y << 2)) & 4 == 0 <==> y == 0)) + by (bit_vector); } + self.flags0 = (self.flags0 & !4) | ((if b { + 1 + } else { + 0 + }) << 2u8) + } - #[inline(always)] - pub fn set_in_full(&mut self, b: bool) - ensures - *self == (PageInner { flags1: self.flags1, ..*old(self) }), - self.has_aligned() == old(self).has_aligned(), - self.in_full() == b, - { - proof { - let y = (if b { - 1 - } else { - 0 - }); - let x = self.flags1; - assert(y == 1 || y == 0 ==> ((x & !1) | y) & 2 == x & 2) by (bit_vector); - assert(y == 1 || y == 0 ==> ((x & !1) | y) & 1 == y) by (bit_vector); - } - self.flags1 = (self.flags1 & !1) | (if b { + #[inline(always)] + pub fn set_in_full(&mut self, b: bool) + ensures + *self == (PageInner { flags1: self.flags1, ..*old(self) }), + self.has_aligned() == old(self).has_aligned(), + self.in_full() == b, + { + proof { + let y = (if b { 1 } else { 0 - }) + }); + let x = self.flags1; + assert(y == 1 || y == 0 ==> ((x & !1) | y) & 2 == x & 2) by (bit_vector); + assert(y == 1 || y == 0 ==> ((x & !1) | y) & 1 == y) by (bit_vector); } + self.flags1 = (self.flags1 & !1) | (if b { + 1 + } else { + 0 + }) + } - #[inline(always)] - pub fn set_has_aligned(&mut self, b: bool) - ensures - *self == (PageInner { flags1: self.flags1, ..*old(self) }), - self.has_aligned() == b, - self.in_full() == old(self).in_full(), - { - proof { - let y: u8 = (if b { - 1 - } else { - 0 - }); - let x = self.flags1; - assert(y == 1 || y == 0 ==> ((x & !2) | (y << 1)) & 1 == x & 1) by (bit_vector); - assert(y == 1 || y == 0 ==> (((x & !2) | (y << 1)) & 2 == 0 <==> y == 0)) - by (bit_vector); - } - self.flags1 = (self.flags1 & !2) | ((if b { + #[inline(always)] + pub fn set_has_aligned(&mut self, b: bool) + ensures + *self == (PageInner { flags1: self.flags1, ..*old(self) }), + self.has_aligned() == b, + self.in_full() == old(self).in_full(), + { + proof { + let y: u8 = (if b { 1 } else { 0 - }) << 1u8); + }); + let x = self.flags1; + assert(y == 1 || y == 0 ==> ((x & !2) | (y << 1)) & 1 == x & 1) by (bit_vector); + assert(y == 1 || y == 0 ==> (((x & !2) | (y << 1)) & 2 == 0 <==> y == 0)) + by (bit_vector); } + self.flags1 = (self.flags1 & !2) | ((if b { + 1 + } else { + 0 + }) << 1u8); } +} - } // verus! +} // verus! } mod layout { @@ -4935,148 +4916,150 @@ mod layout { verus! 
{ - pub open spec fn is_page_ptr(ptr: int, page_id: PageId) -> bool { - ptr == page_header_start(page_id) && 0 <= page_id.idx <= SLICES_PER_SEGMENT && segment_start( - page_id.segment_id, - ) + SEGMENT_SIZE < usize::MAX - } - - pub open spec fn is_segment_ptr(ptr: int, segment_id: SegmentId) -> bool { - ptr == segment_start(segment_id) && ptr + SEGMENT_SIZE < usize::MAX - } - - pub open spec fn is_heap_ptr(ptr: int, heap_id: HeapId) -> bool { - heap_id.id == ptr - } +pub open spec fn is_page_ptr(ptr: int, page_id: PageId) -> bool { + ptr == page_header_start(page_id) && 0 <= page_id.idx <= SLICES_PER_SEGMENT && segment_start( + page_id.segment_id, + ) + SEGMENT_SIZE < usize::MAX +} - pub open spec fn is_tld_ptr(ptr: int, tld_id: TldId) -> bool { - tld_id.id == ptr - } +pub open spec fn is_segment_ptr(ptr: int, segment_id: SegmentId) -> bool { + ptr == segment_start(segment_id) && ptr + SEGMENT_SIZE < usize::MAX +} - pub closed spec fn segment_start(segment_id: SegmentId) -> int { - segment_id.id * (SEGMENT_SIZE as int) - } +pub open spec fn is_heap_ptr(ptr: int, heap_id: HeapId) -> bool { + heap_id.id == ptr +} - pub open spec fn page_header_start(page_id: PageId) -> int { - segment_start(page_id.segment_id) + SIZEOF_SEGMENT_HEADER + page_id.idx * SIZEOF_PAGE_HEADER - } +pub open spec fn is_tld_ptr(ptr: int, tld_id: TldId) -> bool { + tld_id.id == ptr +} - pub open spec fn page_start(page_id: PageId) -> int { - segment_start(page_id.segment_id) + SLICE_SIZE * page_id.idx - } +pub closed spec fn segment_start(segment_id: SegmentId) -> int { + segment_id.id * (SEGMENT_SIZE as int) +} - pub closed spec fn start_offset(block_size: int) -> int { - // Based on _mi_segment_page_start_from_slice - if block_size >= INTPTR_SIZE as int && block_size <= 1024 { - 3 * MAX_ALIGN_GUARANTEE - } else { - 0 - } - } +pub open spec fn page_header_start(page_id: PageId) -> int { + segment_start(page_id.segment_id) + SIZEOF_SEGMENT_HEADER + page_id.idx * SIZEOF_PAGE_HEADER +} - pub open spec fn block_start_at(page_id: PageId, block_size: int, block_idx: int) -> int { - page_start(page_id) + start_offset(block_size) + block_idx * block_size - } +pub open spec fn page_start(page_id: PageId) -> int { + segment_start(page_id.segment_id) + SLICE_SIZE * page_id.idx +} - pub closed spec fn block_start(block_id: BlockId) -> int { - block_start_at(block_id.page_id, block_id.block_size as int, block_id.idx as int) +pub closed spec fn start_offset(block_size: int) -> int { + // Based on _mi_segment_page_start_from_slice + if block_size >= INTPTR_SIZE as int && block_size <= 1024 { + 3 * MAX_ALIGN_GUARANTEE + } else { + 0 } +} - #[verifier::opaque] - pub open spec fn is_block_ptr(ptr: int, block_id: BlockId) -> bool { - // ptr should be in the range (segment start, segment end] - // Yes, that's open at the start and closed at the end - // - segment start is invalid since that's where the SegmentHeader is - // - segment end is valid because there might be a huge block there - &&& segment_start(block_id.page_id.segment_id) < ptr <= segment_start( - block_id.page_id.segment_id, - ) + (SEGMENT_SIZE as int) - < usize::MAX// Has valid slice_idx (again this is <= to account for the huge slice) - - &&& 0 <= block_id.slice_idx <= SLICES_PER_SEGMENT// It also has to be in the right slice +pub open spec fn block_start_at(page_id: PageId, block_size: int, block_idx: int) -> int { + page_start(page_id) + start_offset(block_size) + block_idx * block_size +} - &&& segment_start(block_id.page_id.segment_id) + (block_id.slice_idx * 
SLICE_SIZE) <= ptr
-            < segment_start(block_id.page_id.segment_id) + (block_id.slice_idx * SLICE_SIZE)
-            + SLICE_SIZE// the pptr should actually agree with the block_id
+pub closed spec fn block_start(block_id: BlockId) -> int {
+    block_start_at(block_id.page_id, block_id.block_size as int, block_id.idx as int)
+}
-        &&& ptr == block_start(block_id)
-        &&& 0 <= block_id.page_id.segment_id.id// The block size must be a multiple of the word size
+#[verifier::opaque]
+pub open spec fn is_block_ptr(ptr: int, block_id: BlockId) -> bool {
+    // ptr should be in the range (segment start, segment end]
+    // Yes, that's open at the start and closed at the end
+    // - segment start is invalid since that's where the SegmentHeader is
+    // - segment end is valid because there might be a huge block there
+    &&& segment_start(block_id.page_id.segment_id) < ptr <= segment_start(
+        block_id.page_id.segment_id,
+    ) + (SEGMENT_SIZE as int)
+        < usize::MAX // Has valid slice_idx (again this is <= to account for the huge slice)
+
+    &&& 0 <= block_id.slice_idx
+        <= SLICES_PER_SEGMENT // It also has to be in the right slice
+
+    &&& segment_start(block_id.page_id.segment_id) + (block_id.slice_idx * SLICE_SIZE) <= ptr
+        < segment_start(block_id.page_id.segment_id) + (block_id.slice_idx * SLICE_SIZE)
+        + SLICE_SIZE // the pptr should actually agree with the block_id
+
+    &&& ptr == block_start(block_id)
+    &&& 0
+        <= block_id.page_id.segment_id.id // The block size must be a multiple of the word size
+
+    &&& block_id.block_size >= size_of::<crate::linked_list::Node>()
+    &&& block_id.block_size % size_of::<crate::linked_list::Node>() == 0
+}
-        &&& block_id.block_size >= size_of::<crate::linked_list::Node>()
-        &&& block_id.block_size % size_of::<crate::linked_list::Node>() == 0
+pub open spec fn is_page_ptr_opt(pptr: PPtr<Page>, opt_page_id: Option<PageId>) -> bool {
+    match opt_page_id {
+        Some(page_id) => is_page_ptr(pptr.id(), page_id) && pptr.id() != 0,
+        None => pptr.id() == 0,
+    }
+}
-    pub open spec fn is_page_ptr_opt(pptr: PPtr<Page>, opt_page_id: Option<PageId>) -> bool {
-        match opt_page_id {
-            Some(page_id) => is_page_ptr(pptr.id(), page_id) && pptr.id() != 0,
-            None => pptr.id() == 0,
-        }
-    }
+pub proof fn block_size_ge_word()
+    ensures
+        forall|p, block_id|
+            is_block_ptr(p, block_id) ==> block_id.block_size >= size_of::<
+                crate::linked_list::Node,
+            >(),
+{
+    reveal(is_block_ptr);
+}
-    pub proof fn block_size_ge_word()
-        ensures
-            forall|p, block_id|
-                is_block_ptr(p, block_id) ==> block_id.block_size >= size_of::<
-                    crate::linked_list::Node,
-                >(),
-    {
+#[verifier::spinoff_prover]
+pub proof fn block_ptr_aligned_to_word()
+    ensures
+        forall|p, block_id|
+            is_block_ptr(p, block_id) ==> p % align_of::<crate::linked_list::Node>() as int == 0,
+{
+    assert forall|p, block_id| is_block_ptr(p, block_id) implies p % align_of::<
+        crate::linked_list::Node,
+    >() as int == 0 by {
+        const_facts();
+        reveal(is_block_ptr);
+        crate::linked_list::size_of_node();
+        let page_id = block_id.page_id;
+        assert(segment_start(page_id.segment_id) % 8 == 0);
+        assert(SLICE_SIZE % 8 == 0);
+        assert(page_start(page_id) % 8 == 0);
+        let block_size = block_id.block_size;
+        assert(start_offset(block_size as int) % 8 == 0);
+        assert(block_size % 8 == 0);
+        let block_idx = block_id.idx as int;
+        mod_mul(block_idx, block_size as int, 8);
+        assert((block_idx * block_size) % 8 == 0);
+        assert(block_start(block_id) % 8 == 0);
+        assert(p % 8 == 0);
+    }
+}
-    #[verifier::spinoff_prover]
-    pub proof fn block_ptr_aligned_to_word()
-        ensures
-            forall|p, block_id|
-                is_block_ptr(p, block_id) ==> p % align_of::<crate::linked_list::Node>() as int == 0,
-    {
-        assert forall|p, block_id| is_block_ptr(p, block_id) implies p % align_of::<
-
crate::linked_list::Node, - >() as int == 0 by { - const_facts(); - reveal(is_block_ptr); - crate::linked_list::size_of_node(); - let page_id = block_id.page_id; - assert(segment_start(page_id.segment_id) % 8 == 0); - assert(SLICE_SIZE % 8 == 0); - assert(page_start(page_id) % 8 == 0); - let block_size = block_id.block_size; - assert(start_offset(block_size as int) % 8 == 0); - assert(block_size % 8 == 0); - let block_idx = block_id.idx as int; - mod_mul(block_idx, block_size as int, 8); - assert((block_idx * block_size) % 8 == 0); - assert(block_start(block_id) % 8 == 0); - assert(p % 8 == 0); - } - } +pub proof fn block_start_at_diff(page_id: PageId, block_size: nat, block_idx1: nat, block_idx2: nat) + ensures + block_start_at(page_id, block_size as int, block_idx2 as int) == block_start_at( + page_id, + block_size as int, + block_idx1 as int, + ) + (block_idx2 - block_idx1) * block_size, +{ + assert(block_idx1 as int * block_size + (block_idx2 - block_idx1) * block_size + == block_idx2 as int * block_size) by (nonlinear_arith); + //assert(block_idx2 as int * block_size == block_idx2 * block_size); + //assert(block_idx1 as int * block_size == block_idx1 * block_size); + //assert(block_start_at(page_id, block_size as int, block_idx2 as int) + // == page_start(page_id) + start_offset(block_size as int) + block_idx2 * block_size); + //assert(block_start_at(page_id, block_size as int, block_idx1 as int) + // == page_start(page_id) + start_offset(block_size as int) + block_idx1 * block_size); +} - pub proof fn block_start_at_diff(page_id: PageId, block_size: nat, block_idx1: nat, block_idx2: nat) - ensures - block_start_at(page_id, block_size as int, block_idx2 as int) == block_start_at( - page_id, - block_size as int, - block_idx1 as int, - ) + (block_idx2 - block_idx1) * block_size, - { - assert(block_idx1 as int * block_size + (block_idx2 - block_idx1) * block_size - == block_idx2 as int * block_size) by (nonlinear_arith); - //assert(block_idx2 as int * block_size == block_idx2 * block_size); - //assert(block_idx1 as int * block_size == block_idx1 * block_size); - //assert(block_start_at(page_id, block_size as int, block_idx2 as int) - // == page_start(page_id) + start_offset(block_size as int) + block_idx2 * block_size); - //assert(block_start_at(page_id, block_size as int, block_idx1 as int) - // == page_start(page_id) + start_offset(block_size as int) + block_idx1 * block_size); - } - - // Bit lemmas - /*proof fn bitmask_is_mod(t: usize) +// Bit lemmas +/*proof fn bitmask_is_mod(t: usize) ensures (t & (((1usize << 26usize) - 1) as usize)) == (t % (1usize << 26usize)), { //assert((t & (sub(1usize << 26usize, 1) as usize)) == (t % (1usize << 26usize))) // by(bit_vector); }*/ - /*proof fn bitmask_is_rounded_down(t: usize) +/*proof fn bitmask_is_rounded_down(t: usize) ensures (t & !(((1usize << 26usize) - 1) as usize)) == t - (t % (1usize << 26usize)) { assert((t & !(sub((1usize << 26usize), 1) as usize)) == sub(t, (t % (1usize << 26usize)))) @@ -5085,7 +5068,7 @@ mod layout { assert(t >= (t % (1usize << 26usize))) by(bit_vector); }*/ - /*proof fn mod_removes_remainder(s: int, t: int, r: int) +/*proof fn mod_removes_remainder(s: int, t: int, r: int) requires 0 <= r < t, 0 <= s, @@ -5107,38 +5090,38 @@ mod layout { //assert((x / t) * t + x % t == x) by(nonlinear_arith); }*/ - // Executable calculations - pub fn calculate_segment_ptr_from_block(ptr: PPtr, Ghost(block_id): Ghost) -> (res: - PPtr) - requires - is_block_ptr(ptr.id(), block_id), - ensures - is_segment_ptr(res.id(), 
block_id.page_id.segment_id), - { - let block_p = ptr.to_usize(); - proof { - reveal(is_block_ptr); - const_facts(); - assert(block_p > 0); - //bitmask_is_rounded_down((block_p - 1) as usize); - //mod_removes_remainder(block_id.page_id.segment_id.id as int, SEGMENT_SIZE as int, - // block_p - 1 - segment_start(block_id.page_id.segment_id)); - //assert(SEGMENT_SHIFT == 26); - //assert(SEGMENT_SIZE >= 1); - let id = block_id.page_id.segment_id.id as usize; - assert(id == block_id.page_id.segment_id.id); - assert(id < 0x7fffffffff); - assert(sub(block_p, 1) & (!0x1ffffffusize) == mul(id, 0x2000000)) by (bit_vector) - requires - mul(id, 0x2000000) < block_p <= add(mul(id, 0x2000000), 0x2000000), - id < 0x7fffffffffusize, - ; - assert(mul(id, 0x2000000) == id * 0x2000000); - assert(add(mul(id, 0x2000000), 0x2000000) == id * 0x2000000 + 0x2000000); - } - // Based on _mi_ptr_segment - let segment_p = (block_p - 1) & (!((SEGMENT_SIZE - 1) as usize)); - /*proof { +// Executable calculations +pub fn calculate_segment_ptr_from_block(ptr: PPtr, Ghost(block_id): Ghost) -> (res: + PPtr) + requires + is_block_ptr(ptr.id(), block_id), + ensures + is_segment_ptr(res.id(), block_id.page_id.segment_id), +{ + let block_p = ptr.to_usize(); + proof { + reveal(is_block_ptr); + const_facts(); + assert(block_p > 0); + //bitmask_is_rounded_down((block_p - 1) as usize); + //mod_removes_remainder(block_id.page_id.segment_id.id as int, SEGMENT_SIZE as int, + // block_p - 1 - segment_start(block_id.page_id.segment_id)); + //assert(SEGMENT_SHIFT == 26); + //assert(SEGMENT_SIZE >= 1); + let id = block_id.page_id.segment_id.id as usize; + assert(id == block_id.page_id.segment_id.id); + assert(id < 0x7fffffffff); + assert(sub(block_p, 1) & (!0x1ffffffusize) == mul(id, 0x2000000)) by (bit_vector) + requires + mul(id, 0x2000000) < block_p <= add(mul(id, 0x2000000), 0x2000000), + id < 0x7fffffffffusize, + ; + assert(mul(id, 0x2000000) == id * 0x2000000); + assert(add(mul(id, 0x2000000), 0x2000000) == id * 0x2000000 + 0x2000000); + } + // Based on _mi_ptr_segment + let segment_p = (block_p - 1) & (!((SEGMENT_SIZE - 1) as usize)); + /*proof { let s = block_id.page_id.segment_id.id; let t = SEGMENT_SIZE as int; let r = block_p - 1 - segment_start(block_id.page_id.segment_id); @@ -5148,10 +5131,10 @@ mod layout { (block_p - 1) as int - ((block_p - 1) as int % SEGMENT_SIZE as int)); assert(segment_p as int == (s*t + r) - ((s*t + r) % t)); }*/ - PPtr::::from_usize(segment_p) - } + PPtr::::from_usize(segment_p) +} - /* +/* pub fn calculate_slice_idx_from_block(block_ptr: PPtr, segment_ptr: PPtr, Ghost(block_id): Ghost) -> (slice_idx: usize) requires is_block_ptr(block_ptr.id(), block_id), @@ -5167,76 +5150,76 @@ mod layout { } */ - pub fn calculate_slice_page_ptr_from_block( - block_ptr: PPtr, - segment_ptr: PPtr, - Ghost(block_id): Ghost, - ) -> (page_ptr: PPtr) - requires - is_block_ptr(block_ptr.id(), block_id), - is_segment_ptr(segment_ptr.id(), block_id.page_id.segment_id), - ensures - is_page_ptr(page_ptr.id(), block_id.page_id_for_slice()), - { - let b = block_ptr.to_usize(); - let s = segment_ptr.to_usize(); - proof { - reveal(is_block_ptr); - const_facts(); - assert(b - s <= SEGMENT_SIZE); - } - let q = (b - s) / SLICE_SIZE as usize; - proof { - assert((b - s) / SLICE_SIZE as int <= SLICES_PER_SEGMENT) by (nonlinear_arith) - requires - SLICES_PER_SEGMENT == SEGMENT_SIZE as int / SLICE_SIZE as int, - b - s <= SEGMENT_SIZE as int, - SLICE_SIZE > 0, - ; - } - let h = s + SIZEOF_SEGMENT_HEADER + q * SIZEOF_PAGE_HEADER; - 
PPtr::from_usize(h) +pub fn calculate_slice_page_ptr_from_block( + block_ptr: PPtr, + segment_ptr: PPtr, + Ghost(block_id): Ghost, +) -> (page_ptr: PPtr) + requires + is_block_ptr(block_ptr.id(), block_id), + is_segment_ptr(segment_ptr.id(), block_id.page_id.segment_id), + ensures + is_page_ptr(page_ptr.id(), block_id.page_id_for_slice()), +{ + let b = block_ptr.to_usize(); + let s = segment_ptr.to_usize(); + proof { + reveal(is_block_ptr); + const_facts(); + assert(b - s <= SEGMENT_SIZE); + } + let q = (b - s) / SLICE_SIZE as usize; + proof { + assert((b - s) / SLICE_SIZE as int <= SLICES_PER_SEGMENT) by (nonlinear_arith) + requires + SLICES_PER_SEGMENT == SEGMENT_SIZE as int / SLICE_SIZE as int, + b - s <= SEGMENT_SIZE as int, + SLICE_SIZE > 0, + ; } + let h = s + SIZEOF_SEGMENT_HEADER + q * SIZEOF_PAGE_HEADER; + PPtr::from_usize(h) +} - #[inline(always)] - pub fn calculate_page_ptr_subtract_offset( - page_ptr: PPtr, - offset: u32, - Ghost(page_id): Ghost, - Ghost(target_page_id): Ghost, - ) -> (result: PPtr) - requires - is_page_ptr(page_ptr.id(), page_id), - page_id.segment_id == target_page_id.segment_id, - offset == (page_id.idx - target_page_id.idx) * SIZEOF_PAGE_HEADER, - ensures - is_page_ptr(result.id(), target_page_id), - { - proof { - segment_start_ge0(page_id.segment_id); - } - let p = page_ptr.to_usize(); - let q = p - offset as usize; - PPtr::from_usize(q) +#[inline(always)] +pub fn calculate_page_ptr_subtract_offset( + page_ptr: PPtr, + offset: u32, + Ghost(page_id): Ghost, + Ghost(target_page_id): Ghost, +) -> (result: PPtr) + requires + is_page_ptr(page_ptr.id(), page_id), + page_id.segment_id == target_page_id.segment_id, + offset == (page_id.idx - target_page_id.idx) * SIZEOF_PAGE_HEADER, + ensures + is_page_ptr(result.id(), target_page_id), +{ + proof { + segment_start_ge0(page_id.segment_id); } + let p = page_ptr.to_usize(); + let q = p - offset as usize; + PPtr::from_usize(q) +} - pub fn calculate_page_ptr_add_offset( - page_ptr: PPtr, - offset: u32, - Ghost(page_id): Ghost, - ) -> (result: PPtr) - requires - is_page_ptr(page_ptr.id(), page_id), - offset <= 0x1_0000, - ensures - is_page_ptr(result.id(), PageId { idx: (page_id.idx + offset) as nat, ..page_id }), - { - todo(); - loop { - } +pub fn calculate_page_ptr_add_offset( + page_ptr: PPtr, + offset: u32, + Ghost(page_id): Ghost, +) -> (result: PPtr) + requires + is_page_ptr(page_ptr.id(), page_id), + offset <= 0x1_0000, + ensures + is_page_ptr(result.id(), PageId { idx: (page_id.idx + offset) as nat, ..page_id }), +{ + todo(); + loop { } +} - /* +/* pub fn calculate_segment_page_start( segment_ptr: SegmentPtr, page_ptr: PagePtr) @@ -5247,415 +5230,415 @@ mod layout { } */ - pub fn calculate_page_start(page_ptr: PagePtr, block_size: usize) -> (addr: usize) - requires - block_size > 0, - page_ptr.wf(), - ensures - addr == block_start_at(page_ptr.page_id@, block_size as int, 0), - { - let segment_ptr = SegmentPtr::ptr_segment(page_ptr); - segment_page_start_from_slice(segment_ptr, page_ptr, block_size) - } - - pub fn calculate_page_block_at( - page_start: usize, - block_size: usize, - idx: usize, - Ghost(page_id): Ghost, - ) -> (p: PPtr) - requires - page_start == block_start_at(page_id, block_size as int, 0), - block_start_at(page_id, block_size as int, 0) + idx as int * block_size as int - <= segment_start(page_id.segment_id) + SEGMENT_SIZE, - segment_start(page_id.segment_id) + SEGMENT_SIZE < usize::MAX, - ensures - p.id() == block_start_at(page_id, block_size as int, idx as int), - p.id() == page_start + idx as 
int * block_size as int, - { - proof { - const_facts(); - assert(block_size * idx >= 0) by (nonlinear_arith) - requires - block_size >= 0, - idx >= 0, - ; - assert(block_size * idx == idx * block_size) by (nonlinear_arith); - } - let p = page_start + block_size * idx; - return PPtr::from_usize(p); - } +pub fn calculate_page_start(page_ptr: PagePtr, block_size: usize) -> (addr: usize) + requires + block_size > 0, + page_ptr.wf(), + ensures + addr == block_start_at(page_ptr.page_id@, block_size as int, 0), +{ + let segment_ptr = SegmentPtr::ptr_segment(page_ptr); + segment_page_start_from_slice(segment_ptr, page_ptr, block_size) +} - pub proof fn mk_segment_id(p: int) -> (id: SegmentId) - requires - p >= 0, - p % SEGMENT_SIZE as int == 0, - ((p + SEGMENT_SIZE as int) < usize::MAX), - ensures - is_segment_ptr(p, id), - { +pub fn calculate_page_block_at( + page_start: usize, + block_size: usize, + idx: usize, + Ghost(page_id): Ghost, +) -> (p: PPtr) + requires + page_start == block_start_at(page_id, block_size as int, 0), + block_start_at(page_id, block_size as int, 0) + idx as int * block_size as int + <= segment_start(page_id.segment_id) + SEGMENT_SIZE, + segment_start(page_id.segment_id) + SEGMENT_SIZE < usize::MAX, + ensures + p.id() == block_start_at(page_id, block_size as int, idx as int), + p.id() == page_start + idx as int * block_size as int, +{ + proof { const_facts(); - SegmentId { id: p as nat / SEGMENT_SIZE as nat, uniq: 0 } + assert(block_size * idx >= 0) by (nonlinear_arith) + requires + block_size >= 0, + idx >= 0, + ; + assert(block_size * idx == idx * block_size) by (nonlinear_arith); } + let p = page_start + block_size * idx; + return PPtr::from_usize(p); +} - pub proof fn segment_id_divis(sp: SegmentPtr) - requires - sp.wf(), - ensures - sp.segment_ptr.id() % SEGMENT_SIZE as int == 0, - { - const_facts(); - } +pub proof fn mk_segment_id(p: int) -> (id: SegmentId) + requires + p >= 0, + p % SEGMENT_SIZE as int == 0, + ((p + SEGMENT_SIZE as int) < usize::MAX), + ensures + is_segment_ptr(p, id), +{ + const_facts(); + SegmentId { id: p as nat / SEGMENT_SIZE as nat, uniq: 0 } +} - pub fn segment_page_start_from_slice( - segment_ptr: SegmentPtr, - slice: PagePtr, - xblock_size: usize, - ) -> (res: usize) // start_offset - requires - segment_ptr.wf(), - slice.wf(), - slice.page_id@.segment_id == segment_ptr.segment_id@, - ensures - ({ - let start_offset = res; - { - &&& xblock_size == 0 ==> start_offset == segment_start(segment_ptr.segment_id@) - + slice.page_id@.idx * SLICE_SIZE - &&& xblock_size > 0 ==> start_offset == block_start_at( - slice.page_id@, - xblock_size as int, - 0, - ) - } - }), - { - proof { - const_facts(); - } - let idxx = slice.page_ptr.to_usize() - (segment_ptr.segment_ptr.to_usize() - + SIZEOF_SEGMENT_HEADER); - let idx = idxx / SIZEOF_PAGE_HEADER; - let start_offset = if xblock_size >= INTPTR_SIZE as usize && xblock_size <= 1024 { - 3 * MAX_ALIGN_GUARANTEE - } else { - 0 - }; - segment_ptr.segment_ptr.to_usize() + (idx * SLICE_SIZE as usize) + start_offset - } +pub proof fn segment_id_divis(sp: SegmentPtr) + requires + sp.wf(), + ensures + sp.segment_ptr.id() % SEGMENT_SIZE as int == 0, +{ + const_facts(); +} - proof fn bitand_with_mask_gives_rounding(x: usize, y: usize) - requires - y != 0, - y & sub(y, 1) == 0, - ensures - x & !sub(y, 1) == (x / y) * y, - decreases y, - { - if y == 1 { - assert(x & !sub(1, 1) == x) by (bit_vector); - assert(x & !sub(y, 1) == (x / y) * y); - } else { - assert((y >> 1) < y) by (bit_vector) - requires - y != 0usize, - ; - 
assert((y >> 1) != 0usize) by (bit_vector) - requires - y != 0usize, - y != 1usize, - ; - assert(y & sub(y, 1) == 0usize ==> (y >> 1) & sub(y >> 1, 1) == 0usize) by (bit_vector) - requires - y != 0usize, - y != 1usize, - ; - bitand_with_mask_gives_rounding(x >> 1, y >> 1); - assert(x & !sub(y, 1) == mul(2, (x >> 1) & !sub(y >> 1, 1)) && (x >> 1) & !sub(y >> 1, 1) - < 0x8000_0000_0000_0000usize) by (bit_vector) - requires - y != 0usize, - y != 1usize, - y & sub(y, 1) == 0usize, - ; - let y1 = y >> 1; - let x1 = x >> 1; - let b = x % 2; - assert(y >> 1 == y / 2) by (bit_vector); - assert(x >> 1 == x / 2) by (bit_vector); - assert(y == 2 * y1) by { - assert(y & sub(y, 1) == 0usize ==> y % 2usize == 0usize) by (bit_vector) - requires - y != 0usize, - y != 1usize, - ; - } - assert(x == 2 * x1 + b); - assert((2 * x1 + b) / (2 * y1) * (2 * y1) == 2 * (x1 / y1 * y1)) by { - let t = (2 * x1 + b) / (2 * y1); - assert(t * (2 * y1) == 2 * (t * y1)) by (nonlinear_arith); - two_mul_with_bit0(x1 as int, y1 as int); - two_mul_with_bit1(x1 as int, y1 as int); - assert((2 * x1 + b) / (2 * y1) == x1 / y1); // by(nonlinear_arith) - //requires b == 0 || b == 1; +pub fn segment_page_start_from_slice( + segment_ptr: SegmentPtr, + slice: PagePtr, + xblock_size: usize, +) -> (res: usize) // start_offset + requires + segment_ptr.wf(), + slice.wf(), + slice.page_id@.segment_id == segment_ptr.segment_id@, + ensures + ({ + let start_offset = res; + { + &&& xblock_size == 0 ==> start_offset == segment_start(segment_ptr.segment_id@) + + slice.page_id@.idx * SLICE_SIZE + &&& xblock_size > 0 ==> start_offset == block_start_at( + slice.page_id@, + xblock_size as int, + 0, + ) } - assert(x / y * y == 2 * (((x >> 1) / (y >> 1)) * (y >> 1))); - //assert(((x >> 1) / (y >> 1)) * (y >> 1) == ((x >> 1) & !sub(y >> 1, 1))); - //assert(x & !sub(y, 1) == 2 * ((x >> 1) & !sub(y >> 1, 1))); - //assert(x & !sub(y, 1) == (x / y) * y); - } + }), +{ + proof { + const_facts(); } + let idxx = slice.page_ptr.to_usize() - (segment_ptr.segment_ptr.to_usize() + + SIZEOF_SEGMENT_HEADER); + let idx = idxx / SIZEOF_PAGE_HEADER; + let start_offset = if xblock_size >= INTPTR_SIZE as usize && xblock_size <= 1024 { + 3 * MAX_ALIGN_GUARANTEE + } else { + 0 + }; + segment_ptr.segment_ptr.to_usize() + (idx * SLICE_SIZE as usize) + start_offset +} - #[verifier::spinoff_prover] - proof fn two_mul_with_bit0(x1: int, y1: int) - requires - y1 != 0, - ensures - (2 * x1) / (2 * y1) == x1 / y1, - { - assert((2 * x1) / (2 * y1) == ((2 * x1) / 2) / y1) by (nonlinear_arith) +proof fn bitand_with_mask_gives_rounding(x: usize, y: usize) + requires + y != 0, + y & sub(y, 1) == 0, + ensures + x & !sub(y, 1) == (x / y) * y, + decreases y, +{ + if y == 1 { + assert(x & !sub(1, 1) == x) by (bit_vector); + assert(x & !sub(y, 1) == (x / y) * y); + } else { + assert((y >> 1) < y) by (bit_vector) requires - y1 != 0, + y != 0usize, ; - assert((2 * x1) / 2 == x1); - } - - #[verifier::spinoff_prover] - proof fn two_mul_with_bit1(x1: int, y1: int) - requires - y1 != 0, - ensures - (2 * x1 + 1) / (2 * y1) == x1 / y1, - { - assert((2 * x1 + 1) / (2 * y1) == ((2 * x1 + 1) / 2) / y1) by (nonlinear_arith) + assert((y >> 1) != 0usize) by (bit_vector) requires - y1 != 0, + y != 0usize, + y != 1usize, ; - assert((2 * x1 + 1) / 2 == x1); - } - - #[verifier::spinoff_prover] - #[inline] - pub fn align_down(x: usize, y: usize) -> (res: usize) - requires - y != 0, - ensures - res == (x as int / y as int) * y, - res <= x < res + y, - res % y == 0, - (res / y * y) == res, - { - let mask = y - 
1; - proof { - assert(0 <= (x / y) * y <= x) by (nonlinear_arith) - requires - y > 0, - x >= 0, - ; - //assert((y & mask) == 0usize ==> (x & !mask) == sub(x, x % y)) by(bit_vector) - // requires mask == sub(y, 1), y >= 1usize; - if y & mask == 0usize { - bitand_with_mask_gives_rounding(x, y); - assert((x & !mask) == (x / y) * y); - assert((x & !mask) == (x as int / y as int) * y); - } - assert((x as int / y as int) == (x / y) as int); - assert(x / y * y + x % y == x) by (nonlinear_arith) - requires - y != 0, - ; - assert(0 <= x % y < y); - let t = x / y; - mul_mod_right(t as int, y as int); - assert(y != 0 ==> (t * y) / y as int * y == t * y) by (nonlinear_arith); - } - if ((y & mask) == 0) { // power of two? - x & !mask - } else { - (x / y) * y - } - } - - #[inline] - pub fn align_up(x: usize, y: usize) -> (res: usize) - requires - y != 0, - x + y - 1 <= usize::MAX, - ensures - res == ((x + y - 1) / y as int) * y, - x <= res <= x + y - 1, - res % y == 0, - (res / y * y) == res, - { - let mask = y - 1; - proof { - if y & mask == 0 { - bitand_with_mask_gives_rounding((x + y - 1) as usize, y); - assert(((x + mask) as usize) & !mask == ((x + y - 1) / y as int) * y); - } - let z = x + mask; - assert(z / y as int * y + z % y as int == z) by (nonlinear_arith) + assert(y & sub(y, 1) == 0usize ==> (y >> 1) & sub(y >> 1, 1) == 0usize) by (bit_vector) + requires + y != 0usize, + y != 1usize, + ; + bitand_with_mask_gives_rounding(x >> 1, y >> 1); + assert(x & !sub(y, 1) == mul(2, (x >> 1) & !sub(y >> 1, 1)) && (x >> 1) & !sub(y >> 1, 1) + < 0x8000_0000_0000_0000usize) by (bit_vector) + requires + y != 0usize, + y != 1usize, + y & sub(y, 1) == 0usize, + ; + let y1 = y >> 1; + let x1 = x >> 1; + let b = x % 2; + assert(y >> 1 == y / 2) by (bit_vector); + assert(x >> 1 == x / 2) by (bit_vector); + assert(y == 2 * y1) by { + assert(y & sub(y, 1) == 0usize ==> y % 2usize == 0usize) by (bit_vector) requires - y != 0, + y != 0usize, + y != 1usize, ; - let t = (x + y - 1) / y as int; - mul_mod_right(t, y as int); - assert(y != 0 ==> (t * y) / y as int * y == t * y) by (nonlinear_arith); } - if ((y & mask) == 0) { // power of two? 
- (x + mask) & !mask - } else { - ((x + mask) / y) * y + assert(x == 2 * x1 + b); + assert((2 * x1 + b) / (2 * y1) * (2 * y1) == 2 * (x1 / y1 * y1)) by { + let t = (2 * x1 + b) / (2 * y1); + assert(t * (2 * y1) == 2 * (t * y1)) by (nonlinear_arith); + two_mul_with_bit0(x1 as int, y1 as int); + two_mul_with_bit1(x1 as int, y1 as int); + assert((2 * x1 + b) / (2 * y1) == x1 / y1); // by(nonlinear_arith) + //requires b == 0 || b == 1; } + assert(x / y * y == 2 * (((x >> 1) / (y >> 1)) * (y >> 1))); + //assert(((x >> 1) / (y >> 1)) * (y >> 1) == ((x >> 1) & !sub(y >> 1, 1))); + //assert(x & !sub(y, 1) == 2 * ((x >> 1) & !sub(y >> 1, 1))); + //assert(x & !sub(y, 1) == (x / y) * y); } +} - #[verifier::integer_ring] - pub proof fn mod_trans(a: int, b: int, c: int) - requires /*b != 0, c != 0,*/ - - a % b == 0, - b % c == 0, - ensures - a % c == 0, - { - } +#[verifier::spinoff_prover] +proof fn two_mul_with_bit0(x1: int, y1: int) + requires + y1 != 0, + ensures + (2 * x1) / (2 * y1) == x1 / y1, +{ + assert((2 * x1) / (2 * y1) == ((2 * x1) / 2) / y1) by (nonlinear_arith) + requires + y1 != 0, + ; + assert((2 * x1) / 2 == x1); +} - #[verifier::integer_ring] - pub proof fn mod_mul(a: int, b: int, c: int) +#[verifier::spinoff_prover] +proof fn two_mul_with_bit1(x1: int, y1: int) + requires + y1 != 0, + ensures + (2 * x1 + 1) / (2 * y1) == x1 / y1, +{ + assert((2 * x1 + 1) / (2 * y1) == ((2 * x1 + 1) / 2) / y1) by (nonlinear_arith) requires - b % c == 0, // c > 0 + y1 != 0, + ; + assert((2 * x1 + 1) / 2 == x1); +} - ensures - (a * b) % c == 0, - { +#[verifier::spinoff_prover] +#[inline] +pub fn align_down(x: usize, y: usize) -> (res: usize) + requires + y != 0, + ensures + res == (x as int / y as int) * y, + res <= x < res + y, + res % y == 0, + (res / y * y) == res, +{ + let mask = y - 1; + proof { + assert(0 <= (x / y) * y <= x) by (nonlinear_arith) + requires + y > 0, + x >= 0, + ; + //assert((y & mask) == 0usize ==> (x & !mask) == sub(x, x % y)) by(bit_vector) + // requires mask == sub(y, 1), y >= 1usize; + if y & mask == 0usize { + bitand_with_mask_gives_rounding(x, y); + assert((x & !mask) == (x / y) * y); + assert((x & !mask) == (x as int / y as int) * y); + } + assert((x as int / y as int) == (x / y) as int); + assert(x / y * y + x % y == x) by (nonlinear_arith) + requires + y != 0, + ; + assert(0 <= x % y < y); + let t = x / y; + mul_mod_right(t as int, y as int); + assert(y != 0 ==> (t * y) / y as int * y == t * y) by (nonlinear_arith); } - - #[verifier::integer_ring] - pub proof fn mul_mod_right(a: int, b: int) - ensures - (a * b) % b == 0, - { + if ((y & mask) == 0) { // power of two? 
+ x & !mask + } else { + (x / y) * y } +} - impl SegmentPtr { - #[inline] - pub fn ptr_segment(page_ptr: PagePtr) -> (segment_ptr: SegmentPtr) +#[inline] +pub fn align_up(x: usize, y: usize) -> (res: usize) + requires + y != 0, + x + y - 1 <= usize::MAX, + ensures + res == ((x + y - 1) / y as int) * y, + x <= res <= x + y - 1, + res % y == 0, + (res / y * y) == res, +{ + let mask = y - 1; + proof { + if y & mask == 0 { + bitand_with_mask_gives_rounding((x + y - 1) as usize, y); + assert(((x + mask) as usize) & !mask == ((x + y - 1) / y as int) * y); + } + let z = x + mask; + assert(z / y as int * y + z % y as int == z) by (nonlinear_arith) requires - page_ptr.wf(), - ensures - segment_ptr.wf(), - segment_ptr.segment_id == page_ptr.page_id@.segment_id, - { - proof { - const_facts(); - let p = page_ptr.page_ptr.id(); - let sid = page_ptr.page_id@.segment_id; - assert((p / SEGMENT_SIZE as int) * SEGMENT_SIZE as int == segment_start(sid)); - } - let p = page_ptr.page_ptr.to_usize(); - let s = (p / SEGMENT_SIZE as usize) * SEGMENT_SIZE as usize; - SegmentPtr { - segment_ptr: PPtr::from_usize(s), - segment_id: Ghost(page_ptr.page_id@.segment_id), - } - } + y != 0, + ; + let t = (x + y - 1) / y as int; + mul_mod_right(t, y as int); + assert(y != 0 ==> (t * y) / y as int * y == t * y) by (nonlinear_arith); } - - pub proof fn is_page_ptr_nonzero(ptr: int, page_id: PageId) - requires - is_page_ptr(ptr, page_id), - ensures - ptr != 0, - { - segment_start_ge0(page_id.segment_id); + if ((y & mask) == 0) { // power of two? + (x + mask) & !mask + } else { + ((x + mask) / y) * y } +} - pub proof fn is_block_ptr_mult4(ptr: int, block_id: BlockId) - requires - is_block_ptr(ptr, block_id), - ensures - ptr % 4 == 0, - { - hide(is_block_ptr); - crate::linked_list::size_of_node(); - block_ptr_aligned_to_word(); - } +#[verifier::integer_ring] +pub proof fn mod_trans(a: int, b: int, c: int) + requires /*b != 0, c != 0,*/ - pub proof fn segment_start_mult_commit_size(segment_id: SegmentId) - ensures - segment_start(segment_id) % COMMIT_SIZE as int == 0, - { - const_facts(); - assert(COMMIT_SIZE as int == 65536); - } + a % b == 0, + b % c == 0, + ensures + a % c == 0, +{ +} - pub proof fn segment_start_mult8(segment_id: SegmentId) - ensures - segment_start(segment_id) % 8 == 0, - { - const_facts(); - } +#[verifier::integer_ring] +pub proof fn mod_mul(a: int, b: int, c: int) + requires + b % c == 0, // c > 0 - pub proof fn segment_start_ge0(segment_id: SegmentId) - ensures - segment_start(segment_id) >= 0, - { - const_facts(); - } + ensures + (a * b) % c == 0, +{ +} + +#[verifier::integer_ring] +pub proof fn mul_mod_right(a: int, b: int) + ensures + (a * b) % b == 0, +{ +} - pub fn calculate_start_offset(block_size: usize) -> (res: u32) +impl SegmentPtr { + #[inline] + pub fn ptr_segment(page_ptr: PagePtr) -> (segment_ptr: SegmentPtr) + requires + page_ptr.wf(), ensures - res == start_offset(block_size as int), + segment_ptr.wf(), + segment_ptr.segment_id == page_ptr.page_id@.segment_id, { - if block_size >= 8 && block_size <= 1024 { - 3 * MAX_ALIGN_GUARANTEE as u32 - } else { - 0 + proof { + const_facts(); + let p = page_ptr.page_ptr.id(); + let sid = page_ptr.page_id@.segment_id; + assert((p / SEGMENT_SIZE as int) * SEGMENT_SIZE as int == segment_start(sid)); + } + let p = page_ptr.page_ptr.to_usize(); + let s = (p / SEGMENT_SIZE as usize) * SEGMENT_SIZE as usize; + SegmentPtr { + segment_ptr: PPtr::from_usize(s), + segment_id: Ghost(page_ptr.page_id@.segment_id), } } +} - pub proof fn 
start_offset_le_slice_size(block_size: int) - ensures - 0 <= start_offset(block_size) <= SLICE_SIZE, - start_offset(block_size) == 0 || start_offset(block_size) == 3 * MAX_ALIGN_GUARANTEE, - { - } +pub proof fn is_page_ptr_nonzero(ptr: int, page_id: PageId) + requires + is_page_ptr(ptr, page_id), + ensures + ptr != 0, +{ + segment_start_ge0(page_id.segment_id); +} - pub proof fn segment_start_eq(sid: SegmentId, sid2: SegmentId) - requires - sid.id == sid2.id, - ensures - segment_start(sid) == segment_start(sid2), - { - } +pub proof fn is_block_ptr_mult4(ptr: int, block_id: BlockId) + requires + is_block_ptr(ptr, block_id), + ensures + ptr % 4 == 0, +{ + hide(is_block_ptr); + crate::linked_list::size_of_node(); + block_ptr_aligned_to_word(); +} - pub proof fn get_block_start_from_is_block_ptr(ptr: int, block_id: BlockId) - requires - is_block_ptr(ptr, block_id), - ensures - ptr == block_start(block_id), - { - reveal(is_block_ptr); - } +pub proof fn segment_start_mult_commit_size(segment_id: SegmentId) + ensures + segment_start(segment_id) % COMMIT_SIZE as int == 0, +{ + const_facts(); + assert(COMMIT_SIZE as int == 65536); +} - pub proof fn get_block_start_defn(block_id: BlockId) - ensures - block_start(block_id) == block_start_at( - block_id.page_id, - block_id.block_size as int, - block_id.idx as int, - ), - { - } +pub proof fn segment_start_mult8(segment_id: SegmentId) + ensures + segment_start(segment_id) % 8 == 0, +{ + const_facts(); +} - pub proof fn sub_distribute(a: int, b: int, c: int) - ensures - a * c - b * c == (a - b) * c, - { - assert(a * c - b * c == (a - b) * c) by (nonlinear_arith); +pub proof fn segment_start_ge0(segment_id: SegmentId) + ensures + segment_start(segment_id) >= 0, +{ + const_facts(); +} + +pub fn calculate_start_offset(block_size: usize) -> (res: u32) + ensures + res == start_offset(block_size as int), +{ + if block_size >= 8 && block_size <= 1024 { + 3 * MAX_ALIGN_GUARANTEE as u32 + } else { + 0 } +} + +pub proof fn start_offset_le_slice_size(block_size: int) + ensures + 0 <= start_offset(block_size) <= SLICE_SIZE, + start_offset(block_size) == 0 || start_offset(block_size) == 3 * MAX_ALIGN_GUARANTEE, +{ +} + +pub proof fn segment_start_eq(sid: SegmentId, sid2: SegmentId) + requires + sid.id == sid2.id, + ensures + segment_start(sid) == segment_start(sid2), +{ +} + +pub proof fn get_block_start_from_is_block_ptr(ptr: int, block_id: BlockId) + requires + is_block_ptr(ptr, block_id), + ensures + ptr == block_start(block_id), +{ + reveal(is_block_ptr); +} + +pub proof fn get_block_start_defn(block_id: BlockId) + ensures + block_start(block_id) == block_start_at( + block_id.page_id, + block_id.block_size as int, + block_id.idx as int, + ), +{ +} + +pub proof fn sub_distribute(a: int, b: int, c: int) + ensures + a * c - b * c == (a - b) * c, +{ + assert(a * c - b * c == (a - b) * c) by (nonlinear_arith); +} - } // verus! +} // verus! } mod config { @@ -5663,158 +5646,160 @@ mod config { verus! 
{ - // Log of the (pointer-size in bytes) // TODO make configurable - pub const INTPTR_SHIFT: u64 = 3; +// Log of the (pointer-size in bytes) // TODO make configurable +pub const INTPTR_SHIFT: u64 = 3; - pub const INTPTR_SIZE: u64 = 8; +pub const INTPTR_SIZE: u64 = 8; - global size_of usize == 8; +global size_of usize == 8; - // Log of the size of a 'slice' - pub const SLICE_SHIFT: u64 = 13 + INTPTR_SHIFT; +// Log of the size of a 'slice' +pub const SLICE_SHIFT: u64 = 13 + INTPTR_SHIFT; - // Size of a slice - pub const SLICE_SIZE: u64 = 65536; - //(1 << SLICE_SHIFT); - // Log of the size of a 'segment' - pub const SEGMENT_SHIFT: u64 = 9 + SLICE_SHIFT; +// Size of a slice +pub const SLICE_SIZE: u64 = 65536; - // Log of the size of a 'segment' - pub const SEGMENT_SIZE: u64 = (1 << SEGMENT_SHIFT); +//(1 << SLICE_SHIFT); +// Log of the size of a 'segment' +pub const SEGMENT_SHIFT: u64 = 9 + SLICE_SHIFT; - // Log of the size of a 'segment' - pub const SEGMENT_ALIGN: u64 = SEGMENT_SIZE; +// Log of the size of a 'segment' +pub const SEGMENT_SIZE: u64 = (1 << SEGMENT_SHIFT); - // Size of a 'segment' - pub const SLICES_PER_SEGMENT: u64 = (SEGMENT_SIZE / SLICE_SIZE); +// Log of the size of a 'segment' +pub const SEGMENT_ALIGN: u64 = SEGMENT_SIZE; - pub const BIN_HUGE: u64 = 73; +// Size of a 'segment' +pub const SLICES_PER_SEGMENT: u64 = (SEGMENT_SIZE / SLICE_SIZE); - // Fake bin that contains the "full" list. This is not a valid bin idx - // according to the valid_bin_idx spec in bin_sizes.rs. - pub const BIN_FULL: u64 = BIN_HUGE + 1; +pub const BIN_HUGE: u64 = 73; - pub const SMALL_PAGE_SHIFT: u64 = SLICE_SHIFT; +// Fake bin that contains the "full" list. This is not a valid bin idx +// according to the valid_bin_idx spec in bin_sizes.rs. +pub const BIN_FULL: u64 = BIN_HUGE + 1; - pub const MEDIUM_PAGE_SHIFT: u64 = 3 + SMALL_PAGE_SHIFT; +pub const SMALL_PAGE_SHIFT: u64 = SLICE_SHIFT; - pub const SMALL_PAGE_SIZE: u64 = 1u64 << SMALL_PAGE_SHIFT; +pub const MEDIUM_PAGE_SHIFT: u64 = 3 + SMALL_PAGE_SHIFT; - pub const MEDIUM_PAGE_SIZE: u64 = 1u64 << MEDIUM_PAGE_SHIFT; +pub const SMALL_PAGE_SIZE: u64 = 1u64 << SMALL_PAGE_SHIFT; - pub const SMALL_OBJ_SIZE_MAX: u64 = (SMALL_PAGE_SIZE / 4); +pub const MEDIUM_PAGE_SIZE: u64 = 1u64 << MEDIUM_PAGE_SHIFT; - pub const MEDIUM_OBJ_SIZE_MAX: u64 = MEDIUM_PAGE_SIZE / 4; +pub const SMALL_OBJ_SIZE_MAX: u64 = (SMALL_PAGE_SIZE / 4); - pub const MEDIUM_OBJ_WSIZE_MAX: u64 = MEDIUM_OBJ_SIZE_MAX / (usize::BITS as u64 / 8); +pub const MEDIUM_OBJ_SIZE_MAX: u64 = MEDIUM_PAGE_SIZE / 4; - pub const LARGE_OBJ_SIZE_MAX: u64 = (SEGMENT_SIZE / 2); +pub const MEDIUM_OBJ_WSIZE_MAX: u64 = MEDIUM_OBJ_SIZE_MAX / (usize::BITS as u64 / 8); - // maximum alloc size the user is allowed to request - // note: mimalloc use ptrdiff_t max here - pub const MAX_ALLOC_SIZE: usize = isize::MAX as usize; +pub const LARGE_OBJ_SIZE_MAX: u64 = (SEGMENT_SIZE / 2); - pub const SMALL_WSIZE_MAX: usize = 128; +// maximum alloc size the user is allowed to request +// note: mimalloc use ptrdiff_t max here +pub const MAX_ALLOC_SIZE: usize = isize::MAX as usize; - pub const PAGES_DIRECT: usize = SMALL_WSIZE_MAX + 1; +pub const SMALL_WSIZE_MAX: usize = 128; - pub const SMALL_SIZE_MAX: usize = SMALL_WSIZE_MAX * INTPTR_SIZE as usize; +pub const PAGES_DIRECT: usize = SMALL_WSIZE_MAX + 1; - pub const MAX_ALIGN_SIZE: usize = 16; +pub const SMALL_SIZE_MAX: usize = SMALL_WSIZE_MAX * INTPTR_SIZE as usize; - pub const MAX_ALIGN_GUARANTEE: usize = 8 * MAX_ALIGN_SIZE; +pub const MAX_ALIGN_SIZE: usize = 16; - pub const 
SEGMENT_BIN_MAX: usize = 31; +pub const MAX_ALIGN_GUARANTEE: usize = 8 * MAX_ALIGN_SIZE; - pub const ALIGNMENT_MAX: u64 = (SEGMENT_SIZE / 2); +pub const SEGMENT_BIN_MAX: usize = 31; - pub const SIZEOF_SEGMENT_HEADER: usize = 264; +pub const ALIGNMENT_MAX: u64 = (SEGMENT_SIZE / 2); - pub const SIZEOF_PAGE_HEADER: usize = 80; +pub const SIZEOF_SEGMENT_HEADER: usize = 264; - pub const SIZEOF_HEAP: usize = 2904; +pub const SIZEOF_PAGE_HEADER: usize = 80; - pub const SIZEOF_TLD: usize = 552; +pub const SIZEOF_HEAP: usize = 2904; - use crate::types::*; +pub const SIZEOF_TLD: usize = 552; - global layout SegmentHeader is size == 264, align == 8; +use crate::types::*; - global layout Page is size == 80, align == 8; +global layout SegmentHeader is size == 264, align == 8; - global layout Heap is size == 2904, align == 8; +global layout Page is size == 80, align == 8; - global layout Tld is size == 552, align == 8; +global layout Heap is size == 2904, align == 8; - // commit mask - pub const COMMIT_SIZE: u64 = SLICE_SIZE; +global layout Tld is size == 552, align == 8; - pub const COMMIT_MASK_BITS: u64 = SLICES_PER_SEGMENT; +// commit mask +pub const COMMIT_SIZE: u64 = SLICE_SIZE; - pub const COMMIT_MASK_FIELD_COUNT: u64 = COMMIT_MASK_BITS / (usize::BITS as u64); +pub const COMMIT_MASK_BITS: u64 = SLICES_PER_SEGMENT; - // huge - pub const HUGE_BLOCK_SIZE: u32 = 0x80000000; - // 2 GiB - // Helpers - pub proof fn const_facts() - ensures - SLICE_SIZE == 65536, - SEGMENT_SIZE == 33554432, - SLICES_PER_SEGMENT == 512, - SMALL_PAGE_SIZE == 65536, - MEDIUM_PAGE_SIZE == 524288, - SMALL_OBJ_SIZE_MAX == 16384, - MEDIUM_OBJ_SIZE_MAX == 131072, - MEDIUM_OBJ_WSIZE_MAX == 16384, - SMALL_SIZE_MAX == 1024, - LARGE_OBJ_SIZE_MAX == 16777216, - COMMIT_MASK_FIELD_COUNT == 8, - vstd::layout::size_of::() == SIZEOF_SEGMENT_HEADER, - vstd::layout::size_of::() == SIZEOF_PAGE_HEADER, - vstd::layout::size_of::() == SIZEOF_HEAP, - vstd::layout::size_of::() == SIZEOF_TLD, - vstd::layout::align_of::() == 8, - vstd::layout::align_of::() == 8, - vstd::layout::align_of::() == 8, - vstd::layout::align_of::() == 8, - { - assert(SLICE_SIZE == 65536) by (compute); - assert(SEGMENT_SIZE == 33554432) by (compute); - assert(SMALL_PAGE_SIZE == 65536) by (compute); - assert(MEDIUM_PAGE_SIZE == 524288) by (compute); - assert(COMMIT_MASK_FIELD_COUNT == 8) by (compute); - } +pub const COMMIT_MASK_FIELD_COUNT: u64 = COMMIT_MASK_BITS / (usize::BITS as u64); - use crate::types::todo; +// huge +pub const HUGE_BLOCK_SIZE: u32 = 0x80000000; - pub fn option_eager_commit_delay() -> i64 { - 1 - } +// 2 GiB +// Helpers +pub proof fn const_facts() + ensures + SLICE_SIZE == 65536, + SEGMENT_SIZE == 33554432, + SLICES_PER_SEGMENT == 512, + SMALL_PAGE_SIZE == 65536, + MEDIUM_PAGE_SIZE == 524288, + SMALL_OBJ_SIZE_MAX == 16384, + MEDIUM_OBJ_SIZE_MAX == 131072, + MEDIUM_OBJ_WSIZE_MAX == 16384, + SMALL_SIZE_MAX == 1024, + LARGE_OBJ_SIZE_MAX == 16777216, + COMMIT_MASK_FIELD_COUNT == 8, + vstd::layout::size_of::() == SIZEOF_SEGMENT_HEADER, + vstd::layout::size_of::() == SIZEOF_PAGE_HEADER, + vstd::layout::size_of::() == SIZEOF_HEAP, + vstd::layout::size_of::() == SIZEOF_TLD, + vstd::layout::align_of::() == 8, + vstd::layout::align_of::() == 8, + vstd::layout::align_of::() == 8, + vstd::layout::align_of::() == 8, +{ + assert(SLICE_SIZE == 65536) by (compute); + assert(SEGMENT_SIZE == 33554432) by (compute); + assert(SMALL_PAGE_SIZE == 65536) by (compute); + assert(MEDIUM_PAGE_SIZE == 524288) by (compute); + assert(COMMIT_MASK_FIELD_COUNT == 8) by (compute); +} - 
pub fn option_eager_commit() -> bool { - true - } +use crate::types::todo; - pub fn option_allow_decommit() -> bool { - true - } +pub fn option_eager_commit_delay() -> i64 { + 1 +} - pub fn option_page_reset() -> bool { - false - } +pub fn option_eager_commit() -> bool { + true +} - //pub fn option_decommit_delay() -> i64 { assume(false); 1 /*25*/ } - //pub fn option_decommit_extend_delay() -> i64 { assume(false); 0 /*1*/ } - pub fn option_decommit_delay() -> i64 { - 25 - } +pub fn option_allow_decommit() -> bool { + true +} - pub fn option_decommit_extend_delay() -> i64 { - 1 - } +pub fn option_page_reset() -> bool { + false +} + +//pub fn option_decommit_delay() -> i64 { assume(false); 1 /*25*/ } +//pub fn option_decommit_extend_delay() -> i64 { assume(false); 0 /*1*/ } +pub fn option_decommit_delay() -> i64 { + 25 +} + +pub fn option_decommit_extend_delay() -> i64 { + 1 +} - } // verus! +} // verus! } mod bin_sizes { @@ -5908,8 +5893,8 @@ mod bin_sizes { verus! { - // TODO: Pulled in constants to make this a standalone file - /* +// TODO: Pulled in constants to make this a standalone file +/* global size_of usize == 8; // Log of the (pointer-size in bytes) // TODO make configurable @@ -5946,542 +5931,542 @@ mod bin_sizes { // note: mimalloc use ptrdiff_t max here pub const MAX_ALLOC_SIZE: usize = isize::MAX as usize; */ - pub open spec fn valid_bin_idx(bin_idx: int) -> bool { - 1 <= bin_idx <= BIN_HUGE - } +pub open spec fn valid_bin_idx(bin_idx: int) -> bool { + 1 <= bin_idx <= BIN_HUGE +} - #[verifier::opaque] - pub open spec fn size_of_bin(bin_idx: int) -> nat - recommends - valid_bin_idx(bin_idx), - { - if 1 <= bin_idx <= 8 { - (usize::BITS / 8) as nat * (bin_idx as nat) - } else if bin_idx == BIN_HUGE { - // the "real" upper bound on this bucket is infinite - // the lemmas on bin sizes assume each bin has a lower bound and upper bound - // so we pretend this is the upper bound - 8 * (524288 + 1)//8 * (MEDIUM_OBJ_WSIZE_MAX as nat + 1) +#[verifier::opaque] +pub open spec fn size_of_bin(bin_idx: int) -> nat + recommends + valid_bin_idx(bin_idx), +{ + if 1 <= bin_idx <= 8 { + (usize::BITS / 8) as nat * (bin_idx as nat) + } else if bin_idx == BIN_HUGE { + // the "real" upper bound on this bucket is infinite + // the lemmas on bin sizes assume each bin has a lower bound and upper bound + // so we pretend this is the upper bound + 8 * (524288 + 1) //8 * (MEDIUM_OBJ_WSIZE_MAX as nat + 1) - } else { - let group = (bin_idx - 9) / 4; - let inner = (bin_idx - 9) % 4; - ((usize::BITS / 8) * (inner + 5) * pow2(group + 1)) as nat - } + } else { + let group = (bin_idx - 9) / 4; + let inner = (bin_idx - 9) % 4; + ((usize::BITS / 8) * (inner + 5) * pow2(group + 1)) as nat } +} - proof fn mod8(x: int, y: int) - by (nonlinear_arith) - requires - x == 8 * y, - ensures - x % 8 == 0, - { - } +proof fn mod8(x: int, y: int) + by (nonlinear_arith) + requires + x == 8 * y, + ensures + x % 8 == 0, +{ +} - pub proof fn size_of_bin_mult_word_size(bin_idx: int) - ensures - size_of_bin(bin_idx) % 8 == 0, - { - reveal(size_of_bin); - if 1 <= bin_idx <= 8 { - assert(size_of_bin(bin_idx) == (usize::BITS / 8) as nat * (bin_idx as nat)); - assert(size_of_bin(bin_idx) == 8 * (bin_idx as nat)); - assert(size_of_bin(bin_idx) == 8 * bin_idx); - assert((8 * bin_idx) % 8 == 0) by (nonlinear_arith); - } else if bin_idx == BIN_HUGE { - } else { - let group = (bin_idx - 9) / 4; - let inner = (bin_idx - 9) % 4; - assert(size_of_bin(bin_idx) == ((usize::BITS / 8) * (inner + 5) * pow2(group + 1)) as nat); - 
assert(size_of_bin(bin_idx) == (8 * (inner + 5) * pow2(group + 1)) as nat); - assert(size_of_bin(bin_idx) == 8 * (inner + 5) * pow2(group + 1)); - let sum = (inner + 5); - let product = sum * pow2(group + 1); - assert({ - let s = inner + 5; - let p = s * pow2(group + 1); - 8 * (inner + 5) * pow2(group + 1) == 8 * p - }) by (nonlinear_arith); - assert(size_of_bin(bin_idx) == 8 * product); - mod8(8 * product, product); - } - } - - // spec equivalent of bin - pub open spec fn smallest_bin_fitting_size(size: int) -> int { - let bytes_per_word = (usize::BITS / 8) as int; - let wsize = (size + bytes_per_word - 1) / bytes_per_word; - if wsize <= 1 { - 1 - } else if wsize <= 8 { - wsize - } else if wsize > 524288 { - BIN_HUGE as int - } else { - let w = (wsize - 1) as u64; - //let lz = w.leading_zeros(); - let lz = u64_leading_zeros(w); - let b = (usize::BITS - 1 - lz) as u8; - let shifted = (w >> (b - 2) as u64) as u8; - let bin_idx = ((b * 4) + (shifted & 0x03)) - 3; - bin_idx - } +pub proof fn size_of_bin_mult_word_size(bin_idx: int) + ensures + size_of_bin(bin_idx) % 8 == 0, +{ + reveal(size_of_bin); + if 1 <= bin_idx <= 8 { + assert(size_of_bin(bin_idx) == (usize::BITS / 8) as nat * (bin_idx as nat)); + assert(size_of_bin(bin_idx) == 8 * (bin_idx as nat)); + assert(size_of_bin(bin_idx) == 8 * bin_idx); + assert((8 * bin_idx) % 8 == 0) by (nonlinear_arith); + } else if bin_idx == BIN_HUGE { + } else { + let group = (bin_idx - 9) / 4; + let inner = (bin_idx - 9) % 4; + assert(size_of_bin(bin_idx) == ((usize::BITS / 8) * (inner + 5) * pow2(group + 1)) as nat); + assert(size_of_bin(bin_idx) == (8 * (inner + 5) * pow2(group + 1)) as nat); + assert(size_of_bin(bin_idx) == 8 * (inner + 5) * pow2(group + 1)); + let sum = (inner + 5); + let product = sum * pow2(group + 1); + assert({ + let s = inner + 5; + let p = s * pow2(group + 1); + 8 * (inner + 5) * pow2(group + 1) == 8 * p + }) by (nonlinear_arith); + assert(size_of_bin(bin_idx) == 8 * product); + mod8(8 * product, product); } +} - pub open spec fn pfd_lower(bin_idx: int) -> nat - recommends - valid_bin_idx(bin_idx), - { - if bin_idx == 1 { - 0 - } else { - size_of_bin(bin_idx - 1) / INTPTR_SIZE as nat + 1 - } +// spec equivalent of bin +pub open spec fn smallest_bin_fitting_size(size: int) -> int { + let bytes_per_word = (usize::BITS / 8) as int; + let wsize = (size + bytes_per_word - 1) / bytes_per_word; + if wsize <= 1 { + 1 + } else if wsize <= 8 { + wsize + } else if wsize > 524288 { + BIN_HUGE as int + } else { + let w = (wsize - 1) as u64; + //let lz = w.leading_zeros(); + let lz = u64_leading_zeros(w); + let b = (usize::BITS - 1 - lz) as u8; + let shifted = (w >> (b - 2) as u64) as u8; + let bin_idx = ((b * 4) + (shifted & 0x03)) - 3; + bin_idx } +} - pub open spec fn pfd_upper(bin_idx: int) -> nat - recommends - valid_bin_idx(bin_idx), - { - size_of_bin(bin_idx) / INTPTR_SIZE as nat +pub open spec fn pfd_lower(bin_idx: int) -> nat + recommends + valid_bin_idx(bin_idx), +{ + if bin_idx == 1 { + 0 + } else { + size_of_bin(bin_idx - 1) / INTPTR_SIZE as nat + 1 } +} - // TODO: The assertions in this lemma are duplicated in init.rs - pub proof fn lemma_bin_sizes_constants() - ensures - size_of_bin(1) == 8, - size_of_bin(1) / 8 == 1, - size_of_bin(2) == 16, - size_of_bin(2) / 8 == 2, - size_of_bin(3) == 24, - size_of_bin(3) / 8 == 3, - size_of_bin(4) == 32, - size_of_bin(4) / 8 == 4, - size_of_bin(5) == 40, - size_of_bin(5) / 8 == 5, - size_of_bin(6) == 48, - size_of_bin(6) / 8 == 6, - size_of_bin(7) == 56, - size_of_bin(7) / 8 == 7, - 
size_of_bin(8) == 64, - size_of_bin(8) / 8 == 8, - size_of_bin(9) == 80, - size_of_bin(9) / 8 == 10, - size_of_bin(10) == 96, - size_of_bin(10) / 8 == 12, - size_of_bin(11) == 112, - size_of_bin(11) / 8 == 14, - size_of_bin(12) == 128, - size_of_bin(12) / 8 == 16, - size_of_bin(13) == 160, - size_of_bin(13) / 8 == 20, - size_of_bin(14) == 192, - size_of_bin(14) / 8 == 24, - size_of_bin(15) == 224, - size_of_bin(15) / 8 == 28, - size_of_bin(16) == 256, - size_of_bin(16) / 8 == 32, - size_of_bin(17) == 320, - size_of_bin(17) / 8 == 40, - size_of_bin(18) == 384, - size_of_bin(18) / 8 == 48, - size_of_bin(19) == 448, - size_of_bin(19) / 8 == 56, - size_of_bin(20) == 512, - size_of_bin(20) / 8 == 64, - size_of_bin(21) == 640, - size_of_bin(21) / 8 == 80, - size_of_bin(22) == 768, - size_of_bin(22) / 8 == 96, - size_of_bin(23) == 896, - size_of_bin(23) / 8 == 112, - size_of_bin(24) == 1024, - size_of_bin(24) / 8 == 128, - size_of_bin(25) == 1280, - size_of_bin(25) / 8 == 160, - size_of_bin(26) == 1536, - size_of_bin(26) / 8 == 192, - size_of_bin(27) == 1792, - size_of_bin(27) / 8 == 224, - size_of_bin(28) == 2048, - size_of_bin(28) / 8 == 256, - size_of_bin(29) == 2560, - size_of_bin(29) / 8 == 320, - size_of_bin(30) == 3072, - size_of_bin(30) / 8 == 384, - size_of_bin(31) == 3584, - size_of_bin(31) / 8 == 448, - size_of_bin(32) == 4096, - size_of_bin(32) / 8 == 512, - size_of_bin(33) == 5120, - size_of_bin(33) / 8 == 640, - size_of_bin(34) == 6144, - size_of_bin(34) / 8 == 768, - size_of_bin(35) == 7168, - size_of_bin(35) / 8 == 896, - size_of_bin(36) == 8192, - size_of_bin(36) / 8 == 1024, - size_of_bin(37) == 10240, - size_of_bin(37) / 8 == 1280, - size_of_bin(38) == 12288, - size_of_bin(38) / 8 == 1536, - size_of_bin(39) == 14336, - size_of_bin(39) / 8 == 1792, - size_of_bin(40) == 16384, - size_of_bin(40) / 8 == 2048, - size_of_bin(41) == 20480, - size_of_bin(41) / 8 == 2560, - size_of_bin(42) == 24576, - size_of_bin(42) / 8 == 3072, - size_of_bin(43) == 28672, - size_of_bin(43) / 8 == 3584, - size_of_bin(44) == 32768, - size_of_bin(44) / 8 == 4096, - size_of_bin(45) == 40960, - size_of_bin(45) / 8 == 5120, - size_of_bin(46) == 49152, - size_of_bin(46) / 8 == 6144, - size_of_bin(47) == 57344, - size_of_bin(47) / 8 == 7168, - size_of_bin(48) == 65536, - size_of_bin(48) / 8 == 8192, - size_of_bin(49) == 81920, - size_of_bin(49) / 8 == 10240, - size_of_bin(50) == 98304, - size_of_bin(50) / 8 == 12288, - size_of_bin(51) == 114688, - size_of_bin(51) / 8 == 14336, - size_of_bin(52) == 131072, - size_of_bin(52) / 8 == 16384, - size_of_bin(53) == 163840, - size_of_bin(53) / 8 == 20480, - size_of_bin(54) == 196608, - size_of_bin(54) / 8 == 24576, - size_of_bin(55) == 229376, - size_of_bin(55) / 8 == 28672, - size_of_bin(56) == 262144, - size_of_bin(56) / 8 == 32768, - size_of_bin(57) == 327680, - size_of_bin(57) / 8 == 40960, - size_of_bin(58) == 393216, - size_of_bin(58) / 8 == 49152, - size_of_bin(59) == 458752, - size_of_bin(59) / 8 == 57344, - size_of_bin(60) == 524288, - size_of_bin(60) / 8 == 65536, - size_of_bin(61) == 655360, - size_of_bin(61) / 8 == 81920, - size_of_bin(62) == 786432, - size_of_bin(62) / 8 == 98304, - size_of_bin(63) == 917504, - size_of_bin(63) / 8 == 114688, - size_of_bin(64) == 1048576, - size_of_bin(64) / 8 == 131072, - size_of_bin(65) == 1310720, - size_of_bin(65) / 8 == 163840, - size_of_bin(66) == 1572864, - size_of_bin(66) / 8 == 196608, - size_of_bin(67) == 1835008, - size_of_bin(67) / 8 == 229376, - size_of_bin(68) == 2097152, - size_of_bin(68) / 8 == 262144, - 
size_of_bin(69) == 2621440, - size_of_bin(69) / 8 == 327680, - size_of_bin(70) == 3145728, - size_of_bin(70) / 8 == 393216, - size_of_bin(71) == 3670016, - size_of_bin(71) / 8 == 458752, - size_of_bin(72) == 4194304, - size_of_bin(72) / 8 == 524288, - size_of_bin(73) == 4194312, - size_of_bin(73) / 8 == 524289, - { - assert(size_of_bin(1) == 8) by (compute_only); - assert(size_of_bin(2) == 16) by (compute_only); - assert(size_of_bin(3) == 24) by (compute_only); - assert(size_of_bin(4) == 32) by (compute_only); - assert(size_of_bin(5) == 40) by (compute_only); - assert(size_of_bin(6) == 48) by (compute_only); - assert(size_of_bin(7) == 56) by (compute_only); - assert(size_of_bin(8) == 64) by (compute_only); - assert(size_of_bin(9) == 80) by (compute_only); - assert(size_of_bin(10) == 96) by (compute_only); - assert(size_of_bin(11) == 112) by (compute_only); - assert(size_of_bin(12) == 128) by (compute_only); - assert(size_of_bin(13) == 160) by (compute_only); - assert(size_of_bin(14) == 192) by (compute_only); - assert(size_of_bin(15) == 224) by (compute_only); - assert(size_of_bin(16) == 256) by (compute_only); - assert(size_of_bin(17) == 320) by (compute_only); - assert(size_of_bin(18) == 384) by (compute_only); - assert(size_of_bin(19) == 448) by (compute_only); - assert(size_of_bin(20) == 512) by (compute_only); - assert(size_of_bin(21) == 640) by (compute_only); - assert(size_of_bin(22) == 768) by (compute_only); - assert(size_of_bin(23) == 896) by (compute_only); - assert(size_of_bin(24) == 1024) by (compute_only); - assert(size_of_bin(25) == 1280) by (compute_only); - assert(size_of_bin(26) == 1536) by (compute_only); - assert(size_of_bin(27) == 1792) by (compute_only); - assert(size_of_bin(28) == 2048) by (compute_only); - assert(size_of_bin(29) == 2560) by (compute_only); - assert(size_of_bin(30) == 3072) by (compute_only); - assert(size_of_bin(31) == 3584) by (compute_only); - assert(size_of_bin(32) == 4096) by (compute_only); - assert(size_of_bin(33) == 5120) by (compute_only); - assert(size_of_bin(34) == 6144) by (compute_only); - assert(size_of_bin(35) == 7168) by (compute_only); - assert(size_of_bin(36) == 8192) by (compute_only); - assert(size_of_bin(37) == 10240) by (compute_only); - assert(size_of_bin(38) == 12288) by (compute_only); - assert(size_of_bin(39) == 14336) by (compute_only); - assert(size_of_bin(40) == 16384) by (compute_only); - assert(size_of_bin(41) == 20480) by (compute_only); - assert(size_of_bin(42) == 24576) by (compute_only); - assert(size_of_bin(43) == 28672) by (compute_only); - assert(size_of_bin(44) == 32768) by (compute_only); - assert(size_of_bin(45) == 40960) by (compute_only); - assert(size_of_bin(46) == 49152) by (compute_only); - assert(size_of_bin(47) == 57344) by (compute_only); - assert(size_of_bin(48) == 65536) by (compute_only); - assert(size_of_bin(49) == 81920) by (compute_only); - assert(size_of_bin(50) == 98304) by (compute_only); - assert(size_of_bin(51) == 114688) by (compute_only); - assert(size_of_bin(52) == 131072) by (compute_only); - assert(size_of_bin(53) == 163840) by (compute_only); - assert(size_of_bin(54) == 196608) by (compute_only); - assert(size_of_bin(55) == 229376) by (compute_only); - assert(size_of_bin(56) == 262144) by (compute_only); - assert(size_of_bin(57) == 327680) by (compute_only); - assert(size_of_bin(58) == 393216) by (compute_only); - assert(size_of_bin(59) == 458752) by (compute_only); - assert(size_of_bin(60) == 524288) by (compute_only); - assert(size_of_bin(61) == 655360) by (compute_only); - 
assert(size_of_bin(62) == 786432) by (compute_only); - assert(size_of_bin(63) == 917504) by (compute_only); - assert(size_of_bin(64) == 1048576) by (compute_only); - assert(size_of_bin(65) == 1310720) by (compute_only); - assert(size_of_bin(66) == 1572864) by (compute_only); - assert(size_of_bin(67) == 1835008) by (compute_only); - assert(size_of_bin(68) == 2097152) by (compute_only); - assert(size_of_bin(69) == 2621440) by (compute_only); - assert(size_of_bin(70) == 3145728) by (compute_only); - assert(size_of_bin(71) == 3670016) by (compute_only); - assert(size_of_bin(72) == 4194304) by (compute_only); - assert(size_of_bin(73) == 8 * (524288 + 1)) by (compute_only); - } - - /** Put our desired property into a proof-by-compute-friendly form **/ - - spec fn property_idx_out_of_range_has_different_bin_size(bin_idx: int, wsize: int) -> bool { - valid_bin_idx(bin_idx) && !(pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)) && 0 <= wsize - <= 128 ==> smallest_bin_fitting_size(wsize * INTPTR_SIZE) != bin_idx - } - - spec fn check_idx_out_of_range_has_different_bin_size( - bin_idx: int, - wsize_start: int, - wsize_end: int, - ) -> bool - decreases wsize_end - wsize_start, - { - if wsize_start >= wsize_end { - true - } else { - property_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start) - && check_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start + 1, wsize_end) - } - } +pub open spec fn pfd_upper(bin_idx: int) -> nat + recommends + valid_bin_idx(bin_idx), +{ + size_of_bin(bin_idx) / INTPTR_SIZE as nat +} - proof fn result_idx_out_of_range_has_different_bin_size( - bin_idx: int, - wsize_start: int, - wsize_end: int, - ) - ensures - check_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start, wsize_end) ==> (forall| +// TODO: The assertions in this lemma are duplicated in init.rs +pub proof fn lemma_bin_sizes_constants() + ensures + size_of_bin(1) == 8, + size_of_bin(1) / 8 == 1, + size_of_bin(2) == 16, + size_of_bin(2) / 8 == 2, + size_of_bin(3) == 24, + size_of_bin(3) / 8 == 3, + size_of_bin(4) == 32, + size_of_bin(4) / 8 == 4, + size_of_bin(5) == 40, + size_of_bin(5) / 8 == 5, + size_of_bin(6) == 48, + size_of_bin(6) / 8 == 6, + size_of_bin(7) == 56, + size_of_bin(7) / 8 == 7, + size_of_bin(8) == 64, + size_of_bin(8) / 8 == 8, + size_of_bin(9) == 80, + size_of_bin(9) / 8 == 10, + size_of_bin(10) == 96, + size_of_bin(10) / 8 == 12, + size_of_bin(11) == 112, + size_of_bin(11) / 8 == 14, + size_of_bin(12) == 128, + size_of_bin(12) / 8 == 16, + size_of_bin(13) == 160, + size_of_bin(13) / 8 == 20, + size_of_bin(14) == 192, + size_of_bin(14) / 8 == 24, + size_of_bin(15) == 224, + size_of_bin(15) / 8 == 28, + size_of_bin(16) == 256, + size_of_bin(16) / 8 == 32, + size_of_bin(17) == 320, + size_of_bin(17) / 8 == 40, + size_of_bin(18) == 384, + size_of_bin(18) / 8 == 48, + size_of_bin(19) == 448, + size_of_bin(19) / 8 == 56, + size_of_bin(20) == 512, + size_of_bin(20) / 8 == 64, + size_of_bin(21) == 640, + size_of_bin(21) / 8 == 80, + size_of_bin(22) == 768, + size_of_bin(22) / 8 == 96, + size_of_bin(23) == 896, + size_of_bin(23) / 8 == 112, + size_of_bin(24) == 1024, + size_of_bin(24) / 8 == 128, + size_of_bin(25) == 1280, + size_of_bin(25) / 8 == 160, + size_of_bin(26) == 1536, + size_of_bin(26) / 8 == 192, + size_of_bin(27) == 1792, + size_of_bin(27) / 8 == 224, + size_of_bin(28) == 2048, + size_of_bin(28) / 8 == 256, + size_of_bin(29) == 2560, + size_of_bin(29) / 8 == 320, + size_of_bin(30) == 3072, + size_of_bin(30) / 8 == 384, + size_of_bin(31) == 3584, + 
size_of_bin(31) / 8 == 448, + size_of_bin(32) == 4096, + size_of_bin(32) / 8 == 512, + size_of_bin(33) == 5120, + size_of_bin(33) / 8 == 640, + size_of_bin(34) == 6144, + size_of_bin(34) / 8 == 768, + size_of_bin(35) == 7168, + size_of_bin(35) / 8 == 896, + size_of_bin(36) == 8192, + size_of_bin(36) / 8 == 1024, + size_of_bin(37) == 10240, + size_of_bin(37) / 8 == 1280, + size_of_bin(38) == 12288, + size_of_bin(38) / 8 == 1536, + size_of_bin(39) == 14336, + size_of_bin(39) / 8 == 1792, + size_of_bin(40) == 16384, + size_of_bin(40) / 8 == 2048, + size_of_bin(41) == 20480, + size_of_bin(41) / 8 == 2560, + size_of_bin(42) == 24576, + size_of_bin(42) / 8 == 3072, + size_of_bin(43) == 28672, + size_of_bin(43) / 8 == 3584, + size_of_bin(44) == 32768, + size_of_bin(44) / 8 == 4096, + size_of_bin(45) == 40960, + size_of_bin(45) / 8 == 5120, + size_of_bin(46) == 49152, + size_of_bin(46) / 8 == 6144, + size_of_bin(47) == 57344, + size_of_bin(47) / 8 == 7168, + size_of_bin(48) == 65536, + size_of_bin(48) / 8 == 8192, + size_of_bin(49) == 81920, + size_of_bin(49) / 8 == 10240, + size_of_bin(50) == 98304, + size_of_bin(50) / 8 == 12288, + size_of_bin(51) == 114688, + size_of_bin(51) / 8 == 14336, + size_of_bin(52) == 131072, + size_of_bin(52) / 8 == 16384, + size_of_bin(53) == 163840, + size_of_bin(53) / 8 == 20480, + size_of_bin(54) == 196608, + size_of_bin(54) / 8 == 24576, + size_of_bin(55) == 229376, + size_of_bin(55) / 8 == 28672, + size_of_bin(56) == 262144, + size_of_bin(56) / 8 == 32768, + size_of_bin(57) == 327680, + size_of_bin(57) / 8 == 40960, + size_of_bin(58) == 393216, + size_of_bin(58) / 8 == 49152, + size_of_bin(59) == 458752, + size_of_bin(59) / 8 == 57344, + size_of_bin(60) == 524288, + size_of_bin(60) / 8 == 65536, + size_of_bin(61) == 655360, + size_of_bin(61) / 8 == 81920, + size_of_bin(62) == 786432, + size_of_bin(62) / 8 == 98304, + size_of_bin(63) == 917504, + size_of_bin(63) / 8 == 114688, + size_of_bin(64) == 1048576, + size_of_bin(64) / 8 == 131072, + size_of_bin(65) == 1310720, + size_of_bin(65) / 8 == 163840, + size_of_bin(66) == 1572864, + size_of_bin(66) / 8 == 196608, + size_of_bin(67) == 1835008, + size_of_bin(67) / 8 == 229376, + size_of_bin(68) == 2097152, + size_of_bin(68) / 8 == 262144, + size_of_bin(69) == 2621440, + size_of_bin(69) / 8 == 327680, + size_of_bin(70) == 3145728, + size_of_bin(70) / 8 == 393216, + size_of_bin(71) == 3670016, + size_of_bin(71) / 8 == 458752, + size_of_bin(72) == 4194304, + size_of_bin(72) / 8 == 524288, + size_of_bin(73) == 4194312, + size_of_bin(73) / 8 == 524289, +{ + assert(size_of_bin(1) == 8) by (compute_only); + assert(size_of_bin(2) == 16) by (compute_only); + assert(size_of_bin(3) == 24) by (compute_only); + assert(size_of_bin(4) == 32) by (compute_only); + assert(size_of_bin(5) == 40) by (compute_only); + assert(size_of_bin(6) == 48) by (compute_only); + assert(size_of_bin(7) == 56) by (compute_only); + assert(size_of_bin(8) == 64) by (compute_only); + assert(size_of_bin(9) == 80) by (compute_only); + assert(size_of_bin(10) == 96) by (compute_only); + assert(size_of_bin(11) == 112) by (compute_only); + assert(size_of_bin(12) == 128) by (compute_only); + assert(size_of_bin(13) == 160) by (compute_only); + assert(size_of_bin(14) == 192) by (compute_only); + assert(size_of_bin(15) == 224) by (compute_only); + assert(size_of_bin(16) == 256) by (compute_only); + assert(size_of_bin(17) == 320) by (compute_only); + assert(size_of_bin(18) == 384) by (compute_only); + assert(size_of_bin(19) == 448) by (compute_only); + 
assert(size_of_bin(20) == 512) by (compute_only); + assert(size_of_bin(21) == 640) by (compute_only); + assert(size_of_bin(22) == 768) by (compute_only); + assert(size_of_bin(23) == 896) by (compute_only); + assert(size_of_bin(24) == 1024) by (compute_only); + assert(size_of_bin(25) == 1280) by (compute_only); + assert(size_of_bin(26) == 1536) by (compute_only); + assert(size_of_bin(27) == 1792) by (compute_only); + assert(size_of_bin(28) == 2048) by (compute_only); + assert(size_of_bin(29) == 2560) by (compute_only); + assert(size_of_bin(30) == 3072) by (compute_only); + assert(size_of_bin(31) == 3584) by (compute_only); + assert(size_of_bin(32) == 4096) by (compute_only); + assert(size_of_bin(33) == 5120) by (compute_only); + assert(size_of_bin(34) == 6144) by (compute_only); + assert(size_of_bin(35) == 7168) by (compute_only); + assert(size_of_bin(36) == 8192) by (compute_only); + assert(size_of_bin(37) == 10240) by (compute_only); + assert(size_of_bin(38) == 12288) by (compute_only); + assert(size_of_bin(39) == 14336) by (compute_only); + assert(size_of_bin(40) == 16384) by (compute_only); + assert(size_of_bin(41) == 20480) by (compute_only); + assert(size_of_bin(42) == 24576) by (compute_only); + assert(size_of_bin(43) == 28672) by (compute_only); + assert(size_of_bin(44) == 32768) by (compute_only); + assert(size_of_bin(45) == 40960) by (compute_only); + assert(size_of_bin(46) == 49152) by (compute_only); + assert(size_of_bin(47) == 57344) by (compute_only); + assert(size_of_bin(48) == 65536) by (compute_only); + assert(size_of_bin(49) == 81920) by (compute_only); + assert(size_of_bin(50) == 98304) by (compute_only); + assert(size_of_bin(51) == 114688) by (compute_only); + assert(size_of_bin(52) == 131072) by (compute_only); + assert(size_of_bin(53) == 163840) by (compute_only); + assert(size_of_bin(54) == 196608) by (compute_only); + assert(size_of_bin(55) == 229376) by (compute_only); + assert(size_of_bin(56) == 262144) by (compute_only); + assert(size_of_bin(57) == 327680) by (compute_only); + assert(size_of_bin(58) == 393216) by (compute_only); + assert(size_of_bin(59) == 458752) by (compute_only); + assert(size_of_bin(60) == 524288) by (compute_only); + assert(size_of_bin(61) == 655360) by (compute_only); + assert(size_of_bin(62) == 786432) by (compute_only); + assert(size_of_bin(63) == 917504) by (compute_only); + assert(size_of_bin(64) == 1048576) by (compute_only); + assert(size_of_bin(65) == 1310720) by (compute_only); + assert(size_of_bin(66) == 1572864) by (compute_only); + assert(size_of_bin(67) == 1835008) by (compute_only); + assert(size_of_bin(68) == 2097152) by (compute_only); + assert(size_of_bin(69) == 2621440) by (compute_only); + assert(size_of_bin(70) == 3145728) by (compute_only); + assert(size_of_bin(71) == 3670016) by (compute_only); + assert(size_of_bin(72) == 4194304) by (compute_only); + assert(size_of_bin(73) == 8 * (524288 + 1)) by (compute_only); +} + +/** Put our desired property into a proof-by-compute-friendly form **/ + +spec fn property_idx_out_of_range_has_different_bin_size(bin_idx: int, wsize: int) -> bool { + valid_bin_idx(bin_idx) && !(pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)) && 0 <= wsize + <= 128 ==> smallest_bin_fitting_size(wsize * INTPTR_SIZE) != bin_idx +} + +spec fn check_idx_out_of_range_has_different_bin_size( + bin_idx: int, + wsize_start: int, + wsize_end: int, +) -> bool + decreases wsize_end - wsize_start, +{ + if wsize_start >= wsize_end { + true + } else { + property_idx_out_of_range_has_different_bin_size(bin_idx, 
wsize_start) + && check_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start + 1, wsize_end) + } +} + +proof fn result_idx_out_of_range_has_different_bin_size( + bin_idx: int, + wsize_start: int, + wsize_end: int, +) + ensures + check_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start, wsize_end) ==> (forall| + wsize, + | + wsize_start <= wsize < wsize_end ==> property_idx_out_of_range_has_different_bin_size( + bin_idx, wsize, - | - wsize_start <= wsize < wsize_end ==> property_idx_out_of_range_has_different_bin_size( - bin_idx, - wsize, - )), - decreases wsize_end - wsize_start, - { - if wsize_start >= wsize_end { - } else { - result_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start + 1, wsize_end); - } + )), + decreases wsize_end - wsize_start, +{ + if wsize_start >= wsize_end { + } else { + result_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start + 1, wsize_end); } +} - spec fn check2_idx_out_of_range_has_different_bin_size( - bin_idx_start: int, - bin_idx_end: int, - wsize_start: int, - wsize_end: int, - ) -> bool - decreases bin_idx_end - bin_idx_start, - { - if bin_idx_start >= bin_idx_end { - true - } else { - check_idx_out_of_range_has_different_bin_size(bin_idx_start, wsize_start, wsize_end) - && check2_idx_out_of_range_has_different_bin_size( - bin_idx_start + 1, - bin_idx_end, - wsize_start, - wsize_end, - ) - } +spec fn check2_idx_out_of_range_has_different_bin_size( + bin_idx_start: int, + bin_idx_end: int, + wsize_start: int, + wsize_end: int, +) -> bool + decreases bin_idx_end - bin_idx_start, +{ + if bin_idx_start >= bin_idx_end { + true + } else { + check_idx_out_of_range_has_different_bin_size(bin_idx_start, wsize_start, wsize_end) + && check2_idx_out_of_range_has_different_bin_size( + bin_idx_start + 1, + bin_idx_end, + wsize_start, + wsize_end, + ) } +} - proof fn result2_idx_out_of_range_has_different_bin_size( - bin_idx_start: int, - bin_idx_end: int, - wsize_start: int, - wsize_end: int, - ) - ensures - check2_idx_out_of_range_has_different_bin_size( - bin_idx_start, - bin_idx_end, - wsize_start, - wsize_end, - ) ==> (forall|bin_idx, wsize| - bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize < wsize_end - ==> property_idx_out_of_range_has_different_bin_size(bin_idx, wsize)), - decreases bin_idx_end - bin_idx_start, - { - if bin_idx_start >= bin_idx_end { - } else { - result2_idx_out_of_range_has_different_bin_size( - bin_idx_start + 1, - bin_idx_end, - wsize_start, - wsize_end, - ); - if check2_idx_out_of_range_has_different_bin_size( - bin_idx_start, - bin_idx_end, - wsize_start, - wsize_end, - ) { - assert forall|bin_idx, wsize| - bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize - < wsize_end implies property_idx_out_of_range_has_different_bin_size( - bin_idx, - wsize, - ) by { - result_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start, wsize_end); - } +proof fn result2_idx_out_of_range_has_different_bin_size( + bin_idx_start: int, + bin_idx_end: int, + wsize_start: int, + wsize_end: int, +) + ensures + check2_idx_out_of_range_has_different_bin_size( + bin_idx_start, + bin_idx_end, + wsize_start, + wsize_end, + ) ==> (forall|bin_idx, wsize| + bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize < wsize_end + ==> property_idx_out_of_range_has_different_bin_size(bin_idx, wsize)), + decreases bin_idx_end - bin_idx_start, +{ + if bin_idx_start >= bin_idx_end { + } else { + result2_idx_out_of_range_has_different_bin_size( + bin_idx_start + 1, + bin_idx_end, + wsize_start, + 
wsize_end, + ); + if check2_idx_out_of_range_has_different_bin_size( + bin_idx_start, + bin_idx_end, + wsize_start, + wsize_end, + ) { + assert forall|bin_idx, wsize| + bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize + < wsize_end implies property_idx_out_of_range_has_different_bin_size( + bin_idx, + wsize, + ) by { + result_idx_out_of_range_has_different_bin_size(bin_idx, wsize_start, wsize_end); } } } +} - pub proof fn different_bin_size(bin_idx1: int, bin_idx2: int) - requires - valid_bin_idx(bin_idx1), - valid_bin_idx(bin_idx2), - bin_idx1 != bin_idx2, - ensures - size_of_bin(bin_idx1) != size_of_bin(bin_idx2), - { - lemma_bin_sizes_constants(); - } +pub proof fn different_bin_size(bin_idx1: int, bin_idx2: int) + requires + valid_bin_idx(bin_idx1), + valid_bin_idx(bin_idx2), + bin_idx1 != bin_idx2, + ensures + size_of_bin(bin_idx1) != size_of_bin(bin_idx2), +{ + lemma_bin_sizes_constants(); +} - pub proof fn idx_out_of_range_has_different_bin_size(bin_idx: int, wsize: int) - requires - valid_bin_idx(bin_idx), - !(pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)), - 0 <= wsize <= 128, - ensures - smallest_bin_fitting_size(wsize * INTPTR_SIZE) != bin_idx, - { - lemma_bin_sizes_constants(); - assert(usize::BITS / 8 == 8) by (nonlinear_arith); - assert(((wsize * 8) + 8 - 1) / 8 == wsize) by (nonlinear_arith); - if wsize <= 1 { - } else if wsize <= 8 { - } else { - assert(9 <= wsize <= 128); - assert(72 <= wsize * INTPTR_SIZE <= 1024); - assert(check2_idx_out_of_range_has_different_bin_size(1, 74, 9, 129)) by (compute_only); - //assume(check2_idx_out_of_range_has_different_bin_size(1, 74, 9, 129)); - result2_idx_out_of_range_has_different_bin_size(1, 74, 9, 129); - assert(property_idx_out_of_range_has_different_bin_size(bin_idx, wsize)); // Trigger result2_idx_out_of_range_has_different_bin_size - } +pub proof fn idx_out_of_range_has_different_bin_size(bin_idx: int, wsize: int) + requires + valid_bin_idx(bin_idx), + !(pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)), + 0 <= wsize <= 128, + ensures + smallest_bin_fitting_size(wsize * INTPTR_SIZE) != bin_idx, +{ + lemma_bin_sizes_constants(); + assert(usize::BITS / 8 == 8) by (nonlinear_arith); + assert(((wsize * 8) + 8 - 1) / 8 == wsize) by (nonlinear_arith); + if wsize <= 1 { + } else if wsize <= 8 { + } else { + assert(9 <= wsize <= 128); + assert(72 <= wsize * INTPTR_SIZE <= 1024); + assert(check2_idx_out_of_range_has_different_bin_size(1, 74, 9, 129)) by (compute_only); + //assume(check2_idx_out_of_range_has_different_bin_size(1, 74, 9, 129)); + result2_idx_out_of_range_has_different_bin_size(1, 74, 9, 129); + assert(property_idx_out_of_range_has_different_bin_size(bin_idx, wsize)); // Trigger result2_idx_out_of_range_has_different_bin_size } +} - /******************************************************** +/******************************************************** * TODO: All of these should be standard library proofs ********************************************************/ - proof fn div2(x: u64, y: int) - by (nonlinear_arith) - requires - y > 0, - ensures - x as int / (y * 2) == (x as int / y) / 2, - { - } +proof fn div2(x: u64, y: int) + by (nonlinear_arith) + requires + y > 0, + ensures + x as int / (y * 2) == (x as int / y) / 2, +{ +} - proof fn lemma_div_is_ordered(x: int, y: int, z: int) - by (nonlinear_arith) - requires - x <= y, - 0 < z, - ensures - x / z <= y / z, - { - } +proof fn lemma_div_is_ordered(x: int, y: int, z: int) + by (nonlinear_arith) + requires + x <= y, + 0 < z, + ensures + x / z <= y / z, +{ +} 
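+// Editorial note (illustrative sketch only, not part of the patched file):
+// the arithmetic helpers in this block are meant to be instantiated at
+// concrete arguments from inside larger proofs. For example, a hypothetical
+// caller could derive x / 4 == (x / 2) / 2 for a u64 `x` by invoking
+// `div2(x, 2)`, since with y == 2 the postcondition
+// x as int / (y * 2) == (x as int / y) / 2 specializes to exactly that
+// equality. The function name `example_div_by_four` below is an assumption
+// made up for exposition; only `div2`, `lemma_div_is_ordered`, and the other
+// lemmas actually present in this file exist.
+//
+//     proof fn example_div_by_four(x: u64)
+//         ensures
+//             x as int / 4 == (x as int / 2) / 2,
+//     {
+//         // Instantiate the nonlinear-arith helper at y == 2.
+//         div2(x, 2);
+//     }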
- pub proof fn lemma_div_by_multiple(b: int, d: int) - by (nonlinear_arith) - requires - 0 <= b, - 0 < d, - ensures - (b * d) / d == b, - { - } +pub proof fn lemma_div_by_multiple(b: int, d: int) + by (nonlinear_arith) + requires + 0 <= b, + 0 < d, + ensures + (b * d) / d == b, +{ +} - proof fn mul_assoc(x: nat, y: nat, z: nat) - by (nonlinear_arith) - ensures - (x * y) * z == y * (x * z), - { - } +proof fn mul_assoc(x: nat, y: nat, z: nat) + by (nonlinear_arith) + ensures + (x * y) * z == y * (x * z), +{ +} - proof fn mul_ordering(x: nat, y: nat, z: nat) - by (nonlinear_arith) - requires - 0 < x && 1 < y && 0 < z, - x * y == z, - ensures - x < z, - { - } +proof fn mul_ordering(x: nat, y: nat, z: nat) + by (nonlinear_arith) + requires + 0 < x && 1 < y && 0 < z, + x * y == z, + ensures + x < z, +{ +} - proof fn pow2_positive(e: int) - ensures - pow2(e) > 0, - decreases e, - { - if e <= 0 { - } else { - pow2_positive(e - 1); - } +proof fn pow2_positive(e: int) + ensures + pow2(e) > 0, + decreases e, +{ + if e <= 0 { + } else { + pow2_positive(e - 1); } +} - proof fn pow2_adds(e1: nat, e2: nat) - ensures - pow2(e1 as int) * pow2(e2 as int) == pow2((e1 + e2) as int), - decreases e1, - { - if e1 == 0 { - } else { - calc! { (==) +proof fn pow2_adds(e1: nat, e2: nat) + ensures + pow2(e1 as int) * pow2(e2 as int) == pow2((e1 + e2) as int), + decreases e1, +{ + if e1 == 0 { + } else { + calc! { (==) pow2(e1 as int) * pow2(e2 as int); {} (pow2((e1 as int - 1) as int) * 2) * pow2(e2 as int); { mul_assoc(pow2((e1 as int - 1) as int), 2, pow2(e2 as int)); } @@ -6490,16 +6475,16 @@ mod bin_sizes { 2 * pow2((e1 - 1 + e2) as int); {} pow2((e1 + e2) as int); } - } } +} - proof fn pow2_subtracts(e1: nat, e2: nat) - requires - e1 <= e2, - ensures - pow2(e2 as int) / pow2(e1 as int) == pow2((e2 - e1) as int), - { - calc! { (==) +proof fn pow2_subtracts(e1: nat, e2: nat) + requires + e1 <= e2, + ensures + pow2(e2 as int) / pow2(e1 as int) == pow2((e2 - e1) as int), +{ + calc! 
{ (==) pow2(e2 as int) / pow2(e1 as int); { pow2_adds((e2 - e1) as nat, e1); } pow2((e2 - e1) as int) * pow2(e1 as int) / pow2(e1 as int); @@ -6509,75 +6494,68 @@ mod bin_sizes { } pow2((e2 - e1) as int); } - } +} - proof fn pow2_properties() - ensures - forall|e: int| pow2(e) > 0, - forall|e: int| - e > 0 ==> #[trigger] - pow2(e) / 2 == pow2(e - 1), - forall|e1, e2| 0 <= e1 < e2 ==> pow2(e1) < pow2(e2), - forall|e1, e2| - 0 <= e1 && 0 <= e2 ==> pow2(e1) * pow2(e2) == #[trigger] - pow2(e1 + e2), - forall|e1, e2| - 0 <= e1 <= e2 ==> pow2(e2) / pow2(e1) == #[trigger] - pow2(e2 - e1), - { - assert forall|e: int| pow2(e) > 0 by { - pow2_positive(e); - } - assert forall|e: int| e > 0 implies #[trigger] - pow2(e) / 2 == pow2(e - 1) by { - assert(pow2(1) == 2) by (compute_only); - pow2_subtracts(1, e as nat); - } - assert forall|e1, e2| 0 <= e1 < e2 implies pow2(e1) < pow2(e2) by { - let diff = e2 - e1; - assert(pow2(diff) > 1); - pow2_positive(diff); - pow2_positive(e1); - pow2_positive(e2); - assert(pow2(e1) * pow2(diff) == pow2(e2)) by { - pow2_adds(e1 as nat, diff as nat); - } - mul_ordering(pow2(e1), pow2(diff), pow2(e2)); - } - assert forall|e1, e2| 0 <= e1 && 0 <= e2 implies pow2(e1) * pow2(e2) == #[trigger] - pow2(e1 + e2) by { - pow2_adds(e1 as nat, e2 as nat); - } - assert forall|e1, e2| 0 <= e1 <= e2 implies pow2(e2) / pow2(e1) == #[trigger] - pow2(e2 - e1) by { - pow2_subtracts(e1 as nat, e2 as nat); +proof fn pow2_properties() + ensures + forall|e: int| pow2(e) > 0, + forall|e: int| e > 0 ==> #[trigger] pow2(e) / 2 == pow2(e - 1), + forall|e1, e2| 0 <= e1 < e2 ==> pow2(e1) < pow2(e2), + forall|e1, e2| 0 <= e1 && 0 <= e2 ==> pow2(e1) * pow2(e2) == #[trigger] pow2(e1 + e2), + forall|e1, e2| 0 <= e1 <= e2 ==> pow2(e2) / pow2(e1) == #[trigger] pow2(e2 - e1), +{ + assert forall|e: int| pow2(e) > 0 by { + pow2_positive(e); + } + assert forall|e: int| e > 0 implies #[trigger] pow2(e) / 2 == pow2(e - 1) by { + assert(pow2(1) == 2) by (compute_only); + pow2_subtracts(1, e as nat); + } + assert forall|e1, e2| 0 <= e1 < e2 implies pow2(e1) < pow2(e2) by { + let diff = e2 - e1; + assert(pow2(diff) > 1); + pow2_positive(diff); + pow2_positive(e1); + pow2_positive(e2); + assert(pow2(e1) * pow2(diff) == pow2(e2)) by { + pow2_adds(e1 as nat, diff as nat); } + mul_ordering(pow2(e1), pow2(diff), pow2(e2)); + } + assert forall|e1, e2| 0 <= e1 && 0 <= e2 implies pow2(e1) * pow2(e2) == #[trigger] pow2( + e1 + e2, + ) by { + pow2_adds(e1 as nat, e2 as nat); } + assert forall|e1, e2| 0 <= e1 <= e2 implies pow2(e2) / pow2(e1) == #[trigger] pow2(e2 - e1) by { + pow2_subtracts(e1 as nat, e2 as nat); + } +} - proof fn shift_is_div(x: u64, shift: u64) - requires - 0 <= shift < 64, - ensures - x >> shift == x as nat / pow2(shift as int), - decreases shift, - { - if shift == 0 { - assert(x >> 0 == x) by (bit_vector); - assert(pow2(0) == 1) by (compute_only); - } else { - assert(x >> shift == (x >> ((sub(shift, 1)) as u64)) / 2) by (bit_vector) - requires - 0 < shift < 64, - ; - assert(x as nat / pow2(shift as int) == (x as nat / (pow2((shift - 1) as int) * pow2(1)))) - by { - pow2_adds((shift - 1) as nat, 1); - } - assert(x as nat / pow2(shift as int) == (x as nat / pow2((shift - 1) as int)) / 2) by { - pow2_positive((shift - 1) as int); - div2(x, pow2((shift - 1) as int) as int); - } - calc!{ (==) +proof fn shift_is_div(x: u64, shift: u64) + requires + 0 <= shift < 64, + ensures + x >> shift == x as nat / pow2(shift as int), + decreases shift, +{ + if shift == 0 { + assert(x >> 0 == x) by (bit_vector); + 
assert(pow2(0) == 1) by (compute_only); + } else { + assert(x >> shift == (x >> ((sub(shift, 1)) as u64)) / 2) by (bit_vector) + requires + 0 < shift < 64, + ; + assert(x as nat / pow2(shift as int) == (x as nat / (pow2((shift - 1) as int) * pow2(1)))) + by { + pow2_adds((shift - 1) as nat, 1); + } + assert(x as nat / pow2(shift as int) == (x as nat / pow2((shift - 1) as int)) / 2) by { + pow2_positive((shift - 1) as int); + div2(x, pow2((shift - 1) as int) as int); + } + calc!{ (==) (x >> shift) as nat; {} ((x >> ((sub(shift, 1)) as u64)) / 2) as nat; @@ -6586,763 +6564,764 @@ mod bin_sizes { {} x as nat / pow2(shift as int); } - } } +} - /******************************************************** +/******************************************************** * END: All of these should be standard library proofs ********************************************************/ - proof fn leading_zeros_powers_of_2(i: u64, exp: nat) - requires - i == pow2(exp as int), - exp < 64, - ensures - u64_leading_zeros(i) == 64 - exp - 1, - decreases i, - { - assert(pow2(0) == 1); - reveal(u64_leading_zeros); - if exp == 0 { - assert(u64_leading_zeros(1) == 63) by (compute_only); - } else { - assert(pow2(exp as int) > pow2(0)) by { - pow2_properties(); - } - assert(i / 2 == pow2(exp as int) / 2 == pow2(exp as int - 1)) by { - pow2_properties(); - } - assert(pow2(exp as int - 1) < pow2(exp as int)) by { - pow2_properties(); - } - leading_zeros_powers_of_2(i / 2, (exp - 1) as nat); - assert(u64_leading_zeros(i / 2) == 64 - (exp - 1) - 1); - assert(u64_leading_zeros(i) == (u64_leading_zeros(i / 2) - 1) as u32 == (64 - (exp - 1) - 1 - - 1) as u32 == (64 - exp - 1) as u32); - } - } - - proof fn leading_zeros_between_powers_of_2(i: u64, exp: nat) - requires - pow2(exp as int) <= i < pow2((exp + 1) as int), - 1 <= exp < 64, - ensures - u64_leading_zeros(i) == 64 - exp - 1, - decreases exp, - { - reveal(u64_leading_zeros); - if exp == 1 { - assert(pow2(1) == 2 && pow2(2) == 4) by (compute_only); - assert(2 <= i < 4); - assert(u64_leading_zeros(2) == 64 - 1 - 1) by (compute_only); - assert(u64_leading_zeros(3) == 64 - 1 - 1) by (compute_only); - } else { - assert(i / 2 < pow2(exp as int)); - assert(pow2((exp - 1) as int) <= i / 2); - leading_zeros_between_powers_of_2(i / 2, (exp - 1) as nat); +proof fn leading_zeros_powers_of_2(i: u64, exp: nat) + requires + i == pow2(exp as int), + exp < 64, + ensures + u64_leading_zeros(i) == 64 - exp - 1, + decreases i, +{ + assert(pow2(0) == 1); + reveal(u64_leading_zeros); + if exp == 0 { + assert(u64_leading_zeros(1) == 63) by (compute_only); + } else { + assert(pow2(exp as int) > pow2(0)) by { + pow2_properties(); } - } - - proof fn log2(i: u64) -> (e: nat) - requires - i >= 1, - ensures - pow2(e as int) <= i < pow2((e + 1) as int), - decreases i, - { - if i == 1 { - 0 - } else { - log2(i / 2) + 1 + assert(i / 2 == pow2(exp as int) / 2 == pow2(exp as int - 1)) by { + pow2_properties(); } - } - - /** Put our desired property into a proof-by-compute-friendly form **/ - - spec fn property_idx_in_range_has_bin_size(bin_idx: int, wsize: int) -> bool { - valid_bin_idx(bin_idx) && (pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)) - ==> smallest_bin_fitting_size(wsize * INTPTR_SIZE) == bin_idx - } - - spec fn check_idx_in_range_has_bin_size(bin_idx: int, wsize_start: int, wsize_end: int) -> bool - decreases wsize_end - wsize_start, - { - if wsize_start >= wsize_end { - true - } else { - property_idx_in_range_has_bin_size(bin_idx, wsize_start) && check_idx_in_range_has_bin_size( - bin_idx, 
- wsize_start + 1, - wsize_end, - ) + assert(pow2(exp as int - 1) < pow2(exp as int)) by { + pow2_properties(); } + leading_zeros_powers_of_2(i / 2, (exp - 1) as nat); + assert(u64_leading_zeros(i / 2) == 64 - (exp - 1) - 1); + assert(u64_leading_zeros(i) == (u64_leading_zeros(i / 2) - 1) as u32 == (64 - (exp - 1) - 1 + - 1) as u32 == (64 - exp - 1) as u32); } +} - proof fn result_idx_in_range_has_bin_size(bin_idx: int, wsize_start: int, wsize_end: int) - ensures - check_idx_in_range_has_bin_size(bin_idx, wsize_start, wsize_end) ==> (forall|wsize| - wsize_start <= wsize < wsize_end ==> property_idx_in_range_has_bin_size( - bin_idx, - wsize, - )), - decreases wsize_end - wsize_start, - { - if wsize_start >= wsize_end { - } else { - result_idx_in_range_has_bin_size(bin_idx, wsize_start + 1, wsize_end); - } +proof fn leading_zeros_between_powers_of_2(i: u64, exp: nat) + requires + pow2(exp as int) <= i < pow2((exp + 1) as int), + 1 <= exp < 64, + ensures + u64_leading_zeros(i) == 64 - exp - 1, + decreases exp, +{ + reveal(u64_leading_zeros); + if exp == 1 { + assert(pow2(1) == 2 && pow2(2) == 4) by (compute_only); + assert(2 <= i < 4); + assert(u64_leading_zeros(2) == 64 - 1 - 1) by (compute_only); + assert(u64_leading_zeros(3) == 64 - 1 - 1) by (compute_only); + } else { + assert(i / 2 < pow2(exp as int)); + assert(pow2((exp - 1) as int) <= i / 2); + leading_zeros_between_powers_of_2(i / 2, (exp - 1) as nat); } +} - spec fn check2_idx_in_range_has_bin_size( - bin_idx_start: int, - bin_idx_end: int, - wsize_start: int, - wsize_end: int, - ) -> bool - decreases bin_idx_end - bin_idx_start, - { - if bin_idx_start >= bin_idx_end { - true - } else { - check_idx_in_range_has_bin_size(bin_idx_start, wsize_start, wsize_end) - && check2_idx_in_range_has_bin_size( - bin_idx_start + 1, - bin_idx_end, - wsize_start, - wsize_end, - ) - } +proof fn log2(i: u64) -> (e: nat) + requires + i >= 1, + ensures + pow2(e as int) <= i < pow2((e + 1) as int), + decreases i, +{ + if i == 1 { + 0 + } else { + log2(i / 2) + 1 } +} - proof fn result2_idx_in_range_has_bin_size( - bin_idx_start: int, - bin_idx_end: int, - wsize_start: int, - wsize_end: int, - ) - ensures - check2_idx_in_range_has_bin_size(bin_idx_start, bin_idx_end, wsize_start, wsize_end) ==> ( - forall|bin_idx, wsize| - bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize < wsize_end - ==> property_idx_in_range_has_bin_size(bin_idx, wsize)), - decreases bin_idx_end - bin_idx_start, - { - if bin_idx_start >= bin_idx_end { - } else { - result2_idx_in_range_has_bin_size(bin_idx_start + 1, bin_idx_end, wsize_start, wsize_end); - if check2_idx_in_range_has_bin_size(bin_idx_start, bin_idx_end, wsize_start, wsize_end) { - assert forall|bin_idx, wsize| - bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize - < wsize_end implies property_idx_in_range_has_bin_size(bin_idx, wsize) by { - result_idx_in_range_has_bin_size(bin_idx, wsize_start, wsize_end); - } - } - } - } +/** Put our desired property into a proof-by-compute-friendly form **/ - pub proof fn idx_in_range_has_bin_size(bin_idx: int, wsize: int) - requires - valid_bin_idx(bin_idx), - (pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)), - wsize <= 128, - ensures - smallest_bin_fitting_size(wsize * INTPTR_SIZE) == bin_idx, - { - lemma_bin_sizes_constants(); - assert(INTPTR_SIZE == 8); - assert(usize::BITS / 8 == 8) by (nonlinear_arith); - assert(((wsize * 8) + 8 - 1) / 8 == wsize) by (nonlinear_arith); - if wsize <= 1 { - } else if wsize <= 8 { - } else if wsize > 524288 { - } else { - 
assert(8 < wsize <= 128); - assert(check2_idx_in_range_has_bin_size(1, 74, 9, 129)) by (compute_only); - //assume(check2_idx_in_range_has_bin_size(1, 74, 9, 129)); - result2_idx_in_range_has_bin_size(1, 74, 9, 129); - assert(property_idx_in_range_has_bin_size(bin_idx, wsize)); // Trigger result2_idx_in_range_has_bin_size - } - } +spec fn property_idx_in_range_has_bin_size(bin_idx: int, wsize: int) -> bool { + valid_bin_idx(bin_idx) && (pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)) + ==> smallest_bin_fitting_size(wsize * INTPTR_SIZE) == bin_idx +} - pub proof fn pfd_lower_le_upper(bin_idx: int) - requires - valid_bin_idx(bin_idx), - ensures - pfd_lower(bin_idx) <= pfd_upper(bin_idx), - { - lemma_bin_sizes_constants(); +spec fn check_idx_in_range_has_bin_size(bin_idx: int, wsize_start: int, wsize_end: int) -> bool + decreases wsize_end - wsize_start, +{ + if wsize_start >= wsize_end { + true + } else { + property_idx_in_range_has_bin_size(bin_idx, wsize_start) && check_idx_in_range_has_bin_size( + bin_idx, + wsize_start + 1, + wsize_end, + ) } +} - pub proof fn size_of_bin_bounds(b: int) - requires - valid_bin_idx(b), - ensures - size_of_bin(b) >= INTPTR_SIZE, - { - lemma_bin_sizes_constants(); +proof fn result_idx_in_range_has_bin_size(bin_idx: int, wsize_start: int, wsize_end: int) + ensures + check_idx_in_range_has_bin_size(bin_idx, wsize_start, wsize_end) ==> (forall|wsize| + wsize_start <= wsize < wsize_end ==> property_idx_in_range_has_bin_size( + bin_idx, + wsize, + )), + decreases wsize_end - wsize_start, +{ + if wsize_start >= wsize_end { + } else { + result_idx_in_range_has_bin_size(bin_idx, wsize_start + 1, wsize_end); } +} - pub proof fn size_of_bin_bounds_not_huge(b: int) - requires - valid_bin_idx(b), - b != BIN_HUGE, - ensures - 8 <= size_of_bin(b) <= 4194304, - { - lemma_bin_sizes_constants(); +spec fn check2_idx_in_range_has_bin_size( + bin_idx_start: int, + bin_idx_end: int, + wsize_start: int, + wsize_end: int, +) -> bool + decreases bin_idx_end - bin_idx_start, +{ + if bin_idx_start >= bin_idx_end { + true + } else { + check_idx_in_range_has_bin_size(bin_idx_start, wsize_start, wsize_end) + && check2_idx_in_range_has_bin_size( + bin_idx_start + 1, + bin_idx_end, + wsize_start, + wsize_end, + ) } +} - pub proof fn out_of_small_range(bin_idx: int) - requires - valid_bin_idx(bin_idx), - size_of_bin(bin_idx) > SMALL_SIZE_MAX, - ensures - pfd_lower(bin_idx) >= PAGES_DIRECT, - { - lemma_bin_sizes_constants(); +proof fn result2_idx_in_range_has_bin_size( + bin_idx_start: int, + bin_idx_end: int, + wsize_start: int, + wsize_end: int, +) + ensures + check2_idx_in_range_has_bin_size(bin_idx_start, bin_idx_end, wsize_start, wsize_end) ==> ( + forall|bin_idx, wsize| + bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize < wsize_end + ==> property_idx_in_range_has_bin_size(bin_idx, wsize)), + decreases bin_idx_end - bin_idx_start, +{ + if bin_idx_start >= bin_idx_end { + } else { + result2_idx_in_range_has_bin_size(bin_idx_start + 1, bin_idx_end, wsize_start, wsize_end); + if check2_idx_in_range_has_bin_size(bin_idx_start, bin_idx_end, wsize_start, wsize_end) { + assert forall|bin_idx, wsize| + bin_idx_start <= bin_idx < bin_idx_end && wsize_start <= wsize + < wsize_end implies property_idx_in_range_has_bin_size(bin_idx, wsize) by { + result_idx_in_range_has_bin_size(bin_idx, wsize_start, wsize_end); + } + } } +} - pub proof fn size_le_8_implies_idx_eq_1(bin_idx: int) - requires - valid_bin_idx(bin_idx), - size_of_bin(bin_idx) / 8 <= 1, - ensures - bin_idx == 1, - { - 
lemma_bin_sizes_constants(); +pub proof fn idx_in_range_has_bin_size(bin_idx: int, wsize: int) + requires + valid_bin_idx(bin_idx), + (pfd_lower(bin_idx) <= wsize <= pfd_upper(bin_idx)), + wsize <= 128, + ensures + smallest_bin_fitting_size(wsize * INTPTR_SIZE) == bin_idx, +{ + lemma_bin_sizes_constants(); + assert(INTPTR_SIZE == 8); + assert(usize::BITS / 8 == 8) by (nonlinear_arith); + assert(((wsize * 8) + 8 - 1) / 8 == wsize) by (nonlinear_arith); + if wsize <= 1 { + } else if wsize <= 8 { + } else if wsize > 524288 { + } else { + assert(8 < wsize <= 128); + assert(check2_idx_in_range_has_bin_size(1, 74, 9, 129)) by (compute_only); + //assume(check2_idx_in_range_has_bin_size(1, 74, 9, 129)); + result2_idx_in_range_has_bin_size(1, 74, 9, 129); + assert(property_idx_in_range_has_bin_size(bin_idx, wsize)); // Trigger result2_idx_in_range_has_bin_size } +} - pub proof fn size_gt_8_implies_idx_gt_1(bin_idx: int) - requires - valid_bin_idx(bin_idx), - size_of_bin(bin_idx) / 8 > 1, - ensures - bin_idx > 1, - { - lemma_bin_sizes_constants(); - } +pub proof fn pfd_lower_le_upper(bin_idx: int) + requires + valid_bin_idx(bin_idx), + ensures + pfd_lower(bin_idx) <= pfd_upper(bin_idx), +{ + lemma_bin_sizes_constants(); +} - pub open spec fn pow2(i: int) -> nat - decreases i, - { - if i <= 0 { - 1 - } else { - pow2(i - 1) * 2 - } - } +pub proof fn size_of_bin_bounds(b: int) + requires + valid_bin_idx(b), + ensures + size_of_bin(b) >= INTPTR_SIZE, +{ + lemma_bin_sizes_constants(); +} - /** Put our desired property into a proof-by-compute-friendly form **/ +pub proof fn size_of_bin_bounds_not_huge(b: int) + requires + valid_bin_idx(b), + b != BIN_HUGE, + ensures + 8 <= size_of_bin(b) <= 4194304, +{ + lemma_bin_sizes_constants(); +} - spec fn property_bounds_for_smallest_bitting_size(size: int) -> bool { - valid_bin_idx(smallest_bin_fitting_size(size)) && size_of_bin(smallest_bin_fitting_size(size)) - >= size - } +pub proof fn out_of_small_range(bin_idx: int) + requires + valid_bin_idx(bin_idx), + size_of_bin(bin_idx) > SMALL_SIZE_MAX, + ensures + pfd_lower(bin_idx) >= PAGES_DIRECT, +{ + lemma_bin_sizes_constants(); +} - spec fn check_bounds_for_smallest_bitting_size(size_start: int, size_end: int) -> bool - decreases size_end - size_start, - { - if size_start >= size_end { - true - } else { - property_bounds_for_smallest_bitting_size(size_start) - && check_bounds_for_smallest_bitting_size(size_start + 1, size_end) - } +pub proof fn size_le_8_implies_idx_eq_1(bin_idx: int) + requires + valid_bin_idx(bin_idx), + size_of_bin(bin_idx) / 8 <= 1, + ensures + bin_idx == 1, +{ + lemma_bin_sizes_constants(); +} + +pub proof fn size_gt_8_implies_idx_gt_1(bin_idx: int) + requires + valid_bin_idx(bin_idx), + size_of_bin(bin_idx) / 8 > 1, + ensures + bin_idx > 1, +{ + lemma_bin_sizes_constants(); +} + +pub open spec fn pow2(i: int) -> nat + decreases i, +{ + if i <= 0 { + 1 + } else { + pow2(i - 1) * 2 } +} - proof fn result_bounds_for_smallest_bitting_size(size_start: int, size_end: int) - ensures - check_bounds_for_smallest_bitting_size(size_start, size_end) ==> (forall|size| - size_start <= size < size_end ==> property_bounds_for_smallest_bitting_size(size)), - decreases size_end - size_start, - { - if size_start >= size_end { - } else { - result_bounds_for_smallest_bitting_size(size_start + 1, size_end); - } +/** Put our desired property into a proof-by-compute-friendly form **/ + +spec fn property_bounds_for_smallest_bitting_size(size: int) -> bool { + valid_bin_idx(smallest_bin_fitting_size(size)) && 
size_of_bin(smallest_bin_fitting_size(size)) + >= size +} + +spec fn check_bounds_for_smallest_bitting_size(size_start: int, size_end: int) -> bool + decreases size_end - size_start, +{ + if size_start >= size_end { + true + } else { + property_bounds_for_smallest_bitting_size(size_start) + && check_bounds_for_smallest_bitting_size(size_start + 1, size_end) } +} - pub proof fn bounds_for_smallest_bin_fitting_size(size: int) - requires - 0 <= size <= 128 * 8, - ensures - valid_bin_idx(smallest_bin_fitting_size(size)), - size_of_bin(smallest_bin_fitting_size(size)) >= size, - { - assert(check_bounds_for_smallest_bitting_size(0, (128 * 8 + 1 as int))) by (compute_only); - //assume(check_bounds_for_smallest_bitting_size(0, (128*8+1 as int))); - result_bounds_for_smallest_bitting_size(0, (128 * 8 + 1) as int); - assert(property_bounds_for_smallest_bitting_size(size)); // Trigger result_idx_in_range_has_bin_size +proof fn result_bounds_for_smallest_bitting_size(size_start: int, size_end: int) + ensures + check_bounds_for_smallest_bitting_size(size_start, size_end) ==> (forall|size| + size_start <= size < size_end ==> property_bounds_for_smallest_bitting_size(size)), + decreases size_end - size_start, +{ + if size_start >= size_end { + } else { + result_bounds_for_smallest_bitting_size(size_start + 1, size_end); } +} - /** Put our desired property into a proof-by-compute-friendly form **/ +pub proof fn bounds_for_smallest_bin_fitting_size(size: int) + requires + 0 <= size <= 128 * 8, + ensures + valid_bin_idx(smallest_bin_fitting_size(size)), + size_of_bin(smallest_bin_fitting_size(size)) >= size, +{ + assert(check_bounds_for_smallest_bitting_size(0, (128 * 8 + 1 as int))) by (compute_only); + //assume(check_bounds_for_smallest_bitting_size(0, (128*8+1 as int))); + result_bounds_for_smallest_bitting_size(0, (128 * 8 + 1) as int); + assert(property_bounds_for_smallest_bitting_size(size)); // Trigger result_idx_in_range_has_bin_size +} - spec fn property_smallest_bin_fitting_size_size_of_bin(bin_idx: int) -> bool { - smallest_bin_fitting_size(size_of_bin(bin_idx) as int) == bin_idx - } +/** Put our desired property into a proof-by-compute-friendly form **/ - spec fn check_smallest_bin_fitting_size_size_of_bin(bin_idx_start: int, bin_idx_end: int) -> bool - decreases bin_idx_end - bin_idx_start, - { - if bin_idx_start >= bin_idx_end { - true - } else { - property_smallest_bin_fitting_size_size_of_bin(bin_idx_start) - && check_smallest_bin_fitting_size_size_of_bin(bin_idx_start + 1, bin_idx_end) - } +spec fn property_smallest_bin_fitting_size_size_of_bin(bin_idx: int) -> bool { + smallest_bin_fitting_size(size_of_bin(bin_idx) as int) == bin_idx +} + +spec fn check_smallest_bin_fitting_size_size_of_bin(bin_idx_start: int, bin_idx_end: int) -> bool + decreases bin_idx_end - bin_idx_start, +{ + if bin_idx_start >= bin_idx_end { + true + } else { + property_smallest_bin_fitting_size_size_of_bin(bin_idx_start) + && check_smallest_bin_fitting_size_size_of_bin(bin_idx_start + 1, bin_idx_end) } +} - proof fn result_smallest_bin_fitting_size_size_of_bin(bin_idx_start: int, bin_idx_end: int) - ensures - check_smallest_bin_fitting_size_size_of_bin(bin_idx_start, bin_idx_end) ==> (forall|bin_idx| +proof fn result_smallest_bin_fitting_size_size_of_bin(bin_idx_start: int, bin_idx_end: int) + ensures + check_smallest_bin_fitting_size_size_of_bin(bin_idx_start, bin_idx_end) ==> (forall|bin_idx| - bin_idx_start <= bin_idx < bin_idx_end - ==> property_smallest_bin_fitting_size_size_of_bin(bin_idx)), - decreases 
bin_idx_end - bin_idx_start, - { - if bin_idx_start >= bin_idx_end { - } else { - result_smallest_bin_fitting_size_size_of_bin(bin_idx_start + 1, bin_idx_end); - } + bin_idx_start <= bin_idx < bin_idx_end + ==> property_smallest_bin_fitting_size_size_of_bin(bin_idx)), + decreases bin_idx_end - bin_idx_start, +{ + if bin_idx_start >= bin_idx_end { + } else { + result_smallest_bin_fitting_size_size_of_bin(bin_idx_start + 1, bin_idx_end); } +} - pub proof fn smallest_bin_fitting_size_size_of_bin(bin_idx: int) - requires - valid_bin_idx(bin_idx), - ensures - smallest_bin_fitting_size(size_of_bin(bin_idx) as int) == bin_idx, - { - lemma_bin_sizes_constants(); - assert(forall|j: int| 1 <= j <= 8 ==> (size_of_bin(j) + 8 - 1) / 8 == j); - if 1 <= bin_idx <= 8 { - } else if 8 < bin_idx < 73 { - assert(check_smallest_bin_fitting_size_size_of_bin(9, 73)) by (compute_only); - //assume(check_smallest_bin_fitting_size_size_of_bin(9, 73)); - result_smallest_bin_fitting_size_size_of_bin(9, 73); - assert(property_smallest_bin_fitting_size_size_of_bin(bin_idx)); // Trigger result_smallest_bin_fitting_size_size_of_bin - } else if bin_idx == 73 { - assert((size_of_bin(BIN_HUGE as int) + 8 - 1) / 8 > 524288); - } else { - } +pub proof fn smallest_bin_fitting_size_size_of_bin(bin_idx: int) + requires + valid_bin_idx(bin_idx), + ensures + smallest_bin_fitting_size(size_of_bin(bin_idx) as int) == bin_idx, +{ + lemma_bin_sizes_constants(); + assert(forall|j: int| 1 <= j <= 8 ==> (size_of_bin(j) + 8 - 1) / 8 == j); + if 1 <= bin_idx <= 8 { + } else if 8 < bin_idx < 73 { + assert(check_smallest_bin_fitting_size_size_of_bin(9, 73)) by (compute_only); + //assume(check_smallest_bin_fitting_size_size_of_bin(9, 73)); + result_smallest_bin_fitting_size_size_of_bin(9, 73); + assert(property_smallest_bin_fitting_size_size_of_bin(bin_idx)); // Trigger result_smallest_bin_fitting_size_size_of_bin + } else if bin_idx == 73 { + assert((size_of_bin(BIN_HUGE as int) + 8 - 1) / 8 > 524288); + } else { } +} - proof fn leading_zeros_monotonic(w: u64) - ensures - forall|x: u64| x < w ==> u64_leading_zeros(w) <= u64_leading_zeros(x), - decreases w, - { - if w == 0 { - } else { - reveal(u64_leading_zeros); - assert forall|x: u64| x < w implies u64_leading_zeros(w) <= u64_leading_zeros(x) by { - leading_zeros_monotonic(w / 2); - if x < w / 2 { +proof fn leading_zeros_monotonic(w: u64) + ensures + forall|x: u64| x < w ==> u64_leading_zeros(w) <= u64_leading_zeros(x), + decreases w, +{ + if w == 0 { + } else { + reveal(u64_leading_zeros); + assert forall|x: u64| x < w implies u64_leading_zeros(w) <= u64_leading_zeros(x) by { + leading_zeros_monotonic(w / 2); + if x < w / 2 { + } else { + assert(x / 2 <= w / 2); + if (x / 2 < w / 2) { + assert(u64_leading_zeros(w / 2) <= u64_leading_zeros(x / 2)); } else { - assert(x / 2 <= w / 2); - if (x / 2 < w / 2) { - assert(u64_leading_zeros(w / 2) <= u64_leading_zeros(x / 2)); - } else { - } } } } } +} - proof fn leading_zeros_between(lo: u64, mid: u64, hi: u64) - requires - lo <= mid < hi, - ensures - u64_leading_zeros(lo) >= u64_leading_zeros(mid) >= u64_leading_zeros(hi), - { - leading_zeros_monotonic(hi); - leading_zeros_monotonic(mid); - } - - /** Put our desired property into a proof-by-compute-friendly form **/ +proof fn leading_zeros_between(lo: u64, mid: u64, hi: u64) + requires + lo <= mid < hi, + ensures + u64_leading_zeros(lo) >= u64_leading_zeros(mid) >= u64_leading_zeros(hi), +{ + leading_zeros_monotonic(hi); + leading_zeros_monotonic(mid); +} - spec fn property_bin(size: int) -> bool 
{ - 131072 >= size_of_bin(smallest_bin_fitting_size(size)) >= size - } +/** Put our desired property into a proof-by-compute-friendly form **/ - spec fn check_bin(size_start: int, size_end: int) -> bool - decreases size_end - size_start + 8, - { - if size_start >= size_end { - true - } else { - property_bin(size_start) && check_bin(size_start + 8, size_end) - } - } +spec fn property_bin(size: int) -> bool { + 131072 >= size_of_bin(smallest_bin_fitting_size(size)) >= size +} - spec fn id(i: int) -> bool { +spec fn check_bin(size_start: int, size_end: int) -> bool + decreases size_end - size_start + 8, +{ + if size_start >= size_end { true + } else { + property_bin(size_start) && check_bin(size_start + 8, size_end) } +} - #[verifier::spinoff_prover] - proof fn result_bin(size_start: int, size_end: int) - requires - size_start % 8 == 0, - ensures - check_bin(size_start, size_end) ==> (forall|size: int| - size_start <= size < size_end && size % 8 == 0 ==> #[trigger] - id(size) && property_bin(size)), - decreases size_end - size_start + 8, - { - hide(property_bin); - if size_start >= size_end { - } else { - result_bin(size_start + 8, size_end); - } +spec fn id(i: int) -> bool { + true +} + +#[verifier::spinoff_prover] +proof fn result_bin(size_start: int, size_end: int) + requires + size_start % 8 == 0, + ensures + check_bin(size_start, size_end) ==> (forall|size: int| + size_start <= size < size_end && size % 8 == 0 ==> #[trigger] id(size) && property_bin( + size, + )), + decreases size_end - size_start + 8, +{ + hide(property_bin); + if size_start >= size_end { + } else { + result_bin(size_start + 8, size_end); } +} - pub proof fn bin_size_result(size: usize) - requires - size <= 131072, // == MEDIUM_OBJ_SIZE_MAX - valid_bin_idx(smallest_bin_fitting_size(size as int)), - ensures - 131072 >= size_of_bin(smallest_bin_fitting_size(size as int) as int) >= size, - decreases 8 - ((size + 7) % 8), - { - if size % 8 == 0 { - bin_size_result_mul8(size); - } else { - bin_size_result((size + 1) as usize); - } +pub proof fn bin_size_result(size: usize) + requires + size <= 131072, // == MEDIUM_OBJ_SIZE_MAX + valid_bin_idx(smallest_bin_fitting_size(size as int)), + ensures + 131072 >= size_of_bin(smallest_bin_fitting_size(size as int) as int) >= size, + decreases 8 - ((size + 7) % 8), +{ + if size % 8 == 0 { + bin_size_result_mul8(size); + } else { + bin_size_result((size + 1) as usize); } +} - // The "proof" is below is broken into chunks, - // so (a) we don't exceed the interpreter's stack limit, - // and (b) because the interpreter time seems to scale - // non-linearly with recursion depth - pub proof fn bin_size_result_mul8(size: usize) - requires - size % 8 == 0, - size <= 131072, // == MEDIUM_OBJ_SIZE_MAX - valid_bin_idx(smallest_bin_fitting_size(size as int)), - ensures - 131072 >= size_of_bin(smallest_bin_fitting_size(size as int) as int) >= size, - { - // TODO: Swap these asserts for the assumes below - // - assert(check_bin(0, 8192)) by (compute_only); - assert(check_bin(8192, 16384)) by (compute_only); - assert(check_bin(16384, 24576)) by (compute_only); - assert(check_bin(24576, 32768)) by (compute_only); - assert(check_bin(32768, 40960)) by (compute_only); - assert(check_bin(40960, 49152)) by (compute_only); - assert(check_bin(49152, 57344)) by (compute_only); - assert(check_bin(57344, 65536)) by (compute_only); - assert(check_bin(65536, 73728)) by (compute_only); - assert(check_bin(73728, 81920)) by (compute_only); - assert(check_bin(81920, 90112)) by (compute_only); - 
assert(check_bin(90112, 98304)) by (compute_only); - assert(check_bin(98304, 106496)) by (compute_only); - assert(check_bin(106496, 114688)) by (compute_only); - assert(check_bin(114688, 122880)) by (compute_only); - assert(check_bin(122880, 131080)) by (compute_only); - //assume(check_bin(0, 8192)); - //assume(check_bin(8192, 16384)); - //assume(check_bin(16384, 24576)); - //assume(check_bin(24576, 32768)); - //assume(check_bin(32768, 40960)); - //assume(check_bin(40960, 49152)); - //assume(check_bin(49152, 57344)); - //assume(check_bin(57344, 65536)); - //assume(check_bin(65536, 73728)); - //assume(check_bin(73728, 81920)); - //assume(check_bin(81920, 90112)); - //assume(check_bin(90112, 98304)); - //assume(check_bin(98304, 106496)); - //assume(check_bin(106496, 114688)); - //assume(check_bin(114688, 122880)); - //assume(check_bin(122880, 131080)); - result_bin(0, 8192); - result_bin(8192, 16384); - result_bin(16384, 24576); - result_bin(24576, 32768); - result_bin(32768, 40960); - result_bin(40960, 49152); - result_bin(49152, 57344); - result_bin(57344, 65536); - result_bin(65536, 73728); - result_bin(73728, 81920); - result_bin(81920, 90112); - result_bin(90112, 98304); - result_bin(98304, 106496); - result_bin(106496, 114688); - result_bin(114688, 122880); - result_bin(122880, 131080); - assert(id(size as int)); - } - - // Used to compute a bin for a given size - pub fn bin(size: usize) -> (bin_idx: u8) - requires - size <= MAX_ALLOC_SIZE, - size <= 131072, // == MEDIUM_OBJ_SIZE_MAX +// The "proof" is below is broken into chunks, +// so (a) we don't exceed the interpreter's stack limit, +// and (b) because the interpreter time seems to scale +// non-linearly with recursion depth +pub proof fn bin_size_result_mul8(size: usize) + requires + size % 8 == 0, + size <= 131072, // == MEDIUM_OBJ_SIZE_MAX + valid_bin_idx(smallest_bin_fitting_size(size as int)), + ensures + 131072 >= size_of_bin(smallest_bin_fitting_size(size as int) as int) >= size, +{ + // TODO: Swap these asserts for the assumes below + // + assert(check_bin(0, 8192)) by (compute_only); + assert(check_bin(8192, 16384)) by (compute_only); + assert(check_bin(16384, 24576)) by (compute_only); + assert(check_bin(24576, 32768)) by (compute_only); + assert(check_bin(32768, 40960)) by (compute_only); + assert(check_bin(40960, 49152)) by (compute_only); + assert(check_bin(49152, 57344)) by (compute_only); + assert(check_bin(57344, 65536)) by (compute_only); + assert(check_bin(65536, 73728)) by (compute_only); + assert(check_bin(73728, 81920)) by (compute_only); + assert(check_bin(81920, 90112)) by (compute_only); + assert(check_bin(90112, 98304)) by (compute_only); + assert(check_bin(98304, 106496)) by (compute_only); + assert(check_bin(106496, 114688)) by (compute_only); + assert(check_bin(114688, 122880)) by (compute_only); + assert(check_bin(122880, 131080)) by (compute_only); + //assume(check_bin(0, 8192)); + //assume(check_bin(8192, 16384)); + //assume(check_bin(16384, 24576)); + //assume(check_bin(24576, 32768)); + //assume(check_bin(32768, 40960)); + //assume(check_bin(40960, 49152)); + //assume(check_bin(49152, 57344)); + //assume(check_bin(57344, 65536)); + //assume(check_bin(65536, 73728)); + //assume(check_bin(73728, 81920)); + //assume(check_bin(81920, 90112)); + //assume(check_bin(90112, 98304)); + //assume(check_bin(98304, 106496)); + //assume(check_bin(106496, 114688)); + //assume(check_bin(114688, 122880)); + //assume(check_bin(122880, 131080)); + result_bin(0, 8192); + result_bin(8192, 16384); + result_bin(16384, 
24576); + result_bin(24576, 32768); + result_bin(32768, 40960); + result_bin(40960, 49152); + result_bin(49152, 57344); + result_bin(57344, 65536); + result_bin(65536, 73728); + result_bin(73728, 81920); + result_bin(81920, 90112); + result_bin(90112, 98304); + result_bin(98304, 106496); + result_bin(106496, 114688); + result_bin(114688, 122880); + result_bin(122880, 131080); + assert(id(size as int)); +} - ensures - valid_bin_idx(bin_idx as int), - size_of_bin(bin_idx as int) >= size, - bin_idx == smallest_bin_fitting_size(size as int), - { +// Used to compute a bin for a given size +pub fn bin(size: usize) -> (bin_idx: u8) + requires + size <= MAX_ALLOC_SIZE, + size <= 131072, // == MEDIUM_OBJ_SIZE_MAX + + ensures + valid_bin_idx(bin_idx as int), + size_of_bin(bin_idx as int) >= size, + bin_idx == smallest_bin_fitting_size(size as int), +{ + proof { + lemma_bin_sizes_constants(); + } + let bytes_per_word = usize::BITS as usize / 8; + assert(usize::BITS / 8 == 8) by (nonlinear_arith); + let wsize = (size + bytes_per_word - 1) / bytes_per_word; + assert(((wsize * 8) + 8 - 1) / 8 == wsize) by (nonlinear_arith); + if wsize <= 1 { + 1 + } else if wsize <= 8 { + wsize as u8 + } else { + assert(9 <= wsize < 131073); + let w: u64 = (wsize - 1) as u64; + assert(8 <= w < 131072); + let lz: u32 = w.leading_zeros(); + assert(46 <= lz <= 60) by { + assert(u64_leading_zeros(8) == 60) by (compute_only); + assert(u64_leading_zeros(131072) == 46) by (compute_only); + leading_zeros_between(8, w, 131072); + } + let ghost log2_w = log2(w); proof { - lemma_bin_sizes_constants(); - } - let bytes_per_word = usize::BITS as usize / 8; - assert(usize::BITS / 8 == 8) by (nonlinear_arith); - let wsize = (size + bytes_per_word - 1) / bytes_per_word; - assert(((wsize * 8) + 8 - 1) / 8 == wsize) by (nonlinear_arith); - if wsize <= 1 { - 1 - } else if wsize <= 8 { - wsize as u8 - } else { - assert(9 <= wsize < 131073); - let w: u64 = (wsize - 1) as u64; - assert(8 <= w < 131072); - let lz: u32 = w.leading_zeros(); - assert(46 <= lz <= 60) by { - assert(u64_leading_zeros(8) == 60) by (compute_only); - assert(u64_leading_zeros(131072) == 46) by (compute_only); - leading_zeros_between(8, w, 131072); - } - let ghost log2_w = log2(w); - proof { - assert(log2_w >= 2) by { - assert(pow2(1) == 2 && pow2(2) == 4 && pow2(3) == 8) by (compute_only); - } - assert_by_contradiction!(log2_w < 64, { + assert(log2_w >= 2) by { + assert(pow2(1) == 2 && pow2(2) == 4 && pow2(3) == 8) by (compute_only); + } + assert_by_contradiction!(log2_w < 64, { assert(pow2(64) == 0x10000000000000000) by (compute_only); assert(pow2(log2_w as int) >= pow2(64)) by { pow2_properties(); } assert(w >= 0x10000000000000000); }); - leading_zeros_between_powers_of_2(w, log2_w); - assert(lz == 63 - log2_w); - } - let b = (usize::BITS - 1 - lz) as u8; - assert(b == log2_w); - assert(3 <= b <= 17); - // assert(w > 255 ==> u64_leading_zeros(w) <= 52) by { - // if w > 255 { - // assert(u64_leading_zeros(256) == 55) by (compute_only); - // leading_zeros_between(256, w, 131072); - // } - // } - // This isn't true with this limited context, b/c we need to know how w and b scale relative to each other - // assert((w >> sub(b as u64, 2)) < 256) by (bit_vector) - // requires 8 <= w < 131072 && 3 <= b <= 17; - assert(w >> ((b as u64 - 2) as u64) <= 8) by { - assert(w < pow2((log2_w + 1) as int)); - assert(pow2((log2_w - 2) as int) > 0) by { - pow2_properties(); - } - assert(w as nat / pow2((log2_w - 2) as int) <= pow2((log2_w + 1) as int) / pow2( - (log2_w - 2) as int, - )) 
by { - lemma_div_is_ordered( - w as int, - pow2((log2_w + 1) as int) as int, - pow2((log2_w - 2) as int) as int, - ); - } - assert(pow2((log2_w + 1) as int) / pow2((log2_w - 2) as int) == pow2(3)) by { - pow2_subtracts((log2_w - 2) as nat, log2_w + 1); - } - assert(pow2(3) == 8) by (compute_only); - shift_is_div(w, ((b as u64 - 2) as u64)); + leading_zeros_between_powers_of_2(w, log2_w); + assert(lz == 63 - log2_w); + } + let b = (usize::BITS - 1 - lz) as u8; + assert(b == log2_w); + assert(3 <= b <= 17); + // assert(w > 255 ==> u64_leading_zeros(w) <= 52) by { + // if w > 255 { + // assert(u64_leading_zeros(256) == 55) by (compute_only); + // leading_zeros_between(256, w, 131072); + // } + // } + // This isn't true with this limited context, b/c we need to know how w and b scale relative to each other + // assert((w >> sub(b as u64, 2)) < 256) by (bit_vector) + // requires 8 <= w < 131072 && 3 <= b <= 17; + assert(w >> ((b as u64 - 2) as u64) <= 8) by { + assert(w < pow2((log2_w + 1) as int)); + assert(pow2((log2_w - 2) as int) > 0) by { + pow2_properties(); } - assert((w >> sub(b as u64, 2)) < 256); - let shifted = (w >> (b as u64 - 2)) as u8; - assert((w >> sub(sub(63, lz as u64), 2)) & 0x03 < 4) by (bit_vector) - requires - 8 <= w < 131073 && 46 <= lz <= 60, - ; - //assert(((w >> sub(63 - lz as u64), 2)) & 0x03 < 4); - //assert((w >> ((63 - lz as u64) - 2)) & 0x03 < 4); - assert(shifted & 0x03 < 4) by (bit_vector); - let bin_idx = ((b * 4) + (shifted & 0x03)) - 3; - assert(valid_bin_idx(bin_idx as int)); - assert(bin_idx == smallest_bin_fitting_size(size as int)); - assert(size_of_bin(bin_idx as int) >= size) by { - bin_size_result(size); - }; - //assert(size_of_bin(bin_idx as int) >= size) - // Can't call this because the precondition restricts it to small sizes - // by { bounds_for_smallest_bin_fitting_size(size as int); } - bin_idx + assert(w as nat / pow2((log2_w - 2) as int) <= pow2((log2_w + 1) as int) / pow2( + (log2_w - 2) as int, + )) by { + lemma_div_is_ordered( + w as int, + pow2((log2_w + 1) as int) as int, + pow2((log2_w - 2) as int) as int, + ); + } + assert(pow2((log2_w + 1) as int) / pow2((log2_w - 2) as int) == pow2(3)) by { + pow2_subtracts((log2_w - 2) as nat, log2_w + 1); + } + assert(pow2(3) == 8) by (compute_only); + shift_is_div(w, ((b as u64 - 2) as u64)); } + assert((w >> sub(b as u64, 2)) < 256); + let shifted = (w >> (b as u64 - 2)) as u8; + assert((w >> sub(sub(63, lz as u64), 2)) & 0x03 < 4) by (bit_vector) + requires + 8 <= w < 131073 && 46 <= lz <= 60, + ; + //assert(((w >> sub(63 - lz as u64), 2)) & 0x03 < 4); + //assert((w >> ((63 - lz as u64) - 2)) & 0x03 < 4); + assert(shifted & 0x03 < 4) by (bit_vector); + let bin_idx = ((b * 4) + (shifted & 0x03)) - 3; + assert(valid_bin_idx(bin_idx as int)); + assert(bin_idx == smallest_bin_fitting_size(size as int)); + assert(size_of_bin(bin_idx as int) >= size) by { + bin_size_result(size); + }; + //assert(size_of_bin(bin_idx as int) >= size) + // Can't call this because the precondition restricts it to small sizes + // by { bounds_for_smallest_bin_fitting_size(size as int); } + bin_idx } +} - //////// Segment bins - pub open spec fn valid_sbin_idx(sbin_idx: int) -> bool { - 0 <= sbin_idx <= SEGMENT_BIN_MAX - } +//////// Segment bins +pub open spec fn valid_sbin_idx(sbin_idx: int) -> bool { + 0 <= sbin_idx <= SEGMENT_BIN_MAX +} - pub closed spec fn size_of_sbin(sbin_idx: int) -> nat - recommends - valid_sbin_idx(sbin_idx), - { - if 0 <= sbin_idx <= 7 { - sbin_idx as nat - } else if sbin_idx == 8 { - 10 - } else { 
- let group = (sbin_idx - 8) / 4; - let inner = (sbin_idx - 8) % 4; - ((inner + 5) * pow2(group + 1)) as nat - } +pub closed spec fn size_of_sbin(sbin_idx: int) -> nat + recommends + valid_sbin_idx(sbin_idx), +{ + if 0 <= sbin_idx <= 7 { + sbin_idx as nat + } else if sbin_idx == 8 { + 10 + } else { + let group = (sbin_idx - 8) / 4; + let inner = (sbin_idx - 8) % 4; + ((inner + 5) * pow2(group + 1)) as nat } +} - pub open spec fn smallest_sbin_fitting_size(i: int) -> int { - if i <= 8 { - i - } else { - let w = (i - 1) as u64; - //let lz = w.leading_zeros(); - let lz = u64_leading_zeros(w); - let b = (usize::BITS - 1 - lz) as u8; - let sbin_idx = ((b << 2u8) as u64 | ((w >> (b as u64 - 2) as u64) & 0x03)) - 4; - sbin_idx - } +pub open spec fn smallest_sbin_fitting_size(i: int) -> int { + if i <= 8 { + i + } else { + let w = (i - 1) as u64; + //let lz = w.leading_zeros(); + let lz = u64_leading_zeros(w); + let b = (usize::BITS - 1 - lz) as u8; + let sbin_idx = ((b << 2u8) as u64 | ((w >> (b as u64 - 2) as u64) & 0x03)) - 4; + sbin_idx } +} - /** Put our desired property into a proof-by-compute-friendly form **/ +/** Put our desired property into a proof-by-compute-friendly form **/ - spec fn property_sbin_idx_smallest_sbin_fitting_size(size: int) -> bool { - valid_sbin_idx(smallest_sbin_fitting_size(size)) - } +spec fn property_sbin_idx_smallest_sbin_fitting_size(size: int) -> bool { + valid_sbin_idx(smallest_sbin_fitting_size(size)) +} - spec fn check_sbin_idx_smallest_sbin_fitting_size(size_start: int, size_end: int) -> bool - decreases size_end - size_start, - { - if size_start >= size_end { - true - } else { - property_sbin_idx_smallest_sbin_fitting_size(size_start) - && check_sbin_idx_smallest_sbin_fitting_size(size_start + 1, size_end) - } +spec fn check_sbin_idx_smallest_sbin_fitting_size(size_start: int, size_end: int) -> bool + decreases size_end - size_start, +{ + if size_start >= size_end { + true + } else { + property_sbin_idx_smallest_sbin_fitting_size(size_start) + && check_sbin_idx_smallest_sbin_fitting_size(size_start + 1, size_end) } +} - proof fn result_sbin_idx_smallest_sbin_fitting_size(size_start: int, size_end: int) - ensures - check_sbin_idx_smallest_sbin_fitting_size(size_start, size_end) ==> (forall|size| - size_start <= size < size_end ==> property_sbin_idx_smallest_sbin_fitting_size(size)), - decreases size_end - size_start, - { - if size_start >= size_end { - } else { - result_sbin_idx_smallest_sbin_fitting_size(size_start + 1, size_end); - } +proof fn result_sbin_idx_smallest_sbin_fitting_size(size_start: int, size_end: int) + ensures + check_sbin_idx_smallest_sbin_fitting_size(size_start, size_end) ==> (forall|size| + size_start <= size < size_end ==> property_sbin_idx_smallest_sbin_fitting_size(size)), + decreases size_end - size_start, +{ + if size_start >= size_end { + } else { + result_sbin_idx_smallest_sbin_fitting_size(size_start + 1, size_end); } +} - pub proof fn valid_sbin_idx_smallest_sbin_fitting_size(i: int) - requires - 0 <= i <= SLICES_PER_SEGMENT, - ensures - valid_sbin_idx(smallest_sbin_fitting_size(i)), - { - assert(SLICES_PER_SEGMENT == 512) by (compute_only); - assert(check_sbin_idx_smallest_sbin_fitting_size(0, 513)) by (compute_only); - result_sbin_idx_smallest_sbin_fitting_size(0, 513); - assert(property_sbin_idx_smallest_sbin_fitting_size(i)); // Trigger result_sbin_idx_smallest_sbin_fitting_size - } +pub proof fn valid_sbin_idx_smallest_sbin_fitting_size(i: int) + requires + 0 <= i <= SLICES_PER_SEGMENT, + ensures + 
valid_sbin_idx(smallest_sbin_fitting_size(i)), +{ + assert(SLICES_PER_SEGMENT == 512) by (compute_only); + assert(check_sbin_idx_smallest_sbin_fitting_size(0, 513)) by (compute_only); + result_sbin_idx_smallest_sbin_fitting_size(0, 513); + assert(property_sbin_idx_smallest_sbin_fitting_size(i)); // Trigger result_sbin_idx_smallest_sbin_fitting_size +} - /** Put our desired property into a proof-by-compute-friendly form **/ +/** Put our desired property into a proof-by-compute-friendly form **/ - spec fn property_sbin_bounds(size: int) -> bool { - let lz = u64_leading_zeros(size as u64); - let b = (63 - lz) as u8; - // Satisfy various type requirements - (b >= 2) && (((b << 2u8) as u64 | ((size as u64 >> (b as u64 - 2) as u64) & 0x03)) >= 4) - } +spec fn property_sbin_bounds(size: int) -> bool { + let lz = u64_leading_zeros(size as u64); + let b = (63 - lz) as u8; + // Satisfy various type requirements + (b >= 2) && (((b << 2u8) as u64 | ((size as u64 >> (b as u64 - 2) as u64) & 0x03)) >= 4) +} - spec fn check_sbin_bounds(size_start: int, size_end: int) -> bool - decreases size_end - size_start, - { - if size_start >= size_end { - true - } else { - property_sbin_bounds(size_start) && check_sbin_bounds(size_start + 1, size_end) - } +spec fn check_sbin_bounds(size_start: int, size_end: int) -> bool + decreases size_end - size_start, +{ + if size_start >= size_end { + true + } else { + property_sbin_bounds(size_start) && check_sbin_bounds(size_start + 1, size_end) } +} - proof fn result_sbin_bounds(size_start: int, size_end: int) - ensures - check_sbin_bounds(size_start, size_end) ==> (forall|size| - size_start <= size < size_end ==> property_sbin_bounds(size)), - decreases size_end - size_start, - { - if size_start >= size_end { - } else { - result_sbin_bounds(size_start + 1, size_end); - } +proof fn result_sbin_bounds(size_start: int, size_end: int) + ensures + check_sbin_bounds(size_start, size_end) ==> (forall|size| + size_start <= size < size_end ==> property_sbin_bounds(size)), + decreases size_end - size_start, +{ + if size_start >= size_end { + } else { + result_sbin_bounds(size_start + 1, size_end); } +} - /** Put our desired property into a proof-by-compute-friendly form **/ +/** Put our desired property into a proof-by-compute-friendly form **/ - spec fn property_sbin(slice_count: int) -> bool { - let sbin_idx = smallest_sbin_fitting_size(slice_count as int); - valid_sbin_idx(sbin_idx as int) && size_of_sbin(sbin_idx as int) >= slice_count - } +spec fn property_sbin(slice_count: int) -> bool { + let sbin_idx = smallest_sbin_fitting_size(slice_count as int); + valid_sbin_idx(sbin_idx as int) && size_of_sbin(sbin_idx as int) >= slice_count +} - spec fn check_sbin(size_start: int, size_end: int) -> bool - decreases size_end - size_start, - { - if size_start >= size_end { - true - } else { - property_sbin(size_start) && check_sbin(size_start + 1, size_end) - } +spec fn check_sbin(size_start: int, size_end: int) -> bool + decreases size_end - size_start, +{ + if size_start >= size_end { + true + } else { + property_sbin(size_start) && check_sbin(size_start + 1, size_end) } +} - proof fn result_sbin(size_start: int, size_end: int) - ensures - check_sbin(size_start, size_end) ==> (forall|size| - size_start <= size < size_end ==> property_sbin(size)), - decreases size_end - size_start, - { - if size_start >= size_end { - } else { - result_sbin(size_start + 1, size_end); - } +proof fn result_sbin(size_start: int, size_end: int) + ensures + check_sbin(size_start, size_end) ==> (forall|size| + 
size_start <= size < size_end ==> property_sbin(size)), + decreases size_end - size_start, +{ + if size_start >= size_end { + } else { + result_sbin(size_start + 1, size_end); } +} - pub fn slice_bin(slice_count: usize) -> (sbin_idx: usize) - requires - slice_count <= SLICES_PER_SEGMENT, - ensures - valid_sbin_idx(sbin_idx as int), - size_of_sbin(sbin_idx as int) >= slice_count, - sbin_idx == smallest_sbin_fitting_size(slice_count as int), - { - // Based on mi_slice_bin8 - if slice_count <= 8 { - slice_count - } else { - let w = (slice_count - 1) as u64; - assert(SLICES_PER_SEGMENT == 512) by (compute_only); - assert(9 <= slice_count <= 512); - assert(8 <= w <= 511); - let lz = w.leading_zeros(); - proof { - assert(check_sbin_bounds(8, 512)) by (compute_only); - result_sbin_bounds(8, 512); - assert(property_sbin_bounds(w as int)); - } - let b = (usize::BITS - 1 - lz) as u8; - let sbin_idx = ((b << 2u8) as u64 | ((w >> (b as u64 - 2)) & 0x03)) - 4; - assert(sbin_idx == smallest_sbin_fitting_size(slice_count as int)); - proof { - assert(check_sbin(9, 513)) by (compute_only); - result_sbin(9, 513); - assert(property_sbin(slice_count as int)); - } - sbin_idx as usize +pub fn slice_bin(slice_count: usize) -> (sbin_idx: usize) + requires + slice_count <= SLICES_PER_SEGMENT, + ensures + valid_sbin_idx(sbin_idx as int), + size_of_sbin(sbin_idx as int) >= slice_count, + sbin_idx == smallest_sbin_fitting_size(slice_count as int), +{ + // Based on mi_slice_bin8 + if slice_count <= 8 { + slice_count + } else { + let w = (slice_count - 1) as u64; + assert(SLICES_PER_SEGMENT == 512) by (compute_only); + assert(9 <= slice_count <= 512); + assert(8 <= w <= 511); + let lz = w.leading_zeros(); + proof { + assert(check_sbin_bounds(8, 512)) by (compute_only); + result_sbin_bounds(8, 512); + assert(property_sbin_bounds(w as int)); + } + let b = (usize::BITS - 1 - lz) as u8; + let sbin_idx = ((b << 2u8) as u64 | ((w >> (b as u64 - 2)) & 0x03)) - 4; + assert(sbin_idx == smallest_sbin_fitting_size(slice_count as int)); + proof { + assert(check_sbin(9, 513)) by (compute_only); + result_sbin(9, 513); + assert(property_sbin(slice_count as int)); } + sbin_idx as usize } +} - } // verus! +} // verus! } mod dealloc_token { @@ -7356,126 +7335,126 @@ mod dealloc_token { verus! 
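// An illustrative, unverified plain-Rust sketch of the segment-bin arithmetic
// above: `size_of_sbin`, `smallest_sbin_fitting_size`, and the exhaustive
// check that `check_sbin(9, 513)` discharges `by (compute_only)`. The spec and
// proof annotations are stripped; SLICES_PER_SEGMENT == 512 is stated in the
// proofs above, while SEGMENT_BIN_MAX = 31 and the 64-bit `usize` layout are
// assumptions made only for this sketch.

const SLICES_PER_SEGMENT: u64 = 512;
const SEGMENT_BIN_MAX: u64 = 31; // assumed bound behind valid_sbin_idx

fn size_of_sbin(sbin_idx: u64) -> u64 {
    if sbin_idx <= 7 {
        sbin_idx
    } else if sbin_idx == 8 {
        10
    } else {
        let group = (sbin_idx - 8) / 4;
        let inner = (sbin_idx - 8) % 4;
        (inner + 5) << (group + 1) // (inner + 5) * pow2(group + 1)
    }
}

fn smallest_sbin_fitting_size(i: u64) -> u64 {
    if i <= 8 {
        i
    } else {
        let w = i - 1;
        let b = 63 - u64::from(w.leading_zeros()); // position of the highest set bit
        ((b << 2) | ((w >> (b - 2)) & 0x03)) - 4
    }
}

fn main() {
    // Mirrors property_sbin / check_sbin over every slice count in a segment.
    for slice_count in 0..=SLICES_PER_SEGMENT {
        let idx = smallest_sbin_fitting_size(slice_count);
        assert!(idx <= SEGMENT_BIN_MAX, "invalid sbin index for {slice_count}");
        assert!(size_of_sbin(idx) >= slice_count, "sbin too small for {slice_count}");
    }
    println!("every slice count 0..=512 maps to a valid, large-enough sbin");
}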
{ - pub tracked struct MimDealloc { - pub tracked padding: PointsToRaw, - // Size of the allocation from the user perspective, <= the block size - pub ghost size: int, - // Memory to make up the difference between user size and block size - pub tracked inner: MimDeallocInner, - } +pub tracked struct MimDealloc { + pub tracked padding: PointsToRaw, + // Size of the allocation from the user perspective, <= the block size + pub ghost size: int, + // Memory to make up the difference between user size and block size + pub tracked inner: MimDeallocInner, +} - pub tracked struct MimDeallocInner { - pub tracked mim_instance: Mim::Instance, - pub tracked mim_block: Mim::block, - pub ghost ptr: int, - } +pub tracked struct MimDeallocInner { + pub tracked mim_instance: Mim::Instance, + pub tracked mim_block: Mim::block, + pub ghost ptr: int, +} - pub open spec fn valid_block_token(block: Mim::block, instance: Mim::Instance) -> bool { - &&& block@.key.wf() - &&& block@.instance == instance// TODO factor this stuff into wf predicates - // Valid segment +pub open spec fn valid_block_token(block: Mim::block, instance: Mim::Instance) -> bool { + &&& block@.key.wf() + &&& block@.instance == instance // TODO factor this stuff into wf predicates + // Valid segment - &&& is_segment_ptr( - block@.value.segment_shared_access.points_to@.pptr, - block@.key.page_id.segment_id, - ) - &&& block@.value.segment_shared_access.points_to@.value.is_some() - &&& block@.value.segment_shared_access.points_to@.value.get_Some_0().wf( - instance, - block@.key.page_id.segment_id, - )// Valid slice page + &&& is_segment_ptr( + block@.value.segment_shared_access.points_to@.pptr, + block@.key.page_id.segment_id, + ) + &&& block@.value.segment_shared_access.points_to@.value.is_some() + &&& block@.value.segment_shared_access.points_to@.value.get_Some_0().wf( + instance, + block@.key.page_id.segment_id, + ) // Valid slice page + + &&& is_page_ptr( + block@.value.page_slice_shared_access.points_to@.pptr, + block@.key.page_id_for_slice(), + ) + &&& block@.value.page_slice_shared_access.points_to@.value.is_some() + &&& block@.value.page_slice_shared_access.points_to@.value.get_Some_0().offset as int == ( + block@.key.slice_idx - block@.key.page_id.idx) + * crate::config::SIZEOF_PAGE_HEADER // Valid main page - &&& is_page_ptr( - block@.value.page_slice_shared_access.points_to@.pptr, - block@.key.page_id_for_slice(), - ) - &&& block@.value.page_slice_shared_access.points_to@.value.is_some() - &&& block@.value.page_slice_shared_access.points_to@.value.get_Some_0().offset as int == ( - block@.key.slice_idx - block@.key.page_id.idx) - * crate::config::SIZEOF_PAGE_HEADER// Valid main page + &&& block@.value.page_shared_access.wf(block@.key.page_id, block@.key.block_size, instance) +} - &&& block@.value.page_shared_access.wf(block@.key.page_id, block@.key.block_size, instance) +impl MimDeallocInner { + #[verifier(inline)] + pub open spec fn block_id(&self) -> BlockId { + self.mim_block@.key } - impl MimDeallocInner { - #[verifier(inline)] - pub open spec fn block_id(&self) -> BlockId { - self.mim_block@.key - } - - pub open spec fn wf(&self) -> bool { - &&& valid_block_token(self.mim_block, self.mim_instance) - &&& is_block_ptr(self.ptr, self.block_id()) - } + pub open spec fn wf(&self) -> bool { + &&& valid_block_token(self.mim_block, self.mim_instance) + &&& is_block_ptr(self.ptr, self.block_id()) + } - pub proof fn into_user( - tracked self, - tracked points_to_raw: PointsToRaw, - sz: int, - ) -> (tracked res: (MimDealloc, PointsToRaw)) - 
requires - self.wf(), - points_to_raw.is_range(self.ptr, self.block_id().block_size as int), - 0 <= sz <= self.block_id().block_size, - ensures - ({ - let (md, points_to_raw) = res; - md.wf() && points_to_raw.is_range(self.ptr, sz) && md.size == sz && md.block_id() - == self.block_id() && md.ptr() == self.ptr && md.instance() == self.mim_instance - }), - { - let tracked (x, y) = points_to_raw.split(set_int_range(self.ptr, self.ptr + sz)); - let tracked md = MimDealloc { padding: y, size: sz, inner: self }; - (md, x) - } + pub proof fn into_user( + tracked self, + tracked points_to_raw: PointsToRaw, + sz: int, + ) -> (tracked res: (MimDealloc, PointsToRaw)) + requires + self.wf(), + points_to_raw.is_range(self.ptr, self.block_id().block_size as int), + 0 <= sz <= self.block_id().block_size, + ensures + ({ + let (md, points_to_raw) = res; + md.wf() && points_to_raw.is_range(self.ptr, sz) && md.size == sz && md.block_id() + == self.block_id() && md.ptr() == self.ptr && md.instance() == self.mim_instance + }), + { + let tracked (x, y) = points_to_raw.split(set_int_range(self.ptr, self.ptr + sz)); + let tracked md = MimDealloc { padding: y, size: sz, inner: self }; + (md, x) } +} - impl MimDealloc { - #[verifier(inline)] - pub open spec fn block_id(&self) -> BlockId { - self.inner.block_id() - } +impl MimDealloc { + #[verifier(inline)] + pub open spec fn block_id(&self) -> BlockId { + self.inner.block_id() + } - pub open spec fn ptr(&self) -> int { - self.inner.ptr - } + pub open spec fn ptr(&self) -> int { + self.inner.ptr + } - pub open spec fn instance(&self) -> Mim::Instance { - self.inner.mim_instance - } + pub open spec fn instance(&self) -> Mim::Instance { + self.inner.mim_instance + } - pub open spec fn wf(&self) -> bool { - self.inner.wf()// PAPER CUT: is_range should probably have this condition in it - && self.block_id().block_size - self.size >= 0 && self.size >= 0 && self.padding.is_range( - self.inner.ptr + self.size, - self.block_id().block_size - self.size, - ) - } + pub open spec fn wf(&self) -> bool { + self.inner.wf() // PAPER CUT: is_range should probably have this condition in it + && self.block_id().block_size - self.size >= 0 && self.size >= 0 && self.padding.is_range( + self.inner.ptr + self.size, + self.block_id().block_size - self.size, + ) + } - pub proof fn into_internal(tracked self, tracked points_to_raw: PointsToRaw) -> (tracked res: ( - MimDeallocInner, - PointsToRaw, - )) - requires - self.wf(), - points_to_raw.is_range(self.ptr(), self.size), - ensures - ({ - let (md, points_to_raw_full) = res; - md.wf() && points_to_raw_full.is_range( - self.ptr(), - self.block_id().block_size as int, - ) && self.ptr() == md.ptr && self.block_id().block_size - == md.mim_block@.key.block_size && md.mim_instance == self.instance() - }), - { - let tracked MimDealloc { padding, size, inner } = self; - let tracked p = points_to_raw.join(padding); - (inner, p) - } + pub proof fn into_internal(tracked self, tracked points_to_raw: PointsToRaw) -> (tracked res: ( + MimDeallocInner, + PointsToRaw, + )) + requires + self.wf(), + points_to_raw.is_range(self.ptr(), self.size), + ensures + ({ + let (md, points_to_raw_full) = res; + md.wf() && points_to_raw_full.is_range( + self.ptr(), + self.block_id().block_size as int, + ) && self.ptr() == md.ptr && self.block_id().block_size + == md.mim_block@.key.block_size && md.mim_instance == self.instance() + }), + { + let tracked MimDealloc { padding, size, inner } = self; + let tracked p = points_to_raw.join(padding); + (inner, p) } +} - } // 
verus! +} // verus! } mod page_organization { @@ -7494,47 +7473,47 @@ mod page_organization { verus! { - pub ghost struct DlistHeader { - pub first: Option, - pub last: Option, - } +pub ghost struct DlistHeader { + pub first: Option, + pub last: Option, +} - pub ghost struct DlistEntry { - pub prev: Option, - pub next: Option, - } +pub ghost struct DlistEntry { + pub prev: Option, + pub next: Option, +} - #[is_variant] - pub ghost enum PageHeaderKind { - Normal(int, int), - } +#[is_variant] +pub ghost enum PageHeaderKind { + Normal(int, int), +} - pub ghost struct PageData { - // Option means unspecified (i.e., does not constrain the physical value) - pub dlist_entry: Option, - pub count: Option, - pub offset: Option, - pub is_used: bool, - pub full: Option, - pub page_header_kind: Option, - } +pub ghost struct PageData { + // Option means unspecified (i.e., does not constrain the physical value) + pub dlist_entry: Option, + pub count: Option, + pub offset: Option, + pub is_used: bool, + pub full: Option, + pub page_header_kind: Option, +} - pub ghost struct SegmentData { - pub used: int, - } +pub ghost struct SegmentData { + pub used: int, +} - #[is_variant] - pub ghost enum Popped { - No, - Ready(PageId, bool), // set up the offsets (all pages have offsets set) - Used(PageId, bool), // everything is set to 'used' - SegmentCreating(SegmentId), // just created - VeryUnready(SegmentId, int, int, bool), // no pages are set, not even first or last - SegmentFreeing(SegmentId, int), - ExtraCount(SegmentId), - } +#[is_variant] +pub ghost enum Popped { + No, + Ready(PageId, bool), // set up the offsets (all pages have offsets set) + Used(PageId, bool), // everything is set to 'used' + SegmentCreating(SegmentId), // just created + VeryUnready(SegmentId, int, int, bool), // no pages are set, not even first or last + SegmentFreeing(SegmentId, int), + ExtraCount(SegmentId), +} - state_machine!{ PageOrg { +state_machine!{ PageOrg { fields { // Roughly corresponds to physical state pub unused_dlist_headers: Seq, // indices are sbin @@ -12463,61 +12442,61 @@ mod page_organization { } }} - pub open spec fn is_header(pd: PageData) -> bool { - pd.offset == Some(0nat) - } +pub open spec fn is_header(pd: PageData) -> bool { + pd.offset == Some(0nat) +} - pub open spec fn is_unused_header(pd: PageData) -> bool { - pd.offset == Some(0nat) && !pd.is_used - } +pub open spec fn is_unused_header(pd: PageData) -> bool { + pd.offset == Some(0nat) && !pd.is_used +} - pub open spec fn is_used_header(pd: PageData) -> bool { - pd.offset == Some(0nat) && pd.is_used - } +pub open spec fn is_used_header(pd: PageData) -> bool { + pd.offset == Some(0nat) && pd.is_used +} - pub open spec fn get_next(ll: Seq, j: int) -> Option { - if j == ll.len() - 1 { - None - } else { - Some(ll[j + 1]) - } +pub open spec fn get_next(ll: Seq, j: int) -> Option { + if j == ll.len() - 1 { + None + } else { + Some(ll[j + 1]) } +} - pub open spec fn get_prev(ll: Seq, j: int) -> Option { - if j == 0 { - None - } else { - Some(ll[j - 1]) - } +pub open spec fn get_prev(ll: Seq, j: int) -> Option { + if j == 0 { + None + } else { + Some(ll[j - 1]) } +} - pub open spec fn valid_ll_i(pages: Map, ll: Seq, j: int) -> bool { - 0 <= j < ll.len() && pages.dom().contains(ll[j]) && pages[ll[j]].dlist_entry.is_some() - && pages[ll[j]].dlist_entry.unwrap().prev == get_prev(ll, j) - && pages[ll[j]].dlist_entry.unwrap().next == get_next(ll, j) - } +pub open spec fn valid_ll_i(pages: Map, ll: Seq, j: int) -> bool { + 0 <= j < ll.len() && 
pages.dom().contains(ll[j]) && pages[ll[j]].dlist_entry.is_some() + && pages[ll[j]].dlist_entry.unwrap().prev == get_prev(ll, j) + && pages[ll[j]].dlist_entry.unwrap().next == get_next(ll, j) +} - pub open spec fn valid_ll( - pages: Map, - header: DlistHeader, - ll: Seq, - ) -> bool { - &&& (match header.first { - Some(first_id) => ll.len() != 0 && ll[0] == first_id, - None => ll.len() == 0, - }) - &&& (match header.last { - Some(last_id) => ll.len() != 0 && ll[ll.len() - 1] == last_id, - None => ll.len() == 0, - }) - &&& (forall|j| 0 <= j < ll.len() ==> valid_ll_i(pages, ll, j)) - } +pub open spec fn valid_ll( + pages: Map, + header: DlistHeader, + ll: Seq, +) -> bool { + &&& (match header.first { + Some(first_id) => ll.len() != 0 && ll[0] == first_id, + None => ll.len() == 0, + }) + &&& (match header.last { + Some(last_id) => ll.len() != 0 && ll[ll.len() - 1] == last_id, + None => ll.len() == 0, + }) + &&& (forall|j| 0 <= j < ll.len() ==> valid_ll_i(pages, ll, j)) +} - pub open spec fn is_in_lls(page_id: PageId, s: Seq>) -> bool { - exists|i: int, j: int| 0 <= i < s.len() && 0 <= j < s[i].len() && s[i][j] == page_id - } +pub open spec fn is_in_lls(page_id: PageId, s: Seq>) -> bool { + exists|i: int, j: int| 0 <= i < s.len() && 0 <= j < s[i].len() && s[i][j] == page_id +} - } // verus! +} // verus! } mod os_mem_util { @@ -12535,565 +12514,368 @@ mod os_mem_util { verus! { - impl MemChunk { - pub proof fn empty() -> (tracked mc: MemChunk) { - MemChunk { os: Map::tracked_empty(), points_to: PointsToRaw::empty() } - } - - #[verifier::inline] - pub open spec fn pointsto_has_range(&self, start: int, len: int) -> bool { - set_int_range(start, start + len) <= self.range_points_to() - } - - pub open spec fn os_rw_bytes(&self) -> Set { - self.range_os_rw() - } - - pub open spec fn committed_pointsto_has_range(&self, start: int, len: int) -> bool { - self.pointsto_has_range(start, len) && self.os_has_range_read_write(start, len) - } - - pub proof fn split(tracked &mut self, start: int, len: int) -> (tracked t: Self) - ensures - t.points_to@ == old(self).points_to@.restrict(set_int_range(start, start + len)), - t.os == old(self).os.restrict(set_int_range(start, start + len)), - self.points_to@ == old(self).points_to@.remove_keys(set_int_range(start, start + len)), - self.os == old(self).os.remove_keys(set_int_range(start, start + len)), - { - let tracked split_os = self.os.tracked_remove_keys( - set_int_range(start, start + len).intersect(self.os.dom()), - ); - let tracked mut pt = PointsToRaw::empty(); - tracked_swap(&mut pt, &mut self.points_to); - let tracked (rt, pt) = pt.split(set_int_range(start, start + len).intersect(pt@.dom())); - self.points_to = pt; - let tracked t = MemChunk { os: split_os, points_to: rt }; - assert(self.points_to@ =~= old(self).points_to@.remove_keys( - set_int_range(start, start + len), - )); - assert(self.os =~= old(self).os.remove_keys(set_int_range(start, start + len))); - assert(t.points_to@ =~= old(self).points_to@.restrict(set_int_range(start, start + len))); - assert(t.os =~= old(self).os.restrict(set_int_range(start, start + len))); - t - } - - pub proof fn join(tracked &mut self, tracked t: Self) - ensures - self.points_to@ == old(self).points_to@.union_prefer_right(t.points_to@), - self.os == old(self).os.union_prefer_right(t.os), - { - let tracked MemChunk { os, points_to } = t; - self.os.tracked_union_prefer_right(os); - let tracked mut pt = PointsToRaw::empty(); - tracked_swap(&mut pt, &mut self.points_to); - let tracked pt = pt.join(points_to); - 
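// Having swapped the permission out of `self.points_to` with `tracked_swap`
// (it cannot be moved out behind a `&mut` reference) and joined the incoming
// permission onto it, store the combined permission back into `self`.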
self.points_to = pt; - } - - pub proof fn os_restrict(tracked &mut self, start: int, len: int) - requires - old(self).os_has_range(start, len), - ensures - self.points_to == old(self).points_to, - self.os == old(self).os.restrict(set_int_range(start, start + len)), - { - self.os.tracked_remove_keys(self.os.dom() - set_int_range(start, start + len)); - assert(self.os =~= old(self).os.restrict(set_int_range(start, start + len))); - } - - pub proof fn take_points_to_set(tracked &mut self, s: Set) -> (tracked points_to: - PointsToRaw) - requires - s <= old(self).points_to@.dom(), - ensures - self.os == old(self).os, - self.points_to@ == old(self).points_to@.remove_keys(s), - points_to@.dom() == s, - { - let tracked mut pt = PointsToRaw::empty(); - tracked_swap(&mut pt, &mut self.points_to); - let tracked (rt, pt) = pt.split(s); - self.points_to = pt; - assert(rt@.dom() =~= s); - rt - } - - pub proof fn take_points_to_range( - tracked &mut self, - start: int, - len: int, - ) -> (tracked points_to: PointsToRaw) - requires - len >= 0, - old(self).pointsto_has_range(start, len), - ensures - self.os == old(self).os, - self.points_to@ == old(self).points_to@.remove_keys(set_int_range(start, start + len)), - points_to.is_range(start, len), - { - let tracked mut pt = PointsToRaw::empty(); - tracked_swap(&mut pt, &mut self.points_to); - let tracked (rt, pt) = pt.split(set_int_range(start, start + len)); - self.points_to = pt; - rt - } - - pub proof fn give_points_to_range(tracked &mut self, tracked points_to: PointsToRaw) - requires - old(self).wf(), - ensures - self.wf(), - self.os == old(self).os, - self.points_to@.dom() == old(self).points_to@.dom() + points_to@.dom(), - { - let tracked mut pt = PointsToRaw::empty(); - tracked_swap(&mut pt, &mut self.points_to); - let tracked pt = pt.join(points_to); - self.points_to = pt; - assert(self.points_to@.dom() =~= old(self).points_to@.dom() + points_to@.dom()); - } - } - - pub open spec fn segment_info_range(segment_id: SegmentId) -> Set { - set_int_range( - segment_start(segment_id), - segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + SIZEOF_PAGE_HEADER * (SLICES_PER_SEGMENT - + 1), - ) - } - - pub open spec fn mem_chunk_good1( - mem: MemChunk, - segment_id: SegmentId, - commit_bytes: Set, - decommit_bytes: Set, - pages_range_total: Set, - pages_used_total: Set, - ) -> bool { - &&& mem.wf() - &&& mem.os_exact_range(segment_start(segment_id), SEGMENT_SIZE as int) - &&& commit_bytes.subset_of(mem.os_rw_bytes()) - &&& decommit_bytes <= commit_bytes - &&& segment_info_range(segment_id) <= commit_bytes - decommit_bytes - &&& pages_used_total <= commit_bytes - decommit_bytes - &&& mem.os_rw_bytes() <= mem.points_to@.dom() + segment_info_range(segment_id) - + pages_range_total - } - - impl Local { - spec fn segment_page_range(&self, segment_id: SegmentId, page_id: PageId) -> Set { - if page_id.segment_id == segment_id && self.is_used_primary(page_id) { - set_int_range( - page_start(page_id) + start_offset(self.block_size(page_id)), - page_start(page_id) + start_offset(self.block_size(page_id)) + self.page_capacity( - page_id, - ) * self.block_size(page_id), - ) - } else { - Set::empty() - } - } - - pub closed spec fn segment_pages_range_total(&self, segment_id: SegmentId) -> Set { - Set::::new( - |addr| exists|page_id| self.segment_page_range(segment_id, page_id).contains(addr), - ) - } - - spec fn segment_page_used(&self, segment_id: SegmentId, page_id: PageId) -> Set { - if page_id.segment_id == segment_id && self.is_used_primary(page_id) { - set_int_range( 
- page_start(page_id), - page_start(page_id) + self.page_count(page_id) * SLICE_SIZE, - ) - } else { - Set::empty() - } - } - - pub closed spec fn segment_pages_used_total(&self, segment_id: SegmentId) -> Set { - Set::::new( - |addr| exists|page_id| self.segment_page_used(segment_id, page_id).contains(addr), - ) - } - - /*spec fn segment_page_range_reserved(&self, segment_id: SegmentId, page_id: PageId) -> Set { - if page_id.segment_id == segment_id && self.is_used_primary(page_id) { - set_int_range( - page_start(page_id) + start_offset(self.block_size(page_id)), - page_start(page_id) + start_offset(self.block_size(page_id)) - + self.page_reserved(page_id) * self.block_size(page_id) - ) - } else { - Set::empty() - } - } - - spec fn segment_pages_range_reserved_total(&self, segment_id: SegmentId) -> Set { - Set::::new(|addr| exists |page_id| - self.segment_page_range_reserved(segment_id, page_id).contains(addr) - ) - }*/ - pub open spec fn mem_chunk_good(&self, segment_id: SegmentId) -> bool { - self.segments.dom().contains(segment_id) && mem_chunk_good1( - self.segments[segment_id].mem, - segment_id, - self.commit_mask(segment_id).bytes(segment_id), - self.decommit_mask(segment_id).bytes(segment_id), - self.segment_pages_range_total(segment_id), - self.segment_pages_used_total(segment_id), - ) - } - } - - pub proof fn range_total_le_used_total(local: Local, sid: SegmentId) - requires - local.wf_main(), - local.segments.dom().contains(sid), - ensures - local.segment_pages_range_total(sid) <= local.segment_pages_used_total(sid), - { - assert forall|addr| - local.segment_pages_range_total(sid).contains(addr) implies local.segment_pages_used_total( - sid, - ).contains(addr) by { - let pid = choose|pid: PageId| local.segment_page_range(sid, pid).contains(addr); - let p_blocksize = local.block_size(pid); - let p_capacity = local.page_capacity(pid); - let p_reserved = local.page_reserved(pid); - start_offset_le_slice_size(p_blocksize); - assert(p_capacity * p_blocksize <= p_reserved * p_blocksize) by (nonlinear_arith) - requires - p_capacity <= p_reserved, - p_blocksize >= 0, - ; - assert(local.segment_page_used(sid, pid).contains(addr)); - } - } - - pub proof fn decommit_subset_of_pointsto(local: Local, sid: SegmentId) - requires - local.wf_main(), - local.segments.dom().contains(sid), - local.mem_chunk_good(sid), - ensures - local.decommit_mask(sid).bytes(sid) <= local.segments[sid].mem.points_to@.dom(), - { - range_total_le_used_total(local, sid); - } - - pub proof fn very_unready_range_okay_to_decommit(local: Local) - requires - local.wf_main(), - local.page_organization.popped.is_VeryUnready(), - ensures - (match local.page_organization.popped { - Popped::VeryUnready(segment_id, idx, count, _) => { - set_int_range( - segment_start(segment_id) + idx * SLICE_SIZE, - segment_start(segment_id) + idx * SLICE_SIZE + count * SLICE_SIZE, - ).disjoint( - segment_info_range(segment_id) + local.segment_pages_used_total(segment_id), - ) - }, - _ => false, - }), - { - match local.page_organization.popped { - Popped::VeryUnready(segment_id, idx, count, _) => { - const_facts(); - local.page_organization.get_count_bound_very_unready(); - assert(idx > 0); - assert forall|addr| - local.segment_pages_used_total(segment_id).contains(addr) && set_int_range( - segment_start(segment_id) + idx * SLICE_SIZE, - segment_start(segment_id) + idx * SLICE_SIZE + count * SLICE_SIZE, - ).contains(addr) implies false by { - let page_id = choose|page_id| - local.segment_page_used(segment_id, page_id).contains(addr); - 
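// `choose` picks a witness page whose used byte range covers `addr`; the
// disjointness lemma below separates that page's slice range from the popped
// `[idx, idx + count)` range, contradicting `addr` lying in both ranges.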
local.page_organization.lemma_range_disjoint_very_unready(page_id); - let p_count = local.page_count(page_id); - assert(page_id.idx + p_count <= idx || idx + count <= page_id.idx); - } - }, - _ => {}, - } - } - - pub proof fn preserves_mem_chunk_good(local1: Local, local2: Local) - requires//local2.page_organization == local1.page_organization, - //local2.pages == local1.pages, - //local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), - //local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), - - local1.segments.dom() == local2.segments.dom(), - forall|sid| - local1.segments.dom().contains(sid) ==> local2.commit_mask(sid).bytes(sid) - == local1.commit_mask(sid).bytes(sid), - forall|sid| - local1.segments.dom().contains(sid) ==> local2.decommit_mask(sid).bytes(sid) - == local1.decommit_mask(sid).bytes(sid), - forall|sid| - local1.segments.dom().contains(sid) ==> local2.segments[sid].mem - == local1.segments[sid].mem, - forall|page_id| - local1.is_used_primary(page_id) ==> local2.is_used_primary(page_id) - && local1.page_capacity(page_id) <= local2.page_capacity(page_id) - && local1.page_reserved(page_id) <= local2.page_reserved(page_id) - && local1.page_count(page_id) == local2.page_count(page_id) && local1.block_size( - page_id, - ) == local2.block_size(page_id), - forall|page_id: PageId| - #[trigger] - local2.is_used_primary(page_id) ==> local1.is_used_primary(page_id), - ensures - forall|sid| - #[trigger] - local1.segments.dom().contains(sid) ==> local1.mem_chunk_good(sid) - ==> local2.mem_chunk_good(sid), - { - let sid1 = SegmentId { id: 0, uniq: 0 }; - let sid2 = SegmentId { id: 1, uniq: 0 }; - preserves_mem_chunk_good_except(local1, local2, sid1); - preserves_mem_chunk_good_except(local1, local2, sid2); - } - - pub proof fn preserves_mem_chunk_good_except(local1: Local, local2: Local, esegment_id: SegmentId) - requires//local2.page_organization == local1.page_organization, - //local2.pages == local1.pages, - //local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), - //local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), - - local1.segments.dom().subset_of(local2.segments.dom()), - forall|sid| - sid != esegment_id ==> #[trigger] - local1.segments.dom().contains(sid) ==> local2.commit_mask(sid).bytes(sid) - == local1.commit_mask(sid).bytes(sid), - forall|sid| - sid != esegment_id ==> #[trigger] - local1.segments.dom().contains(sid) ==> local2.decommit_mask(sid).bytes(sid) - == local1.decommit_mask(sid).bytes(sid), - forall|sid| - sid != esegment_id ==> #[trigger] - local1.segments.dom().contains(sid) ==> local2.segments[sid].mem - == local1.segments[sid].mem, - forall|page_id: PageId| - page_id.segment_id != esegment_id && #[trigger] - local1.is_used_primary(page_id) ==> local2.is_used_primary(page_id) - && local1.page_capacity(page_id) <= local2.page_capacity(page_id) - && local1.page_reserved(page_id) <= local2.page_reserved(page_id) - && local1.page_count(page_id) == local2.page_count(page_id) && local1.block_size( - page_id, - ) == local2.block_size(page_id), - forall|page_id: PageId| - page_id.segment_id != esegment_id && #[trigger] - local2.is_used_primary(page_id) ==> local1.is_used_primary(page_id), - ensures - forall|sid| - sid != esegment_id ==> #[trigger] - local1.segments.dom().contains(sid) ==> local1.mem_chunk_good(sid) - ==> local2.mem_chunk_good(sid), - { - assert forall|sid| - sid != esegment_id && #[trigger] - local1.segments.dom().contains(sid) && local1.mem_chunk_good( - sid, - ) implies 
local2.mem_chunk_good(sid) by { - let mem = local2.segments[sid].mem; - let commit_bytes = local2.commit_mask(sid).bytes(sid); - let decommit_bytes = local2.decommit_mask(sid).bytes(sid); - let pages_range_total1 = local1.segment_pages_range_total(sid); - let pages_range_total2 = local2.segment_pages_range_total(sid); - //let pages_range_reserved_total1 = local1.segment_pages_range_reserved_total(sid); - //let pages_range_reserved_total2 = local2.segment_pages_range_reserved_total(sid); - assert(mem.wf()); - assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); - assert(commit_bytes.subset_of(mem.os_rw_bytes())); - assert forall|addr| pages_range_total1.contains(addr) implies pages_range_total2.contains( - addr, - ) by { - let page_id = choose|page_id| local1.segment_page_range(sid, page_id).contains(addr); - assert(page_id.segment_id == sid); - assert(local1.is_used_primary(page_id)); - assert(local2.is_used_primary(page_id)); - assert(local1.page_capacity(page_id) * local1.block_size(page_id) - <= local2.page_capacity(page_id) * local2.block_size(page_id)) by (nonlinear_arith) - requires - local1.page_capacity(page_id) <= local2.page_capacity(page_id), - local1.block_size(page_id) == local2.block_size(page_id), - ; - assert(local2.segment_page_range(sid, page_id).contains(addr)); - } - assert(pages_range_total1.subset_of(pages_range_total2)); - assert(mem.os_rw_bytes().subset_of( - mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, - )); - //assert(pages_range_reserved_total2.subset_of(commit_bytes - decommit_bytes)); - preserves_segment_pages_used_total(local1, local2, sid); - assert(mem_chunk_good1( - local2.segments[sid].mem, - sid, - local2.commit_mask(sid).bytes(sid), - local2.decommit_mask(sid).bytes(sid), - local2.segment_pages_range_total(sid), - local2.segment_pages_used_total(sid), - )); - } +impl MemChunk { + pub proof fn empty() -> (tracked mc: MemChunk) { + MemChunk { os: Map::tracked_empty(), points_to: PointsToRaw::empty() } } - pub proof fn empty_segment_pages_used_total(local1: Local, sid: SegmentId) - requires - forall|pid: PageId| pid.segment_id == sid ==> !local1.is_used_primary(pid), + #[verifier::inline] + pub open spec fn pointsto_has_range(&self, start: int, len: int) -> bool { + set_int_range(start, start + len) <= self.range_points_to() + } + + pub open spec fn os_rw_bytes(&self) -> Set { + self.range_os_rw() + } + + pub open spec fn committed_pointsto_has_range(&self, start: int, len: int) -> bool { + self.pointsto_has_range(start, len) && self.os_has_range_read_write(start, len) + } + + pub proof fn split(tracked &mut self, start: int, len: int) -> (tracked t: Self) ensures - local1.segment_pages_used_total(sid) =~= Set::empty(), + t.points_to@ == old(self).points_to@.restrict(set_int_range(start, start + len)), + t.os == old(self).os.restrict(set_int_range(start, start + len)), + self.points_to@ == old(self).points_to@.remove_keys(set_int_range(start, start + len)), + self.os == old(self).os.remove_keys(set_int_range(start, start + len)), { + let tracked split_os = self.os.tracked_remove_keys( + set_int_range(start, start + len).intersect(self.os.dom()), + ); + let tracked mut pt = PointsToRaw::empty(); + tracked_swap(&mut pt, &mut self.points_to); + let tracked (rt, pt) = pt.split(set_int_range(start, start + len).intersect(pt@.dom())); + self.points_to = pt; + let tracked t = MemChunk { os: split_os, points_to: rt }; + assert(self.points_to@ =~= old(self).points_to@.remove_keys( + set_int_range(start, start + len), + )); + 
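// The `=~=` (extensional equality) assertions here let the verifier conclude
// the plain `==` map equalities promised in the ensures clause, for both the
// remaining chunk and the split-off part.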
assert(self.os =~= old(self).os.remove_keys(set_int_range(start, start + len))); + assert(t.points_to@ =~= old(self).points_to@.restrict(set_int_range(start, start + len))); + assert(t.os =~= old(self).os.restrict(set_int_range(start, start + len))); + t } - pub proof fn preserves_segment_pages_used_total(local1: Local, local2: Local, sid: SegmentId) - requires - forall|page_id: PageId| - page_id.segment_id == sid && #[trigger] - local2.is_used_primary(page_id) ==> local1.is_used_primary(page_id) - && local1.page_count(page_id) == local2.page_count(page_id), + pub proof fn join(tracked &mut self, tracked t: Self) ensures - local2.segment_pages_used_total(sid) <= local1.segment_pages_used_total(sid), + self.points_to@ == old(self).points_to@.union_prefer_right(t.points_to@), + self.os == old(self).os.union_prefer_right(t.os), { - assert forall|addr| - local2.segment_pages_used_total(sid).contains(addr) implies local1.segment_pages_used_total( - sid, - ).contains(addr) by { - let pid = choose|pid| local2.segment_page_used(sid, pid).contains(addr); - assert(local1.segment_page_used(sid, pid).contains(addr)); - } + let tracked MemChunk { os, points_to } = t; + self.os.tracked_union_prefer_right(os); + let tracked mut pt = PointsToRaw::empty(); + tracked_swap(&mut pt, &mut self.points_to); + let tracked pt = pt.join(points_to); + self.points_to = pt; } - pub proof fn preserve_totals(local1: Local, local2: Local, sid: SegmentId) + pub proof fn os_restrict(tracked &mut self, start: int, len: int) requires - forall|page_id: PageId| - page_id.segment_id == sid && #[trigger] - local2.is_used_primary(page_id) ==> local1.is_used_primary(page_id) - && local1.page_count(page_id) == local2.page_count(page_id) && local1.page_capacity( - page_id, - ) == local2.page_capacity(page_id) && local1.block_size(page_id) == local2.block_size( - page_id, - ), - forall|page_id: PageId| - page_id.segment_id == sid && #[trigger] - local1.is_used_primary(page_id) ==> local2.is_used_primary(page_id), + old(self).os_has_range(start, len), ensures - local2.segment_pages_used_total(sid) =~= local1.segment_pages_used_total(sid), - local2.segment_pages_range_total(sid) =~= local1.segment_pages_range_total(sid), + self.points_to == old(self).points_to, + self.os == old(self).os.restrict(set_int_range(start, start + len)), { - assert forall|addr| - local2.segment_pages_used_total(sid).contains(addr) implies local1.segment_pages_used_total( - sid, - ).contains(addr) by { - let pid = choose|pid| local2.segment_page_used(sid, pid).contains(addr); - assert(local1.segment_page_used(sid, pid).contains(addr)); - } - assert forall|addr| - local1.segment_pages_used_total(sid).contains(addr) implies local2.segment_pages_used_total( - sid, - ).contains(addr) by { - let pid = choose|pid| local1.segment_page_used(sid, pid).contains(addr); - assert(local2.segment_page_used(sid, pid).contains(addr)); - } - assert forall|addr| - local2.segment_pages_range_total(sid).contains( - addr, - ) implies local1.segment_pages_range_total(sid).contains(addr) by { - let pid = choose|pid| local2.segment_page_range(sid, pid).contains(addr); - assert(local1.segment_page_range(sid, pid).contains(addr)); - } - assert forall|addr| - local1.segment_pages_range_total(sid).contains( - addr, - ) implies local2.segment_pages_range_total(sid).contains(addr) by { - let pid = choose|pid| local1.segment_page_range(sid, pid).contains(addr); - assert(local2.segment_page_range(sid, pid).contains(addr)); - } + self.os.tracked_remove_keys(self.os.dom() - set_int_range(start, 
start + len)); + assert(self.os =~= old(self).os.restrict(set_int_range(start, start + len))); } - pub proof fn preserves_mem_chunk_good_on_commit(local1: Local, local2: Local, sid: SegmentId) + pub proof fn take_points_to_set(tracked &mut self, s: Set) -> (tracked points_to: + PointsToRaw) requires - local1.segments.dom().contains(sid), - local2.segments.dom().contains(sid), - local1.mem_chunk_good(sid), - local2.page_organization == local1.page_organization, - local2.pages == local1.pages, - local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), - local2.decommit_mask(sid).bytes(sid) == local1.decommit_mask(sid).bytes(sid), - local2.segments[sid].mem.wf(), - local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), + s <= old(self).points_to@.dom(), ensures - local2.mem_chunk_good(sid), + self.os == old(self).os, + self.points_to@ == old(self).points_to@.remove_keys(s), + points_to@.dom() == s, { - preserves_mem_chunk_good_on_commit_with_mask_set(local1, local2, sid); - } - - pub proof fn preserves_mem_chunk_good_on_decommit(local1: Local, local2: Local, sid: SegmentId) + let tracked mut pt = PointsToRaw::empty(); + tracked_swap(&mut pt, &mut self.points_to); + let tracked (rt, pt) = pt.split(s); + self.points_to = pt; + assert(rt@.dom() =~= s); + rt + } + + pub proof fn take_points_to_range( + tracked &mut self, + start: int, + len: int, + ) -> (tracked points_to: PointsToRaw) requires - local1.segments.dom().contains(sid), - local2.segments.dom().contains(sid), - local1.mem_chunk_good(sid), - local2.page_organization == local1.page_organization, - local2.pages == local1.pages, - local2.segments[sid].mem.wf(), - local2.decommit_mask(sid).bytes(sid) <= local1.decommit_mask(sid).bytes(sid), - local2.commit_mask(sid).bytes(sid) =~= local1.commit_mask(sid).bytes(sid) - ( - local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid)), - local2.segments[sid].mem.os_rw_bytes() <= local1.segments[sid].mem.os_rw_bytes(), - local2.segments[sid].mem.points_to@.dom() =~= local1.segments[sid].mem.points_to@.dom() - ( - local1.segments[sid].mem.os_rw_bytes() - local2.segments[sid].mem.os_rw_bytes()), - (local1.segments[sid].mem.os_rw_bytes() - local2.segments[sid].mem.os_rw_bytes()) <= ( - local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid)), - //(local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid)), - local2.segments[sid].mem.os.dom() =~= local1.segments[sid].mem.os.dom(), + len >= 0, + old(self).pointsto_has_range(start, len), ensures - local2.mem_chunk_good(sid), + self.os == old(self).os, + self.points_to@ == old(self).points_to@.remove_keys(set_int_range(start, start + len)), + points_to.is_range(start, len), { - preserve_totals(local1, local2, sid); - assert(mem_chunk_good1( - local2.segments[sid].mem, - sid, - local2.commit_mask(sid).bytes(sid), - local2.decommit_mask(sid).bytes(sid), - local2.segment_pages_range_total(sid), - local2.segment_pages_used_total(sid), - )); + let tracked mut pt = PointsToRaw::empty(); + tracked_swap(&mut pt, &mut self.points_to); + let tracked (rt, pt) = pt.split(set_int_range(start, start + len)); + self.points_to = pt; + rt } - pub proof fn preserves_mem_chunk_good_on_commit_with_mask_set( - local1: Local, - local2: Local, - sid: SegmentId, - ) + pub proof fn give_points_to_range(tracked &mut self, tracked points_to: PointsToRaw) requires - local1.segments.dom().contains(sid), - local2.segments.dom().contains(sid), - local1.mem_chunk_good(sid), - 
local2.page_organization == local1.page_organization, - local2.pages == local1.pages, - local2.segments[sid].mem.wf(), - local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), - local2.decommit_mask(sid).bytes(sid).subset_of(local1.decommit_mask(sid).bytes(sid)), - local1.commit_mask(sid).bytes(sid).subset_of(local2.commit_mask(sid).bytes(sid)), - local2.decommit_mask(sid).bytes(sid).disjoint( - local2.commit_mask(sid).bytes(sid) - local1.commit_mask(sid).bytes(sid), - ), - (local1.segments[sid].mem.os_rw_bytes() + (local2.commit_mask(sid).bytes(sid) - - local1.commit_mask(sid).bytes(sid))).subset_of( - local2.segments[sid].mem.os_rw_bytes(), - ), + old(self).wf(), ensures - local2.mem_chunk_good(sid), + self.wf(), + self.os == old(self).os, + self.points_to@.dom() == old(self).points_to@.dom() + points_to@.dom(), { - let old_mem = local1.segments[sid].mem; + let tracked mut pt = PointsToRaw::empty(); + tracked_swap(&mut pt, &mut self.points_to); + let tracked pt = pt.join(points_to); + self.points_to = pt; + assert(self.points_to@.dom() =~= old(self).points_to@.dom() + points_to@.dom()); + } +} + +pub open spec fn segment_info_range(segment_id: SegmentId) -> Set { + set_int_range( + segment_start(segment_id), + segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + SIZEOF_PAGE_HEADER * (SLICES_PER_SEGMENT + + 1), + ) +} + +pub open spec fn mem_chunk_good1( + mem: MemChunk, + segment_id: SegmentId, + commit_bytes: Set, + decommit_bytes: Set, + pages_range_total: Set, + pages_used_total: Set, +) -> bool { + &&& mem.wf() + &&& mem.os_exact_range(segment_start(segment_id), SEGMENT_SIZE as int) + &&& commit_bytes.subset_of(mem.os_rw_bytes()) + &&& decommit_bytes <= commit_bytes + &&& segment_info_range(segment_id) <= commit_bytes - decommit_bytes + &&& pages_used_total <= commit_bytes - decommit_bytes + &&& mem.os_rw_bytes() <= mem.points_to@.dom() + segment_info_range(segment_id) + + pages_range_total +} + +impl Local { + spec fn segment_page_range(&self, segment_id: SegmentId, page_id: PageId) -> Set { + if page_id.segment_id == segment_id && self.is_used_primary(page_id) { + set_int_range( + page_start(page_id) + start_offset(self.block_size(page_id)), + page_start(page_id) + start_offset(self.block_size(page_id)) + self.page_capacity( + page_id, + ) * self.block_size(page_id), + ) + } else { + Set::empty() + } + } + + pub closed spec fn segment_pages_range_total(&self, segment_id: SegmentId) -> Set { + Set::::new( + |addr| exists|page_id| self.segment_page_range(segment_id, page_id).contains(addr), + ) + } + + spec fn segment_page_used(&self, segment_id: SegmentId, page_id: PageId) -> Set { + if page_id.segment_id == segment_id && self.is_used_primary(page_id) { + set_int_range( + page_start(page_id), + page_start(page_id) + self.page_count(page_id) * SLICE_SIZE, + ) + } else { + Set::empty() + } + } + + pub closed spec fn segment_pages_used_total(&self, segment_id: SegmentId) -> Set { + Set::::new( + |addr| exists|page_id| self.segment_page_used(segment_id, page_id).contains(addr), + ) + } + + /*spec fn segment_page_range_reserved(&self, segment_id: SegmentId, page_id: PageId) -> Set { + if page_id.segment_id == segment_id && self.is_used_primary(page_id) { + set_int_range( + page_start(page_id) + start_offset(self.block_size(page_id)), + page_start(page_id) + start_offset(self.block_size(page_id)) + + self.page_reserved(page_id) * self.block_size(page_id) + ) + } else { + Set::empty() + } + } + + spec fn segment_pages_range_reserved_total(&self, segment_id: SegmentId) -> Set 
{ + Set::::new(|addr| exists |page_id| + self.segment_page_range_reserved(segment_id, page_id).contains(addr) + ) + }*/ + pub open spec fn mem_chunk_good(&self, segment_id: SegmentId) -> bool { + self.segments.dom().contains(segment_id) && mem_chunk_good1( + self.segments[segment_id].mem, + segment_id, + self.commit_mask(segment_id).bytes(segment_id), + self.decommit_mask(segment_id).bytes(segment_id), + self.segment_pages_range_total(segment_id), + self.segment_pages_used_total(segment_id), + ) + } +} + +pub proof fn range_total_le_used_total(local: Local, sid: SegmentId) + requires + local.wf_main(), + local.segments.dom().contains(sid), + ensures + local.segment_pages_range_total(sid) <= local.segment_pages_used_total(sid), +{ + assert forall|addr| + local.segment_pages_range_total(sid).contains(addr) implies local.segment_pages_used_total( + sid, + ).contains(addr) by { + let pid = choose|pid: PageId| local.segment_page_range(sid, pid).contains(addr); + let p_blocksize = local.block_size(pid); + let p_capacity = local.page_capacity(pid); + let p_reserved = local.page_reserved(pid); + start_offset_le_slice_size(p_blocksize); + assert(p_capacity * p_blocksize <= p_reserved * p_blocksize) by (nonlinear_arith) + requires + p_capacity <= p_reserved, + p_blocksize >= 0, + ; + assert(local.segment_page_used(sid, pid).contains(addr)); + } +} + +pub proof fn decommit_subset_of_pointsto(local: Local, sid: SegmentId) + requires + local.wf_main(), + local.segments.dom().contains(sid), + local.mem_chunk_good(sid), + ensures + local.decommit_mask(sid).bytes(sid) <= local.segments[sid].mem.points_to@.dom(), +{ + range_total_le_used_total(local, sid); +} + +pub proof fn very_unready_range_okay_to_decommit(local: Local) + requires + local.wf_main(), + local.page_organization.popped.is_VeryUnready(), + ensures + (match local.page_organization.popped { + Popped::VeryUnready(segment_id, idx, count, _) => { + set_int_range( + segment_start(segment_id) + idx * SLICE_SIZE, + segment_start(segment_id) + idx * SLICE_SIZE + count * SLICE_SIZE, + ).disjoint( + segment_info_range(segment_id) + local.segment_pages_used_total(segment_id), + ) + }, + _ => false, + }), +{ + match local.page_organization.popped { + Popped::VeryUnready(segment_id, idx, count, _) => { + const_facts(); + local.page_organization.get_count_bound_very_unready(); + assert(idx > 0); + assert forall|addr| + local.segment_pages_used_total(segment_id).contains(addr) && set_int_range( + segment_start(segment_id) + idx * SLICE_SIZE, + segment_start(segment_id) + idx * SLICE_SIZE + count * SLICE_SIZE, + ).contains(addr) implies false by { + let page_id = choose|page_id| + local.segment_page_used(segment_id, page_id).contains(addr); + local.page_organization.lemma_range_disjoint_very_unready(page_id); + let p_count = local.page_count(page_id); + assert(page_id.idx + p_count <= idx || idx + count <= page_id.idx); + } + }, + _ => {}, + } +} + +pub proof fn preserves_mem_chunk_good(local1: Local, local2: Local) + requires //local2.page_organization == local1.page_organization, +//local2.pages == local1.pages, +//local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), +//local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), + + local1.segments.dom() == local2.segments.dom(), + forall|sid| + local1.segments.dom().contains(sid) ==> local2.commit_mask(sid).bytes(sid) + == local1.commit_mask(sid).bytes(sid), + forall|sid| + local1.segments.dom().contains(sid) ==> local2.decommit_mask(sid).bytes(sid) + == 
local1.decommit_mask(sid).bytes(sid), + forall|sid| + local1.segments.dom().contains(sid) ==> local2.segments[sid].mem + == local1.segments[sid].mem, + forall|page_id| + local1.is_used_primary(page_id) ==> local2.is_used_primary(page_id) + && local1.page_capacity(page_id) <= local2.page_capacity(page_id) + && local1.page_reserved(page_id) <= local2.page_reserved(page_id) + && local1.page_count(page_id) == local2.page_count(page_id) && local1.block_size( + page_id, + ) == local2.block_size(page_id), + forall|page_id: PageId| #[trigger] + local2.is_used_primary(page_id) ==> local1.is_used_primary(page_id), + ensures + forall|sid| #[trigger] + local1.segments.dom().contains(sid) ==> local1.mem_chunk_good(sid) + ==> local2.mem_chunk_good(sid), +{ + let sid1 = SegmentId { id: 0, uniq: 0 }; + let sid2 = SegmentId { id: 1, uniq: 0 }; + preserves_mem_chunk_good_except(local1, local2, sid1); + preserves_mem_chunk_good_except(local1, local2, sid2); +} + +pub proof fn preserves_mem_chunk_good_except(local1: Local, local2: Local, esegment_id: SegmentId) + requires //local2.page_organization == local1.page_organization, +//local2.pages == local1.pages, +//local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), +//local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), + + local1.segments.dom().subset_of(local2.segments.dom()), + forall|sid| + sid != esegment_id ==> #[trigger] local1.segments.dom().contains(sid) + ==> local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), + forall|sid| + sid != esegment_id ==> #[trigger] local1.segments.dom().contains(sid) + ==> local2.decommit_mask(sid).bytes(sid) == local1.decommit_mask(sid).bytes(sid), + forall|sid| + sid != esegment_id ==> #[trigger] local1.segments.dom().contains(sid) + ==> local2.segments[sid].mem == local1.segments[sid].mem, + forall|page_id: PageId| + page_id.segment_id != esegment_id && #[trigger] local1.is_used_primary(page_id) + ==> local2.is_used_primary(page_id) && local1.page_capacity(page_id) + <= local2.page_capacity(page_id) && local1.page_reserved(page_id) + <= local2.page_reserved(page_id) && local1.page_count(page_id) == local2.page_count( + page_id, + ) && local1.block_size(page_id) == local2.block_size(page_id), + forall|page_id: PageId| + page_id.segment_id != esegment_id && #[trigger] local2.is_used_primary(page_id) + ==> local1.is_used_primary(page_id), + ensures + forall|sid| + sid != esegment_id ==> #[trigger] local1.segments.dom().contains(sid) + ==> local1.mem_chunk_good(sid) ==> local2.mem_chunk_good(sid), +{ + assert forall|sid| + sid != esegment_id && #[trigger] local1.segments.dom().contains(sid) + && local1.mem_chunk_good(sid) implies local2.mem_chunk_good(sid) by { let mem = local2.segments[sid].mem; let commit_bytes = local2.commit_mask(sid).bytes(sid); let decommit_bytes = local2.decommit_mask(sid).bytes(sid); let pages_range_total1 = local1.segment_pages_range_total(sid); let pages_range_total2 = local2.segment_pages_range_total(sid); + //let pages_range_reserved_total1 = local1.segment_pages_range_reserved_total(sid); + //let pages_range_reserved_total2 = local2.segment_pages_range_reserved_total(sid); assert(mem.wf()); assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); assert(commit_bytes.subset_of(mem.os_rw_bytes())); @@ -13104,274 +12886,459 @@ mod os_mem_util { assert(page_id.segment_id == sid); assert(local1.is_used_primary(page_id)); assert(local2.is_used_primary(page_id)); + assert(local1.page_capacity(page_id) * local1.block_size(page_id) 
+ <= local2.page_capacity(page_id) * local2.block_size(page_id)) by (nonlinear_arith) + requires + local1.page_capacity(page_id) <= local2.page_capacity(page_id), + local1.block_size(page_id) == local2.block_size(page_id), + ; assert(local2.segment_page_range(sid, page_id).contains(addr)); } assert(pages_range_total1.subset_of(pages_range_total2)); - assert((mem.os_rw_bytes() - old_mem.os_rw_bytes()).subset_of(mem.points_to@.dom())); assert(mem.os_rw_bytes().subset_of( mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, )); - assert(decommit_bytes.subset_of(commit_bytes)); + //assert(pages_range_reserved_total2.subset_of(commit_bytes - decommit_bytes)); preserves_segment_pages_used_total(local1, local2, sid); + assert(mem_chunk_good1( + local2.segments[sid].mem, + sid, + local2.commit_mask(sid).bytes(sid), + local2.decommit_mask(sid).bytes(sid), + local2.segment_pages_range_total(sid), + local2.segment_pages_used_total(sid), + )); } +} - pub proof fn preserves_mem_chunk_good_on_transfer_to_capacity( - local1: Local, - local2: Local, - page_id: PageId, - ) - requires - local1.segments.dom().contains(page_id.segment_id), - local2.segments.dom().contains(page_id.segment_id), - local1.mem_chunk_good(page_id.segment_id), - local2.page_organization == local1.page_organization, - local1.pages.dom().contains(page_id), - local2.pages.dom().contains(page_id), - local2.commit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.commit_mask( - page_id.segment_id, - ).bytes(page_id.segment_id), - local2.decommit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.decommit_mask( - page_id.segment_id, - ).bytes(page_id.segment_id), - local2.segments[page_id.segment_id].mem.wf(), - local1.is_used_primary(page_id), - forall|page_id| - #[trigger] - local1.is_used_primary(page_id) ==> local2.is_used_primary(page_id) - && local1.page_capacity(page_id) <= local2.page_capacity(page_id) - && local1.block_size(page_id) == local2.block_size(page_id) && local1.page_count( - page_id, - ) == local2.page_count(page_id), - forall|page_id| local2.is_used_primary(page_id) ==> local1.is_used_primary(page_id), - local2.segments[page_id.segment_id].mem.os == local1.segments[page_id.segment_id].mem.os, - ({ - let sr = set_int_range( - page_start(page_id) + start_offset(local1.block_size(page_id)) - + local1.page_capacity(page_id) * local1.block_size(page_id), - page_start(page_id) + start_offset(local1.block_size(page_id)) - + local2.page_capacity(page_id) * local1.block_size(page_id), - ); - local2.segments[page_id.segment_id].mem.points_to@.dom() - =~= local1.segments[page_id.segment_id].mem.points_to@.dom() - - sr//&& local2.decommit_mask(page_id.segment_id).bytes(page_id.segment_id).disjoint(sr) +pub proof fn empty_segment_pages_used_total(local1: Local, sid: SegmentId) + requires + forall|pid: PageId| pid.segment_id == sid ==> !local1.is_used_primary(pid), + ensures + local1.segment_pages_used_total(sid) =~= Set::empty(), +{ +} - }), - ensures - local2.mem_chunk_good(page_id.segment_id), - { - let sid = page_id.segment_id; - let rng = set_int_range( - page_start(page_id) + start_offset(local1.block_size(page_id)) + local1.page_capacity( - page_id, - ) * local1.block_size(page_id), - page_start(page_id) + start_offset(local1.block_size(page_id)) + local2.page_capacity( +pub proof fn preserves_segment_pages_used_total(local1: Local, local2: Local, sid: SegmentId) + requires + forall|page_id: PageId| + page_id.segment_id == sid && #[trigger] local2.is_used_primary(page_id) + ==> 
local1.is_used_primary(page_id) && local1.page_count(page_id) + == local2.page_count(page_id), + ensures + local2.segment_pages_used_total(sid) <= local1.segment_pages_used_total(sid), +{ + assert forall|addr| + local2.segment_pages_used_total(sid).contains(addr) implies local1.segment_pages_used_total( + sid, + ).contains(addr) by { + let pid = choose|pid| local2.segment_page_used(sid, pid).contains(addr); + assert(local1.segment_page_used(sid, pid).contains(addr)); + } +} + +pub proof fn preserve_totals(local1: Local, local2: Local, sid: SegmentId) + requires + forall|page_id: PageId| + page_id.segment_id == sid && #[trigger] local2.is_used_primary(page_id) + ==> local1.is_used_primary(page_id) && local1.page_count(page_id) + == local2.page_count(page_id) && local1.page_capacity(page_id) + == local2.page_capacity(page_id) && local1.block_size(page_id) == local2.block_size( page_id, - ) * local1.block_size(page_id), - ); - let r1 = local1.page_capacity(page_id); - let r2 = local2.page_capacity(page_id); - let bs = local1.block_size(page_id); - assert(r1 * bs <= r2 * bs) by (nonlinear_arith) - requires - r1 <= r2 && bs >= 0, - ; - let old_mem = local1.segments[sid].mem; - let mem = local2.segments[sid].mem; - let commit_bytes = local2.commit_mask(sid).bytes(sid); - let old_decommit_bytes = local1.decommit_mask(sid).bytes(sid); - let decommit_bytes = local2.decommit_mask(sid).bytes(sid); - let pages_range_total1 = local1.segment_pages_range_total(sid); - let pages_range_total2 = local2.segment_pages_range_total(sid); - assert(mem.wf()); - assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); - assert(commit_bytes.subset_of(mem.os_rw_bytes())); - assert forall|addr| - pages_range_total1.contains(addr) || rng.contains(addr) implies pages_range_total2.contains( + ), + forall|page_id: PageId| + page_id.segment_id == sid && #[trigger] local1.is_used_primary(page_id) + ==> local2.is_used_primary(page_id), + ensures + local2.segment_pages_used_total(sid) =~= local1.segment_pages_used_total(sid), + local2.segment_pages_range_total(sid) =~= local1.segment_pages_range_total(sid), +{ + assert forall|addr| + local2.segment_pages_used_total(sid).contains(addr) implies local1.segment_pages_used_total( + sid, + ).contains(addr) by { + let pid = choose|pid| local2.segment_page_used(sid, pid).contains(addr); + assert(local1.segment_page_used(sid, pid).contains(addr)); + } + assert forall|addr| + local1.segment_pages_used_total(sid).contains(addr) implies local2.segment_pages_used_total( + sid, + ).contains(addr) by { + let pid = choose|pid| local1.segment_page_used(sid, pid).contains(addr); + assert(local2.segment_page_used(sid, pid).contains(addr)); + } + assert forall|addr| + local2.segment_pages_range_total(sid).contains( addr, - ) by { - if pages_range_total1.contains(addr) { - let page_id = choose|page_id| local1.segment_page_range(sid, page_id).contains(addr); - assert(page_id.segment_id == sid); - assert(local1.is_used_primary(page_id)); - assert(local2.is_used_primary(page_id)); - assert(local1.page_capacity(page_id) * local1.block_size(page_id) - <= local2.page_capacity(page_id) * local2.block_size(page_id)) by (nonlinear_arith) - requires - local1.page_capacity(page_id) <= local2.page_capacity(page_id), - local1.block_size(page_id) == local2.block_size(page_id), - ; - assert(local2.segment_page_range(sid, page_id).contains(addr)); - } else { - assert(r1 * bs >= 0) by (nonlinear_arith) - requires - r1 >= 0, - bs >= 0, - ; - assert(local2.segment_page_range(sid, 
page_id).contains(addr)); - } - }//assert(pages_range_total1.subset_of(pages_range_total2)); - //assert((mem.os_rw_bytes() - old_mem.os_rw_bytes()).subset_of(mem.points_to@.dom())); - - preserves_segment_pages_used_total(local1, local2, page_id.segment_id); - assert(mem.os_rw_bytes().subset_of( - mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, - )); - //assert(old_decommit_bytes.subset_of(old_mem.points_to@.dom())); - //assert(decommit_bytes.subset_of(old_mem.points_to@.dom())); - //assert(decommit_bytes.subset_of(mem.points_to@.dom())); - //assert(decommit_bytes.subset_of(commit_bytes)); + ) implies local1.segment_pages_range_total(sid).contains(addr) by { + let pid = choose|pid| local2.segment_page_range(sid, pid).contains(addr); + assert(local1.segment_page_range(sid, pid).contains(addr)); } + assert forall|addr| + local1.segment_pages_range_total(sid).contains( + addr, + ) implies local2.segment_pages_range_total(sid).contains(addr) by { + let pid = choose|pid| local1.segment_page_range(sid, pid).contains(addr); + assert(local2.segment_page_range(sid, pid).contains(addr)); + } +} - pub proof fn preserves_mem_chunk_good_on_transfer_back( - local1: Local, - local2: Local, - page_id: PageId, - ) - requires - local1.segments.dom().contains(page_id.segment_id), - local2.segments.dom().contains(page_id.segment_id), - local1.mem_chunk_good(page_id.segment_id), - local1.pages.dom().contains(page_id), - local2.pages.dom().contains(page_id), - local2.commit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.commit_mask( - page_id.segment_id, - ).bytes(page_id.segment_id), - local2.decommit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.decommit_mask( - page_id.segment_id, - ).bytes(page_id.segment_id), - local2.segments[page_id.segment_id].mem.wf(), - local1.is_used_primary(page_id), - forall|pid| - #[trigger] - local1.is_used_primary(pid) && pid != page_id ==> local2.is_used_primary(pid) - && local1.page_capacity(pid) <= local2.page_capacity(pid) && local1.block_size(pid) - == local2.block_size(pid) && local1.page_count(pid) == local2.page_count(pid), - forall|pid| #[trigger] local2.is_used_primary(pid) ==> local1.is_used_primary(pid), - !local2.is_used_primary(page_id), - local2.segments[page_id.segment_id].mem.os == local1.segments[page_id.segment_id].mem.os, - local2.segments[page_id.segment_id].mem.points_to@.dom() - =~= local1.segments[page_id.segment_id].mem.points_to@.dom() + set_int_range( - page_start(page_id) + start_offset(local1.block_size(page_id)), - page_start(page_id) + start_offset(local1.block_size(page_id)) + local1.page_capacity( - page_id, - ) * local1.block_size(page_id), - ), - ensures - local2.mem_chunk_good(page_id.segment_id), - { - let sid = page_id.segment_id; - let rng = set_int_range( - page_start(page_id) + start_offset(local1.block_size(page_id)), - page_start(page_id) + start_offset(local1.block_size(page_id)) + local1.page_capacity( +pub proof fn preserves_mem_chunk_good_on_commit(local1: Local, local2: Local, sid: SegmentId) + requires + local1.segments.dom().contains(sid), + local2.segments.dom().contains(sid), + local1.mem_chunk_good(sid), + local2.page_organization == local1.page_organization, + local2.pages == local1.pages, + local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), + local2.decommit_mask(sid).bytes(sid) == local1.decommit_mask(sid).bytes(sid), + local2.segments[sid].mem.wf(), + local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), + ensures + 
local2.mem_chunk_good(sid), +{ + preserves_mem_chunk_good_on_commit_with_mask_set(local1, local2, sid); +} + +pub proof fn preserves_mem_chunk_good_on_decommit(local1: Local, local2: Local, sid: SegmentId) + requires + local1.segments.dom().contains(sid), + local2.segments.dom().contains(sid), + local1.mem_chunk_good(sid), + local2.page_organization == local1.page_organization, + local2.pages == local1.pages, + local2.segments[sid].mem.wf(), + local2.decommit_mask(sid).bytes(sid) <= local1.decommit_mask(sid).bytes(sid), + local2.commit_mask(sid).bytes(sid) =~= local1.commit_mask(sid).bytes(sid) - ( + local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid)), + local2.segments[sid].mem.os_rw_bytes() <= local1.segments[sid].mem.os_rw_bytes(), + local2.segments[sid].mem.points_to@.dom() =~= local1.segments[sid].mem.points_to@.dom() - ( + local1.segments[sid].mem.os_rw_bytes() - local2.segments[sid].mem.os_rw_bytes()), + (local1.segments[sid].mem.os_rw_bytes() - local2.segments[sid].mem.os_rw_bytes()) <= ( + local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid)), + //(local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid)), + local2.segments[sid].mem.os.dom() =~= local1.segments[sid].mem.os.dom(), + ensures + local2.mem_chunk_good(sid), +{ + preserve_totals(local1, local2, sid); + assert(mem_chunk_good1( + local2.segments[sid].mem, + sid, + local2.commit_mask(sid).bytes(sid), + local2.decommit_mask(sid).bytes(sid), + local2.segment_pages_range_total(sid), + local2.segment_pages_used_total(sid), + )); +} + +pub proof fn preserves_mem_chunk_good_on_commit_with_mask_set( + local1: Local, + local2: Local, + sid: SegmentId, +) + requires + local1.segments.dom().contains(sid), + local2.segments.dom().contains(sid), + local1.mem_chunk_good(sid), + local2.page_organization == local1.page_organization, + local2.pages == local1.pages, + local2.segments[sid].mem.wf(), + local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), + local2.decommit_mask(sid).bytes(sid).subset_of(local1.decommit_mask(sid).bytes(sid)), + local1.commit_mask(sid).bytes(sid).subset_of(local2.commit_mask(sid).bytes(sid)), + local2.decommit_mask(sid).bytes(sid).disjoint( + local2.commit_mask(sid).bytes(sid) - local1.commit_mask(sid).bytes(sid), + ), + (local1.segments[sid].mem.os_rw_bytes() + (local2.commit_mask(sid).bytes(sid) + - local1.commit_mask(sid).bytes(sid))).subset_of( + local2.segments[sid].mem.os_rw_bytes(), + ), + ensures + local2.mem_chunk_good(sid), +{ + let old_mem = local1.segments[sid].mem; + let mem = local2.segments[sid].mem; + let commit_bytes = local2.commit_mask(sid).bytes(sid); + let decommit_bytes = local2.decommit_mask(sid).bytes(sid); + let pages_range_total1 = local1.segment_pages_range_total(sid); + let pages_range_total2 = local2.segment_pages_range_total(sid); + assert(mem.wf()); + assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); + assert(commit_bytes.subset_of(mem.os_rw_bytes())); + assert forall|addr| pages_range_total1.contains(addr) implies pages_range_total2.contains( + addr, + ) by { + let page_id = choose|page_id| local1.segment_page_range(sid, page_id).contains(addr); + assert(page_id.segment_id == sid); + assert(local1.is_used_primary(page_id)); + assert(local2.is_used_primary(page_id)); + assert(local2.segment_page_range(sid, page_id).contains(addr)); + } + assert(pages_range_total1.subset_of(pages_range_total2)); + assert((mem.os_rw_bytes() - old_mem.os_rw_bytes()).subset_of(mem.points_to@.dom())); 
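// The assert just above reflects the `has_new_pointsto` hypothesis: every byte that became
// readable/writable during this commit arrives together with a points_to permission, so the
// coverage of os_rw_bytes() by points_to + segment_info_range + the page ranges carries over
// from local1 to local2. A minimal, self-contained sketch of the underlying set-algebra step,
// using vstd's Set operators as in the surrounding code (`cover_grows` is a hypothetical
// helper written only for illustration, not a lemma from this development):
proof fn cover_grows(old_rw: Set<int>, new_rw: Set<int>, cover: Set<int>, extra: Set<int>)
    requires
        old_rw.subset_of(cover),
        (new_rw - old_rw).subset_of(extra),
    ensures
        new_rw.subset_of(cover + extra),
{
    // illustrative only: bytes already covered stay covered, and genuinely new bytes
    // land in `extra`
    assert forall|a: int| new_rw.contains(a) implies (cover + extra).contains(a) by {
        if old_rw.contains(a) {
            assert(cover.contains(a));
        } else {
            assert((new_rw - old_rw).contains(a));
            assert(extra.contains(a));
        }
    }
}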
+ assert(mem.os_rw_bytes().subset_of( + mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, + )); + assert(decommit_bytes.subset_of(commit_bytes)); + preserves_segment_pages_used_total(local1, local2, sid); +} + +pub proof fn preserves_mem_chunk_good_on_transfer_to_capacity( + local1: Local, + local2: Local, + page_id: PageId, +) + requires + local1.segments.dom().contains(page_id.segment_id), + local2.segments.dom().contains(page_id.segment_id), + local1.mem_chunk_good(page_id.segment_id), + local2.page_organization == local1.page_organization, + local1.pages.dom().contains(page_id), + local2.pages.dom().contains(page_id), + local2.commit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.commit_mask( + page_id.segment_id, + ).bytes(page_id.segment_id), + local2.decommit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.decommit_mask( + page_id.segment_id, + ).bytes(page_id.segment_id), + local2.segments[page_id.segment_id].mem.wf(), + local1.is_used_primary(page_id), + forall|page_id| #[trigger] + local1.is_used_primary(page_id) ==> local2.is_used_primary(page_id) + && local1.page_capacity(page_id) <= local2.page_capacity(page_id) + && local1.block_size(page_id) == local2.block_size(page_id) && local1.page_count( page_id, - ) * local1.block_size(page_id), - ); - let old_mem = local1.segments[sid].mem; - let mem = local2.segments[sid].mem; - let commit_bytes = local2.commit_mask(sid).bytes(sid); - let pages_range_total1 = local1.segment_pages_range_total(sid); - let pages_range_total2 = local2.segment_pages_range_total(sid); - assert(mem.wf()); - assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); - assert(commit_bytes.subset_of(mem.os_rw_bytes())); - assert forall|addr| - pages_range_total1.contains(addr) && !pages_range_total2.contains(addr) implies #[trigger] - rng.contains(addr) by { - let pid = choose|pid| local1.segment_page_range(sid, pid).contains(addr); - if pid == page_id { - assert(mem.points_to@.dom().contains(addr)); - } else { - assert(pid.segment_id == sid); - assert(local1.is_used_primary(pid)); - assert(local2.is_used_primary(pid)); - assert(local1.page_capacity(pid) * local1.block_size(pid) <= local2.page_capacity(pid) - * local2.block_size(pid)) by (nonlinear_arith) - requires - local1.page_capacity(pid) <= local2.page_capacity(pid), - local1.block_size(pid) == local2.block_size(pid), - ; - assert(local2.segment_page_range(sid, pid).contains(addr)); - assert(false); - } - } - assert((pages_range_total1 - pages_range_total2).subset_of(rng)); - assert((pages_range_total1 - pages_range_total2).subset_of(mem.points_to@.dom())); - assert(mem.os_rw_bytes().subset_of( - mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, - )); - preserves_segment_pages_used_total(local1, local2, page_id.segment_id); - } + ) == local2.page_count(page_id), + forall|page_id| local2.is_used_primary(page_id) ==> local1.is_used_primary(page_id), + local2.segments[page_id.segment_id].mem.os == local1.segments[page_id.segment_id].mem.os, + ({ + let sr = set_int_range( + page_start(page_id) + start_offset(local1.block_size(page_id)) + + local1.page_capacity(page_id) * local1.block_size(page_id), + page_start(page_id) + start_offset(local1.block_size(page_id)) + + local2.page_capacity(page_id) * local1.block_size(page_id), + ); + local2.segments[page_id.segment_id].mem.points_to@.dom() + =~= local1.segments[page_id.segment_id].mem.points_to@.dom() + - sr //&& 
local2.decommit_mask(page_id.segment_id).bytes(page_id.segment_id).disjoint(sr) - pub proof fn preserves_mem_chunk_on_set_used(local1: Local, local2: Local, page_id: PageId) + }), + ensures + local2.mem_chunk_good(page_id.segment_id), +{ + let sid = page_id.segment_id; + let rng = set_int_range( + page_start(page_id) + start_offset(local1.block_size(page_id)) + local1.page_capacity( + page_id, + ) * local1.block_size(page_id), + page_start(page_id) + start_offset(local1.block_size(page_id)) + local2.page_capacity( + page_id, + ) * local1.block_size(page_id), + ); + let r1 = local1.page_capacity(page_id); + let r2 = local2.page_capacity(page_id); + let bs = local1.block_size(page_id); + assert(r1 * bs <= r2 * bs) by (nonlinear_arith) requires - local1.mem_chunk_good(page_id.segment_id), - //local2.page_organization == local1.page_organization, - //local2.pages == local1.pages, - //local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), - //local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), - local1.segments.dom() == local2.segments.dom(), - forall|sid| - local1.segments.dom().contains(sid) ==> local2.commit_mask(sid).bytes(sid) - == local1.commit_mask(sid).bytes(sid), - forall|sid| - local1.segments.dom().contains(sid) ==> local2.decommit_mask(sid).bytes(sid) - == local1.decommit_mask(sid).bytes(sid), - forall|sid| - local1.segments.dom().contains(sid) ==> local2.segments[sid].mem - == local1.segments[sid].mem, - forall|pid| - local1.is_used_primary(pid) ==> local2.is_used_primary(pid) && local1.page_capacity(pid) - <= local2.page_capacity(pid) && local1.page_reserved(pid) <= local2.page_reserved( - pid, - ) && local1.page_count(pid) == local2.page_count(pid) && local1.block_size(pid) - == local2.block_size(pid), - forall|pid: PageId| - #[trigger] - local2.is_used_primary(pid) && page_id != pid ==> local1.is_used_primary(pid), - page_init_is_committed(page_id, local2), - ensures - local2.mem_chunk_good(page_id.segment_id), - { - let sid = page_id.segment_id; - let mem = local2.segments[sid].mem; - let commit_bytes = local2.commit_mask(sid).bytes(sid); - let decommit_bytes = local2.decommit_mask(sid).bytes(sid); - let pages_range_total1 = local1.segment_pages_range_total(sid); - let pages_range_total2 = local2.segment_pages_range_total(sid); - //let pages_range_reserved_total1 = local1.segment_pages_range_reserved_total(sid); - //let pages_range_reserved_total2 = local2.segment_pages_range_reserved_total(sid); - assert(mem.wf()); - assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); - assert(commit_bytes.subset_of(mem.os_rw_bytes())); - assert forall|addr| pages_range_total1.contains(addr) implies pages_range_total2.contains( - addr, - ) by { + r1 <= r2 && bs >= 0, + ; + let old_mem = local1.segments[sid].mem; + let mem = local2.segments[sid].mem; + let commit_bytes = local2.commit_mask(sid).bytes(sid); + let old_decommit_bytes = local1.decommit_mask(sid).bytes(sid); + let decommit_bytes = local2.decommit_mask(sid).bytes(sid); + let pages_range_total1 = local1.segment_pages_range_total(sid); + let pages_range_total2 = local2.segment_pages_range_total(sid); + assert(mem.wf()); + assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); + assert(commit_bytes.subset_of(mem.os_rw_bytes())); + assert forall|addr| + pages_range_total1.contains(addr) || rng.contains(addr) implies pages_range_total2.contains( + addr, + ) by { + if pages_range_total1.contains(addr) { let page_id = choose|page_id| local1.segment_page_range(sid, 
page_id).contains(addr); assert(page_id.segment_id == sid); assert(local1.is_used_primary(page_id)); assert(local2.is_used_primary(page_id)); - assert(local1.page_capacity(page_id) * local1.block_size(page_id) <= local2.page_capacity( - page_id, - ) * local2.block_size(page_id)) by (nonlinear_arith) + assert(local1.page_capacity(page_id) * local1.block_size(page_id) + <= local2.page_capacity(page_id) * local2.block_size(page_id)) by (nonlinear_arith) + requires + local1.page_capacity(page_id) <= local2.page_capacity(page_id), + local1.block_size(page_id) == local2.block_size(page_id), + ; + assert(local2.segment_page_range(sid, page_id).contains(addr)); + } else { + assert(r1 * bs >= 0) by (nonlinear_arith) requires - local1.page_capacity(page_id) <= local2.page_capacity(page_id), - local1.block_size(page_id) == local2.block_size(page_id), + r1 >= 0, + bs >= 0, ; assert(local2.segment_page_range(sid, page_id).contains(addr)); } - assert(pages_range_total1.subset_of(pages_range_total2)); - assert(mem.os_rw_bytes().subset_of( - mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, - )); - //assert(pages_range_reserved_total2.subset_of(commit_bytes - decommit_bytes)); - assert forall|addr| - local2.segment_pages_used_total(sid).contains(addr) implies commit_bytes.contains(addr) - && !decommit_bytes.contains(addr) by { - const_facts(); - let pid = choose|pid| local2.segment_page_used(sid, pid).contains(addr); - assert(local2.segment_page_used(sid, pid).contains(addr)); - if pid == page_id { - /*if page_id.segment_id == sid && local2.is_used_primary(page_id) { + } //assert(pages_range_total1.subset_of(pages_range_total2)); + //assert((mem.os_rw_bytes() - old_mem.os_rw_bytes()).subset_of(mem.points_to@.dom())); + + preserves_segment_pages_used_total(local1, local2, page_id.segment_id); + assert(mem.os_rw_bytes().subset_of( + mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, + )); + //assert(old_decommit_bytes.subset_of(old_mem.points_to@.dom())); + //assert(decommit_bytes.subset_of(old_mem.points_to@.dom())); + //assert(decommit_bytes.subset_of(mem.points_to@.dom())); + //assert(decommit_bytes.subset_of(commit_bytes)); +} + +pub proof fn preserves_mem_chunk_good_on_transfer_back( + local1: Local, + local2: Local, + page_id: PageId, +) + requires + local1.segments.dom().contains(page_id.segment_id), + local2.segments.dom().contains(page_id.segment_id), + local1.mem_chunk_good(page_id.segment_id), + local1.pages.dom().contains(page_id), + local2.pages.dom().contains(page_id), + local2.commit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.commit_mask( + page_id.segment_id, + ).bytes(page_id.segment_id), + local2.decommit_mask(page_id.segment_id).bytes(page_id.segment_id) == local1.decommit_mask( + page_id.segment_id, + ).bytes(page_id.segment_id), + local2.segments[page_id.segment_id].mem.wf(), + local1.is_used_primary(page_id), + forall|pid| #[trigger] + local1.is_used_primary(pid) && pid != page_id ==> local2.is_used_primary(pid) + && local1.page_capacity(pid) <= local2.page_capacity(pid) && local1.block_size(pid) + == local2.block_size(pid) && local1.page_count(pid) == local2.page_count(pid), + forall|pid| #[trigger] local2.is_used_primary(pid) ==> local1.is_used_primary(pid), + !local2.is_used_primary(page_id), + local2.segments[page_id.segment_id].mem.os == local1.segments[page_id.segment_id].mem.os, + local2.segments[page_id.segment_id].mem.points_to@.dom() + =~= local1.segments[page_id.segment_id].mem.points_to@.dom() + set_int_range( + 
page_start(page_id) + start_offset(local1.block_size(page_id)), + page_start(page_id) + start_offset(local1.block_size(page_id)) + local1.page_capacity( + page_id, + ) * local1.block_size(page_id), + ), + ensures + local2.mem_chunk_good(page_id.segment_id), +{ + let sid = page_id.segment_id; + let rng = set_int_range( + page_start(page_id) + start_offset(local1.block_size(page_id)), + page_start(page_id) + start_offset(local1.block_size(page_id)) + local1.page_capacity( + page_id, + ) * local1.block_size(page_id), + ); + let old_mem = local1.segments[sid].mem; + let mem = local2.segments[sid].mem; + let commit_bytes = local2.commit_mask(sid).bytes(sid); + let pages_range_total1 = local1.segment_pages_range_total(sid); + let pages_range_total2 = local2.segment_pages_range_total(sid); + assert(mem.wf()); + assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); + assert(commit_bytes.subset_of(mem.os_rw_bytes())); + assert forall|addr| + pages_range_total1.contains(addr) && !pages_range_total2.contains( + addr, + ) implies #[trigger] rng.contains(addr) by { + let pid = choose|pid| local1.segment_page_range(sid, pid).contains(addr); + if pid == page_id { + assert(mem.points_to@.dom().contains(addr)); + } else { + assert(pid.segment_id == sid); + assert(local1.is_used_primary(pid)); + assert(local2.is_used_primary(pid)); + assert(local1.page_capacity(pid) * local1.block_size(pid) <= local2.page_capacity(pid) + * local2.block_size(pid)) by (nonlinear_arith) + requires + local1.page_capacity(pid) <= local2.page_capacity(pid), + local1.block_size(pid) == local2.block_size(pid), + ; + assert(local2.segment_page_range(sid, pid).contains(addr)); + assert(false); + } + } + assert((pages_range_total1 - pages_range_total2).subset_of(rng)); + assert((pages_range_total1 - pages_range_total2).subset_of(mem.points_to@.dom())); + assert(mem.os_rw_bytes().subset_of( + mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, + )); + preserves_segment_pages_used_total(local1, local2, page_id.segment_id); +} + +pub proof fn preserves_mem_chunk_on_set_used(local1: Local, local2: Local, page_id: PageId) + requires + local1.mem_chunk_good(page_id.segment_id), + //local2.page_organization == local1.page_organization, + //local2.pages == local1.pages, + //local2.commit_mask(sid).bytes(sid) == local1.commit_mask(sid).bytes(sid), + //local2.segments[sid].mem.has_new_pointsto(&local1.segments[sid].mem), + local1.segments.dom() == local2.segments.dom(), + forall|sid| + local1.segments.dom().contains(sid) ==> local2.commit_mask(sid).bytes(sid) + == local1.commit_mask(sid).bytes(sid), + forall|sid| + local1.segments.dom().contains(sid) ==> local2.decommit_mask(sid).bytes(sid) + == local1.decommit_mask(sid).bytes(sid), + forall|sid| + local1.segments.dom().contains(sid) ==> local2.segments[sid].mem + == local1.segments[sid].mem, + forall|pid| + local1.is_used_primary(pid) ==> local2.is_used_primary(pid) && local1.page_capacity(pid) + <= local2.page_capacity(pid) && local1.page_reserved(pid) <= local2.page_reserved( + pid, + ) && local1.page_count(pid) == local2.page_count(pid) && local1.block_size(pid) + == local2.block_size(pid), + forall|pid: PageId| #[trigger] + local2.is_used_primary(pid) && page_id != pid ==> local1.is_used_primary(pid), + page_init_is_committed(page_id, local2), + ensures + local2.mem_chunk_good(page_id.segment_id), +{ + let sid = page_id.segment_id; + let mem = local2.segments[sid].mem; + let commit_bytes = local2.commit_mask(sid).bytes(sid); + let decommit_bytes = 
local2.decommit_mask(sid).bytes(sid); + let pages_range_total1 = local1.segment_pages_range_total(sid); + let pages_range_total2 = local2.segment_pages_range_total(sid); + //let pages_range_reserved_total1 = local1.segment_pages_range_reserved_total(sid); + //let pages_range_reserved_total2 = local2.segment_pages_range_reserved_total(sid); + assert(mem.wf()); + assert(mem.os_exact_range(segment_start(sid), SEGMENT_SIZE as int)); + assert(commit_bytes.subset_of(mem.os_rw_bytes())); + assert forall|addr| pages_range_total1.contains(addr) implies pages_range_total2.contains( + addr, + ) by { + let page_id = choose|page_id| local1.segment_page_range(sid, page_id).contains(addr); + assert(page_id.segment_id == sid); + assert(local1.is_used_primary(page_id)); + assert(local2.is_used_primary(page_id)); + assert(local1.page_capacity(page_id) * local1.block_size(page_id) <= local2.page_capacity( + page_id, + ) * local2.block_size(page_id)) by (nonlinear_arith) + requires + local1.page_capacity(page_id) <= local2.page_capacity(page_id), + local1.block_size(page_id) == local2.block_size(page_id), + ; + assert(local2.segment_page_range(sid, page_id).contains(addr)); + } + assert(pages_range_total1.subset_of(pages_range_total2)); + assert(mem.os_rw_bytes().subset_of( + mem.points_to@.dom() + segment_info_range(sid) + pages_range_total2, + )); + //assert(pages_range_reserved_total2.subset_of(commit_bytes - decommit_bytes)); + assert forall|addr| + local2.segment_pages_used_total(sid).contains(addr) implies commit_bytes.contains(addr) + && !decommit_bytes.contains(addr) by { + const_facts(); + let pid = choose|pid| local2.segment_page_used(sid, pid).contains(addr); + assert(local2.segment_page_used(sid, pid).contains(addr)); + if pid == page_id { + /*if page_id.segment_id == sid && local2.is_used_primary(page_id) { let start = page_start(page_id); let len = page_start(page_id) + local2.page_count(page_id) * SLICE_SIZE; assert(set_int_range(start, start + len).contains(addr)); @@ -13379,111 +13346,111 @@ mod os_mem_util { } else { assert(false); }*/ - } else { - assert(local1.segment_page_used(sid, pid).contains(addr)); - assert(local1.segment_pages_used_total(sid).contains(addr)); - assert(commit_bytes.contains(addr) && !decommit_bytes.contains(addr)); - } - } - assert(mem_chunk_good1( - local2.segments[sid].mem, - sid, - local2.commit_mask(sid).bytes(sid), - local2.decommit_mask(sid).bytes(sid), - local2.segment_pages_range_total(sid), - local2.segment_pages_used_total(sid), - )); - } + } else { + assert(local1.segment_page_used(sid, pid).contains(addr)); + assert(local1.segment_pages_used_total(sid).contains(addr)); + assert(commit_bytes.contains(addr) && !decommit_bytes.contains(addr)); + } + } + assert(mem_chunk_good1( + local2.segments[sid].mem, + sid, + local2.commit_mask(sid).bytes(sid), + local2.decommit_mask(sid).bytes(sid), + local2.segment_pages_range_total(sid), + local2.segment_pages_used_total(sid), + )); +} - pub proof fn segment_mem_has_reserved_range(local: Local, page_id: PageId, new_cap: int) +pub proof fn segment_mem_has_reserved_range(local: Local, page_id: PageId, new_cap: int) + requires + local.wf_main(), + local.is_used_primary(page_id), + local.page_capacity(page_id) <= new_cap <= local.page_reserved(page_id), + ensures + ({ + let blocksize = local.block_size(page_id); + local.segments[page_id.segment_id].mem.pointsto_has_range( + block_start_at(page_id, blocksize, local.page_capacity(page_id)), + (new_cap - local.page_capacity(page_id)) * blocksize, + ) + }), +{ + let blocksize 
= local.block_size(page_id); + let capacity = local.page_capacity(page_id); + let reserved = local.page_reserved(page_id); + let r1 = block_start_at(page_id, blocksize, local.page_capacity(page_id)); + let size = (new_cap - local.page_capacity(page_id)) * blocksize; + let range = set_int_range(r1, r1 + size); + let segment_id = page_id.segment_id; + let mem = local.segments[segment_id].mem; + let commit_bytes = local.commit_mask(segment_id).bytes(segment_id); + block_start_at_diff(page_id, blocksize as nat, capacity as nat, new_cap as nat); + let res_range = set_int_range( + block_start_at(page_id, blocksize, 0), + block_start_at(page_id, blocksize, reserved), + ); + assert(capacity * blocksize >= 0); + start_offset_le_slice_size(blocksize); + const_facts(); + local.page_organization.used_offset0_has_count(page_id); + local.page_organization.get_count_bound(page_id); + assert(page_id.idx != 0); + assert(new_cap * blocksize <= reserved * blocksize) by (nonlinear_arith) requires - local.wf_main(), - local.is_used_primary(page_id), - local.page_capacity(page_id) <= new_cap <= local.page_reserved(page_id), - ensures - ({ - let blocksize = local.block_size(page_id); - local.segments[page_id.segment_id].mem.pointsto_has_range( - block_start_at(page_id, blocksize, local.page_capacity(page_id)), - (new_cap - local.page_capacity(page_id)) * blocksize, - ) - }), - { - let blocksize = local.block_size(page_id); - let capacity = local.page_capacity(page_id); - let reserved = local.page_reserved(page_id); - let r1 = block_start_at(page_id, blocksize, local.page_capacity(page_id)); - let size = (new_cap - local.page_capacity(page_id)) * blocksize; - let range = set_int_range(r1, r1 + size); - let segment_id = page_id.segment_id; - let mem = local.segments[segment_id].mem; - let commit_bytes = local.commit_mask(segment_id).bytes(segment_id); - block_start_at_diff(page_id, blocksize as nat, capacity as nat, new_cap as nat); - let res_range = set_int_range( - block_start_at(page_id, blocksize, 0), - block_start_at(page_id, blocksize, reserved), - ); - assert(capacity * blocksize >= 0); + new_cap <= reserved, + blocksize >= 0, + ; + assert(range <= res_range); + let pages_used_total = local.segment_pages_used_total(segment_id); + assert forall|addr| res_range.contains(addr) implies commit_bytes.contains(addr) by { start_offset_le_slice_size(blocksize); - const_facts(); - local.page_organization.used_offset0_has_count(page_id); - local.page_organization.get_count_bound(page_id); - assert(page_id.idx != 0); - assert(new_cap * blocksize <= reserved * blocksize) by (nonlinear_arith) - requires - new_cap <= reserved, - blocksize >= 0, - ; - assert(range <= res_range); - let pages_used_total = local.segment_pages_used_total(segment_id); - assert forall|addr| res_range.contains(addr) implies commit_bytes.contains(addr) by { - start_offset_le_slice_size(blocksize); - assert(local.segment_page_used(segment_id, page_id).contains(addr)); - assert(pages_used_total.contains(addr)); - } - assert(res_range <= commit_bytes); - assert(range.subset_of(mem.os_rw_bytes())); - assert(range.disjoint(segment_info_range(segment_id))); - assert forall|addr, pid| - local.segment_page_range(segment_id, pid).contains(addr) implies !range.contains(addr) by { - if pid == page_id { - assert(!range.contains(addr)); - } else if pid.segment_id == page_id.segment_id && local.is_used_primary(page_id) { - let p_blocksize = local.block_size(pid); - let p_capacity = local.page_capacity(pid); - let p_reserved = local.page_reserved(pid); - 
start_offset_le_slice_size(p_blocksize); - assert(p_capacity * p_blocksize <= p_reserved * p_blocksize) by (nonlinear_arith) - requires - p_capacity <= p_reserved, - p_blocksize >= 0, - ; - let my_count = local.pages[page_id].count@.value.unwrap() as int; - let p_count = local.pages[pid].count@.value.unwrap() as int; - local.page_organization.lemma_range_disjoint_used2(page_id, pid); - assert(page_id.idx + my_count <= pid.idx || pid.idx + p_count <= page_id.idx); - assert(!range.contains(addr)); - } else { - assert(!range.contains(addr)); - } + assert(local.segment_page_used(segment_id, page_id).contains(addr)); + assert(pages_used_total.contains(addr)); + } + assert(res_range <= commit_bytes); + assert(range.subset_of(mem.os_rw_bytes())); + assert(range.disjoint(segment_info_range(segment_id))); + assert forall|addr, pid| + local.segment_page_range(segment_id, pid).contains(addr) implies !range.contains(addr) by { + if pid == page_id { + assert(!range.contains(addr)); + } else if pid.segment_id == page_id.segment_id && local.is_used_primary(page_id) { + let p_blocksize = local.block_size(pid); + let p_capacity = local.page_capacity(pid); + let p_reserved = local.page_reserved(pid); + start_offset_le_slice_size(p_blocksize); + assert(p_capacity * p_blocksize <= p_reserved * p_blocksize) by (nonlinear_arith) + requires + p_capacity <= p_reserved, + p_blocksize >= 0, + ; + let my_count = local.pages[page_id].count@.value.unwrap() as int; + let p_count = local.pages[pid].count@.value.unwrap() as int; + local.page_organization.lemma_range_disjoint_used2(page_id, pid); + assert(page_id.idx + my_count <= pid.idx || pid.idx + p_count <= page_id.idx); + assert(!range.contains(addr)); + } else { + assert(!range.contains(addr)); } - assert(range.disjoint(local.segment_pages_range_total(segment_id))); - assert(range.subset_of(mem.points_to@.dom())); } + assert(range.disjoint(local.segment_pages_range_total(segment_id))); + assert(range.subset_of(mem.points_to@.dom())); +} - /////// - pub open spec fn page_init_is_committed(page_id: PageId, local: Local) -> bool { - let count = local.page_organization.pages[page_id].count.unwrap() as int; - let start = page_start(page_id); - let len = count * SLICE_SIZE; - let cm = local.segments[page_id.segment_id].main@.value.unwrap().commit_mask@; - set_int_range(start, start + len) <= local.commit_mask(page_id.segment_id).bytes( - page_id.segment_id, - ) - local.decommit_mask(page_id.segment_id).bytes(page_id.segment_id) && count - == local.page_count(page_id) - } +/////// +pub open spec fn page_init_is_committed(page_id: PageId, local: Local) -> bool { + let count = local.page_organization.pages[page_id].count.unwrap() as int; + let start = page_start(page_id); + let len = count * SLICE_SIZE; + let cm = local.segments[page_id.segment_id].main@.value.unwrap().commit_mask@; + set_int_range(start, start + len) <= local.commit_mask(page_id.segment_id).bytes( + page_id.segment_id, + ) - local.decommit_mask(page_id.segment_id).bytes(page_id.segment_id) && count + == local.page_count(page_id) +} - } // verus! +} // verus! } // utilities @@ -13503,8 +13470,8 @@ mod atomic_ghost_modified { use vstd::prelude::*; macro_rules! 
declare_atomic_type { - ($at_ident:ident, $patomic_ty:ident, $perm_ty:ty, $value_ty: ty, $atomic_pred_ty: ident) => { - verus!{ + ($at_ident:ident, $patomic_ty:ident, $perm_ty:ty, $value_ty: ty, $atomic_pred_ty: ident) => { + verus!{ pub struct $atomic_pred_ty { p: Pred } @@ -13623,8 +13590,8 @@ mod atomic_ghost_modified { } } + }; } -} tokenized_state_machine!(Cancellable { fields { @@ -13701,48 +13668,48 @@ mod atomic_ghost_modified { verus! { - pub tracked struct Duplicable { - pub tracked inst: Dupe::Instance, - } +pub tracked struct Duplicable { + pub tracked inst: Dupe::Instance, +} - impl Duplicable { - pub open spec fn wf(self) -> bool { - true - } +impl Duplicable { + pub open spec fn wf(self) -> bool { + true + } - pub open spec fn view(self) -> T { - self.inst.val() - } + pub open spec fn view(self) -> T { + self.inst.val() + } - pub proof fn new(tracked t: T) -> (tracked s: Self) - ensures - s.wf() && s@ === t, - { - let tracked inst = Dupe::Instance::initialize_one( /* spec */ - t, Option::Some(t)); - Duplicable { inst } - } + pub proof fn new(tracked t: T) -> (tracked s: Self) + ensures + s.wf() && s@ === t, + { + let tracked inst = Dupe::Instance::initialize_one( /* spec */ + t, Option::Some(t)); + Duplicable { inst } + } - pub proof fn clone(tracked &self) -> (tracked other: Self) - requires - self.wf(), - ensures - other.wf() && self@ === other@, - { - Duplicable { inst: self.inst.clone() } - } + pub proof fn clone(tracked &self) -> (tracked other: Self) + requires + self.wf(), + ensures + other.wf() && self@ === other@, + { + Duplicable { inst: self.inst.clone() } + } - pub proof fn borrow(tracked &self) -> (tracked t: &T) - requires - self.wf(), - ensures - *t === self@, - { - self.inst.borrow() - } + pub proof fn borrow(tracked &self) -> (tracked t: &T) + requires + self.wf(), + ensures + *t === self@, + { + self.inst.borrow() } +} - } // verus! +} // verus! declare_atomic_type!(AtomicU64, PAtomicU64, PermissionU64, u64, AtomicPredU64); /* declare_atomic_type!(AtomicU32, PAtomicU32, PermissionU32, u32, AtomicPredU32); @@ -14118,196 +14085,192 @@ mod pigeonhole { verus! 
{ - // TODO: This belongs in set_lib - proof fn singleton_set_unique_elt(s: Set, a: T, b: T) - requires - s.finite(), - s.len() == 1, - s.contains(a), - s.contains(b), - ensures - a == b, - { - assert_by_contradiction!(a == b, { +// TODO: This belongs in set_lib +proof fn singleton_set_unique_elt(s: Set, a: T, b: T) + requires + s.finite(), + s.len() == 1, + s.contains(a), + s.contains(b), + ensures + a == b, +{ + assert_by_contradiction!(a == b, { let empty = s.remove(a); assert(empty.len() == 0); assert(empty.contains(b)); }); - } +} - proof fn set_mismatch(s1: Set, s2: Set, missing: nat) - requires - s1.finite(), - s2.finite(), - s1.len() == s2.len(), - forall|elt| s2.contains(elt) ==> s1.contains(elt), - s1.contains(missing), - !s2.contains(missing), - ensures - false, - decreases s1.len(), - { - if s1.len() == 1 { - let elt = s2.choose(); - assert(s2.contains(elt)); - assert(s1.contains(elt)); - singleton_set_unique_elt(s1, elt, missing); - assert(elt == missing); - assert(false); - } else { - let elt = s2.choose(); - assert(s2.contains(elt)); - assert(s1.contains(elt)); - let s1_smaller = s1.remove(elt); - set_mismatch(s1_smaller, s2.remove(elt), missing); - } +proof fn set_mismatch(s1: Set, s2: Set, missing: nat) + requires + s1.finite(), + s2.finite(), + s1.len() == s2.len(), + forall|elt| s2.contains(elt) ==> s1.contains(elt), + s1.contains(missing), + !s2.contains(missing), + ensures + false, + decreases s1.len(), +{ + if s1.len() == 1 { + let elt = s2.choose(); + assert(s2.contains(elt)); + assert(s1.contains(elt)); + singleton_set_unique_elt(s1, elt, missing); + assert(elt == missing); + assert(false); + } else { + let elt = s2.choose(); + assert(s2.contains(elt)); + assert(s1.contains(elt)); + let s1_smaller = s1.remove(elt); + set_mismatch(s1_smaller, s2.remove(elt), missing); } +} - /* TODO: These next two should be derived from the set_int_range and lemma_int_range in +/* TODO: These next two should be derived from the set_int_range and lemma_int_range in * set_lib.rs, but it's surprisingly painful to do so */ - /// Creates a finite set of nats in the range [lo, hi). - pub open spec fn set_nat_range(lo: nat, hi: nat) -> Set { - Set::new(|i: nat| lo <= i && i < hi) - } +/// Creates a finite set of nats in the range [lo, hi). +pub open spec fn set_nat_range(lo: nat, hi: nat) -> Set { + Set::new(|i: nat| lo <= i && i < hi) +} - /// If a set solely contains nats in the range [a, b), then its size is - /// bounded by b - a. - pub proof fn lemma_nat_range(lo: nat, hi: nat) - requires - lo <= hi, - ensures - set_nat_range(lo, hi).finite(), - set_nat_range(lo, hi).len() == hi - lo, - decreases hi - lo, - { - if lo == hi { - assert(set_nat_range(lo, hi) =~= Set::empty()); - } else { - lemma_nat_range(lo, (hi - 1) as nat); - assert(set_nat_range(lo, (hi - 1) as nat).insert((hi - 1) as nat) =~= set_nat_range( - lo, - hi, - )); - } +/// If a set solely contains nats in the range [a, b), then its size is +/// bounded by b - a. 
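// As a quick sanity check on the statement above (illustrative only; `example_nat_range`
// is not part of this development), instantiating the lemma that follows at concrete
// bounds yields the expected cardinality:
proof fn example_nat_range()
    ensures
        set_nat_range(3, 7).len() == 4,
{
    // 3 <= 7, so the precondition holds and the lemma gives len == 7 - 3
    lemma_nat_range(3, 7);
}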
+pub proof fn lemma_nat_range(lo: nat, hi: nat) + requires + lo <= hi, + ensures + set_nat_range(lo, hi).finite(), + set_nat_range(lo, hi).len() == hi - lo, + decreases hi - lo, +{ + if lo == hi { + assert(set_nat_range(lo, hi) =~= Set::empty()); + } else { + lemma_nat_range(lo, (hi - 1) as nat); + assert(set_nat_range(lo, (hi - 1) as nat).insert((hi - 1) as nat) =~= set_nat_range( + lo, + hi, + )); } +} - proof fn nat_set_size(s: Set, bound: nat) - requires - forall|i: nat| (0 <= i < bound <==> s.contains(i)), - ensures - s.finite(), - s.len() == bound, - { - let nats = set_nat_range(0, bound); - lemma_nat_range(0, bound); - assert(s =~= nats); - } +proof fn nat_set_size(s: Set, bound: nat) + requires + forall|i: nat| (0 <= i < bound <==> s.contains(i)), + ensures + s.finite(), + s.len() == bound, +{ + let nats = set_nat_range(0, bound); + lemma_nat_range(0, bound); + assert(s =~= nats); +} - pub proof fn pigeonhole_missing_idx_implies_double_helper( - m: Map, - missing: nat, - len: nat, - prev_vals: Set, - k: nat, - ) -> (dup2: nat) - requires - len >= 2, - forall|i: nat| (0 <= i < len <==> m.dom().contains(i)), - forall|i: nat| - (#[trigger] - m.dom().contains(i) ==> (0 <= m[i] < len && m[i] != missing)), - 0 <= missing < len, - 0 <= k < len, - prev_vals.finite(), - prev_vals.len() == k, - //forall |j| 0 <= j < k ==> #[trigger] prev_vals.contains(m[j]), - forall|elt| #[trigger] prev_vals.contains(elt) ==> exists|j| 0 <= j < k && m[j] == elt, - ensures - m.dom().contains(dup2), - exists|dup1| - #![auto] - dup1 != dup2 && m.dom().contains(dup1) && 0 <= dup1 < len && m[dup1] == m[dup2], - decreases len - k, - { - if prev_vals.contains(m[k]) { - let dup1 = choose|j| 0 <= j < k && m[j] == m[k]; - dup1 +pub proof fn pigeonhole_missing_idx_implies_double_helper( + m: Map, + missing: nat, + len: nat, + prev_vals: Set, + k: nat, +) -> (dup2: nat) + requires + len >= 2, + forall|i: nat| (0 <= i < len <==> m.dom().contains(i)), + forall|i: nat| (#[trigger] m.dom().contains(i) ==> (0 <= m[i] < len && m[i] != missing)), + 0 <= missing < len, + 0 <= k < len, + prev_vals.finite(), + prev_vals.len() == k, + //forall |j| 0 <= j < k ==> #[trigger] prev_vals.contains(m[j]), + forall|elt| #[trigger] prev_vals.contains(elt) ==> exists|j| 0 <= j < k && m[j] == elt, + ensures + m.dom().contains(dup2), + exists|dup1| + #![auto] + dup1 != dup2 && m.dom().contains(dup1) && 0 <= dup1 < len && m[dup1] == m[dup2], + decreases len - k, +{ + if prev_vals.contains(m[k]) { + let dup1 = choose|j| 0 <= j < k && m[j] == m[k]; + dup1 + } else { + if k < len - 1 { + pigeonhole_missing_idx_implies_double_helper( + m, + missing, + len, + prev_vals.insert(m[k]), + k + 1, + ) } else { - if k < len - 1 { - pigeonhole_missing_idx_implies_double_helper( - m, - missing, - len, - prev_vals.insert(m[k]), - k + 1, - ) - } else { - assert forall|elt| prev_vals.contains(elt) implies 0 <= elt < len && elt != missing by { - let j = choose|j| 0 <= j < k && m[j] == elt; - assert(m.dom().contains(j)); + assert forall|elt| prev_vals.contains(elt) implies 0 <= elt < len && elt != missing by { + let j = choose|j| 0 <= j < k && m[j] == elt; + assert(m.dom().contains(j)); + } + let new_prev_vals = prev_vals.insert(m[k]); + assert forall|elt| new_prev_vals.contains(elt) implies 0 <= elt < len && elt + != missing by { + if prev_vals.contains(elt) { + } else { + assert(elt == m[k]); + assert(m.dom().contains(k)); } - let new_prev_vals = prev_vals.insert(m[k]); - assert forall|elt| new_prev_vals.contains(elt) implies 0 <= elt < len && elt - != 
missing by { - if prev_vals.contains(elt) { - } else { - assert(elt == m[k]); - assert(m.dom().contains(k)); - } - }; - nat_set_size(m.dom(), len); - set_mismatch(m.dom(), new_prev_vals, missing); - 1 - } + }; + nat_set_size(m.dom(), len); + set_mismatch(m.dom(), new_prev_vals, missing); + 1 } } +} - pub proof fn pigeonhole_missing_idx_implies_double(m: Map, missing: nat, len: nat) -> (r: - (nat, nat)) - requires - forall|i: nat| (0 <= i < len <==> m.dom().contains(i)), - forall|i: nat| - (#[trigger] - m.dom().contains(i) ==> (0 <= m[i] < len && m[i] != missing)), - 0 <= missing < len, - ensures - ({ - let (i, j) = r; - i != j && m.dom().contains(i) && m.dom().contains(j) && m[i] == m[j] - }), - { - assert(len >= 2) by { - assert(len >= 1); - if len == 1 { - assert(m.dom().contains(0)); - assert(m[0] != missing); - } - }; - let dup2 = pigeonhole_missing_idx_implies_double_helper(m, missing, len, Set::empty(), 0); - let dup1 = choose|dup1| - #![auto] - dup1 != dup2 && m.dom().contains(dup1) && 0 <= dup1 < len && m[dup1] == m[dup2]; - (dup1, dup2) - } +pub proof fn pigeonhole_missing_idx_implies_double(m: Map, missing: nat, len: nat) -> (r: + (nat, nat)) + requires + forall|i: nat| (0 <= i < len <==> m.dom().contains(i)), + forall|i: nat| (#[trigger] m.dom().contains(i) ==> (0 <= m[i] < len && m[i] != missing)), + 0 <= missing < len, + ensures + ({ + let (i, j) = r; + i != j && m.dom().contains(i) && m.dom().contains(j) && m[i] == m[j] + }), +{ + assert(len >= 2) by { + assert(len >= 1); + if len == 1 { + assert(m.dom().contains(0)); + assert(m[0] != missing); + } + }; + let dup2 = pigeonhole_missing_idx_implies_double_helper(m, missing, len, Set::empty(), 0); + let dup1 = choose|dup1| + #![auto] + dup1 != dup2 && m.dom().contains(dup1) && 0 <= dup1 < len && m[dup1] == m[dup2]; + (dup1, dup2) +} - pub proof fn pigeonhole_too_many_elements_implies_double(m: Map, len: nat) -> (r: ( - nat, - nat, - )) - requires - forall|i: nat| (0 <= i < len + 1 <==> m.dom().contains(i)), - forall|i: nat| #[trigger] m.dom().contains(i) ==> 0 <= m[i] < len, - ensures - ({ - let (i, j) = r; - i != j && m.dom().contains(i) && m.dom().contains(j) && m[i] == m[j] - }), - { - pigeonhole_missing_idx_implies_double(m, len, len + 1) - } +pub proof fn pigeonhole_too_many_elements_implies_double(m: Map, len: nat) -> (r: ( + nat, + nat, +)) + requires + forall|i: nat| (0 <= i < len + 1 <==> m.dom().contains(i)), + forall|i: nat| #[trigger] m.dom().contains(i) ==> 0 <= m[i] < len, + ensures + ({ + let (i, j) = r; + i != j && m.dom().contains(i) && m.dom().contains(j) && m[i] == m[j] + }), +{ + pigeonhole_missing_idx_implies_double(m, len, len + 1) +} - } // verus! +} // verus! } // implementation @@ -14334,197 +14297,192 @@ mod linked_list { verus! { - // Originally I wanted to do a linked list here in the proper Rust-idiomatic - // way, something like: - // - // struct LL { next: Option> } - // - // However, there were a couple of problems: - // - // 1. We need to pad each node out to the block size, which isn't statically fixed. - // This problem isn't too hard to work around though, we just need to make our - // own Box-like type that manages the size of the allocation. - // - // 2. Because of the way the thread-safe atomic linked list works, we need to - // split the 'ownership' from the 'physical pointer', so we can write the pointer - // into a node without the ownership. - // - // Problem (2) seems more annoying to work around. 
At any rate, I've decided to just - // give up on the recursive datatype and do a flat list of pointers and pointer permissions. - #[repr(C)] - #[derive(Copy)] - pub struct Node { - pub ptr: PPtr, - } +// Originally I wanted to do a linked list here in the proper Rust-idiomatic +// way, something like: +// +// struct LL { next: Option> } +// +// However, there were a couple of problems: +// +// 1. We need to pad each node out to the block size, which isn't statically fixed. +// This problem isn't too hard to work around though, we just need to make our +// own Box-like type that manages the size of the allocation. +// +// 2. Because of the way the thread-safe atomic linked list works, we need to +// split the 'ownership' from the 'physical pointer', so we can write the pointer +// into a node without the ownership. +// +// Problem (2) seems more annoying to work around. At any rate, I've decided to just +// give up on the recursive datatype and do a flat list of pointers and pointer permissions. +#[repr(C)] +#[derive(Copy)] +pub struct Node { + pub ptr: PPtr, +} - impl Clone for Node { - fn clone(&self) -> Node { - Node { ptr: self.ptr } - } +impl Clone for Node { + fn clone(&self) -> Node { + Node { ptr: self.ptr } } +} - global layout Node is size == 8, align == 8; +global layout Node is size == 8, align == 8; - pub proof fn size_of_node() - ensures - size_of::() == 8 && align_of::() == 8, - { - } +pub proof fn size_of_node() + ensures + size_of::() == 8 && align_of::() == 8, +{ +} - pub ghost struct LLData { - ghost fixed_page: bool, - ghost block_size: nat, // only used if fixed_page=true - ghost page_id: PageId, // only used if fixed_page=true - ghost heap_id: Option, // if set, then all blocks must have this HeapId - ghost instance: Mim::Instance, - ghost len: nat, - } +pub ghost struct LLData { + ghost fixed_page: bool, + ghost block_size: nat, // only used if fixed_page=true + ghost page_id: PageId, // only used if fixed_page=true + ghost heap_id: Option, // if set, then all blocks must have this HeapId + ghost instance: Mim::Instance, + ghost len: nat, +} - pub struct LL { - first: PPtr, - data: Ghost, - // first to be popped off goes at the end - perms: Tracked, PointsToRaw, Mim::block)>>, - } +pub struct LL { + first: PPtr, + data: Ghost, + // first to be popped off goes at the end + perms: Tracked, PointsToRaw, Mim::block)>>, +} - pub tracked struct LLGhostStateToReconvene { - pub ghost block_size: nat, - pub ghost page_id: PageId, - pub ghost instance: Mim::Instance, - pub tracked map: Map, - } +pub tracked struct LLGhostStateToReconvene { + pub ghost block_size: nat, + pub ghost page_id: PageId, + pub ghost instance: Mim::Instance, + pub tracked map: Map, +} - impl LL { - pub closed spec fn next_ptr(&self, i: nat) -> int { - if i == 0 { - 0 - } else { - self.perms@.index((i - 1) as nat).0@.pptr - } - } - - pub closed spec fn valid_node(&self, i: nat, next_ptr: int) -> bool { - 0 <= i < self.data@.len ==> (self.perms@.dom().contains(i) && { - let (perm, padding, block_token) = self.perms@.index(i); - // Each node points to the next node - perm@.value.is_some() && perm@.value.unwrap().ptr.id() - == next_ptr// The PointsToRaw makes up the rest of the block size allocation - && block_token@.key.block_size - size_of::() >= 0 && padding.is_range( - perm@.pptr + size_of::(), - block_token@.key.block_size - size_of::(), - )// block_token is correct - && block_token@.instance == self.data@.instance && is_block_ptr( - perm@.pptr, - block_token@.key, - ) && (self.data@.fixed_page ==> 
(block_token@.key.page_id == self.data@.page_id - && block_token@.key.block_size == self.data@.block_size)) && ( - match self.data@.heap_id { - Some(heap_id) => block_token@.value.heap_id == Some(heap_id), - None => true, - }) +impl LL { + pub closed spec fn next_ptr(&self, i: nat) -> int { + if i == 0 { + 0 + } else { + self.perms@.index((i - 1) as nat).0@.pptr + } + } + + pub closed spec fn valid_node(&self, i: nat, next_ptr: int) -> bool { + 0 <= i < self.data@.len ==> (self.perms@.dom().contains(i) && { + let (perm, padding, block_token) = self.perms@.index(i); + // Each node points to the next node + perm@.value.is_some() && perm@.value.unwrap().ptr.id() + == next_ptr // The PointsToRaw makes up the rest of the block size allocation + && block_token@.key.block_size - size_of::() >= 0 && padding.is_range( + perm@.pptr + size_of::(), + block_token@.key.block_size - size_of::(), + ) // block_token is correct + && block_token@.instance == self.data@.instance && is_block_ptr( + perm@.pptr, + block_token@.key, + ) && (self.data@.fixed_page ==> (block_token@.key.page_id == self.data@.page_id + && block_token@.key.block_size == self.data@.block_size)) && ( + match self.data@.heap_id { + Some(heap_id) => block_token@.value.heap_id == Some(heap_id), + None => true, }) - } + }) + } - pub closed spec fn wf(&self) -> bool { - &&& (forall|i: nat| self.perms@.dom().contains(i) ==> 0 <= i < self.data@.len) - &&& self.first.id() == self.next_ptr(self.data@.len) - &&& (forall|i: nat| - self.valid_node( - i, - #[trigger] - self.next_ptr(i), - )) - } + pub closed spec fn wf(&self) -> bool { + &&& (forall|i: nat| self.perms@.dom().contains(i) ==> 0 <= i < self.data@.len) + &&& self.first.id() == self.next_ptr(self.data@.len) + &&& (forall|i: nat| self.valid_node(i, #[trigger] self.next_ptr(i))) + } - pub closed spec fn len(&self) -> nat { - self.data@.len - } + pub closed spec fn len(&self) -> nat { + self.data@.len + } - pub closed spec fn page_id(&self) -> PageId { - self.data@.page_id - } + pub closed spec fn page_id(&self) -> PageId { + self.data@.page_id + } - pub closed spec fn block_size(&self) -> nat { - self.data@.block_size - } + pub closed spec fn block_size(&self) -> nat { + self.data@.block_size + } - pub closed spec fn fixed_page(&self) -> bool { - self.data@.fixed_page - } + pub closed spec fn fixed_page(&self) -> bool { + self.data@.fixed_page + } - pub closed spec fn instance(&self) -> Mim::Instance { - self.data@.instance - } + pub closed spec fn instance(&self) -> Mim::Instance { + self.data@.instance + } - pub closed spec fn heap_id(&self) -> Option { - self.data@.heap_id - } + pub closed spec fn heap_id(&self) -> Option { + self.data@.heap_id + } - pub closed spec fn ptr(&self) -> PPtr { - self.first - } + pub closed spec fn ptr(&self) -> PPtr { + self.first + } - /*spec fn is_valid_page_address(&self, ptr: int) -> bool { + /*spec fn is_valid_page_address(&self, ptr: int) -> bool { // We need this to save a ptr at this address // this is probably redundant since we also have is_block_ptr ptr as int % size_of::() as int == 0 }*/ - #[inline(always)] - pub fn insert_block( - &mut self, - ptr: PPtr, - Tracked(points_to_raw): Tracked, - Tracked(block_token): Tracked, - ) - requires - old(self).wf(), - points_to_raw.is_range(ptr.id(), block_token@.key.block_size as int), - //old(self).is_valid_page_address(points_to_raw@.pptr), - block_token@.instance == old(self).instance(), - is_block_ptr(ptr.id(), block_token@.key), - old(self).fixed_page() ==> (block_token@.key.page_id == 
old(self).page_id() - && block_token@.key.block_size == old(self).block_size()), - old(self).heap_id().is_none(), - ensures - self.wf(), - self.block_size() == old(self).block_size(), - self.len() == old(self).len() + 1, - self.instance() == old(self).instance(), - self.page_id() == old(self).page_id(), - self.fixed_page() == old(self).fixed_page(), - self.heap_id() == old(self).heap_id(), - { - let tracked mut mem1; - let tracked mut mem2; - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - proof { - block_size_ge_word(); - block_ptr_aligned_to_word(); - let tracked (m1, m2) = points_to_raw.split( - set_int_range(ptr.id(), ptr.id() + size_of::() as int), - ); - mem1 = m1.into_typed::(ptr.id()); - mem2 = m2; - } - let ptr = PPtr::::from_usize(ptr.to_usize()); - ptr.put(Tracked(&mut mem1), Node { ptr: self.first }); - self.first = ptr; - proof { - let tracked tuple = (mem1, mem2, block_token); - self.perms.borrow_mut().tracked_insert(self.data@.len, tuple); - self.data@.len = self.data@.len + 1; - let ghost len = self.data@.len; - assert forall|i: nat| self.perms@.dom().contains(i) implies 0 <= i < self.data@.len by { - if i + 1 < len { - assert(old(self).perms@.dom().contains(i)); - } + #[inline(always)] + pub fn insert_block( + &mut self, + ptr: PPtr, + Tracked(points_to_raw): Tracked, + Tracked(block_token): Tracked, + ) + requires + old(self).wf(), + points_to_raw.is_range(ptr.id(), block_token@.key.block_size as int), + //old(self).is_valid_page_address(points_to_raw@.pptr), + block_token@.instance == old(self).instance(), + is_block_ptr(ptr.id(), block_token@.key), + old(self).fixed_page() ==> (block_token@.key.page_id == old(self).page_id() + && block_token@.key.block_size == old(self).block_size()), + old(self).heap_id().is_none(), + ensures + self.wf(), + self.block_size() == old(self).block_size(), + self.len() == old(self).len() + 1, + self.instance() == old(self).instance(), + self.page_id() == old(self).page_id(), + self.fixed_page() == old(self).fixed_page(), + self.heap_id() == old(self).heap_id(), + { + let tracked mut mem1; + let tracked mut mem2; + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + proof { + block_size_ge_word(); + block_ptr_aligned_to_word(); + let tracked (m1, m2) = points_to_raw.split( + set_int_range(ptr.id(), ptr.id() + size_of::() as int), + ); + mem1 = m1.into_typed::(ptr.id()); + mem2 = m2; + } + let ptr = PPtr::::from_usize(ptr.to_usize()); + ptr.put(Tracked(&mut mem1), Node { ptr: self.first }); + self.first = ptr; + proof { + let tracked tuple = (mem1, mem2, block_token); + self.perms.borrow_mut().tracked_insert(self.data@.len, tuple); + self.data@.len = self.data@.len + 1; + let ghost len = self.data@.len; + assert forall|i: nat| self.perms@.dom().contains(i) implies 0 <= i < self.data@.len by { + if i + 1 < len { + assert(old(self).perms@.dom().contains(i)); } - assert forall|i: nat| #[trigger] self.valid_node(i, self.next_ptr(i)) by { - assert(old(self).valid_node(i, old(self).next_ptr(i))); - if i > 0 { - let j = (i - 1) as nat; - assert(old(self).valid_node(j, old(self).next_ptr(j))); - }/*if i < len { + } + assert forall|i: nat| #[trigger] self.valid_node(i, self.next_ptr(i)) by { + assert(old(self).valid_node(i, old(self).next_ptr(i))); + if i > 0 { + let j = (i - 1) as nat; + assert(old(self).valid_node(j, old(self).next_ptr(j))); + }/*if i < len { if i != 0 { assert(self.perms@.index((i - 1) as nat) == old(self).perms@.index((i - 1) as nat)); @@ -14537,68 +14495,68 @@ mod linked_list { } }*/ - } } } + 
} - // This is like insert_block but it only does the operation "ghostily". - // This is used by the ThreadLL - // - // It requires the pointer writer has already been done, so it's just arranging - // ghost data in a ghost LL. - pub proof fn ghost_insert_block( - tracked &mut self, - tracked ptr: PPtr, - tracked points_to_ptr: PointsTo, - tracked points_to_raw: PointsToRaw, - tracked block_token: Mim::block, - ) - requires - old(self).wf(), - block_token@.instance == old(self).instance(), - is_block_ptr(ptr.id(), block_token@.key), - // Require that the pointer has already been written: - points_to_ptr@.pptr == ptr.id(), - points_to_ptr@.value.is_Some(), - points_to_ptr@.value.get_Some_0().ptr.id() == old(self).ptr().id(), - // Require the padding to be correct - points_to_raw.is_range( - ptr.id() + size_of::(), - block_token@.key.block_size - size_of::(), - ), - block_token@.key.block_size - size_of::() >= 0, - old(self).fixed_page() ==> (block_token@.key.page_id == old(self).page_id() - && block_token@.key.block_size == old(self).block_size()), - (match old(self).heap_id() { - Some(heap_id) => block_token@.value.heap_id == Some(heap_id), - None => true, - }), - ensures - self.wf(), - self.block_size() == old(self).block_size(), - self.len() == old(self).len() + 1, - self.instance() == old(self).instance(), - self.page_id() == old(self).page_id(), - self.fixed_page() == old(self).fixed_page(), - self.heap_id() == old(self).heap_id(), - self.ptr() == ptr, - { - self.first = ptr; - let tracked tuple = (points_to_ptr, points_to_raw, block_token); - self.perms.borrow_mut().tracked_insert(self.data@.len, tuple); - self.data@.len = self.data@.len + 1; - let ghost len = self.data@.len; - assert forall|i: nat| self.perms@.dom().contains(i) implies 0 <= i < self.data@.len by { - if i + 1 < len { - assert(old(self).perms@.dom().contains(i)); - } - } - assert forall|i: nat| #[trigger] self.valid_node(i, self.next_ptr(i)) by { - assert(old(self).valid_node(i, old(self).next_ptr(i))); - if i > 0 { - let j = (i - 1) as nat; - assert(old(self).valid_node(j, old(self).next_ptr(j))); - }/*if i < len { + // This is like insert_block but it only does the operation "ghostily". + // This is used by the ThreadLL + // + // It requires the pointer writer has already been done, so it's just arranging + // ghost data in a ghost LL. 
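    // In practice this pairs with `block_write_ptr` below (the "helper for clients using
    // ghost_insert_block"): the exec write of the `Node` header happens there, and the
    // resulting typed points-to plus the leftover raw permission are what get passed into
    // this proof function as `points_to_ptr` and `points_to_raw`.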
+ pub proof fn ghost_insert_block( + tracked &mut self, + tracked ptr: PPtr, + tracked points_to_ptr: PointsTo, + tracked points_to_raw: PointsToRaw, + tracked block_token: Mim::block, + ) + requires + old(self).wf(), + block_token@.instance == old(self).instance(), + is_block_ptr(ptr.id(), block_token@.key), + // Require that the pointer has already been written: + points_to_ptr@.pptr == ptr.id(), + points_to_ptr@.value.is_Some(), + points_to_ptr@.value.get_Some_0().ptr.id() == old(self).ptr().id(), + // Require the padding to be correct + points_to_raw.is_range( + ptr.id() + size_of::(), + block_token@.key.block_size - size_of::(), + ), + block_token@.key.block_size - size_of::() >= 0, + old(self).fixed_page() ==> (block_token@.key.page_id == old(self).page_id() + && block_token@.key.block_size == old(self).block_size()), + (match old(self).heap_id() { + Some(heap_id) => block_token@.value.heap_id == Some(heap_id), + None => true, + }), + ensures + self.wf(), + self.block_size() == old(self).block_size(), + self.len() == old(self).len() + 1, + self.instance() == old(self).instance(), + self.page_id() == old(self).page_id(), + self.fixed_page() == old(self).fixed_page(), + self.heap_id() == old(self).heap_id(), + self.ptr() == ptr, + { + self.first = ptr; + let tracked tuple = (points_to_ptr, points_to_raw, block_token); + self.perms.borrow_mut().tracked_insert(self.data@.len, tuple); + self.data@.len = self.data@.len + 1; + let ghost len = self.data@.len; + assert forall|i: nat| self.perms@.dom().contains(i) implies 0 <= i < self.data@.len by { + if i + 1 < len { + assert(old(self).perms@.dom().contains(i)); + } + } + assert forall|i: nat| #[trigger] self.valid_node(i, self.next_ptr(i)) by { + assert(old(self).valid_node(i, old(self).next_ptr(i))); + if i > 0 { + let j = (i - 1) as nat; + assert(old(self).valid_node(j, old(self).next_ptr(j))); + }/*if i < len { if i != 0 { assert(self.perms@.index((i - 1) as nat) == old(self).perms@.index((i - 1) as nat)); @@ -14611,98 +14569,93 @@ mod linked_list { } }*/ - } } + } - proof fn is_empty_iff_null(tracked &self) - requires - self.wf(), - ensures - self.len() == 0 <==> self.first.id() == 0, - { - if self.first.id() == 0 { - if self.len() != 0 { - let n = (self.len() - 1) as nat; - assert(self.valid_node(n, self.next_ptr(n))); - self.perms.borrow().tracked_borrow(n).0.is_nonnull(); - assert(false); - } - } else { - assert(self.len() != 0); + proof fn is_empty_iff_null(tracked &self) + requires + self.wf(), + ensures + self.len() == 0 <==> self.first.id() == 0, + { + if self.first.id() == 0 { + if self.len() != 0 { + let n = (self.len() - 1) as nat; + assert(self.valid_node(n, self.next_ptr(n))); + self.perms.borrow().tracked_borrow(n).0.is_nonnull(); + assert(false); } + } else { + assert(self.len() != 0); } + } - #[inline(always)] - pub fn is_empty(&self) -> (b: bool) - requires - self.wf(), - ensures - b <==> (self.len() == 0), - { - proof { - self.is_empty_iff_null(); - } - self.first.to_usize() == 0 + #[inline(always)] + pub fn is_empty(&self) -> (b: bool) + requires + self.wf(), + ensures + b <==> (self.len() == 0), + { + proof { + self.is_empty_iff_null(); } + self.first.to_usize() == 0 + } - #[inline(always)] - pub fn pop_block(&mut self) -> (x: (PPtr, Tracked, Tracked)) - requires - old(self).wf(), - old(self).len() != 0, - ensures - ({ - let (ptr, points_to, block_token) = x; - { - &&& self.wf() - &&& self.block_size() == old(self).block_size() - &&& self.len() + 1 == old(self).len() - &&& self.instance() == old(self).instance() - &&& 
self.page_id() == old(self).page_id() - &&& self.fixed_page() == old(self).fixed_page() - &&& self.heap_id() == old(self).heap_id() - &&& points_to@.is_range(ptr.id(), block_token@@.key.block_size as int) - &&& block_token@@.instance == old(self).instance() - &&& is_block_ptr(ptr.id(), block_token@@.key) - &&& (self.fixed_page() ==> (block_token@@.key.page_id == self.page_id() - && block_token@@.key.block_size == self.block_size())) - &&& (match self.heap_id() { - Some(heap_id) => block_token@@.value.heap_id == Some(heap_id), - None => true, - }) - } - }), - { - proof { - let i = (self.data@.len - 1) as nat; - assert(self.valid_node(i, self.next_ptr(i))); - } - let tracked (mut points_to_node, points_to_raw, block) = - self.perms.borrow_mut().tracked_remove((self.data@.len - 1) as nat); - let ptr = self.first; - let node = ptr.take(Tracked(&mut points_to_node)); - self.first = node.ptr; - let tracked points_to_raw = points_to_node.into_raw().join(points_to_raw); - let ptru = PPtr::::from_usize(ptr.to_usize()); - proof { - self.data@.len = (self.data@.len - 1) as nat; - assert forall|i: nat| - self.valid_node( - i, - #[trigger] - self.next_ptr(i), - ) by { - assert(old(self).valid_node(i, old(self).next_ptr(i))); - if i > 0 { - let j = (i - 1) as nat; - assert(old(self).valid_node(j, old(self).next_ptr(j))); - } + #[inline(always)] + pub fn pop_block(&mut self) -> (x: (PPtr, Tracked, Tracked)) + requires + old(self).wf(), + old(self).len() != 0, + ensures + ({ + let (ptr, points_to, block_token) = x; + { + &&& self.wf() + &&& self.block_size() == old(self).block_size() + &&& self.len() + 1 == old(self).len() + &&& self.instance() == old(self).instance() + &&& self.page_id() == old(self).page_id() + &&& self.fixed_page() == old(self).fixed_page() + &&& self.heap_id() == old(self).heap_id() + &&& points_to@.is_range(ptr.id(), block_token@@.key.block_size as int) + &&& block_token@@.instance == old(self).instance() + &&& is_block_ptr(ptr.id(), block_token@@.key) + &&& (self.fixed_page() ==> (block_token@@.key.page_id == self.page_id() + && block_token@@.key.block_size == self.block_size())) + &&& (match self.heap_id() { + Some(heap_id) => block_token@@.value.heap_id == Some(heap_id), + None => true, + }) } - let j = self.data@.len; - assert(old(self).valid_node(j, old(self).next_ptr(j))); - assert(old(self).valid_node((j - 1) as nat, old(self).next_ptr((j - 1) as nat))); - assert((forall|i: nat| self.perms@.dom().contains(i) ==> 0 <= i < self.data@.len)); - /*if j > 0 { + }), + { + proof { + let i = (self.data@.len - 1) as nat; + assert(self.valid_node(i, self.next_ptr(i))); + } + let tracked (mut points_to_node, points_to_raw, block) = + self.perms.borrow_mut().tracked_remove((self.data@.len - 1) as nat); + let ptr = self.first; + let node = ptr.take(Tracked(&mut points_to_node)); + self.first = node.ptr; + let tracked points_to_raw = points_to_node.into_raw().join(points_to_raw); + let ptru = PPtr::::from_usize(ptr.to_usize()); + proof { + self.data@.len = (self.data@.len - 1) as nat; + assert forall|i: nat| self.valid_node(i, #[trigger] self.next_ptr(i)) by { + assert(old(self).valid_node(i, old(self).next_ptr(i))); + if i > 0 { + let j = (i - 1) as nat; + assert(old(self).valid_node(j, old(self).next_ptr(j))); + } + } + let j = self.data@.len; + assert(old(self).valid_node(j, old(self).next_ptr(j))); + assert(old(self).valid_node((j - 1) as nat, old(self).next_ptr((j - 1) as nat))); + assert((forall|i: nat| self.perms@.dom().contains(i) ==> 0 <= i < self.data@.len)); + /*if j > 0 { 
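// ---------------------------------------------------------------------------
// Editor's aside (not part of the verified snapshot): `ghost_insert_block` and
// `pop_block` above implement an intrusive free list in which each free block
// stores the pointer to the next free block in its first word, while a ghost
// map of permissions (keyed by list index) tracks ownership of every block.
// The unverified sketch below models only the executable push/pop logic, with
// indices into a Vec standing in for raw pointers; all names are illustrative.
const NIL: usize = usize::MAX;

// next[b] = index of the block after b in the free list (NIL terminates).
fn insert_block(next: &mut [usize], head: &mut usize, b: usize) {
    next[b] = *head; // the block's first word is made to point at the old head
    *head = b;       // the block becomes the new head (cf. `self.first = ptr`)
}

fn pop_block(next: &[usize], head: &mut usize) -> usize {
    let b = *head;   // take the head block
    *head = next[b]; // advance the head to the stored next pointer
    b
}

fn main() {
    let mut next = vec![NIL; 4];
    let mut head = NIL;
    insert_block(&mut next, &mut head, 2);
    insert_block(&mut next, &mut head, 0);
    assert_eq!(pop_block(&next, &mut head), 0);
    assert_eq!(head, 2);
}
// ---------------------------------------------------------------------------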
//assert(old(self).perms@.dom().contains(j - 1)); //assert(self.perms@.dom().contains(j - 1)); assert(self.next_ptr(j) == old(self).next_ptr(j)); @@ -14710,239 +14663,234 @@ mod linked_list { } else { assert(self.first.id() == self.next_ptr(self.data@.len)); }*/ - assert(self.wf()); - } - assert(block@.key.block_size >= size_of::()); - return (ptru, Tracked(points_to_raw), Tracked(block)) - } - - // helper for clients using ghost_insert_block - #[inline(always)] - pub fn block_write_ptr( - ptr: PPtr, - Tracked(perm): Tracked, - next: PPtr, - ) -> (res: (Tracked>, Tracked)) - requires - perm.contains_range(ptr.id(), size_of::() as int), - ptr.id() % align_of::() as int == 0, - ensures - ({ - let points_to = res.0@; - let points_to_raw = res.1@; - points_to@.pptr == ptr.id() && points_to@.value == Some(Node { ptr: next }) - && points_to_raw@ == perm@.remove_keys( - set_int_range(ptr.id(), ptr.id() + size_of::()), - ) - }), - { - let tracked (points_to, rest) = perm.split( - set_int_range(ptr.id(), ptr.id() + size_of::()), - ); - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - let tracked mut points_to_node = points_to.into_typed::(ptr.id()); - ptr.put(Tracked(&mut points_to_node), Node { ptr: next }); - (Tracked(points_to_node), Tracked(rest)) + assert(self.wf()); } + assert(block@.key.block_size >= size_of::()); + return (ptru, Tracked(points_to_raw), Tracked(block)) + } - #[inline(always)] - pub fn new( - Ghost(page_id): Ghost, - Ghost(fixed_page): Ghost, - Ghost(instance): Ghost, - Ghost(block_size): Ghost, - Ghost(heap_id): Ghost>, - ) -> (ll: LL) - ensures - ll.wf(), - ll.page_id() == page_id, - ll.fixed_page() == fixed_page, - ll.instance() == instance, - ll.block_size() == block_size, - ll.heap_id() == heap_id, - ll.len() == 0, - { - LL { - first: PPtr::from_usize(0), - data: Ghost(LLData { fixed_page, block_size, page_id, instance, len: 0, heap_id }), - perms: Tracked(Map::tracked_empty()), - } - } + // helper for clients using ghost_insert_block + #[inline(always)] + pub fn block_write_ptr( + ptr: PPtr, + Tracked(perm): Tracked, + next: PPtr, + ) -> (res: (Tracked>, Tracked)) + requires + perm.contains_range(ptr.id(), size_of::() as int), + ptr.id() % align_of::() as int == 0, + ensures + ({ + let points_to = res.0@; + let points_to_raw = res.1@; + points_to@.pptr == ptr.id() && points_to@.value == Some(Node { ptr: next }) + && points_to_raw@ == perm@.remove_keys( + set_int_range(ptr.id(), ptr.id() + size_of::()), + ) + }), + { + let tracked (points_to, rest) = perm.split( + set_int_range(ptr.id(), ptr.id() + size_of::()), + ); + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + let tracked mut points_to_node = points_to.into_typed::(ptr.id()); + ptr.put(Tracked(&mut points_to_node), Node { ptr: next }); + (Tracked(points_to_node), Tracked(rest)) + } - #[inline(always)] - pub fn empty() -> (ll: LL) - ensures - ll.wf(), - ll.len() == 0, - { - LL::new( - Ghost(arbitrary()), - Ghost(arbitrary()), - Ghost(arbitrary()), - Ghost(arbitrary()), - Ghost(arbitrary()), - ) + #[inline(always)] + pub fn new( + Ghost(page_id): Ghost, + Ghost(fixed_page): Ghost, + Ghost(instance): Ghost, + Ghost(block_size): Ghost, + Ghost(heap_id): Ghost>, + ) -> (ll: LL) + ensures + ll.wf(), + ll.page_id() == page_id, + ll.fixed_page() == fixed_page, + ll.instance() == instance, + ll.block_size() == block_size, + ll.heap_id() == heap_id, + ll.len() == 0, + { + LL { + first: PPtr::from_usize(0), + data: Ghost(LLData { fixed_page, block_size, page_id, instance, len: 0, heap_id 
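// ---------------------------------------------------------------------------
// Editor's aside (not part of the verified snapshot): `block_write_ptr` above
// splits one raw permission over a whole block into the first
// size_of::<Node>() bytes (which are then typed as a Node and written) and the
// remaining padding; `convene_pt_map` later rejoins the two pieces into the
// full block range. The unverified sketch below models that split on plain
// address ranges; the 8-byte header size is only an example value.
use std::ops::Range;

fn split_header(block: Range<usize>, header_size: usize) -> (Range<usize>, Range<usize>) {
    assert!(block.end - block.start >= header_size);
    let mid = block.start + header_size;
    (block.start..mid, mid..block.end) // (typed Node header, untyped padding)
}

fn main() {
    let block = 0x1000..0x1040;                   // one 64-byte block
    let (node, padding) = split_header(block, 8); // assumed 8-byte Node header
    assert_eq!(node, 0x1000..0x1008);
    assert_eq!(padding, 0x1008..0x1040);
}
// ---------------------------------------------------------------------------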
}), + perms: Tracked(Map::tracked_empty()), } + } - #[inline(always)] - pub fn set_ghost_data( - &mut self, - Ghost(page_id): Ghost, - Ghost(fixed_page): Ghost, - Ghost(instance): Ghost, - Ghost(block_size): Ghost, - Ghost(heap_id): Ghost>, + #[inline(always)] + pub fn empty() -> (ll: LL) + ensures + ll.wf(), + ll.len() == 0, + { + LL::new( + Ghost(arbitrary()), + Ghost(arbitrary()), + Ghost(arbitrary()), + Ghost(arbitrary()), + Ghost(arbitrary()), ) - requires - old(self).wf(), - old(self).len() == 0, - ensures - self.wf(), - self.page_id() == page_id, - self.fixed_page() == fixed_page, - self.instance() == instance, - self.block_size() == block_size, - self.heap_id() == heap_id, - self.len() == 0, - { - proof { - self.data@.fixed_page = fixed_page; - self.data@.block_size = block_size; - self.data@.page_id = page_id; - self.data@.instance = instance; - self.data@.heap_id = heap_id; - } + } + + #[inline(always)] + pub fn set_ghost_data( + &mut self, + Ghost(page_id): Ghost, + Ghost(fixed_page): Ghost, + Ghost(instance): Ghost, + Ghost(block_size): Ghost, + Ghost(heap_id): Ghost>, + ) + requires + old(self).wf(), + old(self).len() == 0, + ensures + self.wf(), + self.page_id() == page_id, + self.fixed_page() == fixed_page, + self.instance() == instance, + self.block_size() == block_size, + self.heap_id() == heap_id, + self.len() == 0, + { + proof { + self.data@.fixed_page = fixed_page; + self.data@.block_size = block_size; + self.data@.page_id = page_id; + self.data@.instance = instance; + self.data@.heap_id = heap_id; } + } - // Traverse `other` to find the tail, append `self`, - // and leave the resulting list in `self`. - // Returns the # of entries in `other` - #[inline(always)] - pub fn append(&mut self, other: &mut LL) -> (other_len: u32) - requires - old(self).wf() && old(other).wf(), - old(self).page_id() == old(other).page_id(), - old(self).block_size() == old(other).block_size(), - old(self).fixed_page() == old(other).fixed_page(), - old(self).instance() == old(other).instance(), - old(self).heap_id().is_none(), - old(other).heap_id().is_none(), - old(other).len() < u32::MAX, - ensures// Book-keeping junk: - - self.wf() && other.wf(), - self.page_id() == old(self).page_id(), - self.block_size() == old(self).block_size(), - self.fixed_page() == old(self).fixed_page(), - self.instance() == old(self).instance(), - self.heap_id() == old(self).heap_id(), - other.page_id() == old(other).page_id(), - other.block_size() == old(other).block_size(), - other.fixed_page() == old(other).fixed_page(), - other.instance() == old(other).instance(), - other.heap_id() == old(other).heap_id(), - // What you're here for: - self.len() == old(self).len() + old(other).len(), - other.len() == 0, - other_len as int == old(other).len(), - { - proof { - other.is_empty_iff_null(); - } - if other.first.to_usize() == 0 { - return 0; - } - let mut count = 1; - let mut p = other.first; - loop - invariant - 1 <= count <= other.len(), - other.len() < u32::MAX, - other.wf(), - p.id() == other.perms@[(other.len() - count) as nat].0@.pptr, - ensures - count == other.len(), - p.id() == other.perms@[0].0@.pptr, - { - proof { - let ghost i = (other.len() - count) as nat; - let ghost j = (i - 1) as nat; - assert(other.valid_node(i, other.next_ptr(i))); - assert(other.valid_node(j, other.next_ptr(j))); - if i != 0 { - other.perms.borrow().tracked_borrow(j).0.is_nonnull(); - } - } - let next = *p.borrow( - Tracked(&other.perms.borrow().tracked_borrow((other.len() - count) as nat).0), - ); - if next.ptr.to_usize() != 0 { - 
count += 1; - p = next.ptr; - } else { - break ; - } - } - let ghost old_other = *other; - let ghost old_self = *self; - assert(other.valid_node(0, other.next_ptr(0))); - let tracked mut perm = other.perms.borrow_mut().tracked_remove(0); - let tracked (mut a, b, c) = perm; - let _ = p.take(Tracked(&mut a)); - p.put(Tracked(&mut a), Node { ptr: self.first }); + // Traverse `other` to find the tail, append `self`, + // and leave the resulting list in `self`. + // Returns the # of entries in `other` + #[inline(always)] + pub fn append(&mut self, other: &mut LL) -> (other_len: u32) + requires + old(self).wf() && old(other).wf(), + old(self).page_id() == old(other).page_id(), + old(self).block_size() == old(other).block_size(), + old(self).fixed_page() == old(other).fixed_page(), + old(self).instance() == old(other).instance(), + old(self).heap_id().is_none(), + old(other).heap_id().is_none(), + old(other).len() < u32::MAX, + ensures // Book-keeping junk: + + self.wf() && other.wf(), + self.page_id() == old(self).page_id(), + self.block_size() == old(self).block_size(), + self.fixed_page() == old(self).fixed_page(), + self.instance() == old(self).instance(), + self.heap_id() == old(self).heap_id(), + other.page_id() == old(other).page_id(), + other.block_size() == old(other).block_size(), + other.fixed_page() == old(other).fixed_page(), + other.instance() == old(other).instance(), + other.heap_id() == old(other).heap_id(), + // What you're here for: + self.len() == old(self).len() + old(other).len(), + other.len() == 0, + other_len as int == old(other).len(), + { + proof { + other.is_empty_iff_null(); + } + if other.first.to_usize() == 0 { + return 0; + } + let mut count = 1; + let mut p = other.first; + loop + invariant + 1 <= count <= other.len(), + other.len() < u32::MAX, + other.wf(), + p.id() == other.perms@[(other.len() - count) as nat].0@.pptr, + ensures + count == other.len(), + p.id() == other.perms@[0].0@.pptr, + { proof { - other.perms.borrow_mut().tracked_insert(0, (a, b, c)); - let other_len = other.data@.len; - let self_len = self.data@.len; - self.data@.len = self.data@.len + other.data@.len; - other.data@.len = 0; - let tracked mut other_map = Map::tracked_empty(); - tracked_swap(other.perms.borrow_mut(), &mut other_map); - let tracked mut self_map = Map::tracked_empty(); - tracked_swap(self.perms.borrow_mut(), &mut self_map); - let key_map = Map::::new( - |i: nat| self_len <= i < self_len + other_len, - |i: nat| (i - self_len) as nat, - ); - assert forall|j| key_map.dom().contains(j) implies other_map.dom().contains( - key_map.index(j), - ) by { - let r = (j - self_len) as nat; - assert(old_other.valid_node(r, old_other.next_ptr(r))); + let ghost i = (other.len() - count) as nat; + let ghost j = (i - 1) as nat; + assert(other.valid_node(i, other.next_ptr(i))); + assert(other.valid_node(j, other.next_ptr(j))); + if i != 0 { + other.perms.borrow().tracked_borrow(j).0.is_nonnull(); } - other_map.tracked_map_keys_in_place(key_map); - other_map.tracked_union_prefer_right(self_map); - self.perms@ = other_map; } - self.first = other.first; - other.first = PPtr::from_usize(0); - proof { - assert forall|i: nat| - self.valid_node( - i, - #[trigger] - self.next_ptr(i), - ) by { - assert(old_self.valid_node(i, old_self.next_ptr(i))); - assert(old_self.valid_node((i - 1) as nat, old_self.next_ptr((i - 1) as nat))); - let k = (i - old_self.data@.len) as nat; - let k1 = (k - 1) as nat; - assert(old_other.valid_node(k, old_other.next_ptr(k))); - assert(old_other.valid_node(k1, 
old_other.next_ptr(k1))); - if i < old_self.data@.len { - assert(self.valid_node(i, self.next_ptr(i))); - } else if i < self.data@.len { - assert(self.valid_node(i, self.next_ptr(i))); - } else { - assert(self.valid_node(i, self.next_ptr(i))); - } + let next = *p.borrow( + Tracked(&other.perms.borrow().tracked_borrow((other.len() - count) as nat).0), + ); + if next.ptr.to_usize() != 0 { + count += 1; + p = next.ptr; + } else { + break ; + } + } + let ghost old_other = *other; + let ghost old_self = *self; + assert(other.valid_node(0, other.next_ptr(0))); + let tracked mut perm = other.perms.borrow_mut().tracked_remove(0); + let tracked (mut a, b, c) = perm; + let _ = p.take(Tracked(&mut a)); + p.put(Tracked(&mut a), Node { ptr: self.first }); + proof { + other.perms.borrow_mut().tracked_insert(0, (a, b, c)); + let other_len = other.data@.len; + let self_len = self.data@.len; + self.data@.len = self.data@.len + other.data@.len; + other.data@.len = 0; + let tracked mut other_map = Map::tracked_empty(); + tracked_swap(other.perms.borrow_mut(), &mut other_map); + let tracked mut self_map = Map::tracked_empty(); + tracked_swap(self.perms.borrow_mut(), &mut self_map); + let key_map = Map::::new( + |i: nat| self_len <= i < self_len + other_len, + |i: nat| (i - self_len) as nat, + ); + assert forall|j| key_map.dom().contains(j) implies other_map.dom().contains( + key_map.index(j), + ) by { + let r = (j - self_len) as nat; + assert(old_other.valid_node(r, old_other.next_ptr(r))); + } + other_map.tracked_map_keys_in_place(key_map); + other_map.tracked_union_prefer_right(self_map); + self.perms@ = other_map; + } + self.first = other.first; + other.first = PPtr::from_usize(0); + proof { + assert forall|i: nat| self.valid_node(i, #[trigger] self.next_ptr(i)) by { + assert(old_self.valid_node(i, old_self.next_ptr(i))); + assert(old_self.valid_node((i - 1) as nat, old_self.next_ptr((i - 1) as nat))); + let k = (i - old_self.data@.len) as nat; + let k1 = (k - 1) as nat; + assert(old_other.valid_node(k, old_other.next_ptr(k))); + assert(old_other.valid_node(k1, old_other.next_ptr(k1))); + if i < old_self.data@.len { + assert(self.valid_node(i, self.next_ptr(i))); + } else if i < self.data@.len { + assert(self.valid_node(i, self.next_ptr(i))); + } else { + assert(self.valid_node(i, self.next_ptr(i))); } } - return count; } + return count; + } - // don't need this? - /*// Despite being 'exec', this function is a no-op + // don't need this? 
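// ---------------------------------------------------------------------------
// Editor's aside (not part of the verified snapshot): `append` above walks
// `other` to its last node, points that node at `self`'s old head, and hands
// the whole chain (plus its ghost permissions, re-keyed past `self`'s length)
// to `self`, returning how many entries were moved. The unverified sketch
// below models only the pointer manipulation, again with Vec indices in place
// of raw pointers; names are illustrative.
const NIL: usize = usize::MAX;

fn append(next: &mut [usize], self_head: &mut usize, other_head: &mut usize) -> u32 {
    if *other_head == NIL {
        return 0;
    }
    let mut count = 1u32;
    let mut p = *other_head;
    while next[p] != NIL {    // traverse `other` to find its tail
        count += 1;
        p = next[p];
    }
    next[p] = *self_head;     // tail of `other` now points at the old `self` head
    *self_head = *other_head; // the combined list lives in `self`
    *other_head = NIL;        // `other` is left empty
    count
}

fn main() {
    // self = [0], other = [3 -> 1], threaded through one shared `next` array.
    let mut next = vec![NIL, NIL, NIL, 1];
    let (mut s, mut o) = (0usize, 3usize);
    assert_eq!(append(&mut next, &mut s, &mut o), 2);
    assert_eq!((s, next[3], next[1], o), (3, 1, 0, NIL));
}
// ---------------------------------------------------------------------------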
+ /*// Despite being 'exec', this function is a no-op #[inline(always)] pub fn mark_each_block_allocated(&mut self, tracked thread_token: &mut ThreadToken) requires @@ -14958,150 +14906,150 @@ mod linked_list { self.instance() == old(self).instance(), { } */ - #[inline(always)] - pub fn prepend_contiguous_blocks( - &mut self, - start: PPtr, - last: PPtr, - bsize: usize, - Ghost(cap): Ghost, // current capacity - Ghost(extend): Ghost, // spec we're extending to - Tracked(points_to_raw_r): Tracked<&mut PointsToRaw>, - Tracked(tokens): Tracked<&mut Map>, - ) - requires - old(self).wf(), - old(self).fixed_page(), - old(self).block_size() == bsize as nat, - old(self).heap_id().is_none(), - INTPTR_SIZE <= bsize, - start.id() % INTPTR_SIZE as int == 0, - bsize as int % INTPTR_SIZE as int == 0, - old(points_to_raw_r).is_range(start.id(), extend as int * bsize as int), + #[inline(always)] + pub fn prepend_contiguous_blocks( + &mut self, + start: PPtr, + last: PPtr, + bsize: usize, + Ghost(cap): Ghost, // current capacity + Ghost(extend): Ghost, // spec we're extending to + Tracked(points_to_raw_r): Tracked<&mut PointsToRaw>, + Tracked(tokens): Tracked<&mut Map>, + ) + requires + old(self).wf(), + old(self).fixed_page(), + old(self).block_size() == bsize as nat, + old(self).heap_id().is_none(), + INTPTR_SIZE <= bsize, + start.id() % INTPTR_SIZE as int == 0, + bsize as int % INTPTR_SIZE as int == 0, + old(points_to_raw_r).is_range(start.id(), extend as int * bsize as int), + start.id() + extend * bsize <= usize::MAX, + start.id() == block_start_at( + old(self).page_id(), + old(self).block_size() as int, + cap as int, + ), + extend >= 1, + last.id() == start.id() + ((extend as int - 1) * bsize as int), + (forall|i: int| cap <= i < cap + extend ==> old(tokens).dom().contains(i)), + (forall|i: int| + cap <= i < cap + extend ==> old(tokens).index(i)@.instance == old(self).instance()), + (forall|i: int| + cap <= i < cap + extend ==> old(tokens).index(i)@.key.page_id == old( + self, + ).page_id()), + (forall|i: int| cap <= i < cap + extend ==> old(tokens).index(i)@.key.idx == i), + (forall|i: int| + cap <= i < cap + extend ==> old(tokens).index(i)@.key.block_size == bsize), + (forall|i: int| + cap <= i < cap + extend ==> is_block_ptr( + block_start(old(tokens).index(i)@.key), + old(tokens).index(i)@.key, + )), + ensures + self.wf(), + self.page_id() == old(self).page_id(), + self.block_size() == old(self).block_size(), + self.fixed_page() == old(self).fixed_page(), + self.instance() == old(self).instance(), + self.heap_id() == old(self).heap_id(), + self.len() == old(self).len() + extend, + //points_to_raw@.pptr == old(points_to_raw)@.pptr + extend * (bsize as int), + //points_to_raw@.size == old(points_to_raw)@.size - extend * (bsize as int), + tokens == old(tokens).remove_keys(set_int_range(cap as int, cap as int + extend)), + { + // based on mi_page_free_list_extend + let tracked mut points_to_raw = PointsToRaw::empty(); + let tracked mut new_map: Map, PointsToRaw, Mim::block)> = + Map::tracked_empty(); + proof { + tracked_swap(&mut points_to_raw, points_to_raw_r); + } + let mut block = start.to_usize(); + let ghost mut i: int = 0; + let ghost tokens_snap = *tokens; + while block < last.to_usize() + invariant + 0 <= i < extend, start.id() + extend * bsize <= usize::MAX, - start.id() == block_start_at( - old(self).page_id(), - old(self).block_size() as int, - cap as int, - ), - extend >= 1, - last.id() == start.id() + ((extend as int - 1) * bsize as int), - (forall|i: int| cap <= i < cap + extend ==> 
old(tokens).dom().contains(i)), - (forall|i: int| - cap <= i < cap + extend ==> old(tokens).index(i)@.instance == old(self).instance()), - (forall|i: int| - cap <= i < cap + extend ==> old(tokens).index(i)@.key.page_id == old( - self, - ).page_id()), - (forall|i: int| cap <= i < cap + extend ==> old(tokens).index(i)@.key.idx == i), - (forall|i: int| - cap <= i < cap + extend ==> old(tokens).index(i)@.key.block_size == bsize), - (forall|i: int| - cap <= i < cap + extend ==> is_block_ptr( - block_start(old(tokens).index(i)@.key), - old(tokens).index(i)@.key, - )), - ensures - self.wf(), - self.page_id() == old(self).page_id(), - self.block_size() == old(self).block_size(), - self.fixed_page() == old(self).fixed_page(), - self.instance() == old(self).instance(), - self.heap_id() == old(self).heap_id(), - self.len() == old(self).len() + extend, - //points_to_raw@.pptr == old(points_to_raw)@.pptr + extend * (bsize as int), - //points_to_raw@.size == old(points_to_raw)@.size - extend * (bsize as int), - tokens == old(tokens).remove_keys(set_int_range(cap as int, cap as int + extend)), + block == start.id() + i * bsize, + last.id() == start.id() + (extend - 1) * bsize, + points_to_raw.is_range(block as int, (extend - i) * bsize), + INTPTR_SIZE as int <= bsize, + block as int % INTPTR_SIZE as int == 0, + bsize as int % INTPTR_SIZE as int == 0, + *tokens =~= tokens_snap.remove_keys(set_int_range(cap as int, cap as int + i)), + forall|j| + #![trigger tokens.dom().contains(j)] + #![trigger tokens.index(j)] + cap + i <= j < cap + extend ==> tokens.dom().contains(j) && tokens[j] + == tokens_snap[j], + forall|j| + (self.data@.len + extend - i <= j < self.data@.len + extend) + <==> #[trigger] new_map.dom().contains(j), + *old(self) == *self, + forall|j| + #![trigger new_map.dom().contains(j)] + #![trigger new_map.index(j)] + ((self.data@.len + extend - i <= j < self.data@.len + extend) ==> { + let k = self.data@.len + extend - 1 - j; + { + &&& new_map[j].2 == tokens_snap[cap + k] + &&& new_map[j].0@.pptr == start.id() + k * bsize + &&& new_map[j].0@.value.is_some() + &&& new_map[j].0@.value.unwrap().ptr.id() == start.id() + (k + 1) + * bsize + &&& new_map[j].1.is_range( + start.id() + k * bsize + size_of::(), + bsize - size_of::(), + ) + } + }), { - // based on mi_page_free_list_extend - let tracked mut points_to_raw = PointsToRaw::empty(); - let tracked mut new_map: Map, PointsToRaw, Mim::block)> = - Map::tracked_empty(); proof { - tracked_swap(&mut points_to_raw, points_to_raw_r); + assert(i < extend); + assert((i + 1) * bsize == i * bsize + bsize) by (nonlinear_arith); + assert((extend - i) * bsize == (extend - (i + 1)) * bsize + bsize) + by (nonlinear_arith); + assert(bsize <= (extend - i) * bsize) by (nonlinear_arith) + requires + bsize >= 0, + extend - i >= 1, + ; + assert(i * bsize + bsize <= extend * bsize) by (nonlinear_arith) + requires + bsize >= 0, + extend - i >= 1, + ; + assert(block + bsize <= start.id() + extend * bsize); + assert(size_of::() == 8); } - let mut block = start.to_usize(); - let ghost mut i: int = 0; - let ghost tokens_snap = *tokens; - while block < last.to_usize() - invariant - 0 <= i < extend, - start.id() + extend * bsize <= usize::MAX, - block == start.id() + i * bsize, - last.id() == start.id() + (extend - 1) * bsize, - points_to_raw.is_range(block as int, (extend - i) * bsize), - INTPTR_SIZE as int <= bsize, - block as int % INTPTR_SIZE as int == 0, - bsize as int % INTPTR_SIZE as int == 0, - *tokens =~= tokens_snap.remove_keys(set_int_range(cap as int, cap as int + 
i)), - forall|j| - #![trigger tokens.dom().contains(j)] - #![trigger tokens.index(j)] - cap + i <= j < cap + extend ==> tokens.dom().contains(j) && tokens[j] - == tokens_snap[j], - forall|j| - (self.data@.len + extend - i <= j < self.data@.len + extend) <==> #[trigger] - new_map.dom().contains(j), - *old(self) == *self, - forall|j| - #![trigger new_map.dom().contains(j)] - #![trigger new_map.index(j)] - ((self.data@.len + extend - i <= j < self.data@.len + extend) ==> { - let k = self.data@.len + extend - 1 - j; - { - &&& new_map[j].2 == tokens_snap[cap + k] - &&& new_map[j].0@.pptr == start.id() + k * bsize - &&& new_map[j].0@.value.is_some() - &&& new_map[j].0@.value.unwrap().ptr.id() == start.id() + (k + 1) - * bsize - &&& new_map[j].1.is_range( - start.id() + k * bsize + size_of::(), - bsize - size_of::(), - ) - } - }), - { - proof { - assert(i < extend); - assert((i + 1) * bsize == i * bsize + bsize) by (nonlinear_arith); - assert((extend - i) * bsize == (extend - (i + 1)) * bsize + bsize) - by (nonlinear_arith); - assert(bsize <= (extend - i) * bsize) by (nonlinear_arith) - requires - bsize >= 0, - extend - i >= 1, - ; - assert(i * bsize + bsize <= extend * bsize) by (nonlinear_arith) - requires - bsize >= 0, - extend - i >= 1, - ; - assert(block + bsize <= start.id() + extend * bsize); - assert(size_of::() == 8); - } - let next: PPtr = PPtr::from_usize(block + bsize); - let tracked (points_to, rest) = points_to_raw.split( - set_int_range(block as int, block as int + bsize as int), - ); - let tracked (points_to1, points_to2) = points_to.split( - set_int_range(block as int, block as int + size_of::() as int), + let next: PPtr = PPtr::from_usize(block + bsize); + let tracked (points_to, rest) = points_to_raw.split( + set_int_range(block as int, block as int + bsize as int), + ); + let tracked (points_to1, points_to2) = points_to.split( + set_int_range(block as int, block as int + size_of::() as int), + ); + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + let tracked mut points_to_node = points_to1.into_typed::(block as int); + let block_ptr = PPtr::from_usize(block); + block_ptr.put(Tracked(&mut points_to_node), Node { ptr: next }); + block = next.to_usize(); + proof { + points_to_raw = rest; + let ghost old_tokens = *tokens; + let tracked block = tokens.tracked_remove(cap + i); + let ghost the_key = (self.data@.len + extend - 1 - i) as nat; + new_map.tracked_insert( + (self.data@.len + extend - 1 - i) as nat, + (points_to_node, points_to2, block), ); - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - let tracked mut points_to_node = points_to1.into_typed::(block as int); - let block_ptr = PPtr::from_usize(block); - block_ptr.put(Tracked(&mut points_to_node), Node { ptr: next }); - block = next.to_usize(); - proof { - points_to_raw = rest; - let ghost old_tokens = *tokens; - let tracked block = tokens.tracked_remove(cap + i); - let ghost the_key = (self.data@.len + extend - 1 - i) as nat; - new_map.tracked_insert( - (self.data@.len + extend - 1 - i) as nat, - (points_to_node, points_to2, block), - ); - i = i + 1; - /*assert forall + i = i + 1; + /*assert forall #![trigger new_map.dom().contains(j)] #![trigger new_map.index(j)] |j| @@ -15138,68 +15086,110 @@ mod linked_list { assert(new_map[j].1@.size == bsize - size_of::()); } }*/ - } } - assert((i + 1) * bsize == i * bsize + bsize) by (nonlinear_arith); - assert((extend - i) * bsize == (extend - (i + 1)) * bsize + bsize) by (nonlinear_arith); - assert(bsize <= (extend - i) * bsize) by 
(nonlinear_arith) - requires - bsize >= 0, - extend - i >= 1, - ; - assert(i * bsize + bsize <= extend * bsize) by (nonlinear_arith) - requires - bsize >= 0, - extend - i >= 1, - ; - assert(block + bsize <= start.id() + extend * bsize); - assert(i == extend - 1) by { - if i < extend - 1 { - assert(i * bsize < (extend as int - 1) * bsize) by (nonlinear_arith) - requires - bsize > 0, - i < extend as int - 1, - ; - assert(false); - } + } + assert((i + 1) * bsize == i * bsize + bsize) by (nonlinear_arith); + assert((extend - i) * bsize == (extend - (i + 1)) * bsize + bsize) by (nonlinear_arith); + assert(bsize <= (extend - i) * bsize) by (nonlinear_arith) + requires + bsize >= 0, + extend - i >= 1, + ; + assert(i * bsize + bsize <= extend * bsize) by (nonlinear_arith) + requires + bsize >= 0, + extend - i >= 1, + ; + assert(block + bsize <= start.id() + extend * bsize); + assert(i == extend - 1) by { + if i < extend - 1 { + assert(i * bsize < (extend as int - 1) * bsize) by (nonlinear_arith) + requires + bsize > 0, + i < extend as int - 1, + ; + assert(false); } - let tracked (points_to, rest) = points_to_raw.split( - set_int_range(block as int, block as int + bsize as int), - ); - let tracked (points_to1, points_to2) = points_to.split( - set_int_range(block as int, block as int + size_of::() as int), + } + let tracked (points_to, rest) = points_to_raw.split( + set_int_range(block as int, block as int + bsize as int), + ); + let tracked (points_to1, points_to2) = points_to.split( + set_int_range(block as int, block as int + size_of::() as int), + ); + proof { + points_to_raw = rest; + } + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + let tracked mut points_to_node = points_to1.into_typed::(block as int); + let block_ptr = PPtr::from_usize(block); + block_ptr.put(Tracked(&mut points_to_node), Node { ptr: self.first }); + self.first = PPtr::from_usize(start.to_usize()); + proof { + let tracked block = tokens.tracked_remove(cap + i); + let ghost the_key = (self.data@.len + extend - 1 - i) as nat; + new_map.tracked_insert( + (self.data@.len + extend - 1 - i) as nat, + (points_to_node, points_to2, block), ); - proof { - points_to_raw = rest; - } - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - let tracked mut points_to_node = points_to1.into_typed::(block as int); - let block_ptr = PPtr::from_usize(block); - block_ptr.put(Tracked(&mut points_to_node), Node { ptr: self.first }); - self.first = PPtr::from_usize(start.to_usize()); - proof { - let tracked block = tokens.tracked_remove(cap + i); - let ghost the_key = (self.data@.len + extend - 1 - i) as nat; - new_map.tracked_insert( - (self.data@.len + extend - 1 - i) as nat, - (points_to_node, points_to2, block), - ); - let old_len = self.data@.len; - self.data@.len = self.data@.len + extend; - self.perms.borrow_mut().tracked_union_prefer_right(new_map); - assert_maps_equal!(*tokens == tokens_snap.remove_keys( + let old_len = self.data@.len; + self.data@.len = self.data@.len + extend; + self.perms.borrow_mut().tracked_union_prefer_right(new_map); + assert_maps_equal!(*tokens == tokens_snap.remove_keys( set_int_range(cap as int, cap as int + extend))); - assert forall|j: nat| - self.valid_node( - j, - #[trigger] - self.next_ptr(j), - ) by { + assert forall|j: nat| self.valid_node(j, #[trigger] self.next_ptr(j)) by { + let (perm, padding, block_token) = self.perms@.index(j); + if j < old_len { + assert(old(self).valid_node(j, old(self).next_ptr(j))); + assert(!new_map.dom().contains(j)); + 
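// ---------------------------------------------------------------------------
// Editor's aside (not part of the verified snapshot): the `by (nonlinear_arith)`
// obligations above are ordinary distributivity and monotonicity facts about
// `i`, `extend`, and `bsize`. An unverified numeric spot-check of the same
// identities, using arbitrary example values:
fn main() {
    let (i, extend, bsize): (u64, u64, u64) = (3, 7, 16);
    assert_eq!((i + 1) * bsize, i * bsize + bsize);
    assert_eq!((extend - i) * bsize, (extend - (i + 1)) * bsize + bsize);
    assert!(bsize <= (extend - i) * bsize);
    assert!(i * bsize + bsize <= extend * bsize);
    assert!(i * bsize < (extend - 1) * bsize); // holds while i < extend - 1
}
// ---------------------------------------------------------------------------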
assert(self.perms@.index(j) == old(self).perms@.index(j)); + if j > 0 { + assert(old(self).valid_node( + (j - 1) as nat, + old(self).next_ptr((j - 1) as nat), + )); + assert(self.perms@.index((j - 1) as nat) == old(self).perms@.index( + (j - 1) as nat, + )); + assert(self.perms@.index((j - 1) as nat) == old(self).perms@.index( + (j - 1) as nat, + )); + } + assert(old(self).next_ptr(j) == self.next_ptr(j)); + /*if self.fixed_page() { + assert(old(self).fixed_page()); + assert(self.data@.page_id == old(self).data@.page_id); + + assert(block_token == old(self).perms@.index(j).2); + assert(0 <= j < old(self).data@.len); + assert(old(self).perms@.dom().contains(j)); + assert(old(self).data@.page_id == + old(self).perms@.index(j).2@.key.page_id); + + assert(block_token@.key.page_id == self.data@.page_id); + }*/ + assert(self.valid_node(j, self.next_ptr(j))); + } else if j < self.data@.len { let (perm, padding, block_token) = self.perms@.index(j); - if j < old_len { - assert(old(self).valid_node(j, old(self).next_ptr(j))); - assert(!new_map.dom().contains(j)); - assert(self.perms@.index(j) == old(self).perms@.index(j)); + let next_ptr = self.next_ptr(j); + assert(block_token@.key.block_size == bsize); + assert(is_block_ptr(perm@.pptr, block_token@.key)) by { + let block_id = block_token@.key; + crate::layout::get_block_start_defn(block_id); + let k = old_len + extend - 1 - j; + crate::layout::block_start_at_diff( + block_id.page_id, + bsize as nat, + cap as nat, + (cap + k) as nat, + ); + //assert(perm@.pptr == block_start(old(tokens).index(k)@.key)); + //assert(is_block_ptr( + //block_start(old(tokens).index(i)@.key), + //old(tokens).index(i)@.key) + //) + } + if j == old_len { if j > 0 { assert(old(self).valid_node( (j - 1) as nat, @@ -15212,97 +15202,48 @@ mod linked_list { (j - 1) as nat, )); } - assert(old(self).next_ptr(j) == self.next_ptr(j)); - /*if self.fixed_page() { - assert(old(self).fixed_page()); - assert(self.data@.page_id == old(self).data@.page_id); - - assert(block_token == old(self).perms@.index(j).2); - assert(0 <= j < old(self).data@.len); - assert(old(self).perms@.dom().contains(j)); - assert(old(self).data@.page_id == - old(self).perms@.index(j).2@.key.page_id); - - assert(block_token@.key.page_id == self.data@.page_id); - }*/ - assert(self.valid_node(j, self.next_ptr(j))); - } else if j < self.data@.len { - let (perm, padding, block_token) = self.perms@.index(j); - let next_ptr = self.next_ptr(j); - assert(block_token@.key.block_size == bsize); - assert(is_block_ptr(perm@.pptr, block_token@.key)) by { - let block_id = block_token@.key; - crate::layout::get_block_start_defn(block_id); - let k = old_len + extend - 1 - j; - crate::layout::block_start_at_diff( - block_id.page_id, - bsize as nat, - cap as nat, - (cap + k) as nat, - ); - //assert(perm@.pptr == block_start(old(tokens).index(k)@.key)); - //assert(is_block_ptr( - //block_start(old(tokens).index(i)@.key), - //old(tokens).index(i)@.key) - //) - } - if j == old_len { - if j > 0 { - assert(old(self).valid_node( - (j - 1) as nat, - old(self).next_ptr((j - 1) as nat), - )); - assert(self.perms@.index((j - 1) as nat) == old(self).perms@.index( - (j - 1) as nat, - )); - assert(self.perms@.index((j - 1) as nat) == old(self).perms@.index( - (j - 1) as nat, - )); - } - assert(perm@.value.unwrap().ptr.id() == next_ptr); - } else { - assert(perm@.value.unwrap().ptr.id() == next_ptr); - }//assert(padding@.size + size_of::() == block_token@.key.block_size); + assert(perm@.value.unwrap().ptr.id() == next_ptr); + } else { + 
assert(perm@.value.unwrap().ptr.id() == next_ptr); + } //assert(padding@.size + size_of::() == block_token@.key.block_size); - assert(self.valid_node(j, self.next_ptr(j))); - } + assert(self.valid_node(j, self.next_ptr(j))); } - assert(self.wf()); } + assert(self.wf()); } + } - pub fn make_empty(&mut self) -> (llgstr: Tracked) - requires - old(self).wf(), - old(self).fixed_page(), - ensures - llgstr_wf(llgstr@), - llgstr@.block_size == old(self).block_size(), - llgstr@.page_id == old(self).page_id(), - llgstr@.instance == old(self).instance(), - llgstr@.map.len() == old(self).len(), - self.wf(), - self.len() == 0, - { - proof { - assert(forall|i: nat| - #[trigger] - self.perms@.dom().contains(i) ==> self.valid_node(i, self.next_ptr(i))); - } - self.first = PPtr::from_usize(0); - let ghost block_size = self.block_size(); - let ghost page_id = self.page_id(); - let ghost instance = self.instance(); - let tracked map; - proof { - let len = self.data@.len; - self.data@.len = 0; - let tracked mut m = Map::tracked_empty(); - tracked_swap(&mut m, self.perms.borrow_mut()); - assert forall|i: nat| - (#[trigger] - m.dom().contains(i) <==> 0 <= i - < len)/*&& (m.dom().contains(i) ==> ({ + pub fn make_empty(&mut self) -> (llgstr: Tracked) + requires + old(self).wf(), + old(self).fixed_page(), + ensures + llgstr_wf(llgstr@), + llgstr@.block_size == old(self).block_size(), + llgstr@.page_id == old(self).page_id(), + llgstr@.instance == old(self).instance(), + llgstr@.map.len() == old(self).len(), + self.wf(), + self.len() == 0, + { + proof { + assert(forall|i: nat| #[trigger] + self.perms@.dom().contains(i) ==> self.valid_node(i, self.next_ptr(i))); + } + self.first = PPtr::from_usize(0); + let ghost block_size = self.block_size(); + let ghost page_id = self.page_id(); + let ghost instance = self.instance(); + let tracked map; + proof { + let len = self.data@.len; + self.data@.len = 0; + let tracked mut m = Map::tracked_empty(); + tracked_swap(&mut m, self.perms.borrow_mut()); + assert forall|i: nat| + (#[trigger] m.dom().contains(i) <==> 0 <= i + < len)/*&& (m.dom().contains(i) ==> ({ let (perm, padding, block_token) = m[i]; perm@.value.is_some() && block_token@.key.block_size - size_of::() >= 0 @@ -15313,490 +15254,474 @@ mod linked_list { && block_token@.key.page_id == page_id && block_token@.key.block_size == block_size }))*/ - by { - if 0 <= i < len { - assert(old(self).valid_node(i, old(self).next_ptr(i))); - assert(m.dom().contains(i)); - }/*if m.dom().contains(i) { + by { + if 0 <= i < len { + assert(old(self).valid_node(i, old(self).next_ptr(i))); + assert(m.dom().contains(i)); + }/*if m.dom().contains(i) { assert(0 <= i < len); }*/ - } - map = Self::convene_pt_map(m, len, instance, page_id, block_size); - } - Tracked(LLGhostStateToReconvene { map: map, block_size, page_id, instance }) - } - - pub proof fn convene_pt_map( - tracked m: Map, PointsToRaw, Mim::block)>, - len: nat, - instance: Mim::Instance, - page_id: PageId, - block_size: nat, - ) -> (tracked m2: Map) - requires - forall|i: nat| - (#[trigger] - m.dom().contains(i) <==> 0 <= i < len) && (m.dom().contains(i) ==> ({ - let (perm, padding, block_token) = m[i]; - perm@.value.is_some() && block_token@.key.block_size - size_of::() >= 0 - && padding.is_range( - perm@.pptr + size_of::(), - block_token@.key.block_size - size_of::(), - ) && block_token@.instance == instance && is_block_ptr( - perm@.pptr, - block_token@.key, - ) && block_token@.key.page_id == page_id && block_token@.key.block_size - == block_size - })), - ensures - m2.len() 
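// ---------------------------------------------------------------------------
// Editor's aside (not part of the verified snapshot): `prepend_contiguous_blocks`
// above (modelled on mi_page_free_list_extend) carves a contiguous region of
// `extend * bsize` bytes into `extend` blocks, writes into block k a pointer to
// block k + 1, points the final block at the list's old head, and makes `start`
// the new head. The unverified sketch below just computes the next-pointer each
// block receives; the addresses are example values.
fn free_list_extend(start: usize, bsize: usize, extend: usize, old_head: usize) -> Vec<(usize, usize)> {
    (0..extend)
        .map(|k| {
            let block = start + k * bsize;
            let next = if k + 1 < extend { block + bsize } else { old_head };
            (block, next) // (block address, value written into its Node.ptr)
        })
        .collect()
}

fn main() {
    let links = free_list_extend(0x1000, 16, 3, 0x2000);
    assert_eq!(links, vec![(0x1000, 0x1010), (0x1010, 0x1020), (0x1020, 0x2000)]);
}
// ---------------------------------------------------------------------------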
== len, - m2.dom().finite(), - forall|i: nat| - (#[trigger] - m2.dom().contains(i) <==> 0 <= i < len) && (m2.dom().contains(i) ==> ({ - let (padding, block_token) = m2[i]; - &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( - block_start(block_token@.key), - block_token@.key.block_size as int, - ) && block_token@.instance == instance && block_token@.key.page_id == page_id - && block_token@.key.block_size == block_size - })), - decreases len, - { - if len == 0 { - let tracked m = Map::tracked_empty(); - assert(m.dom() =~= Set::empty()); - assert(m.len() == 0); - m - } else { - let i = (len - 1) as nat; - let tracked mut m = m; - assert(m.dom().contains(i)); - let tracked (mut perm, padding, block_token) = m.tracked_remove(i); - let tracked mut m2 = Self::convene_pt_map(m, i, instance, page_id, block_size); - crate::layout::get_block_start_from_is_block_ptr(perm@.pptr, block_token@.key); - perm.leak_contents(); - let tracked mut permraw = perm.into_raw(); - let tracked ptraw = permraw.join(padding); - let mj = m2; - m2.tracked_insert(i, (ptraw, block_token)); - assert(mj.dom().contains(i) == false); - assert(m2.dom() =~= mj.dom().insert(i)); - assert(m2.dom().len() == mj.dom().len() + 1); - assert(m2.len() == len); - m2 - } - } - - pub proof fn reconvene_state( - tracked inst: Mim::Instance, - tracked ts: &Mim::thread_local_state, - tracked llgstr1: LLGhostStateToReconvene, - tracked llgstr2: LLGhostStateToReconvene, - n_blocks: int, - ) -> (tracked res: (PointsToRaw, Map)) - requires - llgstr_wf(llgstr1), - llgstr_wf(llgstr2), - llgstr1.block_size == llgstr2.block_size, - llgstr1.page_id == llgstr2.page_id, - llgstr1.instance == inst, - llgstr2.instance == inst, - ts@.instance == inst, - n_blocks >= 0, - llgstr1.map.len() + llgstr2.map.len() == n_blocks, - ts@.value.pages.dom().contains(llgstr1.page_id), - ts@.value.pages[llgstr1.page_id].num_blocks == n_blocks, - ensures - ({ - let (points_to, map) = res; - { - &&& map.dom().finite() && map.len() == n_blocks - &&& (forall|block_id| - map.dom().contains(block_id) ==> block_id.page_id == llgstr1.page_id) - &&& (forall|block_id| - map.dom().contains(block_id) ==> map[block_id]@.key == block_id) - &&& (forall|block_id| - map.dom().contains(block_id) ==> map[block_id]@.instance == inst) - &&& points_to.is_range( - block_start_at(llgstr1.page_id, llgstr1.block_size as int, 0), - n_blocks * llgstr1.block_size, - ) - } - }), - { - let tracked llgstr = Self::llgstr_merge(llgstr1, llgstr2); - let idxmap = Map::::new( - |p| llgstr.map.dom().contains(p), - |p| llgstr.map[p].1@.key.idx, - ); - if exists|p| llgstr.map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks) { - let p = choose|p| llgstr.map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks); - let tracked LLGhostStateToReconvene { map: mut map, .. } = llgstr; - assert(map.dom().contains(p)); - let tracked (_, block_p) = map.tracked_remove(p); - assert(block_p@.instance == inst); - inst.block_in_range(ts@.key, block_p@.key, ts, &block_p); - assert(false); - proof_from_false() - } else if exists|i| 0 <= i < n_blocks && !has_idx(llgstr.map, i) { - let i = choose|i| 0 <= i < n_blocks && !has_idx(llgstr.map, i); - let (p, q) = crate::pigeonhole::pigeonhole_missing_idx_implies_double( - idxmap, - i, - llgstr.map.len(), - ); - let tracked LLGhostStateToReconvene { map: mut map, .. 
} = llgstr; - let tracked (_, block_p) = map.tracked_remove(p); - let tracked (_, block_q) = map.tracked_remove(q); - inst.block_tokens_distinct(block_p@.key, block_q@.key, block_p, block_q); - assert(false); - proof_from_false() - } else { - let tracked LLGhostStateToReconvene { map, .. } = llgstr; - Self::reconvene_rec(map, map.len(), llgstr.instance, llgstr.page_id, llgstr.block_size) - } - } - - pub proof fn llgstr_merge( - tracked llgstr1: LLGhostStateToReconvene, - tracked llgstr2: LLGhostStateToReconvene, - ) -> (tracked llgstr: LLGhostStateToReconvene) - requires - llgstr_wf(llgstr1), - llgstr_wf(llgstr2), - llgstr1.block_size == llgstr2.block_size, - llgstr1.page_id == llgstr2.page_id, - llgstr1.instance == llgstr2.instance, - ensures - llgstr_wf(llgstr), - llgstr.block_size == llgstr2.block_size, - llgstr.page_id == llgstr2.page_id, - llgstr.instance == llgstr2.instance, - llgstr.map.len() == llgstr1.map.len() + llgstr2.map.len(), - { - let tracked LLGhostStateToReconvene { map: mut map1, .. } = llgstr1; - let tracked LLGhostStateToReconvene { map: mut map2, .. } = llgstr2; - map2.tracked_map_keys_in_place( - Map::::new( - |k: nat| map1.len() <= k < map1.len() + map2.len(), - |k: nat| (k - map1.len()) as nat, - ), - ); - map1.tracked_union_prefer_right(map2); - assert(map1.dom() =~= set_nat_range(0, llgstr1.map.len() + llgstr2.map.len())); - lemma_nat_range(0, llgstr1.map.len() + llgstr2.map.len()); - assert(map1.len() == llgstr1.map.len() + llgstr2.map.len()); - let tracked llgstr = LLGhostStateToReconvene { - map: map1, - block_size: llgstr1.block_size, - page_id: llgstr1.page_id, - instance: llgstr1.instance, - }; - let len = llgstr.map.len(); - let map = llgstr.map; - let block_size = llgstr.block_size; - let page_id = llgstr.page_id; - let instance = llgstr.instance; - assert forall|i: nat| - (#[trigger] - map.dom().contains(i) <==> 0 <= i < len) by {} - assert forall|i: nat| #[trigger] map.dom().contains(i) implies ({ - let (padding, block_token) = map[i]; - &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( - block_start(block_token@.key), - block_token@.key.block_size as int, - ) && block_token@.instance == instance && block_token@.key.page_id == page_id - && block_token@.key.block_size == block_size - }) by { - let (padding, block_token) = map[i]; - if i < llgstr1.map.len() { - assert(block_token@.key.block_size - size_of::() >= 0); - } else { - let t = (i - llgstr1.map.len()) as nat; - assert(llgstr2.map.dom().contains(t)); - assert(block_token@.key.block_size - size_of::() >= 0); - } - } - llgstr - } - - pub proof fn reconvene_rec( - tracked m: Map, - len: nat, - instance: Mim::Instance, - page_id: PageId, - block_size: nat, - ) -> (tracked res: (PointsToRaw, Map)) - requires - forall|j: nat| - 0 <= j < len ==> #[trigger] - has_idx(m, j), - forall|i: nat| - (m.dom().contains(i) ==> ({ - let (padding, block_token) = m[i]; - &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( - block_start(block_token@.key), - block_token@.key.block_size as int, - ) && block_token@.instance == instance && block_token@.key.page_id == page_id - && block_token@.key.block_size == block_size - })), - ensures - ({ - let (points_to, map) = res; - { - &&& map.dom().finite() && map.len() == len - &&& (forall|block_id| - map.dom().contains(block_id) ==> block_id.page_id == page_id) - &&& (forall|block_id| - map.dom().contains(block_id) ==> map[block_id]@.key == block_id) - &&& (forall|block_id| - map.dom().contains(block_id) ==> map[block_id]@.instance == 
instance) - &&& (forall|block_id| map.dom().contains(block_id) ==> 0 <= block_id.idx < len) - &&& points_to.is_range( - block_start_at(page_id, block_size as int, 0), - (len * block_size) as int, - ) - } - }), - decreases len, - { - if len == 0 { - (PointsToRaw::empty(), Map::tracked_empty()) - } else { - let j = (len - 1) as nat; - assert(has_idx(m, j)); - let i = choose|i: nat| m.dom().contains(i) && m[i].1@.key.idx == j; - let old_m = m; - let tracked mut m = m; - let tracked (ptraw, block) = m.tracked_remove(i); - assert forall|k: nat| 0 <= k < (len - 1) as nat implies has_idx(m, k) by { - assert(has_idx(old_m, k)); - let p = choose|p: nat| old_m.dom().contains(p) && old_m[p].1@.key.idx == k; - assert(m.dom().contains(p) && m[p].1@.key.idx == k); - } - let tracked (ptraw1, mut blocks) = Self::reconvene_rec( - m, - (len - 1) as nat, - instance, - page_id, - block_size, - ); - let tracked ptraw2 = ptraw1.join(ptraw); - let old_blocks = blocks; - blocks.tracked_insert(block@.key, block); - assert(block@.key.idx == len - 1); - assert(old_blocks.dom().contains(block@.key) == false); - assert(blocks.dom() =~= old_blocks.dom().insert(block@.key)); - assert(blocks.dom().len() == len); - assert((len - 1) * block_size + block_size == len * block_size) by (nonlinear_arith); - crate::layout::get_block_start_defn(block@.key); - (ptraw2, blocks) } + map = Self::convene_pt_map(m, len, instance, page_id, block_size); } + Tracked(LLGhostStateToReconvene { map: map, block_size, page_id, instance }) } - pub closed spec fn has_idx(map: Map, i: nat) -> bool { - exists|p: nat| map.dom().contains(p) && map[p].1@.key.idx == i - } - - pub open spec fn set_nat_range(lo: nat, hi: nat) -> Set { - Set::new(|i: nat| lo <= i && i < hi) - } - - pub proof fn lemma_nat_range(lo: nat, hi: nat) + pub proof fn convene_pt_map( + tracked m: Map, PointsToRaw, Mim::block)>, + len: nat, + instance: Mim::Instance, + page_id: PageId, + block_size: nat, + ) -> (tracked m2: Map) + requires + forall|i: nat| + (#[trigger] m.dom().contains(i) <==> 0 <= i < len) && (m.dom().contains(i) ==> ({ + let (perm, padding, block_token) = m[i]; + perm@.value.is_some() && block_token@.key.block_size - size_of::() >= 0 + && padding.is_range( + perm@.pptr + size_of::(), + block_token@.key.block_size - size_of::(), + ) && block_token@.instance == instance && is_block_ptr( + perm@.pptr, + block_token@.key, + ) && block_token@.key.page_id == page_id && block_token@.key.block_size + == block_size + })), + ensures + m2.len() == len, + m2.dom().finite(), + forall|i: nat| + (#[trigger] m2.dom().contains(i) <==> 0 <= i < len) && (m2.dom().contains(i) ==> ({ + let (padding, block_token) = m2[i]; + &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( + block_start(block_token@.key), + block_token@.key.block_size as int, + ) && block_token@.instance == instance && block_token@.key.page_id == page_id + && block_token@.key.block_size == block_size + })), + decreases len, + { + if len == 0 { + let tracked m = Map::tracked_empty(); + assert(m.dom() =~= Set::empty()); + assert(m.len() == 0); + m + } else { + let i = (len - 1) as nat; + let tracked mut m = m; + assert(m.dom().contains(i)); + let tracked (mut perm, padding, block_token) = m.tracked_remove(i); + let tracked mut m2 = Self::convene_pt_map(m, i, instance, page_id, block_size); + crate::layout::get_block_start_from_is_block_ptr(perm@.pptr, block_token@.key); + perm.leak_contents(); + let tracked mut permraw = perm.into_raw(); + let tracked ptraw = permraw.join(padding); + let mj = m2; + 
m2.tracked_insert(i, (ptraw, block_token)); + assert(mj.dom().contains(i) == false); + assert(m2.dom() =~= mj.dom().insert(i)); + assert(m2.dom().len() == mj.dom().len() + 1); + assert(m2.len() == len); + m2 + } + } + + pub proof fn reconvene_state( + tracked inst: Mim::Instance, + tracked ts: &Mim::thread_local_state, + tracked llgstr1: LLGhostStateToReconvene, + tracked llgstr2: LLGhostStateToReconvene, + n_blocks: int, + ) -> (tracked res: (PointsToRaw, Map)) requires - lo <= hi, + llgstr_wf(llgstr1), + llgstr_wf(llgstr2), + llgstr1.block_size == llgstr2.block_size, + llgstr1.page_id == llgstr2.page_id, + llgstr1.instance == inst, + llgstr2.instance == inst, + ts@.instance == inst, + n_blocks >= 0, + llgstr1.map.len() + llgstr2.map.len() == n_blocks, + ts@.value.pages.dom().contains(llgstr1.page_id), + ts@.value.pages[llgstr1.page_id].num_blocks == n_blocks, ensures - set_nat_range(lo, hi).finite(), - set_nat_range(lo, hi).len() == hi - lo, - decreases hi - lo, + ({ + let (points_to, map) = res; + { + &&& map.dom().finite() && map.len() == n_blocks + &&& (forall|block_id| + map.dom().contains(block_id) ==> block_id.page_id == llgstr1.page_id) + &&& (forall|block_id| + map.dom().contains(block_id) ==> map[block_id]@.key == block_id) + &&& (forall|block_id| + map.dom().contains(block_id) ==> map[block_id]@.instance == inst) + &&& points_to.is_range( + block_start_at(llgstr1.page_id, llgstr1.block_size as int, 0), + n_blocks * llgstr1.block_size, + ) + } + }), { - if lo == hi { - assert(set_nat_range(lo, hi) =~= Set::empty()); + let tracked llgstr = Self::llgstr_merge(llgstr1, llgstr2); + let idxmap = Map::::new( + |p| llgstr.map.dom().contains(p), + |p| llgstr.map[p].1@.key.idx, + ); + if exists|p| llgstr.map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks) { + let p = choose|p| llgstr.map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks); + let tracked LLGhostStateToReconvene { map: mut map, .. } = llgstr; + assert(map.dom().contains(p)); + let tracked (_, block_p) = map.tracked_remove(p); + assert(block_p@.instance == inst); + inst.block_in_range(ts@.key, block_p@.key, ts, &block_p); + assert(false); + proof_from_false() + } else if exists|i| 0 <= i < n_blocks && !has_idx(llgstr.map, i) { + let i = choose|i| 0 <= i < n_blocks && !has_idx(llgstr.map, i); + let (p, q) = crate::pigeonhole::pigeonhole_missing_idx_implies_double( + idxmap, + i, + llgstr.map.len(), + ); + let tracked LLGhostStateToReconvene { map: mut map, .. } = llgstr; + let tracked (_, block_p) = map.tracked_remove(p); + let tracked (_, block_q) = map.tracked_remove(q); + inst.block_tokens_distinct(block_p@.key, block_q@.key, block_p, block_q); + assert(false); + proof_from_false() } else { - lemma_nat_range(lo, (hi - 1) as nat); - assert(set_nat_range(lo, (hi - 1) as nat).insert((hi - 1) as nat) =~= set_nat_range( - lo, - hi, - )); + let tracked LLGhostStateToReconvene { map, .. 
} = llgstr; + Self::reconvene_rec(map, map.len(), llgstr.instance, llgstr.page_id, llgstr.block_size) } } - pub closed spec fn llgstr_wf(llgstr: LLGhostStateToReconvene) -> bool { + pub proof fn llgstr_merge( + tracked llgstr1: LLGhostStateToReconvene, + tracked llgstr2: LLGhostStateToReconvene, + ) -> (tracked llgstr: LLGhostStateToReconvene) + requires + llgstr_wf(llgstr1), + llgstr_wf(llgstr2), + llgstr1.block_size == llgstr2.block_size, + llgstr1.page_id == llgstr2.page_id, + llgstr1.instance == llgstr2.instance, + ensures + llgstr_wf(llgstr), + llgstr.block_size == llgstr2.block_size, + llgstr.page_id == llgstr2.page_id, + llgstr.instance == llgstr2.instance, + llgstr.map.len() == llgstr1.map.len() + llgstr2.map.len(), + { + let tracked LLGhostStateToReconvene { map: mut map1, .. } = llgstr1; + let tracked LLGhostStateToReconvene { map: mut map2, .. } = llgstr2; + map2.tracked_map_keys_in_place( + Map::::new( + |k: nat| map1.len() <= k < map1.len() + map2.len(), + |k: nat| (k - map1.len()) as nat, + ), + ); + map1.tracked_union_prefer_right(map2); + assert(map1.dom() =~= set_nat_range(0, llgstr1.map.len() + llgstr2.map.len())); + lemma_nat_range(0, llgstr1.map.len() + llgstr2.map.len()); + assert(map1.len() == llgstr1.map.len() + llgstr2.map.len()); + let tracked llgstr = LLGhostStateToReconvene { + map: map1, + block_size: llgstr1.block_size, + page_id: llgstr1.page_id, + instance: llgstr1.instance, + }; let len = llgstr.map.len(); let map = llgstr.map; let block_size = llgstr.block_size; let page_id = llgstr.page_id; let instance = llgstr.instance; - forall|i: nat| - (#[trigger] - map.dom().contains(i) <==> 0 <= i < len) && (map.dom().contains(i) ==> ({ - let (padding, block_token) = map[i]; - &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( - block_start(block_token@.key), - block_token@.key.block_size as int, - ) && block_token@.instance == instance && block_token@.key.page_id == page_id - && block_token@.key.block_size == block_size - })) + assert forall|i: nat| (#[trigger] map.dom().contains(i) <==> 0 <= i < len) by {} + assert forall|i: nat| #[trigger] map.dom().contains(i) implies ({ + let (padding, block_token) = map[i]; + &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( + block_start(block_token@.key), + block_token@.key.block_size as int, + ) && block_token@.instance == instance && block_token@.key.page_id == page_id + && block_token@.key.block_size == block_size + }) by { + let (padding, block_token) = map[i]; + if i < llgstr1.map.len() { + assert(block_token@.key.block_size - size_of::() >= 0); + } else { + let t = (i - llgstr1.map.len()) as nat; + assert(llgstr2.map.dom().contains(t)); + assert(block_token@.key.block_size - size_of::() >= 0); + } + } + llgstr } - #[inline(always)] - pub fn bound_on_2_lists( - Tracked(instance): Tracked, - Tracked(thread_token): Tracked<&Mim::thread_local_state>, - ll1: &mut LL, - ll2: &mut LL, - ) + pub proof fn reconvene_rec( + tracked m: Map, + len: nat, + instance: Mim::Instance, + page_id: PageId, + block_size: nat, + ) -> (tracked res: (PointsToRaw, Map)) requires - thread_token@.instance == instance, - old(ll1).wf(), - old(ll2).wf(), - old(ll1).fixed_page(), - old(ll2).fixed_page(), - old(ll1).instance() == instance, - old(ll2).instance() == instance, - old(ll1).page_id() == old(ll2).page_id(), - // shouldn't really be necessary, but I'm reusing llgstr_merge - // which requires it - old(ll1).block_size() == old(ll2).block_size(), - 
thread_token@.value.pages.dom().contains(old(ll1).page_id()), + forall|j: nat| 0 <= j < len ==> #[trigger] has_idx(m, j), + forall|i: nat| + (m.dom().contains(i) ==> ({ + let (padding, block_token) = m[i]; + &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( + block_start(block_token@.key), + block_token@.key.block_size as int, + ) && block_token@.instance == instance && block_token@.key.page_id == page_id + && block_token@.key.block_size == block_size + })), ensures - *ll1 == *old(ll1), - *ll2 == *old(ll2), - ll1.len() + ll2.len() <= thread_token@.value.pages[ll1.page_id()].num_blocks, - { - proof { - assert(forall|i: nat| - #[trigger] - ll1.perms@.dom().contains(i) ==> ll1.valid_node(i, ll1.next_ptr(i))); - assert(forall|i: nat| - #[trigger] - ll2.perms@.dom().contains(i) ==> ll2.valid_node(i, ll2.next_ptr(i))); - let page_id = ll1.page_id(); - let block_size = ll1.block_size(); - let n_blocks = thread_token@.value.pages[ll1.page_id()].num_blocks; - if ll1.len() + ll2.len() > n_blocks { - let len = ll1.len(); - let tracked mut m = Map::tracked_empty(); - tracked_swap(&mut m, ll1.perms.borrow_mut()); - assert forall|i: nat| - (#[trigger] - m.dom().contains(i) <==> 0 <= i < len) by { - if 0 <= i < len { - assert(old(ll1).valid_node(i, old(ll1).next_ptr(i))); - assert(m.dom().contains(i)); - } + ({ + let (points_to, map) = res; + { + &&& map.dom().finite() && map.len() == len + &&& (forall|block_id| + map.dom().contains(block_id) ==> block_id.page_id == page_id) + &&& (forall|block_id| + map.dom().contains(block_id) ==> map[block_id]@.key == block_id) + &&& (forall|block_id| + map.dom().contains(block_id) ==> map[block_id]@.instance == instance) + &&& (forall|block_id| map.dom().contains(block_id) ==> 0 <= block_id.idx < len) + &&& points_to.is_range( + block_start_at(page_id, block_size as int, 0), + (len * block_size) as int, + ) } - let tracked mut map = LL::convene_pt_map(m, len, instance, page_id, block_size); - let tracked llgstr1 = LLGhostStateToReconvene { - map: map, - block_size, - page_id, - instance, - }; - let len = ll2.len(); - let tracked mut m = Map::tracked_empty(); - tracked_swap(&mut m, ll2.perms.borrow_mut()); - assert forall|i: nat| - (#[trigger] - m.dom().contains(i) <==> 0 <= i < len) by { - if 0 <= i < len { - assert(old(ll2).valid_node(i, old(ll2).next_ptr(i))); - assert(m.dom().contains(i)); - } + }), + decreases len, + { + if len == 0 { + (PointsToRaw::empty(), Map::tracked_empty()) + } else { + let j = (len - 1) as nat; + assert(has_idx(m, j)); + let i = choose|i: nat| m.dom().contains(i) && m[i].1@.key.idx == j; + let old_m = m; + let tracked mut m = m; + let tracked (ptraw, block) = m.tracked_remove(i); + assert forall|k: nat| 0 <= k < (len - 1) as nat implies has_idx(m, k) by { + assert(has_idx(old_m, k)); + let p = choose|p: nat| old_m.dom().contains(p) && old_m[p].1@.key.idx == k; + assert(m.dom().contains(p) && m[p].1@.key.idx == k); + } + let tracked (ptraw1, mut blocks) = Self::reconvene_rec( + m, + (len - 1) as nat, + instance, + page_id, + block_size, + ); + let tracked ptraw2 = ptraw1.join(ptraw); + let old_blocks = blocks; + blocks.tracked_insert(block@.key, block); + assert(block@.key.idx == len - 1); + assert(old_blocks.dom().contains(block@.key) == false); + assert(blocks.dom() =~= old_blocks.dom().insert(block@.key)); + assert(blocks.dom().len() == len); + assert((len - 1) * block_size + block_size == len * block_size) by (nonlinear_arith); + crate::layout::get_block_start_defn(block@.key); + (ptraw2, blocks) + } + } +} + +pub 
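// ---------------------------------------------------------------------------
// Editor's aside (not part of the verified snapshot): `reconvene_state` above
// rules out both a block index falling outside 0..n_blocks (via block_in_range)
// and a missing index, which by pigeonhole would force two distinct block
// tokens to share an index, contradicting block_tokens_distinct. The unverified
// helper below finds such a colliding pair concretely; it only illustrates the
// counting argument and is not the proof itself.
use std::collections::HashMap;

fn find_shared_idx(idx: &[usize]) -> Option<(usize, usize)> {
    let mut seen: HashMap<usize, usize> = HashMap::new();
    for (item, &i) in idx.iter().enumerate() {
        if let Some(prev) = seen.insert(i, item) {
            return Some((prev, item)); // two items claim the same block index
        }
    }
    None
}

fn main() {
    // Four tokens whose indices miss 2 within 0..4, so some index must repeat.
    assert_eq!(find_shared_idx(&[0, 1, 3, 1]), Some((1, 3)));
    assert_eq!(find_shared_idx(&[0, 1, 2, 3]), None);
}
// ---------------------------------------------------------------------------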
closed spec fn has_idx(map: Map, i: nat) -> bool { + exists|p: nat| map.dom().contains(p) && map[p].1@.key.idx == i +} + +pub open spec fn set_nat_range(lo: nat, hi: nat) -> Set { + Set::new(|i: nat| lo <= i && i < hi) +} + +pub proof fn lemma_nat_range(lo: nat, hi: nat) + requires + lo <= hi, + ensures + set_nat_range(lo, hi).finite(), + set_nat_range(lo, hi).len() == hi - lo, + decreases hi - lo, +{ + if lo == hi { + assert(set_nat_range(lo, hi) =~= Set::empty()); + } else { + lemma_nat_range(lo, (hi - 1) as nat); + assert(set_nat_range(lo, (hi - 1) as nat).insert((hi - 1) as nat) =~= set_nat_range( + lo, + hi, + )); + } +} + +pub closed spec fn llgstr_wf(llgstr: LLGhostStateToReconvene) -> bool { + let len = llgstr.map.len(); + let map = llgstr.map; + let block_size = llgstr.block_size; + let page_id = llgstr.page_id; + let instance = llgstr.instance; + forall|i: nat| + (#[trigger] map.dom().contains(i) <==> 0 <= i < len) && (map.dom().contains(i) ==> ({ + let (padding, block_token) = map[i]; + &&block_token@.key.block_size - size_of::() >= 0 && padding.is_range( + block_start(block_token@.key), + block_token@.key.block_size as int, + ) && block_token@.instance == instance && block_token@.key.page_id == page_id + && block_token@.key.block_size == block_size + })) +} + +#[inline(always)] +pub fn bound_on_2_lists( + Tracked(instance): Tracked, + Tracked(thread_token): Tracked<&Mim::thread_local_state>, + ll1: &mut LL, + ll2: &mut LL, +) + requires + thread_token@.instance == instance, + old(ll1).wf(), + old(ll2).wf(), + old(ll1).fixed_page(), + old(ll2).fixed_page(), + old(ll1).instance() == instance, + old(ll2).instance() == instance, + old(ll1).page_id() == old(ll2).page_id(), + // shouldn't really be necessary, but I'm reusing llgstr_merge + // which requires it + old(ll1).block_size() == old(ll2).block_size(), + thread_token@.value.pages.dom().contains(old(ll1).page_id()), + ensures + *ll1 == *old(ll1), + *ll2 == *old(ll2), + ll1.len() + ll2.len() <= thread_token@.value.pages[ll1.page_id()].num_blocks, +{ + proof { + assert(forall|i: nat| #[trigger] + ll1.perms@.dom().contains(i) ==> ll1.valid_node(i, ll1.next_ptr(i))); + assert(forall|i: nat| #[trigger] + ll2.perms@.dom().contains(i) ==> ll2.valid_node(i, ll2.next_ptr(i))); + let page_id = ll1.page_id(); + let block_size = ll1.block_size(); + let n_blocks = thread_token@.value.pages[ll1.page_id()].num_blocks; + if ll1.len() + ll2.len() > n_blocks { + let len = ll1.len(); + let tracked mut m = Map::tracked_empty(); + tracked_swap(&mut m, ll1.perms.borrow_mut()); + assert forall|i: nat| (#[trigger] m.dom().contains(i) <==> 0 <= i < len) by { + if 0 <= i < len { + assert(old(ll1).valid_node(i, old(ll1).next_ptr(i))); + assert(m.dom().contains(i)); } - let tracked mut map = LL::convene_pt_map(m, len, instance, page_id, block_size); - let tracked llgstr2 = LLGhostStateToReconvene { - map: map, - block_size, - page_id, - instance, - }; - let tracked llgstr = LL::llgstr_merge(llgstr1, llgstr2); - let tracked LLGhostStateToReconvene { map: mut map, .. 
} = llgstr; - let idxmap = Map::::new(|p| map.dom().contains(p), |p| map[p].1@.key.idx); - if exists|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks) { - let p = choose|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks); - assert(map.dom().contains(p)); - let tracked (_, block_p) = map.tracked_remove(p); - assert(block_p@.instance == instance); - instance.block_in_range(thread_token@.key, block_p@.key, thread_token, &block_p); - assert(false); - } else { - let (p, q) = crate::pigeonhole::pigeonhole_too_many_elements_implies_double( - idxmap, - (map.len() - 1) as nat, - ); - let tracked (_, block_p) = map.tracked_remove(p); - let tracked (_, block_q) = map.tracked_remove(q); - instance.block_tokens_distinct(block_p@.key, block_q@.key, block_p, block_q); - assert(false); + } + let tracked mut map = LL::convene_pt_map(m, len, instance, page_id, block_size); + let tracked llgstr1 = LLGhostStateToReconvene { + map: map, + block_size, + page_id, + instance, + }; + let len = ll2.len(); + let tracked mut m = Map::tracked_empty(); + tracked_swap(&mut m, ll2.perms.borrow_mut()); + assert forall|i: nat| (#[trigger] m.dom().contains(i) <==> 0 <= i < len) by { + if 0 <= i < len { + assert(old(ll2).valid_node(i, old(ll2).next_ptr(i))); + assert(m.dom().contains(i)); } } + let tracked mut map = LL::convene_pt_map(m, len, instance, page_id, block_size); + let tracked llgstr2 = LLGhostStateToReconvene { + map: map, + block_size, + page_id, + instance, + }; + let tracked llgstr = LL::llgstr_merge(llgstr1, llgstr2); + let tracked LLGhostStateToReconvene { map: mut map, .. } = llgstr; + let idxmap = Map::::new(|p| map.dom().contains(p), |p| map[p].1@.key.idx); + if exists|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks) { + let p = choose|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks); + assert(map.dom().contains(p)); + let tracked (_, block_p) = map.tracked_remove(p); + assert(block_p@.instance == instance); + instance.block_in_range(thread_token@.key, block_p@.key, thread_token, &block_p); + assert(false); + } else { + let (p, q) = crate::pigeonhole::pigeonhole_too_many_elements_implies_double( + idxmap, + (map.len() - 1) as nat, + ); + let tracked (_, block_p) = map.tracked_remove(p); + let tracked (_, block_q) = map.tracked_remove(q); + instance.block_tokens_distinct(block_p@.key, block_q@.key, block_p, block_q); + assert(false); + } } } +} - #[inline(always)] - pub fn bound_on_1_lists( - Tracked(instance): Tracked, - Tracked(thread_token): Tracked<&Mim::thread_local_state>, - ll1: &mut LL, - ) - requires - thread_token@.instance == instance, - old(ll1).wf(), - old(ll1).fixed_page(), - old(ll1).instance() == instance, - thread_token@.value.pages.dom().contains(old(ll1).page_id()), - ensures - *ll1 == *old(ll1), - ll1.len() <= thread_token@.value.pages[ll1.page_id()].num_blocks, - { - proof { - assert(forall|i: nat| - #[trigger] - ll1.perms@.dom().contains(i) ==> ll1.valid_node(i, ll1.next_ptr(i))); - let page_id = ll1.page_id(); - let block_size = ll1.block_size(); - let n_blocks = thread_token@.value.pages[ll1.page_id()].num_blocks; - if ll1.len() > n_blocks { - let len = ll1.len(); - let tracked mut m = Map::tracked_empty(); - tracked_swap(&mut m, ll1.perms.borrow_mut()); - assert forall|i: nat| - (#[trigger] - m.dom().contains(i) <==> 0 <= i < len) by { - if 0 <= i < len { - assert(old(ll1).valid_node(i, old(ll1).next_ptr(i))); - assert(m.dom().contains(i)); - } - } - let tracked mut map = LL::convene_pt_map(m, len, instance, page_id, block_size); - let idxmap = 
Map::::new(|p| map.dom().contains(p), |p| map[p].1@.key.idx); - if exists|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks) { - let p = choose|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks); - assert(map.dom().contains(p)); - let tracked (_, block_p) = map.tracked_remove(p); - assert(block_p@.instance == instance); - instance.block_in_range(thread_token@.key, block_p@.key, thread_token, &block_p); - assert(false); - } else { - let (p, q) = crate::pigeonhole::pigeonhole_too_many_elements_implies_double( - idxmap, - (map.len() - 1) as nat, - ); - let tracked (_, block_p) = map.tracked_remove(p); - let tracked (_, block_q) = map.tracked_remove(q); - instance.block_tokens_distinct(block_p@.key, block_q@.key, block_p, block_q); - assert(false); +#[inline(always)] +pub fn bound_on_1_lists( + Tracked(instance): Tracked, + Tracked(thread_token): Tracked<&Mim::thread_local_state>, + ll1: &mut LL, +) + requires + thread_token@.instance == instance, + old(ll1).wf(), + old(ll1).fixed_page(), + old(ll1).instance() == instance, + thread_token@.value.pages.dom().contains(old(ll1).page_id()), + ensures + *ll1 == *old(ll1), + ll1.len() <= thread_token@.value.pages[ll1.page_id()].num_blocks, +{ + proof { + assert(forall|i: nat| #[trigger] + ll1.perms@.dom().contains(i) ==> ll1.valid_node(i, ll1.next_ptr(i))); + let page_id = ll1.page_id(); + let block_size = ll1.block_size(); + let n_blocks = thread_token@.value.pages[ll1.page_id()].num_blocks; + if ll1.len() > n_blocks { + let len = ll1.len(); + let tracked mut m = Map::tracked_empty(); + tracked_swap(&mut m, ll1.perms.borrow_mut()); + assert forall|i: nat| (#[trigger] m.dom().contains(i) <==> 0 <= i < len) by { + if 0 <= i < len { + assert(old(ll1).valid_node(i, old(ll1).next_ptr(i))); + assert(m.dom().contains(i)); } } + let tracked mut map = LL::convene_pt_map(m, len, instance, page_id, block_size); + let idxmap = Map::::new(|p| map.dom().contains(p), |p| map[p].1@.key.idx); + if exists|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks) { + let p = choose|p| map.dom().contains(p) && !(0 <= idxmap[p] < n_blocks); + assert(map.dom().contains(p)); + let tracked (_, block_p) = map.tracked_remove(p); + assert(block_p@.instance == instance); + instance.block_in_range(thread_token@.key, block_p@.key, thread_token, &block_p); + assert(false); + } else { + let (p, q) = crate::pigeonhole::pigeonhole_too_many_elements_implies_double( + idxmap, + (map.len() - 1) as nat, + ); + let tracked (_, block_p) = map.tracked_remove(p); + let tracked (_, block_q) = map.tracked_remove(q); + instance.block_tokens_distinct(block_p@.key, block_q@.key, block_p, block_q); + assert(false); + } } } +} - struct_with_invariants!{ +struct_with_invariants!{ pub struct ThreadLLSimple { pub instance: Ghost, pub heap_id: Ghost, @@ -15824,85 +15749,85 @@ mod linked_list { } } - impl ThreadLLSimple { - #[inline(always)] - pub fn empty(Ghost(instance): Ghost, Ghost(heap_id): Ghost) -> (s: Self) - ensures - s.wf(), - s.instance@ == instance, - s.heap_id@ == heap_id, - { - let p: PPtr = PPtr::from_usize(0); - Self { - instance: Ghost(instance), - heap_id: Ghost(heap_id), - atomic: AtomicUsize::new( - Ghost((Ghost(instance), Ghost(heap_id))), - 0, - Tracked( - LL { - first: p, - data: Ghost( - LLData { - fixed_page: false, - block_size: arbitrary(), - page_id: arbitrary(), - instance, - len: 0, - heap_id: Some(heap_id), - }, - ), - perms: Tracked(Map::tracked_empty()), - }, - ), +impl ThreadLLSimple { + #[inline(always)] + pub fn empty(Ghost(instance): Ghost, 
Ghost(heap_id): Ghost) -> (s: Self) + ensures + s.wf(), + s.instance@ == instance, + s.heap_id@ == heap_id, + { + let p: PPtr = PPtr::from_usize(0); + Self { + instance: Ghost(instance), + heap_id: Ghost(heap_id), + atomic: AtomicUsize::new( + Ghost((Ghost(instance), Ghost(heap_id))), + 0, + Tracked( + LL { + first: p, + data: Ghost( + LLData { + fixed_page: false, + block_size: arbitrary(), + page_id: arbitrary(), + instance, + len: 0, + heap_id: Some(heap_id), + }, + ), + perms: Tracked(Map::tracked_empty()), + }, ), - } + ), } + } - // Oughta have a similar spec as LL:insert_block except that - // (i) self argument is a & reference so we don't need to talk about how it updates - // (ii) is we don't expose the length - #[inline(always)] - pub fn atomic_insert_block( - &self, - ptr: PPtr, - Tracked(points_to_raw): Tracked, - Tracked(block_token): Tracked, - ) - requires + // Oughta have a similar spec as LL:insert_block except that + // (i) self argument is a & reference so we don't need to talk about how it updates + // (ii) is we don't expose the length + #[inline(always)] + pub fn atomic_insert_block( + &self, + ptr: PPtr, + Tracked(points_to_raw): Tracked, + Tracked(block_token): Tracked, + ) + requires + self.wf(), + points_to_raw.is_range(ptr.id(), block_token@.key.block_size as int), + block_token@.instance == self.instance, + block_token@.value.heap_id == Some(self.heap_id@), + is_block_ptr(ptr.id(), block_token@.key), + { + let tracked mut points_to_raw = points_to_raw; + let tracked mut block_token_opt = Some(block_token); + loop + invariant + block_token_opt == Some(block_token), self.wf(), points_to_raw.is_range(ptr.id(), block_token@.key.block_size as int), block_token@.instance == self.instance, block_token@.value.heap_id == Some(self.heap_id@), is_block_ptr(ptr.id(), block_token@.key), { - let tracked mut points_to_raw = points_to_raw; - let tracked mut block_token_opt = Some(block_token); - loop - invariant - block_token_opt == Some(block_token), - self.wf(), - points_to_raw.is_range(ptr.id(), block_token@.key.block_size as int), - block_token@.instance == self.instance, - block_token@.value.heap_id == Some(self.heap_id@), - is_block_ptr(ptr.id(), block_token@.key), - { - let next_int = - my_atomic_with_ghost!( + let next_int = + my_atomic_with_ghost!( &self.atomic => load(); ghost g => { }); - let next_ptr = PPtr::::from_usize(next_int); - proof { - block_size_ge_word(); - block_ptr_aligned_to_word(); - } - let (Tracked(ptr_mem0), Tracked(raw_mem0)) = LL::block_write_ptr( - ptr, - Tracked(points_to_raw), - next_ptr, - ); - let p = ptr.to_usize(); - let cas_result = - my_atomic_with_ghost!( + let next_ptr = PPtr::::from_usize(next_int); + proof { + block_size_ge_word(); + block_ptr_aligned_to_word(); + } + let (Tracked(ptr_mem0), Tracked(raw_mem0)) = LL::block_write_ptr( + ptr, + Tracked(points_to_raw), + next_ptr, + ); + let p = ptr.to_usize(); + let cas_result = + my_atomic_with_ghost!( &self.atomic => compare_exchange_weak(next_int, p); returning cas_result; ghost ghost_ll => @@ -15923,38 +15848,38 @@ mod linked_list { points_to_raw = ptr_mem.into_raw().join(raw_mem); } }); - match cas_result { - Result::Ok(_) => { - break ; - }, - _ => {}, - } + match cas_result { + Result::Ok(_) => { + break ; + }, + _ => {}, } } + } - #[inline(always)] - pub fn take(&self) -> (ll: LL) - requires - self.wf(), - ensures - ll.wf(), - ll.instance() == self.instance, - ll.heap_id() == Some(self.heap_id@), - { - let res = self.atomic.load(); - if res == 0 { - return LL::new( - 
Ghost(arbitrary()), - Ghost(arbitrary()), - Ghost(self.instance@), - Ghost(arbitrary()), - Ghost(Some(self.heap_id@)), - ); - } - let tracked ll: LL; - let p = PPtr::::from_usize(0); - let res = - my_atomic_with_ghost!( + #[inline(always)] + pub fn take(&self) -> (ll: LL) + requires + self.wf(), + ensures + ll.wf(), + ll.instance() == self.instance, + ll.heap_id() == Some(self.heap_id@), + { + let res = self.atomic.load(); + if res == 0 { + return LL::new( + Ghost(arbitrary()), + Ghost(arbitrary()), + Ghost(self.instance@), + Ghost(arbitrary()), + Ghost(Some(self.heap_id@)), + ); + } + let tracked ll: LL; + let p = PPtr::::from_usize(0); + let res = + my_atomic_with_ghost!( &self.atomic => swap(0); ghost g => { ll = g; @@ -15968,23 +15893,23 @@ mod linked_list { g = new_ll; } ); - let new_ll = LL { - first: PPtr::from_usize(res), - data: Ghost(ll.data@), - perms: Tracked(ll.perms.get()), - }; - assert(forall|i: nat| - ll.valid_node(i, ll.next_ptr(i)) ==> new_ll.valid_node(i, new_ll.next_ptr(i))); - new_ll - } + let new_ll = LL { + first: PPtr::from_usize(res), + data: Ghost(ll.data@), + perms: Tracked(ll.perms.get()), + }; + assert(forall|i: nat| + ll.valid_node(i, ll.next_ptr(i)) ==> new_ll.valid_node(i, new_ll.next_ptr(i))); + new_ll } +} - pub struct BlockSizePageId { - pub block_size: nat, - pub page_id: PageId, - } +pub struct BlockSizePageId { + pub block_size: nat, + pub page_id: PageId, +} - tokenized_state_machine!{ StuffAgree { +tokenized_state_machine!{ StuffAgree { fields { #[sharding(variable)] pub x: Option, #[sharding(variable)] pub y: Option, @@ -16019,7 +15944,7 @@ mod linked_list { fn set_inductive(pre: Self, post: Self, b: Option) { } }} - struct_with_invariants!{ +struct_with_invariants!{ pub struct ThreadLLWithDelayBits { pub instance: Tracked, @@ -16083,78 +16008,78 @@ mod linked_list { } } - impl ThreadLLWithDelayBits { - pub open spec fn is_empty(&self) -> bool { - self.emp@@.value.is_none() - } +impl ThreadLLWithDelayBits { + pub open spec fn is_empty(&self) -> bool { + self.emp@@.value.is_none() + } - pub open spec fn block_size(&self) -> nat { - self.emp@@.value.unwrap().block_size - } + pub open spec fn block_size(&self) -> nat { + self.emp@@.value.unwrap().block_size + } - pub open spec fn page_id(&self) -> PageId { - self.emp@@.value.unwrap().page_id - } + pub open spec fn page_id(&self) -> PageId { + self.emp@@.value.unwrap().page_id + } - pub fn empty(Tracked(instance): Tracked) -> (ll: ThreadLLWithDelayBits) - ensures - ll.is_empty(), - ll.wf(), - ll.instance == instance, - { - let tracked (Tracked(emp_inst), Tracked(emp_x), Tracked(emp_y)) = - StuffAgree::Instance::initialize(None); - let emp = Tracked(emp_x); - let emp_inst = Tracked(emp_inst); - ThreadLLWithDelayBits { - instance: Tracked(instance), - atomic: AtomicUsize::new( - Ghost((Tracked(instance), emp_inst)), - 0, - Tracked((emp_y, None)), - ), - emp, - emp_inst, - } + pub fn empty(Tracked(instance): Tracked) -> (ll: ThreadLLWithDelayBits) + ensures + ll.is_empty(), + ll.wf(), + ll.instance == instance, + { + let tracked (Tracked(emp_inst), Tracked(emp_x), Tracked(emp_y)) = + StuffAgree::Instance::initialize(None); + let emp = Tracked(emp_x); + let emp_inst = Tracked(emp_inst); + ThreadLLWithDelayBits { + instance: Tracked(instance), + atomic: AtomicUsize::new( + Ghost((Tracked(instance), emp_inst)), + 0, + Tracked((emp_y, None)), + ), + emp, + emp_inst, } + } - #[inline(always)] - pub fn enable( - &mut self, - Ghost(block_size): Ghost, - Ghost(page_id): Ghost, - Tracked(instance): Tracked, - 
Tracked(delay_token): Tracked, - ) - requires - old(self).is_empty(), - old(self).wf(), - old(self).instance == instance, - delay_token@.instance == instance, - delay_token@.key == page_id, - delay_token@.value == DelayState::UseDelayedFree, - ensures - self.wf(), - !self.is_empty(), - self.block_size() == block_size, - self.page_id() == page_id, - self.instance == instance, - { - let p = PPtr::::from_usize(0); - let ghost data = LLData { - fixed_page: true, - block_size, - page_id, - instance, - len: 0, - heap_id: None, - }; - let tracked new_ll = LL { - first: p, - data: Ghost(data), - perms: Tracked(Map::tracked_empty()), - }; - my_atomic_with_ghost!( + #[inline(always)] + pub fn enable( + &mut self, + Ghost(block_size): Ghost, + Ghost(page_id): Ghost, + Tracked(instance): Tracked, + Tracked(delay_token): Tracked, + ) + requires + old(self).is_empty(), + old(self).wf(), + old(self).instance == instance, + delay_token@.instance == instance, + delay_token@.key == page_id, + delay_token@.value == DelayState::UseDelayedFree, + ensures + self.wf(), + !self.is_empty(), + self.block_size() == block_size, + self.page_id() == page_id, + self.instance == instance, + { + let p = PPtr::::from_usize(0); + let ghost data = LLData { + fixed_page: true, + block_size, + page_id, + instance, + len: 0, + heap_id: None, + }; + let tracked new_ll = LL { + first: p, + data: Ghost(data), + perms: Tracked(Map::tracked_empty()), + }; + my_atomic_with_ghost!( &self.atomic => no_op(); update old_v -> v; ghost g => { @@ -16195,37 +16120,37 @@ mod linked_list { } ); - } + } - #[inline(always)] - pub fn disable(&mut self) -> (delay: Tracked) - requires - !old(self).is_empty(), - old(self).wf(), - ensures - self.wf(), - self.is_empty(), - self.instance == old(self).instance, - delay@@.instance == old(self).instance, - delay@@.key == old(self).page_id(), - { - let mut tmp = Self::empty(Tracked(self.instance.borrow().clone())); - core::mem::swap(&mut *self, &mut tmp); - let ThreadLLWithDelayBits { - instance: Tracked(instance), - atomic: ato, - emp: Tracked(emp), - emp_inst: Tracked(emp_inst), - } = tmp; - let (v, Tracked(g)) = ato.into_inner(); - let tracked (y, g_opt) = g; - proof { - emp_inst.agree(&emp, &y); - } - Tracked(g_opt.tracked_unwrap().0) + #[inline(always)] + pub fn disable(&mut self) -> (delay: Tracked) + requires + !old(self).is_empty(), + old(self).wf(), + ensures + self.wf(), + self.is_empty(), + self.instance == old(self).instance, + delay@@.instance == old(self).instance, + delay@@.key == old(self).page_id(), + { + let mut tmp = Self::empty(Tracked(self.instance.borrow().clone())); + core::mem::swap(&mut *self, &mut tmp); + let ThreadLLWithDelayBits { + instance: Tracked(instance), + atomic: ato, + emp: Tracked(emp), + emp_inst: Tracked(emp_inst), + } = tmp; + let (v, Tracked(g)) = ato.into_inner(); + let tracked (y, g_opt) = g; + proof { + emp_inst.agree(&emp, &y); } + Tracked(g_opt.tracked_unwrap().0) + } - /*#[inline(always)] + /*#[inline(always)] pub fn exit_delaying_state( &self, Tracked(delay_actor_token): Tracked, @@ -16258,13 +16183,30 @@ mod linked_list { } ); }*/ - #[inline(always)] - pub fn check_is_good( - &self, - Tracked(thread_tok): Tracked<&Mim::thread_local_state>, - Tracked(tok): Tracked, - ) -> (new_tok: Tracked) - requires + #[inline(always)] + pub fn check_is_good( + &self, + Tracked(thread_tok): Tracked<&Mim::thread_local_state>, + Tracked(tok): Tracked, + ) -> (new_tok: Tracked) + requires + self.wf(), + !self.is_empty(), + thread_tok@.instance == self.instance, + 
thread_tok@.value.pages.dom().contains(self.page_id()), + thread_tok@.value.pages[self.page_id()].num_blocks == 0, + tok@.instance == self.instance, + tok@.key == thread_tok@.key, + ensures + new_tok@@.instance == tok@.instance, + new_tok@@.key == tok@.key, + new_tok@@.value == (crate::tokens::ThreadCheckedState { + pages: tok@.value.pages.insert(self.page_id()), + }), + { + let tracked mut tok0 = tok; + loop + invariant self.wf(), !self.is_empty(), thread_tok@.instance == self.instance, @@ -16272,29 +16214,12 @@ mod linked_list { thread_tok@.value.pages[self.page_id()].num_blocks == 0, tok@.instance == self.instance, tok@.key == thread_tok@.key, - ensures - new_tok@@.instance == tok@.instance, - new_tok@@.key == tok@.key, - new_tok@@.value == (crate::tokens::ThreadCheckedState { - pages: tok@.value.pages.insert(self.page_id()), - }), + tok0 == tok, { - let tracked mut tok0 = tok; - loop - invariant - self.wf(), - !self.is_empty(), - thread_tok@.instance == self.instance, - thread_tok@.value.pages.dom().contains(self.page_id()), - thread_tok@.value.pages[self.page_id()].num_blocks == 0, - tok@.instance == self.instance, - tok@.key == thread_tok@.key, - tok0 == tok, - { - let ghost mut the_ptr; - let ghost mut the_delay; - let tfree = - my_atomic_with_ghost!(&self.atomic => load(); ghost g => { + let ghost mut the_ptr; + let ghost mut the_delay; + let tfree = + my_atomic_with_ghost!(&self.atomic => load(); ghost g => { self.emp_inst.borrow().agree(self.emp.borrow(), &g.0); the_ptr = g.1.unwrap().1.ptr(); the_delay = g.1.unwrap().0.view().value; @@ -16309,54 +16234,54 @@ mod linked_list { tok0 = new_tok; } }); - let old_delay = masked_ptr_delay_get_delay(tfree, Ghost(the_delay), Ghost(the_ptr)); - if unlikely(old_delay == 1) { // Freeing - atomic_yield(); - } else { - return Tracked(tok0); - } + let old_delay = masked_ptr_delay_get_delay(tfree, Ghost(the_delay), Ghost(the_ptr)); + if unlikely(old_delay == 1) { // Freeing + atomic_yield(); + } else { + return Tracked(tok0); } } + } - #[inline(always)] - pub fn try_use_delayed_free(&self, delay: usize, override_never: bool) -> (b: bool) - requires + #[inline(always)] + pub fn try_use_delayed_free(&self, delay: usize, override_never: bool) -> (b: bool) + requires + self.wf(), + !self.is_empty(), + !override_never && delay == 0, // UseDelayedFree + { + let mut yield_count = 0; + loop + invariant self.wf(), !self.is_empty(), - !override_never && delay == 0, // UseDelayedFree + !override_never, + delay == 0, { - let mut yield_count = 0; - loop - invariant - self.wf(), - !self.is_empty(), - !override_never, - delay == 0, - { - let ghost mut the_ptr; - let ghost mut the_delay; - let tfree = - my_atomic_with_ghost!(&self.atomic => load(); ghost g => { + let ghost mut the_ptr; + let ghost mut the_delay; + let tfree = + my_atomic_with_ghost!(&self.atomic => load(); ghost g => { self.emp_inst.borrow().agree(self.emp.borrow(), &g.0); the_ptr = g.1.unwrap().1.ptr(); the_delay = g.1.unwrap().0.view().value; }); - let tfreex = masked_ptr_delay_set_delay(tfree, delay, Ghost(the_delay), Ghost(the_ptr)); - let old_delay = masked_ptr_delay_get_delay(tfree, Ghost(the_delay), Ghost(the_ptr)); - if unlikely(old_delay == 1) { // Freeing - if yield_count >= 4 { - return false; - } - yield_count += 1; - atomic_yield(); - } else if delay == old_delay { - return true; - } else if !override_never && old_delay == 3 { - return true; + let tfreex = masked_ptr_delay_set_delay(tfree, delay, Ghost(the_delay), Ghost(the_ptr)); + let old_delay = 
masked_ptr_delay_get_delay(tfree, Ghost(the_delay), Ghost(the_ptr)); + if unlikely(old_delay == 1) { // Freeing + if yield_count >= 4 { + return false; } - if old_delay != 1 { - let res = - my_atomic_with_ghost!( + yield_count += 1; + atomic_yield(); + } else if delay == old_delay { + return true; + } else if !override_never && old_delay == 3 { + return true; + } + if old_delay != 1 { + let res = + my_atomic_with_ghost!( &self.atomic => compare_exchange_weak(tfree, tfreex); returning cas_result; ghost g => { @@ -16370,31 +16295,31 @@ mod linked_list { } } ); - if res.is_ok() { - return true; - } + if res.is_ok() { + return true; } } } + } - // Clears the list (but leaves the 'delay' bit intact) - #[inline(always)] - pub fn take(&self) -> (ll: LL) - requires - self.wf(), - !self.is_empty(), - ensures - ll.wf(), - ll.page_id() == self.page_id(), - ll.block_size() == self.block_size(), - ll.instance() == self.instance, - ll.heap_id().is_none(), - ll.fixed_page(), - { - let tracked ll: LL; - let p = PPtr::::from_usize(0); - let res = - my_atomic_with_ghost!( + // Clears the list (but leaves the 'delay' bit intact) + #[inline(always)] + pub fn take(&self) -> (ll: LL) + requires + self.wf(), + !self.is_empty(), + ensures + ll.wf(), + ll.page_id() == self.page_id(), + ll.block_size() == self.block_size(), + ll.instance() == self.instance, + ll.heap_id().is_none(), + ll.fixed_page(), + { + let tracked ll: LL; + let p = PPtr::::from_usize(0); + let res = + my_atomic_with_ghost!( &self.atomic => fetch_and(3); update old_v -> new_v; ghost g => { @@ -16420,137 +16345,132 @@ mod linked_list { requires x % 4 == 0usize, 0usize <= y < 4usize; } ); - let ret_ll = LL { - first: PPtr::from_usize(res & !3), - data: Ghost(ll.data@), - perms: Tracked(ll.perms.get()), - }; - proof { - assert forall|i: nat| - ret_ll.valid_node( - i, - #[trigger] - ret_ll.next_ptr(i), - ) by { - assert(ll.valid_node(i, ll.next_ptr(i))); - } + let ret_ll = LL { + first: PPtr::from_usize(res & !3), + data: Ghost(ll.data@), + perms: Tracked(ll.perms.get()), + }; + proof { + assert forall|i: nat| ret_ll.valid_node(i, #[trigger] ret_ll.next_ptr(i)) by { + assert(ll.valid_node(i, ll.next_ptr(i))); } - ret_ll } + ret_ll } +} - #[inline(always)] - pub fn masked_ptr_delay_get_is_use_delayed( - v: usize, - Ghost(expected_delay): Ghost, - Ghost(expected_ptr): Ghost>, - ) -> (b: bool) - requires - v as int == expected_ptr.id() + expected_delay.to_int(), - expected_ptr.id() % 4 == 0, - ensures - b <==> (expected_delay == DelayState::UseDelayedFree), - { - v % 4 == 0 - } +#[inline(always)] +pub fn masked_ptr_delay_get_is_use_delayed( + v: usize, + Ghost(expected_delay): Ghost, + Ghost(expected_ptr): Ghost>, +) -> (b: bool) + requires + v as int == expected_ptr.id() + expected_delay.to_int(), + expected_ptr.id() % 4 == 0, + ensures + b <==> (expected_delay == DelayState::UseDelayedFree), +{ + v % 4 == 0 +} - #[inline(always)] - pub fn masked_ptr_delay_get_delay( - v: usize, - Ghost(expected_delay): Ghost, - Ghost(expected_ptr): Ghost>, - ) -> (d: usize) - requires - v as int == expected_ptr.id() + expected_delay.to_int(), - expected_ptr.id() % 4 == 0, - ensures - d == expected_delay.to_int(), - { - v % 4 - } +#[inline(always)] +pub fn masked_ptr_delay_get_delay( + v: usize, + Ghost(expected_delay): Ghost, + Ghost(expected_ptr): Ghost>, +) -> (d: usize) + requires + v as int == expected_ptr.id() + expected_delay.to_int(), + expected_ptr.id() % 4 == 0, + ensures + d == expected_delay.to_int(), +{ + v % 4 +} - #[inline(always)] - pub fn 
masked_ptr_delay_get_ptr( - v: usize, - Ghost(expected_delay): Ghost, - Ghost(expected_ptr): Ghost>, - ) -> (ptr: PPtr) - requires - v as int == expected_ptr.id() + expected_delay.to_int(), - expected_ptr.id() % 4 == 0, - ensures - ptr.id() == expected_ptr.id(), - { - proof { - assert((v & !3) == sub(v, (v % 4))) by (bit_vector); - } - PPtr::from_usize(v & !3) +#[inline(always)] +pub fn masked_ptr_delay_get_ptr( + v: usize, + Ghost(expected_delay): Ghost, + Ghost(expected_ptr): Ghost>, +) -> (ptr: PPtr) + requires + v as int == expected_ptr.id() + expected_delay.to_int(), + expected_ptr.id() % 4 == 0, + ensures + ptr.id() == expected_ptr.id(), +{ + proof { + assert((v & !3) == sub(v, (v % 4))) by (bit_vector); } + PPtr::from_usize(v & !3) +} - #[inline(always)] - pub fn masked_ptr_delay_set_ptr( - v: usize, - new_ptr: PPtr, - Ghost(expected_delay): Ghost, - Ghost(expected_ptr): Ghost>, - ) -> (v2: usize) - requires - v as int == expected_ptr.id() + expected_delay.to_int(), - expected_ptr.id() % 4 == 0, - new_ptr.id() % 4 == 0, - ensures - v2 as int == new_ptr.id() + expected_delay.to_int(), - { - proof { - assert((v & 3) == (v % 4)) by (bit_vector); - let u = new_ptr.id() as usize; - assert(u % 4 == 0usize ==> ((v & 3) | u) == add(v & 3, u)) by (bit_vector); - } - (v & 3) | new_ptr.to_usize() +#[inline(always)] +pub fn masked_ptr_delay_set_ptr( + v: usize, + new_ptr: PPtr, + Ghost(expected_delay): Ghost, + Ghost(expected_ptr): Ghost>, +) -> (v2: usize) + requires + v as int == expected_ptr.id() + expected_delay.to_int(), + expected_ptr.id() % 4 == 0, + new_ptr.id() % 4 == 0, + ensures + v2 as int == new_ptr.id() + expected_delay.to_int(), +{ + proof { + assert((v & 3) == (v % 4)) by (bit_vector); + let u = new_ptr.id() as usize; + assert(u % 4 == 0usize ==> ((v & 3) | u) == add(v & 3, u)) by (bit_vector); } + (v & 3) | new_ptr.to_usize() +} - #[inline(always)] - pub fn masked_ptr_delay_set_freeing( - v: usize, - Ghost(expected_delay): Ghost, - Ghost(expected_ptr): Ghost>, - ) -> (v2: usize) - requires - v as int == expected_ptr.id() + expected_delay.to_int(), - expected_ptr.id() % 4 == 0, - ensures - v2 <==> expected_ptr.id() + DelayState::Freeing.to_int(), - { - proof { - assert(((v & !3) | 1) == add(sub(v, (v % 4)), 1)) by (bit_vector); - } - (v & !3) | 1 +#[inline(always)] +pub fn masked_ptr_delay_set_freeing( + v: usize, + Ghost(expected_delay): Ghost, + Ghost(expected_ptr): Ghost>, +) -> (v2: usize) + requires + v as int == expected_ptr.id() + expected_delay.to_int(), + expected_ptr.id() % 4 == 0, + ensures + v2 <==> expected_ptr.id() + DelayState::Freeing.to_int(), +{ + proof { + assert(((v & !3) | 1) == add(sub(v, (v % 4)), 1)) by (bit_vector); } + (v & !3) | 1 +} - #[inline(always)] - pub fn masked_ptr_delay_set_delay( - v: usize, - new_delay: usize, - Ghost(expected_delay): Ghost, - Ghost(expected_ptr): Ghost>, - ) -> (v2: usize) - requires - v as int == expected_ptr.id() + expected_delay.to_int(), - expected_ptr.id() % 4 == 0, - new_delay <= 3, - ensures - v2 <==> expected_ptr.id() + new_delay, - { - proof { - assert(((v & !3) | new_delay) == add(sub(v, (v % 4)), new_delay)) by (bit_vector) - requires - new_delay <= 3usize, - ; - } - (v & !3) | new_delay +#[inline(always)] +pub fn masked_ptr_delay_set_delay( + v: usize, + new_delay: usize, + Ghost(expected_delay): Ghost, + Ghost(expected_ptr): Ghost>, +) -> (v2: usize) + requires + v as int == expected_ptr.id() + expected_delay.to_int(), + expected_ptr.id() % 4 == 0, + new_delay <= 3, + ensures + v2 <==> expected_ptr.id() + 
new_delay, +{ + proof { + assert(((v & !3) | new_delay) == add(sub(v, (v % 4)), new_delay)) by (bit_vector) + requires + new_delay <= 3usize, + ; } + (v & !3) | new_delay +} - /* +/* #[inline(always)] fn free_delayed_block(ll: &mut LL, Tracked(local): Tracked<&mut Local>) -> (b: bool) requires old(local).wf(), old(ll).wf(), old(ll).len() != 0, @@ -16614,12 +16534,12 @@ mod linked_list { } */ - #[inline(always)] - fn atomic_yield() { - std::thread::yield_now(); - } +#[inline(always)] +fn atomic_yield() { + std::thread::yield_now(); +} - } // verus! +} // verus! } mod bitmap { @@ -16634,7 +16554,7 @@ mod bitmap { verus! { - /* +/* type G = crate::os_mem::MemChunk; type K = int; @@ -16936,7 +16856,7 @@ mod bitmap { */ - } // verus! +} // verus! } mod commit_mask { @@ -16948,839 +16868,844 @@ mod commit_mask { verus! { - proof fn lemma_map_distribute(s1: Set, s2: Set, f: FnSpec(S) -> T) - ensures - s1.union(s2).map(f) == s1.map(f).union(s2.map(f)), - { - assert forall|x: T| #![auto] s1.map(f).union(s2.map(f)).contains(x) implies s1.union(s2).map( - f, - ).contains(x) by { - if s1.map(f).contains(x) { - assert(s1.union(s2).contains(choose|y: S| s1.contains(y) && f(y) == x)); - } else { - assert(s1.union(s2).contains(choose|y: S| s2.contains(y) && f(y) == x)); - } +proof fn lemma_map_distribute(s1: Set, s2: Set, f: FnSpec(S) -> T) + ensures + s1.union(s2).map(f) == s1.map(f).union(s2.map(f)), +{ + assert forall|x: T| #![auto] s1.map(f).union(s2.map(f)).contains(x) implies s1.union(s2).map( + f, + ).contains(x) by { + if s1.map(f).contains(x) { + assert(s1.union(s2).contains(choose|y: S| s1.contains(y) && f(y) == x)); + } else { + assert(s1.union(s2).contains(choose|y: S| s2.contains(y) && f(y) == x)); } - assert(s1.union(s2).map(f) =~= s1.map(f).union(s2.map(f))); } + assert(s1.union(s2).map(f) =~= s1.map(f).union(s2.map(f))); +} - proof fn lemma_map_distribute_auto() - ensures - forall|s1: Set, s2: Set, f: FnSpec(S) -> T| - s1.union(s2).map(f) == #[trigger] - s1.map(f).union(s2.map(f)), - { - assert forall|s1: Set, s2: Set, f: FnSpec(S) -> T| - s1.union(s2).map(f) == #[trigger] - s1.map(f).union(s2.map(f)) by { - lemma_map_distribute(s1, s2, f); - } +proof fn lemma_map_distribute_auto() + ensures + forall|s1: Set, s2: Set, f: FnSpec(S) -> T| + s1.union(s2).map(f) == #[trigger] s1.map(f).union(s2.map(f)), +{ + assert forall|s1: Set, s2: Set, f: FnSpec(S) -> T| + s1.union(s2).map(f) == #[trigger] s1.map(f).union(s2.map(f)) by { + lemma_map_distribute(s1, s2, f); } +} - // used for triggering - spec fn mod64(x: usize) -> usize { - x % 64 - } +// used for triggering +spec fn mod64(x: usize) -> usize { + x % 64 +} - spec fn div64(x: usize) -> usize { - x / 64 - } +spec fn div64(x: usize) -> usize { + x / 64 +} - #[verifier::opaque] - spec fn is_bit_set(a: usize, b: usize) -> bool { - a & (1 << b) == (1 << b) - } +#[verifier::opaque] +spec fn is_bit_set(a: usize, b: usize) -> bool { + a & (1 << b) == (1 << b) +} - #[allow(unused_macros)] +#[allow(unused_macros)] macro_rules! 
is_bit_set { ($a:expr, $b:expr) => { $a & (1u64 << $b) == (1u64 << $b) } } - proof fn lemma_bitmask_to_is_bit_set(n: usize, o: usize) - requires - n < 64, - o <= 64 - n, - ensures - ({ - let m = sub(1 << n, 1) << o; - &&& forall|j: usize| - j < o ==> !is_bit_set(m, j) - &&& forall|j: usize| - o <= j < o + n ==> is_bit_set(m, j) - &&& forall|j: usize| o + n <= j < 64 ==> !is_bit_set(m, j) - }), - { - let m = (sub(1 << n, 1) << o) as usize; - assert forall|j: usize| - { - &&& (j < o ==> !is_bit_set(m, j)) - &&& (o <= j < o + n ==> is_bit_set(m, j)) - &&& (o + n <= j < 64 ==> !is_bit_set(m, j)) - } by { - let j = j as u64; - let m = m as u64; - let o = o as u64; - let n = n as u64; - reveal(is_bit_set); - if j < 64 { - assert(j < o ==> !is_bit_set!(m, j)) by (bit_vector) - requires - j < 64, - m == (sub(1 << n, 1) << o) as u64, - ; - assert(o <= j < add(o, n) ==> is_bit_set!(m, j)) by (bit_vector) - requires - j < 64, - m == (sub(1 << n, 1) << o) as u64, - ; - assert(add(o, n) <= j < 64 ==> !is_bit_set!(m, j)) by (bit_vector) - requires - n < 64, - j < 64, - m == (sub(1 << n, 1) << o) as u64, - ; - } else { - } +proof fn lemma_bitmask_to_is_bit_set(n: usize, o: usize) + requires + n < 64, + o <= 64 - n, + ensures + ({ + let m = sub(1 << n, 1) << o; + &&& forall|j: usize| j < o ==> !is_bit_set(m, j) + &&& forall|j: usize| o <= j < o + n ==> is_bit_set(m, j) + &&& forall|j: usize| o + n <= j < 64 ==> !is_bit_set(m, j) + }), +{ + let m = (sub(1 << n, 1) << o) as usize; + assert forall|j: usize| + { + &&& (j < o ==> !is_bit_set(m, j)) + &&& (o <= j < o + n ==> is_bit_set(m, j)) + &&& (o + n <= j < 64 ==> !is_bit_set(m, j)) + } by { + let j = j as u64; + let m = m as u64; + let o = o as u64; + let n = n as u64; + reveal(is_bit_set); + if j < 64 { + assert(j < o ==> !is_bit_set!(m, j)) by (bit_vector) + requires + j < 64, + m == (sub(1 << n, 1) << o) as u64, + ; + assert(o <= j < add(o, n) ==> is_bit_set!(m, j)) by (bit_vector) + requires + j < 64, + m == (sub(1 << n, 1) << o) as u64, + ; + assert(add(o, n) <= j < 64 ==> !is_bit_set!(m, j)) by (bit_vector) + requires + n < 64, + j < 64, + m == (sub(1 << n, 1) << o) as u64, + ; + } else { } } +} - proof fn lemma_obtain_bit_index_3_aux(a: u64, b: u64, hi: u64) -> (i: u64) +proof fn lemma_obtain_bit_index_3_aux(a: u64, b: u64, hi: u64) -> (i: u64) + requires + a & b != b, + hi <= 64, + a >> hi == 0, + b >> hi == 0, + ensures + i < hi, + !is_bit_set!(a, i), + is_bit_set!(b, i), + decreases hi, +{ + assert(hi != 0) by (bit_vector) requires a & b != b, hi <= 64, a >> hi == 0, b >> hi == 0, - ensures - i < hi, - !is_bit_set!(a, i), - is_bit_set!(b, i), - decreases hi, - { - assert(hi != 0) by (bit_vector) + ; + assert(1u64 << 0 == 1) by (bit_vector); + if a & 1 != 1 && b & 1 == 1 { + return 0; + } else { + assert((a >> 1) & (b >> 1) != (b >> 1) && (a >> 1) >> sub(hi, 1) == 0 && (b >> 1) >> sub( + hi, + 1, + ) == 0) by (bit_vector) requires + !(a & 1 != 1 && b & 1 == 1), a & b != b, - hi <= 64, a >> hi == 0, b >> hi == 0, ; - assert(1u64 << 0 == 1) by (bit_vector); - if a & 1 != 1 && b & 1 == 1 { - return 0; - } else { - assert((a >> 1) & (b >> 1) != (b >> 1) && (a >> 1) >> sub(hi, 1) == 0 && (b >> 1) >> sub( - hi, - 1, - ) == 0) by (bit_vector) - requires - !(a & 1 != 1 && b & 1 == 1), - a & b != b, - a >> hi == 0, - b >> hi == 0, - ; - let j = lemma_obtain_bit_index_3_aux(a >> 1, b >> 1, sub(hi, 1)); - assert(!is_bit_set!(a, add(j, 1)) && is_bit_set!(b, add(j, 1))) by (bit_vector) - requires - !is_bit_set!(a >> 1u64, j), - is_bit_set!(b >> 1u64, j), - j 
< 64, - ; - add(j, 1) - } + let j = lemma_obtain_bit_index_3_aux(a >> 1, b >> 1, sub(hi, 1)); + assert(!is_bit_set!(a, add(j, 1)) && is_bit_set!(b, add(j, 1))) by (bit_vector) + requires + !is_bit_set!(a >> 1u64, j), + is_bit_set!(b >> 1u64, j), + j < 64, + ; + add(j, 1) } +} - proof fn lemma_obtain_bit_index_3(a: usize, b: usize) -> (i: usize) - requires - a & b != b, - ensures - i < 64, - !is_bit_set(a, i), - is_bit_set(b, i), - { - reveal(is_bit_set); - assert(a as u64 >> 64 == 0) by (bit_vector); - assert(b as u64 >> 64 == 0) by (bit_vector); - lemma_obtain_bit_index_3_aux(a as u64, b as u64, 64) as usize - } +proof fn lemma_obtain_bit_index_3(a: usize, b: usize) -> (i: usize) + requires + a & b != b, + ensures + i < 64, + !is_bit_set(a, i), + is_bit_set(b, i), +{ + reveal(is_bit_set); + assert(a as u64 >> 64 == 0) by (bit_vector); + assert(b as u64 >> 64 == 0) by (bit_vector); + lemma_obtain_bit_index_3_aux(a as u64, b as u64, 64) as usize +} - proof fn lemma_obtain_bit_index_2(a: usize) -> (b: usize) +proof fn lemma_obtain_bit_index_2(a: usize) -> (b: usize) + requires + a != !0usize, + ensures + b < 64, + !is_bit_set(a, b), +{ + reveal(is_bit_set); + assert(!a != 0usize) by (bit_vector) requires a != !0usize, - ensures - b < 64, - !is_bit_set(a, b), - { - reveal(is_bit_set); - assert(!a != 0usize) by (bit_vector) - requires - a != !0usize, - ; - let b = lemma_obtain_bit_index_1(!a) as u64; - let a = a as u64; - assert(!is_bit_set!(a, b)) by (bit_vector) - requires - b < 64 && !a & (1 << b) == (1 << b), - ; - b as usize - } + ; + let b = lemma_obtain_bit_index_1(!a) as u64; + let a = a as u64; + assert(!is_bit_set!(a, b)) by (bit_vector) + requires + b < 64 && !a & (1 << b) == (1 << b), + ; + b as usize +} - proof fn lemma_obtain_bit_index_1_aux(a: u64, hi: u64) -> (i: u64) +proof fn lemma_obtain_bit_index_1_aux(a: u64, hi: u64) -> (i: u64) + requires + a != 0, + hi <= 64, + a >> hi == 0, + ensures + i < hi, + is_bit_set!(a, i), + decreases hi, +{ + assert(hi != 0) by (bit_vector) requires a != 0, hi <= 64, a >> hi == 0, - ensures - i < hi, - is_bit_set!(a, i), - decreases hi, - { - assert(hi != 0) by (bit_vector) + ; + assert(1u64 << 0 == 1) by (bit_vector); + if a & 1 == 1 { + return 0; + } else { + assert((a >> 1) != 0 && (a >> 1) >> sub(hi, 1) == 0) by (bit_vector) requires + a & 1 != 1, a != 0, - hi <= 64, a >> hi == 0, ; - assert(1u64 << 0 == 1) by (bit_vector); - if a & 1 == 1 { - return 0; - } else { - assert((a >> 1) != 0 && (a >> 1) >> sub(hi, 1) == 0) by (bit_vector) - requires - a & 1 != 1, - a != 0, - a >> hi == 0, - ; - let j = lemma_obtain_bit_index_1_aux(a >> 1, sub(hi, 1)); - assert(is_bit_set!(a, add(j, 1))) by (bit_vector) - requires - is_bit_set!(a >> 1u64, j) && j < 64, - ; - add(j, 1) - } + let j = lemma_obtain_bit_index_1_aux(a >> 1, sub(hi, 1)); + assert(is_bit_set!(a, add(j, 1))) by (bit_vector) + requires + is_bit_set!(a >> 1u64, j) && j < 64, + ; + add(j, 1) } +} - proof fn lemma_obtain_bit_index_1(a: usize) -> (b: usize) - requires - a != 0, - ensures - b < 64, - is_bit_set(a, b), - { - reveal(is_bit_set); - assert(a as u64 >> 64 == 0) by (bit_vector); - lemma_obtain_bit_index_1_aux(a as u64, 64) as usize - } +proof fn lemma_obtain_bit_index_1(a: usize) -> (b: usize) + requires + a != 0, + ensures + b < 64, + is_bit_set(a, b), +{ + reveal(is_bit_set); + assert(a as u64 >> 64 == 0) by (bit_vector); + lemma_obtain_bit_index_1_aux(a as u64, 64) as usize +} - // I don't think there's a good reason that some of these would need `j < 64` and others don't but - 
// for some the bitvector assertions without it succeeds and for others it doesn't. - proof fn lemma_is_bit_set() - ensures - forall|j: usize| - j < 64 ==> !(#[trigger] - is_bit_set(0, j)), - forall|j: usize| is_bit_set(!0usize, j), - forall|a: usize, b: usize, j: usize| - #[trigger] - is_bit_set(a | b, j) <==> is_bit_set(a, j) || is_bit_set(b, j), - forall|a: usize, b: usize, j: usize| - j < 64 ==> (#[trigger] - is_bit_set(a & !b, j) <==> is_bit_set(a, j) && !is_bit_set(b, j)), - forall|a: usize, b: usize, j: usize| - #[trigger] - is_bit_set(a & b, j) <==> is_bit_set(a, j) && is_bit_set(b, j), - // Implied by previous properties, possibly too aggressive trigger - forall|a: usize, b: usize, j: usize| - j < 64 ==> (a & b == 0) ==> !(#[trigger] - is_bit_set(a, j) && #[trigger] - is_bit_set(b, j)), - { - reveal(is_bit_set); - assert(forall|j: u64| #![auto] j < 64 ==> !is_bit_set!(0, j)) by (bit_vector); - assert(forall|j: u64| #![auto] is_bit_set!(!0, j)) by (bit_vector); - assert(forall|a: u64, b: u64, j: u64| - (is_bit_set!(a | b, j) <==> is_bit_set!(a, j) || is_bit_set!(b, j))) by (bit_vector); - assert(forall|a: u64, b: u64, j: u64| - j < 64 ==> (is_bit_set!(a & !b, j) <==> is_bit_set!(a, j) && !is_bit_set!(b, j))) - by (bit_vector); - assert(forall|a: u64, b: u64, j: u64| - (is_bit_set!(a & b, j) <==> is_bit_set!(a, j) && is_bit_set!(b, j))) by (bit_vector); - } +// I don't think there's a good reason that some of these would need `j < 64` and others don't but +// for some the bitvector assertions without it succeeds and for others it doesn't. +proof fn lemma_is_bit_set() + ensures + forall|j: usize| j < 64 ==> !(#[trigger] is_bit_set(0, j)), + forall|j: usize| is_bit_set(!0usize, j), + forall|a: usize, b: usize, j: usize| #[trigger] + is_bit_set(a | b, j) <==> is_bit_set(a, j) || is_bit_set(b, j), + forall|a: usize, b: usize, j: usize| + j < 64 ==> (#[trigger] is_bit_set(a & !b, j) <==> is_bit_set(a, j) && !is_bit_set( + b, + j, + )), + forall|a: usize, b: usize, j: usize| #[trigger] + is_bit_set(a & b, j) <==> is_bit_set(a, j) && is_bit_set(b, j), + // Implied by previous properties, possibly too aggressive trigger + forall|a: usize, b: usize, j: usize| + j < 64 ==> (a & b == 0) ==> !(#[trigger] is_bit_set(a, j) && #[trigger] is_bit_set( + b, + j, + )), +{ + reveal(is_bit_set); + assert(forall|j: u64| #![auto] j < 64 ==> !is_bit_set!(0, j)) by (bit_vector); + assert(forall|j: u64| #![auto] is_bit_set!(!0, j)) by (bit_vector); + assert(forall|a: u64, b: u64, j: u64| + (is_bit_set!(a | b, j) <==> is_bit_set!(a, j) || is_bit_set!(b, j))) by (bit_vector); + assert(forall|a: u64, b: u64, j: u64| + j < 64 ==> (is_bit_set!(a & !b, j) <==> is_bit_set!(a, j) && !is_bit_set!(b, j))) + by (bit_vector); + assert(forall|a: u64, b: u64, j: u64| + (is_bit_set!(a & b, j) <==> is_bit_set!(a, j) && is_bit_set!(b, j))) by (bit_vector); +} - pub struct CommitMask { - mask: [usize; 8], // size = COMMIT_MASK_FIELD_COUNT - } +pub struct CommitMask { + mask: [usize; 8], // size = COMMIT_MASK_FIELD_COUNT +} - impl CommitMask { - pub closed spec fn view(&self) -> Set { - Set::new(|t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && is_bit_set(self.mask[t.0], t.1)).map( - |t: (int, usize)| t.0 * 64 + t.1, - ) - } +impl CommitMask { + pub closed spec fn view(&self) -> Set { + Set::new(|t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && is_bit_set(self.mask[t.0], t.1)).map( + |t: (int, usize)| t.0 * 64 + t.1, + ) + } - proof fn lemma_view(&self) - ensures// forall|i: int| self@.contains(i) ==> i < 512, - // TODO: this isn't 
currently used but probably will need it (-> check later) + proof fn lemma_view(&self) + ensures // forall|i: int| self@.contains(i) ==> i < 512, + // TODO: this isn't currently used but probably will need it (-> check later) - (forall|i: int| - self@.contains(i) ==> { - let a = i / usize::BITS as int; - let b = (i % usize::BITS as int) as usize; - &&& a * 64 + b == i - &&& is_bit_set(self.mask[a], b) - }), - forall|a: int, b: usize| - 0 <= a < 8 && b < 64 && is_bit_set(self.mask[a], b) ==> #[trigger] - self@.contains(a * 64 + b), - { - assert forall|a: int, b: usize| - 0 <= a < 8 && b < 64 && is_bit_set(self.mask[a], b) implies self@.contains( - a * 64 + b, - ) by { - assert(Set::new( - |t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && is_bit_set(self.mask[t.0], t.1), - ).contains((a, b))) by (nonlinear_arith) - requires - 0 <= a < 8 && b < 64 && is_bit_set(self.mask[a], b), - ; - } + (forall|i: int| + self@.contains(i) ==> { + let a = i / usize::BITS as int; + let b = (i % usize::BITS as int) as usize; + &&& a * 64 + b == i + &&& is_bit_set(self.mask[a], b) + }), + forall|a: int, b: usize| + 0 <= a < 8 && b < 64 && is_bit_set(self.mask[a], b) ==> #[trigger] self@.contains( + a * 64 + b, + ), + { + assert forall|a: int, b: usize| + 0 <= a < 8 && b < 64 && is_bit_set(self.mask[a], b) implies self@.contains( + a * 64 + b, + ) by { + assert(Set::new( + |t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && is_bit_set(self.mask[t.0], t.1), + ).contains((a, b))) by (nonlinear_arith) + requires + 0 <= a < 8 && b < 64 && is_bit_set(self.mask[a], b), + ; } + } - #[verifier::opaque] - pub open spec fn bytes(&self, segment_id: SegmentId) -> Set { - Set::::new( - |addr: int| self@.contains((addr - segment_start(segment_id)) / COMMIT_SIZE as int), - ) - } + #[verifier::opaque] + pub open spec fn bytes(&self, segment_id: SegmentId) -> Set { + Set::::new( + |addr: int| self@.contains((addr - segment_start(segment_id)) / COMMIT_SIZE as int), + ) + } - pub fn empty() -> (cm: CommitMask) - ensures - cm@ == Set::::empty(), - { - let res = CommitMask { mask: [0, 0, 0, 0, 0, 0, 0, 0] }; - proof { - lemma_is_bit_set(); - res.lemma_view(); - assert(res@ =~= Set::::empty()); - } - res + pub fn empty() -> (cm: CommitMask) + ensures + cm@ == Set::::empty(), + { + let res = CommitMask { mask: [0, 0, 0, 0, 0, 0, 0, 0] }; + proof { + lemma_is_bit_set(); + res.lemma_view(); + assert(res@ =~= Set::::empty()); } + res + } - pub fn all_set(&self, other: &CommitMask) -> (res: bool) - ensures - res == other@.subset_of(self@), + pub fn all_set(&self, other: &CommitMask) -> (res: bool) + ensures + res == other@.subset_of(self@), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| + #![auto] + 0 <= j < i ==> self.mask[j] & other.mask[j] == other.mask[j], { - let mut i = 0; - while i < 8 - invariant - forall|j: int| - #![auto] - 0 <= j < i ==> self.mask[j] & other.mask[j] == other.mask[j], - { - if self.mask[i] & other.mask[i] != other.mask[i] { - proof { - self.lemma_view(); - other.lemma_view(); - lemma_is_bit_set(); - let j = lemma_obtain_bit_index_3(self.mask[i as int], other.mask[i as int]); - assert(!self@.contains(i * 64 + j) && other@.contains(i * 64 + j)); - } - return false; + if self.mask[i] & other.mask[i] != other.mask[i] { + proof { + self.lemma_view(); + other.lemma_view(); + lemma_is_bit_set(); + let j = lemma_obtain_bit_index_3(self.mask[i as int], other.mask[i as int]); + assert(!self@.contains(i * 64 + j) && other@.contains(i * 64 + j)); } - i = i + 1; - } - proof { - lemma_is_bit_set(); - self.lemma_view(); 
- other.lemma_view(); + return false; } - return true; + i = i + 1; + } + proof { + lemma_is_bit_set(); + self.lemma_view(); + other.lemma_view(); } + return true; + } - pub fn any_set(&self, other: &CommitMask) -> (res: bool) - ensures - res == !self@.disjoint(other@), + pub fn any_set(&self, other: &CommitMask) -> (res: bool) + ensures + res == !self@.disjoint(other@), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| #![auto] 0 <= j < i ==> self.mask[j] & other.mask[j] == 0, { - let mut i = 0; - while i < 8 - invariant - forall|j: int| #![auto] 0 <= j < i ==> self.mask[j] & other.mask[j] == 0, - { - if self.mask[i] & other.mask[i] != 0 { - proof { - self.lemma_view(); - other.lemma_view(); - lemma_is_bit_set(); - let j = lemma_obtain_bit_index_1(self.mask[i as int] & other.mask[i as int]); - assert(self@.contains(i * 64 + j) && other@.contains(i * 64 + j)); - } - return true; + if self.mask[i] & other.mask[i] != 0 { + proof { + self.lemma_view(); + other.lemma_view(); + lemma_is_bit_set(); + let j = lemma_obtain_bit_index_1(self.mask[i as int] & other.mask[i as int]); + assert(self@.contains(i * 64 + j) && other@.contains(i * 64 + j)); } - i += 1; - } - proof { - lemma_is_bit_set(); - self.lemma_view(); - other.lemma_view(); - assert(self@.disjoint(other@)); + return true; } - return false; + i += 1; + } + proof { + lemma_is_bit_set(); + self.lemma_view(); + other.lemma_view(); + assert(self@.disjoint(other@)); } + return false; + } - pub fn create_intersect(&self, other: &CommitMask, res: &mut CommitMask) - ensures - res@ == self@.intersect(other@), + pub fn create_intersect(&self, other: &CommitMask, res: &mut CommitMask) + ensures + res@ == self@.intersect(other@), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| + 0 <= j < i ==> #[trigger] res.mask[j] == self.mask[j] & other.mask[j], { - let mut i = 0; - while i < 8 - invariant - forall|j: int| - 0 <= j < i ==> #[trigger] - res.mask[j] == self.mask[j] & other.mask[j], - { - res.mask.set(i, self.mask[i] & other.mask[i]); - i += 1; - } - proof { - self.lemma_view(); - other.lemma_view(); - res.lemma_view(); - lemma_is_bit_set(); - assert(res@ =~= self@.intersect(other@)); - } + res.mask.set(i, self.mask[i] & other.mask[i]); + i += 1; + } + proof { + self.lemma_view(); + other.lemma_view(); + res.lemma_view(); + lemma_is_bit_set(); + assert(res@ =~= self@.intersect(other@)); } + } - pub fn clear(&mut self, other: &CommitMask) - ensures - self@ == old(self)@.difference(other@), + pub fn clear(&mut self, other: &CommitMask) + ensures + self@ == old(self)@.difference(other@), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| + 0 <= j < i ==> #[trigger] self.mask[j] == old(self).mask[j] & !other.mask[j], + forall|j: int| i <= j < 8 ==> #[trigger] self.mask[j] == old(self).mask[j], { - let mut i = 0; - while i < 8 - invariant - forall|j: int| - 0 <= j < i ==> #[trigger] - self.mask[j] == old(self).mask[j] & !other.mask[j], - forall|j: int| - i <= j < 8 ==> #[trigger] - self.mask[j] == old(self).mask[j], - { - let m = self.mask[i]; - self.mask.set(i, m & !other.mask[i]); - i += 1; - } - proof { - old(self).lemma_view(); - other.lemma_view(); - self.lemma_view(); - lemma_is_bit_set(); - assert(self@ =~= old(self)@.difference(other@)); - } + let m = self.mask[i]; + self.mask.set(i, m & !other.mask[i]); + i += 1; + } + proof { + old(self).lemma_view(); + other.lemma_view(); + self.lemma_view(); + lemma_is_bit_set(); + assert(self@ =~= old(self)@.difference(other@)); } + } - pub fn set(&mut self, other: 
&CommitMask) - ensures - self@ == old(self)@.union(other@), + pub fn set(&mut self, other: &CommitMask) + ensures + self@ == old(self)@.union(other@), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| + 0 <= j < i ==> #[trigger] self.mask[j] == old(self).mask[j] | other.mask[j], + forall|j: int| i <= j < 8 ==> #[trigger] self.mask[j] == old(self).mask[j], { - let mut i = 0; - while i < 8 - invariant - forall|j: int| - 0 <= j < i ==> #[trigger] - self.mask[j] == old(self).mask[j] | other.mask[j], - forall|j: int| - i <= j < 8 ==> #[trigger] - self.mask[j] == old(self).mask[j], - { - let m = self.mask[i]; - self.mask.set(i, m | other.mask[i]); - i += 1; - } - proof { - old(self).lemma_view(); - other.lemma_view(); - self.lemma_view(); - lemma_is_bit_set(); - assert(self@ =~= old(self)@.union(other@)); - } + let m = self.mask[i]; + self.mask.set(i, m | other.mask[i]); + i += 1; + } + proof { + old(self).lemma_view(); + other.lemma_view(); + self.lemma_view(); + lemma_is_bit_set(); + assert(self@ =~= old(self)@.union(other@)); } + } - proof fn lemma_change_one_entry(&self, other: &Self, i: int) - requires - 0 <= i < 8, - self.mask[i] == 0, - forall|j: int| 0 <= j < i ==> other.mask[j] == self.mask[j], - forall|j: int| i < j < 8 ==> other.mask[j] == self.mask[j], - ensures - other@ == self@.union( - Set::new(|b: usize| b < 64 && is_bit_set(other.mask[i], b)).map( - |b: usize| 64 * i + b, - ), + proof fn lemma_change_one_entry(&self, other: &Self, i: int) + requires + 0 <= i < 8, + self.mask[i] == 0, + forall|j: int| 0 <= j < i ==> other.mask[j] == self.mask[j], + forall|j: int| i < j < 8 ==> other.mask[j] == self.mask[j], + ensures + other@ == self@.union( + Set::new(|b: usize| b < 64 && is_bit_set(other.mask[i], b)).map( + |b: usize| 64 * i + b, ), - { - let s_un = Set::new(|b: usize| b < 64 && is_bit_set(other.mask[i], b)); - let f_un = |b: usize| 64 * i + b; - let f = |t: (int, usize)| t.0 * 64 + t.1; - let s_full = Set::new( - |t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && is_bit_set(self.mask[t.0], t.1), - ); - let s_full_o = Set::new( - |t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && is_bit_set(other.mask[t.0], t.1), - ); - let s1 = Set::new( - |t: (int, usize)| 0 <= t.0 < i && t.1 < 64 && is_bit_set(self.mask[t.0], t.1), - ); - let s2 = Set::new(|t: (int, usize)| t.0 == i && t.1 < 64 && is_bit_set(self.mask[i], t.1)); - let s2o = Set::new( - |t: (int, usize)| t.0 == i && t.1 < 64 && is_bit_set(other.mask[i], t.1), - ); - let s3 = Set::new( - |t: (int, usize)| i < t.0 < 8 && t.1 < 64 && is_bit_set(self.mask[t.0], t.1), - ); - assert(s_full =~= s1.union(s2).union(s3)); - assert(s2 =~= Set::empty()) by { - lemma_is_bit_set(); - } - lemma_map_distribute_auto::<(int, usize), int>(); - assert(s_full.map(f) =~= s1.map(f).union(s2.map(f)).union(s3.map(f))); - assert(s_full_o =~= s_full.union(s2o)); - assert forall|x| #![auto] s_un.map(f_un).contains(x) implies s2o.map(f).contains(x) by { - assert(s2o.contains((i, choose|y| s_un.contains(y) && f_un(y) == x))); - }; - assert forall|x| #![auto] s2o.map(f).contains(x) implies s_un.map(f_un).contains(x) by { - let y = choose|y| s2o.contains(y) && f(y) == x; - assert(Set::new(|b: usize| b < 64 && is_bit_set(other.mask[i], b)).contains(y.1)); - }; - assert(s_un.map(f_un) =~= s2o.map(f)); - } + ), + { + let s_un = Set::new(|b: usize| b < 64 && is_bit_set(other.mask[i], b)); + let f_un = |b: usize| 64 * i + b; + let f = |t: (int, usize)| t.0 * 64 + t.1; + let s_full = Set::new( + |t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && 
is_bit_set(self.mask[t.0], t.1), + ); + let s_full_o = Set::new( + |t: (int, usize)| 0 <= t.0 < 8 && t.1 < 64 && is_bit_set(other.mask[t.0], t.1), + ); + let s1 = Set::new( + |t: (int, usize)| 0 <= t.0 < i && t.1 < 64 && is_bit_set(self.mask[t.0], t.1), + ); + let s2 = Set::new(|t: (int, usize)| t.0 == i && t.1 < 64 && is_bit_set(self.mask[i], t.1)); + let s2o = Set::new( + |t: (int, usize)| t.0 == i && t.1 < 64 && is_bit_set(other.mask[i], t.1), + ); + let s3 = Set::new( + |t: (int, usize)| i < t.0 < 8 && t.1 < 64 && is_bit_set(self.mask[t.0], t.1), + ); + assert(s_full =~= s1.union(s2).union(s3)); + assert(s2 =~= Set::empty()) by { + lemma_is_bit_set(); + } + lemma_map_distribute_auto::<(int, usize), int>(); + assert(s_full.map(f) =~= s1.map(f).union(s2.map(f)).union(s3.map(f))); + assert(s_full_o =~= s_full.union(s2o)); + assert forall|x| #![auto] s_un.map(f_un).contains(x) implies s2o.map(f).contains(x) by { + assert(s2o.contains((i, choose|y| s_un.contains(y) && f_un(y) == x))); + }; + assert forall|x| #![auto] s2o.map(f).contains(x) implies s_un.map(f_un).contains(x) by { + let y = choose|y| s2o.contains(y) && f(y) == x; + assert(Set::new(|b: usize| b < 64 && is_bit_set(other.mask[i], b)).contains(y.1)); + }; + assert(s_un.map(f_un) =~= s2o.map(f)); + } - pub fn create(&mut self, idx: usize, count: usize) - requires - idx + count <= COMMIT_MASK_BITS, - old(self)@ == Set::::empty(), - ensures - self@ == Set::new(|i: int| idx <= i < idx + count), - { - proof { - const_facts(); - lemma_is_bit_set(); - self.lemma_view(); - assert forall|i: int| 0 <= i < 8 implies self.mask[i] == 0 by { - if self.mask[i] != 0 { - let j = lemma_obtain_bit_index_1(self.mask[i]); - assert(self@.contains(i * 64 + j)); - } + pub fn create(&mut self, idx: usize, count: usize) + requires + idx + count <= COMMIT_MASK_BITS, + old(self)@ == Set::::empty(), + ensures + self@ == Set::new(|i: int| idx <= i < idx + count), + { + proof { + const_facts(); + lemma_is_bit_set(); + self.lemma_view(); + assert forall|i: int| 0 <= i < 8 implies self.mask[i] == 0 by { + if self.mask[i] != 0 { + let j = lemma_obtain_bit_index_1(self.mask[i]); + assert(self@.contains(i * 64 + j)); } } - if count == COMMIT_MASK_BITS as usize { - self.create_full(); - } else if count == 0 { - assert(self@ =~= Set::new(|i: int| idx <= i < idx + count)); - } else { - let mut i = idx / usize::BITS as usize; - let mut ofs: usize = idx % usize::BITS as usize; - let mut bitcount = count; - assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::empty()); - while bitcount > 0 - invariant - self@ == Set::new(|j: int| idx <= j < idx + (count - bitcount)), - ofs == if count == bitcount { - idx % 64 - } else { - 0 - }, - bitcount > 0 ==> 64 * i + ofs == idx + (count - bitcount), - idx + count <= 512, - forall|j: int| i <= j < 8 ==> self.mask[j] == 0, - bitcount <= count, - { - assert(i < 8) by (nonlinear_arith) + } + if count == COMMIT_MASK_BITS as usize { + self.create_full(); + } else if count == 0 { + assert(self@ =~= Set::new(|i: int| idx <= i < idx + count)); + } else { + let mut i = idx / usize::BITS as usize; + let mut ofs: usize = idx % usize::BITS as usize; + let mut bitcount = count; + assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::empty()); + while bitcount > 0 + invariant + self@ == Set::new(|j: int| idx <= j < idx + (count - bitcount)), + ofs == if count == bitcount { + idx % 64 + } else { + 0 + }, + bitcount > 0 ==> 64 * i + ofs == idx + (count - bitcount), + idx + count <= 512, + forall|j: int| i <= j < 8 
==> self.mask[j] == 0, + bitcount <= count, + { + assert(i < 8) by (nonlinear_arith) + requires + idx + (count - bitcount) < 512, + i == (idx + (count - bitcount)) / 64, + ; + let avail = usize::BITS as usize - ofs; + let c = if bitcount > avail { + avail + } else { + bitcount + }; + let mask = if c >= usize::BITS as usize { + !0usize + } else { + assert((1usize << c) > 0usize) by (bit_vector) requires - idx + (count - bitcount) < 512, - i == (idx + (count - bitcount)) / 64, + c < 64usize, ; - let avail = usize::BITS as usize - ofs; - let c = if bitcount > avail { - avail - } else { - bitcount - }; - let mask = if c >= usize::BITS as usize { - !0usize - } else { - assert((1usize << c) > 0usize) by (bit_vector) - requires - c < 64usize, - ; - ((1usize << c) - 1) << ofs - }; - let old_self = Ghost(*self); - self.mask.set(i, mask); - let oi = Ghost(i); - let obc = Ghost(bitcount); - let oofs = Ghost(ofs); - bitcount -= c; - ofs = 0; - i += 1; - proof { - assert(forall|a: u64| a << 0u64 == a) by (bit_vector); - let oi = oi@; - let obc = obc@; - let oofs = oofs@; - lemma_is_bit_set(); - old_self@.lemma_change_one_entry(self, oi as int); - assert(self@ == old_self@@.union( - Set::new(|b: usize| b < 64 && is_bit_set(self.mask[oi as int], b)).map( - |b: usize| 64 * oi + b, - ), + ((1usize << c) - 1) << ofs + }; + let old_self = Ghost(*self); + self.mask.set(i, mask); + let oi = Ghost(i); + let obc = Ghost(bitcount); + let oofs = Ghost(ofs); + bitcount -= c; + ofs = 0; + i += 1; + proof { + assert(forall|a: u64| a << 0u64 == a) by (bit_vector); + let oi = oi@; + let obc = obc@; + let oofs = oofs@; + lemma_is_bit_set(); + old_self@.lemma_change_one_entry(self, oi as int); + assert(self@ == old_self@@.union( + Set::new(|b: usize| b < 64 && is_bit_set(self.mask[oi as int], b)).map( + |b: usize| 64 * oi + b, + ), + )); + // TODO: a lot of duplicated proof structure here, should be able to + // somehow lift that structure out of the if-else + if oofs > 0 { // first iteration + assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::new( + |j: int| idx + (count - obc) <= j < idx + (count - bitcount), )); - // TODO: a lot of duplicated proof structure here, should be able to - // somehow lift that structure out of the if-else - if oofs > 0 { // first iteration - assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::new( + if obc < 64 { + assert(mask == sub(1usize << c, 1usize) << oofs); + lemma_bitmask_to_is_bit_set(c, oofs); + assert(Set::new( |j: int| idx + (count - obc) <= j < idx + (count - bitcount), - )); - if obc < 64 { - assert(mask == sub(1usize << c, 1usize) << oofs); - lemma_bitmask_to_is_bit_set(c, oofs); - assert(Set::new( - |j: int| idx + (count - obc) <= j < idx + (count - bitcount), - ) =~= Set::new( - |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), - ).map(|b: usize| 64 * oi + b)) by { - let s1 = Set::new( - |j: int| idx + (count - obc) <= j < idx + (count - bitcount), - ); - let s2 = Set::new( - |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), - ).map(|b: usize| 64 * oi + b); - assert(forall|j: usize| - idx + (count - obc) <= j < idx + (count - bitcount) - ==> #[trigger] - is_bit_set(self.mask[oi as int], mod64(j))); - assert forall|x: int| s1.contains(x) implies s2.contains(x) by { - let b = x % 64; - assert(Set::new( - |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), - ).contains((x % 64) as usize)); - } - } - assert(Set::new( - |j: int| idx + (count - obc) <= j < idx + (count - bitcount), - ) =~= Set::new( - |b: usize| b < 64 
&& is_bit_set(self.mask[oi as int], b), - ).map(|b: usize| 64 * oi + b)); - } else { - assert(mask == sub(1usize << sub(64usize, oofs), 1usize) << oofs); - lemma_bitmask_to_is_bit_set(sub(64, oofs), oofs); - assert(Set::new( - |j: int| idx + (count - obc) <= j < idx + (count - bitcount), - ) =~= Set::new( - |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), - ).map(|b: usize| 64 * oi + b)) by { - let s1 = Set::new( - |j: int| idx + (count - obc) <= j < idx + (count - bitcount), - ); - let s2 = Set::new( - |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), - ).map(|b: usize| 64 * oi + b); - assert forall|x: int| s1.contains(x) implies s2.contains(x) by { // unstable - assert(Set::new( - |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), - ).contains((x % 64) as usize)); - } - } - assert(Set::new( - |j: int| idx + (count - obc) <= j < idx + (count - bitcount), - ) =~= Set::new( - |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), - ).map(|b: usize| 64 * oi + b)); - } - } else if obc < 64 { // last iteration - assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::new( - |j: int| idx <= j < idx + (count - obc), - ).union(Set::new(|j: int| idx + (count - obc) <= j < idx + count))); - assert(mask == (1usize << obc) - 1usize); - lemma_bitmask_to_is_bit_set(obc, 0); - assert(Set::new(|j: int| idx + (count - obc) <= j < idx + count) - =~= Set::new( + ) =~= Set::new( |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), ).map(|b: usize| 64 * oi + b)) by { - let s1 = Set::new(|j: int| idx + (count - obc) <= j < idx + count); + let s1 = Set::new( + |j: int| idx + (count - obc) <= j < idx + (count - bitcount), + ); let s2 = Set::new( |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), ).map(|b: usize| 64 * oi + b); + assert(forall|j: usize| + idx + (count - obc) <= j < idx + (count - bitcount) + ==> #[trigger] is_bit_set(self.mask[oi as int], mod64(j))); assert forall|x: int| s1.contains(x) implies s2.contains(x) by { + let b = x % 64; assert(Set::new( |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), ).contains((x % 64) as usize)); } } - assert(Set::new(|j: int| idx + (count - obc) <= j < idx + count) - =~= Set::new( + assert(Set::new( + |j: int| idx + (count - obc) <= j < idx + (count - bitcount), + ) =~= Set::new( |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), ).map(|b: usize| 64 * oi + b)); } else { - assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::new( - |j: int| idx <= j < idx + (count - obc), - ).union( - Set::new(|j: int| idx + (count - obc) <= j < idx + (count - obc) + 64), - )); - assert(mask == !0usize); - let new = Set::new( - |j: int| idx + (count - obc) <= j < idx + (count - obc) + 64, - ); - assert(Set::new(|j: int| 64 * oi <= j < 64 * oi + 64) =~= Set::new( + assert(mask == sub(1usize << sub(64usize, oofs), 1usize) << oofs); + lemma_bitmask_to_is_bit_set(sub(64, oofs), oofs); + assert(Set::new( + |j: int| idx + (count - obc) <= j < idx + (count - bitcount), + ) =~= Set::new( |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), ).map(|b: usize| 64 * oi + b)) by { - let s1 = Set::new(|j: int| 64 * oi <= j < 64 * oi + 64); + let s1 = Set::new( + |j: int| idx + (count - obc) <= j < idx + (count - bitcount), + ); let s2 = Set::new( |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), ).map(|b: usize| 64 * oi + b); - assert forall|x: int| s1.contains(x) implies s2.contains(x) by { + assert forall|x: int| s1.contains(x) implies s2.contains(x) by { // unstable assert(Set::new( |b: usize| b 
< 64 && is_bit_set(self.mask[oi as int], b), ).contains((x % 64) as usize)); } } - assert(Set::new(|j: int| 64 * oi <= j < 64 * oi + 64) =~= Set::new( + assert(Set::new( + |j: int| idx + (count - obc) <= j < idx + (count - bitcount), + ) =~= Set::new( |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), ).map(|b: usize| 64 * oi + b)); } + } else if obc < 64 { // last iteration + assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::new( + |j: int| idx <= j < idx + (count - obc), + ).union(Set::new(|j: int| idx + (count - obc) <= j < idx + count))); + assert(mask == (1usize << obc) - 1usize); + lemma_bitmask_to_is_bit_set(obc, 0); + assert(Set::new(|j: int| idx + (count - obc) <= j < idx + count) + =~= Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).map(|b: usize| 64 * oi + b)) by { + let s1 = Set::new(|j: int| idx + (count - obc) <= j < idx + count); + let s2 = Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).map(|b: usize| 64 * oi + b); + assert forall|x: int| s1.contains(x) implies s2.contains(x) by { + assert(Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).contains((x % 64) as usize)); + } + } + assert(Set::new(|j: int| idx + (count - obc) <= j < idx + count) + =~= Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).map(|b: usize| 64 * oi + b)); + } else { + assert(Set::new(|j: int| idx <= j < idx + (count - bitcount)) =~= Set::new( + |j: int| idx <= j < idx + (count - obc), + ).union( + Set::new(|j: int| idx + (count - obc) <= j < idx + (count - obc) + 64), + )); + assert(mask == !0usize); + let new = Set::new( + |j: int| idx + (count - obc) <= j < idx + (count - obc) + 64, + ); + assert(Set::new(|j: int| 64 * oi <= j < 64 * oi + 64) =~= Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).map(|b: usize| 64 * oi + b)) by { + let s1 = Set::new(|j: int| 64 * oi <= j < 64 * oi + 64); + let s2 = Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).map(|b: usize| 64 * oi + b); + assert forall|x: int| s1.contains(x) implies s2.contains(x) by { + assert(Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).contains((x % 64) as usize)); + } + } + assert(Set::new(|j: int| 64 * oi <= j < 64 * oi + 64) =~= Set::new( + |b: usize| b < 64 && is_bit_set(self.mask[oi as int], b), + ).map(|b: usize| 64 * oi + b)); } - assert(self@ =~= Set::new(|j: int| idx <= j < idx + (count - bitcount))); } + assert(self@ =~= Set::new(|j: int| idx <= j < idx + (count - bitcount))); } } + } - pub fn create_empty(&mut self) - ensures - self@ == Set::::empty(), + pub fn create_empty(&mut self) + ensures + self@ == Set::::empty(), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| 0 <= j < i ==> self.mask[j] == 0, { - let mut i = 0; - while i < 8 - invariant - forall|j: int| 0 <= j < i ==> self.mask[j] == 0, - { - self.mask.set(i, 0); - i += 1; - } - proof { - lemma_is_bit_set(); - self.lemma_view(); - assert(self@ =~= Set::::empty()); - } + self.mask.set(i, 0); + i += 1; } + proof { + lemma_is_bit_set(); + self.lemma_view(); + assert(self@ =~= Set::::empty()); + } + } - pub fn create_full(&mut self) - ensures - self@ == Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS), + pub fn create_full(&mut self) + ensures + self@ == Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| 0 <= j < i ==> self.mask[j] == !0usize, { - let mut i = 0; - while i < 8 - invariant - forall|j: int| 0 
<= j < i ==> self.mask[j] == !0usize, - { - self.mask.set(i, !0usize); - i += 1; - } - proof { - const_facts(); - lemma_is_bit_set(); - self.lemma_view(); - let seq_set = Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS); - let bit_set = Set::new(|t: (int, int)| 0 <= t.0 < 8 && 0 <= t.1 < 64).map( - |t: (int, int)| t.0 * 64 + t.1, - ); - assert forall|i: int| seq_set.contains(i) implies bit_set.contains(i) by { - assert(Set::new(|t: (int, int)| 0 <= t.0 < 8 && 0 <= t.1 < 64).contains( - (i / 64, i % 64), - )); - } - assert(seq_set =~= bit_set); - assert(self@ =~= Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS)); + self.mask.set(i, !0usize); + i += 1; + } + proof { + const_facts(); + lemma_is_bit_set(); + self.lemma_view(); + let seq_set = Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS); + let bit_set = Set::new(|t: (int, int)| 0 <= t.0 < 8 && 0 <= t.1 < 64).map( + |t: (int, int)| t.0 * 64 + t.1, + ); + assert forall|i: int| seq_set.contains(i) implies bit_set.contains(i) by { + assert(Set::new(|t: (int, int)| 0 <= t.0 < 8 && 0 <= t.1 < 64).contains( + (i / 64, i % 64), + )); } + assert(seq_set =~= bit_set); + assert(self@ =~= Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS)); } + } - pub fn committed_size(&self, total: usize) -> usize { - todo(); - loop { - } + pub fn committed_size(&self, total: usize) -> usize { + todo(); + loop { } + } + + pub fn next_run(&self, idx: usize) -> (res: (usize, usize)) + requires + 0 <= idx < COMMIT_MASK_BITS, + ensures + ({ + let (next_idx, count) = res; + next_idx + count <= COMMIT_MASK_BITS && (forall|t| + next_idx <= t < next_idx + count ==> self@.contains(t)) + }), // This should be true, but isn't strictly needed to prove safety://forall |t| idx <= t < next_idx ==> !self@.contains(t),FORMATTER_NOT_INLINE_MARKER + // Likewise we could have a condition that `count` is not smaller than necessary - pub fn next_run(&self, idx: usize) -> (res: (usize, usize)) + { + proof { + const_facts(); + } + // Starting at idx, scan to find the first bit. + let mut i: usize = idx / usize::BITS as usize; + let mut ofs: usize = idx % usize::BITS as usize; + let mut mask: usize = 0; + assert(ofs < 64) by (nonlinear_arith) requires - 0 <= idx < COMMIT_MASK_BITS, + ofs == idx % usize::BITS as usize, + ; + // Changed loop condition to use 8 rather than COMMIT_MASK_FIELD_COUNT due to + // https://github.com/verus-lang/verus/issues/925 + while i < 8 + invariant + ofs < 64, ensures - ({ - let (next_idx, count) = res; - next_idx + count <= COMMIT_MASK_BITS && (forall|t| - next_idx <= t < next_idx + count ==> self@.contains(t)) - }),// This should be true, but isn't strictly needed to prove safety: - //forall |t| idx <= t < next_idx ==> !self@.contains(t), - // Likewise we could have a condition that `count` is not smaller than necessary - + i < 8 ==> mask == self.mask[i as int] >> ofs, + i < 8 ==> ofs < 64, + i < 8 ==> mask & 1 == 1, { - proof { - const_facts(); + mask = self.mask[i] >> ofs; + if mask != 0 { + while mask & 1 == 0 + invariant + i < 8, + ofs < 64, + mask == self.mask[i as int] >> ofs, + mask != 0, + { + assert((mask >> 1usize) != 0usize) by (bit_vector) + requires + mask != 0usize, + mask & 1 == 0usize, + ; + assert(forall|m: u64, n: u64| + #![auto] + n < 64 ==> (m >> n) >> 1u64 == m >> add(n, 1u64)) by (bit_vector); + assert(forall|m: u64| #![auto] (m >> 63u64) >> 1u64 == 0u64) by (bit_vector); + mask = mask >> 1usize; + ofs += 1; + } + assert(mask & 1 == 1usize) by (bit_vector) + requires + mask & 1 != 0usize, + ; + break ; } - // Starting at idx, scan to find the first bit. 
- let mut i: usize = idx / usize::BITS as usize; - let mut ofs: usize = idx % usize::BITS as usize; - let mut mask: usize = 0; - assert(ofs < 64) by (nonlinear_arith) + i += 1; + ofs = 0; + } + if i >= COMMIT_MASK_FIELD_COUNT as usize { + (COMMIT_MASK_BITS as usize, 0) + } else { + // Count 1 bits in this run + let mut count: usize = 0; + let next_idx = i * usize::BITS as usize + ofs; + assert((i * 64 + ofs) % 64 == ofs) by (nonlinear_arith) requires - ofs == idx % usize::BITS as usize, + ofs < 64, ; - // Changed loop condition to use 8 rather than COMMIT_MASK_FIELD_COUNT due to - // https://github.com/verus-lang/verus/issues/925 - while i < 8 + loop invariant - ofs < 64, + mask & 1 == 1, + i < 8, + mask == self.mask[i as int] >> mod64((next_idx + count) as usize), + (next_idx + count) / 64 == i, + invariant_ensures + forall|j: usize| + next_idx <= j < next_idx + count ==> #[trigger] is_bit_set( + self.mask[div64(j) as int], + mod64(j), + ), ensures - i < 8 ==> mask == self.mask[i as int] >> ofs, - i < 8 ==> ofs < 64, - i < 8 ==> mask & 1 == 1, + next_idx + count <= 512, { - mask = self.mask[i] >> ofs; - if mask != 0 { - while mask & 1 == 0 - invariant - i < 8, - ofs < 64, - mask == self.mask[i as int] >> ofs, - mask != 0, - { - assert((mask >> 1usize) != 0usize) by (bit_vector) - requires - mask != 0usize, - mask & 1 == 0usize, - ; - assert(forall|m: u64, n: u64| - #![auto] - n < 64 ==> (m >> n) >> 1u64 == m >> add(n, 1u64)) by (bit_vector); - assert(forall|m: u64| #![auto] (m >> 63u64) >> 1u64 == 0u64) by (bit_vector); - mask = mask >> 1usize; - ofs += 1; - } - assert(mask & 1 == 1usize) by (bit_vector) - requires - mask & 1 != 0usize, - ; - break ; + proof { + const_facts(); } - i += 1; - ofs = 0; - } - if i >= COMMIT_MASK_FIELD_COUNT as usize { - (COMMIT_MASK_BITS as usize, 0) - } else { - // Count 1 bits in this run - let mut count: usize = 0; - let next_idx = i * usize::BITS as usize + ofs; - assert((i * 64 + ofs) % 64 == ofs) by (nonlinear_arith) - requires - ofs < 64, - ; loop invariant mask & 1 == 1, @@ -17789,150 +17714,135 @@ mod commit_mask { (next_idx + count) / 64 == i, invariant_ensures forall|j: usize| - next_idx <= j < next_idx + count ==> #[trigger] - is_bit_set(self.mask[div64(j) as int], mod64(j)), + next_idx <= j < next_idx + count ==> #[trigger] is_bit_set( + self.mask[div64(j) as int], + mod64(j), + ), ensures - next_idx + count <= 512, + mask & 1 == 0, + (next_idx + count) / 64 == if mod64((next_idx + count) as usize) == 0 { + i + 1 + } else { + i as int + }, { proof { - const_facts(); - } - loop - invariant - mask & 1 == 1, - i < 8, - mask == self.mask[i as int] >> mod64((next_idx + count) as usize), - (next_idx + count) / 64 == i, - invariant_ensures - forall|j: usize| - next_idx <= j < next_idx + count ==> #[trigger] - is_bit_set(self.mask[div64(j) as int], mod64(j)), - ensures - mask & 1 == 0, - (next_idx + count) / 64 == if mod64((next_idx + count) as usize) == 0 { - i + 1 - } else { - i as int - }, - { - proof { - assert(forall|m: u64, b: u64| - b < 64 && #[trigger] - ((m >> b) & 1) == 1 ==> is_bit_set!(m, b)) by (bit_vector); - reveal(is_bit_set); - assert(forall|j: u64, m: u64| - j < 64 ==> #[trigger] - ((m >> j) >> 1) == m >> add(j, 1)) by (bit_vector); - assert(forall|m: u64, j: u64| - j >= 64 ==> #[trigger] - ((m >> j) & 1) != 1) by (bit_vector); - } - count += 1; - mask = mask >> 1usize; - if (mask & 1) != 1 { - assert(mask & 1 == 0usize) by (bit_vector) - requires - mask & 1 != 1usize, - ; - break ; - } - } - if ((next_idx + count) % usize::BITS as 
usize) == 0 { - i += 1; - if i >= COMMIT_MASK_FIELD_COUNT as usize { - break ; - } - mask = self.mask[i]; - assert(forall|m: u64| m >> 0u64 == m) by (bit_vector); - ofs = 0; + assert(forall|m: u64, b: u64| + b < 64 && #[trigger] ((m >> b) & 1) == 1 ==> is_bit_set!(m, b)) + by (bit_vector); + reveal(is_bit_set); + assert(forall|j: u64, m: u64| + j < 64 ==> #[trigger] ((m >> j) >> 1) == m >> add(j, 1)) + by (bit_vector); + assert(forall|m: u64, j: u64| j >= 64 ==> #[trigger] ((m >> j) & 1) != 1) + by (bit_vector); } + count += 1; + mask = mask >> 1usize; if (mask & 1) != 1 { + assert(mask & 1 == 0usize) by (bit_vector) + requires + mask & 1 != 1usize, + ; break ; } } - assert forall|j: usize| next_idx <= j < next_idx + count implies self@.contains( - j as int, - ) by { - self.lemma_view(); - assert(self@.contains(div64(j) * 64 + mod64(j))); - }; - (next_idx, count) - } - } - - pub fn is_empty(&self) -> (b: bool) - ensures - b == (self@ == Set::::empty()), - { - let mut i = 0; - while i < 8 - invariant - forall|j: int| #![auto] 0 <= j < i ==> self.mask[j] == 0, - { - if self.mask[i] != 0 { - proof { - lemma_is_bit_set(); - self.lemma_view(); - let j = lemma_obtain_bit_index_1(self.mask[i as int]); - assert(self@.contains(i * 64 + j)); + if ((next_idx + count) % usize::BITS as usize) == 0 { + i += 1; + if i >= COMMIT_MASK_FIELD_COUNT as usize { + break ; } - return false; + mask = self.mask[i]; + assert(forall|m: u64| m >> 0u64 == m) by (bit_vector); + ofs = 0; + } + if (mask & 1) != 1 { + break ; } - i += 1; } - proof { - lemma_is_bit_set(); + assert forall|j: usize| next_idx <= j < next_idx + count implies self@.contains( + j as int, + ) by { self.lemma_view(); - assert(self@ =~= Set::::empty()); - } - return true; + assert(self@.contains(div64(j) * 64 + mod64(j))); + }; + (next_idx, count) } + } - pub fn is_full(&self) -> (b: bool) - ensures - b == (self@ == Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS)), + pub fn is_empty(&self) -> (b: bool) + ensures + b == (self@ == Set::::empty()), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| #![auto] 0 <= j < i ==> self.mask[j] == 0, { - let mut i = 0; - while i < 8 - invariant - forall|j: int| #![auto] 0 <= j < i ==> self.mask[j] == !0usize, - { - if self.mask[i] != (!0usize) { - proof { - const_facts(); - lemma_is_bit_set(); - self.lemma_view(); - let j = lemma_obtain_bit_index_2(self.mask[i as int]); - assert(!self@.contains(i * 64 + j)); - assert(i * 64 + j < 512) by (nonlinear_arith) - requires - i < 8 && j < 64, - ; - } - return false; + if self.mask[i] != 0 { + proof { + lemma_is_bit_set(); + self.lemma_view(); + let j = lemma_obtain_bit_index_1(self.mask[i as int]); + assert(self@.contains(i * 64 + j)); } - i = i + 1; + return false; } - proof { - lemma_is_bit_set(); - const_facts(); - self.lemma_view(); - assert forall|k: int| 0 <= k < COMMIT_MASK_BITS implies self@.contains(k) by { - let t = k / 64; - let u = (k % 64) as usize; - assert(t * 64 + u == k); - assert(is_bit_set(self.mask[t], u)); - assert(0 <= t < 8); - assert(0 <= u < 64); - assert(self@.contains(t * 64 + u)); + i += 1; + } + proof { + lemma_is_bit_set(); + self.lemma_view(); + assert(self@ =~= Set::::empty()); + } + return true; + } + + pub fn is_full(&self) -> (b: bool) + ensures + b == (self@ == Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS)), + { + let mut i = 0; + while i < 8 + invariant + forall|j: int| #![auto] 0 <= j < i ==> self.mask[j] == !0usize, + { + if self.mask[i] != (!0usize) { + proof { + const_facts(); + lemma_is_bit_set(); + self.lemma_view(); 
+ let j = lemma_obtain_bit_index_2(self.mask[i as int]); + assert(!self@.contains(i * 64 + j)); + assert(i * 64 + j < 512) by (nonlinear_arith) + requires + i < 8 && j < 64, + ; } - assert(self@ =~= Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS)); + return false; } - return true; + i = i + 1; + } + proof { + lemma_is_bit_set(); + const_facts(); + self.lemma_view(); + assert forall|k: int| 0 <= k < COMMIT_MASK_BITS implies self@.contains(k) by { + let t = k / 64; + let u = (k % 64) as usize; + assert(t * 64 + u == k); + assert(is_bit_set(self.mask[t], u)); + assert(0 <= t < 8); + assert(0 <= u < 64); + assert(self@.contains(t * 64 + u)); + } + assert(self@ =~= Set::new(|i: int| 0 <= i < COMMIT_MASK_BITS)); } + return true; } +} - } // verus! +} // verus! } mod arena { @@ -17953,60 +17863,60 @@ mod arena { verus! { - pub type ArenaId = usize; +pub type ArenaId = usize; - pub type MemId = usize; +pub type MemId = usize; - pub fn arena_alloc_aligned( - size: usize, - alignment: usize, - align_offset: usize, - request_commit: bool, - allow_large: bool, - req_arena_id: ArenaId, - ) -> (res: (usize, Tracked, bool, bool, bool, bool, usize)) - requires - alignment as int % page_size() == 0, - size as int % page_size() == 0, - alignment + page_size() <= usize::MAX, - size == SEGMENT_SIZE, - ensures - ({ - let (addr, mem, commit, large, is_pinned, is_zero, mem_id) = res; - addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size - <= usize::MAX && (request_commit ==> commit) && (commit - ==> mem@.os_has_range_read_write(addr as int, size as int)) && (commit - ==> mem@.pointsto_has_range(addr as int, size as int)) - && mem@.has_pointsto_for_all_read_write() && (alignment != 0 ==> (addr as int - + align_offset as int) % alignment as int == 0)) - })// commit: bool - // large: bool - // is_pinned: bool - // is_zero: bool - // mem_id: usize - , - { - // TODO arena allocation - let (p, is_large, Tracked(mem)) = os_alloc_aligned_offset( - size, - alignment, - align_offset, - request_commit, - allow_large, - ); - let did_commit = request_commit; - let is_pinned = is_large; - let is_zero = true; - let memid_os = 0; - proof { - if p != 0 { - mem.os_restrict(p as int, size as int); - } +pub fn arena_alloc_aligned( + size: usize, + alignment: usize, + align_offset: usize, + request_commit: bool, + allow_large: bool, + req_arena_id: ArenaId, +) -> (res: (usize, Tracked, bool, bool, bool, bool, usize)) + requires + alignment as int % page_size() == 0, + size as int % page_size() == 0, + alignment + page_size() <= usize::MAX, + size == SEGMENT_SIZE, + ensures + ({ + let (addr, mem, commit, large, is_pinned, is_zero, mem_id) = res; + addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size + <= usize::MAX && (request_commit ==> commit) && (commit + ==> mem@.os_has_range_read_write(addr as int, size as int)) && (commit + ==> mem@.pointsto_has_range(addr as int, size as int)) + && mem@.has_pointsto_for_all_read_write() && (alignment != 0 ==> (addr as int + + align_offset as int) % alignment as int == 0)) + }) // commit: bool + // large: bool + // is_pinned: bool + // is_zero: bool + // mem_id: usize + , +{ + // TODO arena allocation + let (p, is_large, Tracked(mem)) = os_alloc_aligned_offset( + size, + alignment, + align_offset, + request_commit, + allow_large, + ); + let did_commit = request_commit; + let is_pinned = is_large; + let is_zero = true; + let memid_os = 0; + proof { + if p != 0 { + mem.os_restrict(p as int, size as int); } - (p, Tracked(mem), 
did_commit, is_large, is_pinned, is_zero, memid_os) } + (p, Tracked(mem), did_commit, is_large, is_pinned, is_zero, memid_os) +} - /* +/* pub const ARENA_BLOCK_SIZE: usize = SEGMENT_SIZE as usize; @@ -18093,7 +18003,7 @@ mod arena { // align_offset: usize, */ - } // verus! +} // verus! } mod alloc_fast { @@ -18121,233 +18031,233 @@ mod alloc_fast { verus! { - // Implements the "fast path" - // malloc -> heap_malloc -> heap_malloc_zero -> heap_malloc_zero_ex - // -> heap_malloc_small_zero - // -> heap_get_free_small_page & page_malloc - #[inline] - pub fn heap_malloc( - heap: HeapPtr, - size: usize, - Tracked(local): Tracked<&mut Local>, - ) // $line_count$Trusted$ - -> (t: ( - PPtr, - Tracked, - Tracked, - )) // $line_count$Trusted$ - requires // $line_count$Trusted$ - - old(local).wf(), // $line_count$Trusted$ - heap.wf(), // $line_count$Trusted$ - heap.is_in(*old(local)), // $line_count$Trusted$ - - ensures // $line_count$Trusted$ - - local.wf(), // $line_count$Trusted$ - local.instance == old(local).instance, // $line_count$Trusted$ - forall|heap: HeapPtr| heap.is_in(*old(local)) ==> heap.is_in(*local), // $line_count$Trusted$ - ({ // $line_count$Trusted$ - let (ptr, points_to_raw, dealloc) = t; // $line_count$Trusted$ - dealloc@.wf() // $line_count$Trusted$ - && points_to_raw@.is_range( - ptr.id(), - size as int, - ) // $line_count$Trusted$ - && ptr.id() == dealloc@.ptr() // $line_count$Trusted$ - && dealloc@.instance() == local.instance // $line_count$Trusted$ - && dealloc@.size == size // $line_count$Trusted$ - - }) // $line_count$Trusted$ - , - { - heap_malloc_zero(heap, size, false, Tracked(&mut *local)) - } +// Implements the "fast path" +// malloc -> heap_malloc -> heap_malloc_zero -> heap_malloc_zero_ex +// -> heap_malloc_small_zero +// -> heap_get_free_small_page & page_malloc +#[inline] +pub fn heap_malloc( + heap: HeapPtr, + size: usize, + Tracked(local): Tracked<&mut Local>, +) // $line_count$Trusted$ + -> (t: ( + PPtr, + Tracked, + Tracked, +)) // $line_count$Trusted$ + requires // $line_count$Trusted$ + + old(local).wf(), // $line_count$Trusted$ + heap.wf(), // $line_count$Trusted$ + heap.is_in(*old(local)), // $line_count$Trusted$ + + ensures // $line_count$Trusted$ + + local.wf(), // $line_count$Trusted$ + local.instance == old(local).instance, // $line_count$Trusted$ + forall|heap: HeapPtr| heap.is_in(*old(local)) ==> heap.is_in(*local), // $line_count$Trusted$ + ({ // $line_count$Trusted$ + let (ptr, points_to_raw, dealloc) = t; // $line_count$Trusted$ + dealloc@.wf() // $line_count$Trusted$ + && points_to_raw@.is_range( + ptr.id(), + size as int, + ) // $line_count$Trusted$ + && ptr.id() == dealloc@.ptr() // $line_count$Trusted$ + && dealloc@.instance() == local.instance // $line_count$Trusted$ + && dealloc@.size == size // $line_count$Trusted$ - #[inline] - pub fn heap_malloc_zero( - heap: HeapPtr, - size: usize, - zero: bool, - Tracked(local): Tracked<&mut Local>, - ) -> (t: (PPtr, Tracked, Tracked)) - requires - old(local).wf(), - heap.wf(), - heap.is_in(*old(local)), - ensures - local.wf(), - ({ - let (ptr, points_to_raw, dealloc) = t; - dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() - == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size - }), - common_preserves(*old(local), *local), - { - heap_malloc_zero_ex(heap, size, zero, 0, Tracked(&mut *local)) - } + }) // $line_count$Trusted$ + , +{ + heap_malloc_zero(heap, size, false, Tracked(&mut *local)) +} - #[inline] - pub fn heap_malloc_zero_ex( - heap: 
HeapPtr, - size: usize, - zero: bool, - huge_alignment: usize, - Tracked(local): Tracked<&mut Local>, - ) -> (t: (PPtr, Tracked, Tracked)) - requires - old(local).wf(), - heap.wf(), - heap.is_in(*old(local)), - ensures - local.wf(), - ({ - let (ptr, points_to_raw, dealloc) = t; - dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() - == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size - }), - common_preserves(*old(local), *local), - { - if likely(size <= SMALL_SIZE_MAX) { - heap_malloc_small_zero(heap, size, zero, Tracked(&mut *local)) - } else { - malloc_generic(heap, size, zero, huge_alignment, Tracked(&mut *local)) - } - } +#[inline] +pub fn heap_malloc_zero( + heap: HeapPtr, + size: usize, + zero: bool, + Tracked(local): Tracked<&mut Local>, +) -> (t: (PPtr, Tracked, Tracked)) + requires + old(local).wf(), + heap.wf(), + heap.is_in(*old(local)), + ensures + local.wf(), + ({ + let (ptr, points_to_raw, dealloc) = t; + dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() + == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size + }), + common_preserves(*old(local), *local), +{ + heap_malloc_zero_ex(heap, size, zero, 0, Tracked(&mut *local)) +} - #[inline] - pub fn heap_get_free_small_page( - heap: HeapPtr, - size: usize, - Tracked(local): Tracked<&Local>, - ) -> (page: PagePtr) - requires - 0 <= size <= SMALL_SIZE_MAX, - local.wf_main(), - heap.is_in(*local), - heap.wf(), - ensures - !page.is_empty_global(*local) ==> ({ - &&& page.wf() - &&& Some(page.page_id@) - == local.page_organization.used_dlist_headers[smallest_bin_fitting_size( - (size + 7) / 8 * 8, - )].first - }), - { - let idx = (size + 7) / 8; - let ptr = heap.get_pages_free_direct(Tracked(local))[idx]; - let ghost bin_idx = smallest_bin_fitting_size((size + 7) / 8 * 8); - let ghost page_id = local.page_organization.used_dlist_headers[bin_idx].first.unwrap(); - let page_ptr = PagePtr { page_ptr: ptr, page_id: Ghost(page_id) }; - proof { - bounds_for_smallest_bin_fitting_size((size + 7) / 8 * 8); - if page_ptr.page_ptr.id() != local.page_empty_global@.s.points_to@.pptr { - //assert(local.heap.pages_free_direct@.value.unwrap()@[idx as int].id() - // == local.heap.pages@.value.unwrap()@[bin_idx].first.id()); - //assert(local.heap.pages@.value.unwrap()@[bin_idx].first.id() != 0); - } - } - return page_ptr; +#[inline] +pub fn heap_malloc_zero_ex( + heap: HeapPtr, + size: usize, + zero: bool, + huge_alignment: usize, + Tracked(local): Tracked<&mut Local>, +) -> (t: (PPtr, Tracked, Tracked)) + requires + old(local).wf(), + heap.wf(), + heap.is_in(*old(local)), + ensures + local.wf(), + ({ + let (ptr, points_to_raw, dealloc) = t; + dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() + == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size + }), + common_preserves(*old(local), *local), +{ + if likely(size <= SMALL_SIZE_MAX) { + heap_malloc_small_zero(heap, size, zero, Tracked(&mut *local)) + } else { + malloc_generic(heap, size, zero, huge_alignment, Tracked(&mut *local)) } +} - #[inline] - pub fn heap_malloc_small_zero( - heap: HeapPtr, - size: usize, - zero: bool, - Tracked(local): Tracked<&mut Local>, - ) -> (t: (PPtr, Tracked, Tracked)) - requires - old(local).wf(), - heap.wf(), - heap.is_in(*old(local)), - size <= SMALL_SIZE_MAX, - ensures - local.wf(), - ({ - let (ptr, points_to_raw, dealloc) = t; - dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() 
- == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size - }), - common_preserves(*old(local), *local), - { - /*let mut size = size; - if PADDING { - if size == 0 { - size = INTPTR_SIZE; - } - }*/ - let page = heap_get_free_small_page(heap, size, Tracked(&*local)); - proof { - let bin_idx = smallest_bin_fitting_size((size + 7) / 8 * 8); - bounds_for_smallest_bin_fitting_size((size + 7) / 8 * 8); - local.page_organization.used_first_is_in(bin_idx); - //assert(local.page_organization.used_dlist_headers[bin_idx].first == Some(page.page_id@)); - //assert(local.page_organization.pages.dom().contains(page.page_id@)); - //assert(local.pages.dom().contains(page.page_id@)); - } - let (p, Tracked(points_to_raw), Tracked(mim_dealloc)) = page_malloc( - heap, - page, - size, - zero, - Tracked(&mut *local), - ); - (p, Tracked(points_to_raw), Tracked(mim_dealloc)) - } +#[inline] +pub fn heap_get_free_small_page( + heap: HeapPtr, + size: usize, + Tracked(local): Tracked<&Local>, +) -> (page: PagePtr) + requires + 0 <= size <= SMALL_SIZE_MAX, + local.wf_main(), + heap.is_in(*local), + heap.wf(), + ensures + !page.is_empty_global(*local) ==> ({ + &&& page.wf() + &&& Some(page.page_id@) + == local.page_organization.used_dlist_headers[smallest_bin_fitting_size( + (size + 7) / 8 * 8, + )].first + }), +{ + let idx = (size + 7) / 8; + let ptr = heap.get_pages_free_direct(Tracked(local))[idx]; + let ghost bin_idx = smallest_bin_fitting_size((size + 7) / 8 * 8); + let ghost page_id = local.page_organization.used_dlist_headers[bin_idx].first.unwrap(); + let page_ptr = PagePtr { page_ptr: ptr, page_id: Ghost(page_id) }; + proof { + bounds_for_smallest_bin_fitting_size((size + 7) / 8 * 8); + if page_ptr.page_ptr.id() != local.page_empty_global@.s.points_to@.pptr { + //assert(local.heap.pages_free_direct@.value.unwrap()@[idx as int].id() + // == local.heap.pages@.value.unwrap()@[bin_idx].first.id()); + //assert(local.heap.pages@.value.unwrap()@[bin_idx].first.id() != 0); + } + } + return page_ptr; +} - pub fn page_malloc( - heap: HeapPtr, - page_ptr: PagePtr, - size: usize, - zero: bool, - Tracked(local): Tracked<&mut Local>, - ) -> (t: (PPtr, Tracked, Tracked)) - requires - old(local).wf(), - heap.wf(), - heap.is_in(*old(local)), - page_ptr.is_empty_global(*old(local)) || ({ - &&& page_ptr.wf() - &&& page_ptr.is_used_and_primary(*old(local)) - &&& size <= old(local).page_state(page_ptr.page_id@).block_size - }), - ensures - local.wf(), - ({ - let (ptr, points_to_raw, dealloc) = t; - dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() - == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size - }), - common_preserves(*old(local), *local), - { - if unlikely(page_ptr.get_inner_ref_maybe_empty(Tracked(&*local)).free.is_empty()) { - return malloc_generic(heap, size, zero, 0, Tracked(&mut *local)); - }//assert(!page_ptr.is_empty_global(*local)); +#[inline] +pub fn heap_malloc_small_zero( + heap: HeapPtr, + size: usize, + zero: bool, + Tracked(local): Tracked<&mut Local>, +) -> (t: (PPtr, Tracked, Tracked)) + requires + old(local).wf(), + heap.wf(), + heap.is_in(*old(local)), + size <= SMALL_SIZE_MAX, + ensures + local.wf(), + ({ + let (ptr, points_to_raw, dealloc) = t; + dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() + == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size + }), + common_preserves(*old(local), *local), +{ + /*let mut size = size; + if PADDING { + if size == 0 { + size = 
INTPTR_SIZE; + } + }*/ + let page = heap_get_free_small_page(heap, size, Tracked(&*local)); + proof { + let bin_idx = smallest_bin_fitting_size((size + 7) / 8 * 8); + bounds_for_smallest_bin_fitting_size((size + 7) / 8 * 8); + local.page_organization.used_first_is_in(bin_idx); + //assert(local.page_organization.used_dlist_headers[bin_idx].first == Some(page.page_id@)); + //assert(local.page_organization.pages.dom().contains(page.page_id@)); + //assert(local.pages.dom().contains(page.page_id@)); + } + let (p, Tracked(points_to_raw), Tracked(mim_dealloc)) = page_malloc( + heap, + page, + size, + zero, + Tracked(&mut *local), + ); + (p, Tracked(points_to_raw), Tracked(mim_dealloc)) +} + +pub fn page_malloc( + heap: HeapPtr, + page_ptr: PagePtr, + size: usize, + zero: bool, + Tracked(local): Tracked<&mut Local>, +) -> (t: (PPtr, Tracked, Tracked)) + requires + old(local).wf(), + heap.wf(), + heap.is_in(*old(local)), + page_ptr.is_empty_global(*old(local)) || ({ + &&& page_ptr.wf() + &&& page_ptr.is_used_and_primary(*old(local)) + &&& size <= old(local).page_state(page_ptr.page_id@).block_size + }), + ensures + local.wf(), + ({ + let (ptr, points_to_raw, dealloc) = t; + dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() + == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size + }), + common_preserves(*old(local), *local), +{ + if unlikely(page_ptr.get_inner_ref_maybe_empty(Tracked(&*local)).free.is_empty()) { + return malloc_generic(heap, size, zero, 0, Tracked(&mut *local)); + } //assert(!page_ptr.is_empty_global(*local)); - let popped; - page_get_mut_inner!(page_ptr, local, page_inner => { + let popped; + page_get_mut_inner!(page_ptr, local, page_inner => { popped = page_inner.free.pop_block(); //assert(page_inner.used < 1000000); page_inner.used = page_inner.used + 1; }); - let ptr = popped.0; - let tracked dealloc; - let tracked points_to_raw; - proof { - let tracked points_to_r = popped.1.get(); - let tracked block = popped.2.get(); - //const_facts(); - //reveal(is_block_ptr); - local.instance.get_block_properties( - local.thread_token@.key, - block@.key, - &local.thread_token, - &block, - ); - /*assert(block@.key.slice_idx >= block@.key.page_id.idx); + let ptr = popped.0; + let tracked dealloc; + let tracked points_to_raw; + proof { + let tracked points_to_r = popped.1.get(); + let tracked block = popped.2.get(); + //const_facts(); + //reveal(is_block_ptr); + local.instance.get_block_properties( + local.thread_token@.key, + block@.key, + &local.thread_token, + &block, + ); + /*assert(block@.key.slice_idx >= block@.key.page_id.idx); assert(block@.value.page_shared_access == local.thread_token@.value.pages[block@.key.page_id].shared_access); assert(local.thread_token@.value.pages.dom().contains(block@.key.page_id_for_slice())); assert(block@.value.page_slice_shared_access == local.thread_token@.value.pages[block@.key.page_id_for_slice()].shared_access); @@ -18356,42 +18266,42 @@ mod alloc_fast { assert(block@.value.page_shared_access.wf(block@.key.page_id, block@.key.block_size, local.instance)); assert(valid_block_token(block, local.instance));*/ - //assert(!block@.value.allocated); - // Mark the block as 'allocated' in the token system - // let tracked thread_token = local.take_thread_token(); - //assert(thread_token@.instance == local.instance); - //assert(block@.instance == local.instance); - //assert(block@.key.page_id == page_ptr.page_id); - //#[spec] let ot = thread_token; - // let tracked (Tracked(thread_token), Tracked(block)) = 
local.instance.alloc_block( - // block@.key, local.thread_id, - // thread_token, block); - //local.thread_token = thread_token; - //assert(thread_token@.value.pages.index(page_ptr.page_id).len + 1 == - // ot@.value.pages.index(page_ptr.page_id).len); - let tracked dealloc_inner = MimDeallocInner { - mim_instance: local.instance.clone(), - mim_block: block, - ptr: ptr.id(), - }; - let tracked (dealloc0, points_to_raw0) = dealloc_inner.into_user(points_to_r, size as int); - dealloc = dealloc0; - points_to_raw = points_to_raw0; - // Mark the block as 'allocated' in the token system - //let Local { thread_id, instance, thread_token, heap_id, heap, pages, segments } - // = local; - /*assert(local.pages.index(page_ptr.page_id@).wf( + //assert(!block@.value.allocated); + // Mark the block as 'allocated' in the token system + // let tracked thread_token = local.take_thread_token(); + //assert(thread_token@.instance == local.instance); + //assert(block@.instance == local.instance); + //assert(block@.key.page_id == page_ptr.page_id); + //#[spec] let ot = thread_token; + // let tracked (Tracked(thread_token), Tracked(block)) = local.instance.alloc_block( + // block@.key, local.thread_id, + // thread_token, block); + //local.thread_token = thread_token; + //assert(thread_token@.value.pages.index(page_ptr.page_id).len + 1 == + // ot@.value.pages.index(page_ptr.page_id).len); + let tracked dealloc_inner = MimDeallocInner { + mim_instance: local.instance.clone(), + mim_block: block, + ptr: ptr.id(), + }; + let tracked (dealloc0, points_to_raw0) = dealloc_inner.into_user(points_to_r, size as int); + dealloc = dealloc0; + points_to_raw = points_to_raw0; + // Mark the block as 'allocated' in the token system + //let Local { thread_id, instance, thread_token, heap_id, heap, pages, segments } + // = local; + /*assert(local.pages.index(page_ptr.page_id@).wf( page_ptr.page_id@, local.thread_token@.value.pages.index(page_ptr.page_id@), local.instance, ));*/ - preserves_mem_chunk_good(*old(local), *local); - //assert(local.wf()); - } - (ptr, Tracked(points_to_raw), Tracked(dealloc)) + preserves_mem_chunk_good(*old(local), *local); + //assert(local.wf()); } + (ptr, Tracked(points_to_raw), Tracked(dealloc)) +} - } // verus! +} // verus! } mod alloc_generic { @@ -18417,43 +18327,43 @@ mod alloc_generic { verus! { - pub fn malloc_generic( - heap: HeapPtr, - size: usize, - zero: bool, - huge_alignment: usize, - Tracked(local): Tracked<&mut Local>, - ) -> (t: (PPtr, Tracked, Tracked)) - requires - old(local).wf(), - heap.wf(), - heap.is_in(*old(local)), - ensures - local.wf(), - ({ - let (ptr, points_to_raw, dealloc) = t; - dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() - == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size - }), - common_preserves(*old(local), *local), - { - // TODO heap initialization - // TODO deferred free? 
- heap_delayed_free_partial(heap, Tracked(&mut *local)); - let page = crate::page::find_page(heap, size, huge_alignment, Tracked(&mut *local)); - if unlikely(page.is_null()) { - todo(); - } - if unlikely(zero && page.get_block_size(Tracked(&*local)) == 0) { - todo(); - loop { - } - } else { - crate::alloc_fast::page_malloc(heap, page, size, zero, Tracked(&mut *local)) +pub fn malloc_generic( + heap: HeapPtr, + size: usize, + zero: bool, + huge_alignment: usize, + Tracked(local): Tracked<&mut Local>, +) -> (t: (PPtr, Tracked, Tracked)) + requires + old(local).wf(), + heap.wf(), + heap.is_in(*old(local)), + ensures + local.wf(), + ({ + let (ptr, points_to_raw, dealloc) = t; + dealloc@.wf() && points_to_raw@.is_range(ptr.id(), size as int) && ptr.id() + == dealloc@.ptr() && dealloc@.instance() == local.instance && dealloc@.size == size + }), + common_preserves(*old(local), *local), +{ + // TODO heap initialization + // TODO deferred free? + heap_delayed_free_partial(heap, Tracked(&mut *local)); + let page = crate::page::find_page(heap, size, huge_alignment, Tracked(&mut *local)); + if unlikely(page.is_null()) { + todo(); + } + if unlikely(zero && page.get_block_size(Tracked(&*local)) == 0) { + todo(); + loop { } + } else { + crate::alloc_fast::page_malloc(heap, page, size, zero, Tracked(&mut *local)) } +} - /* +/* void _mi_page_free_collect(mi_page_t* page, bool force) { mi_assert_internal(page!=NULL); @@ -18489,24 +18399,24 @@ mod alloc_generic { } */ - pub fn page_free_collect(page_ptr: PagePtr, force: bool, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf(), - page_ptr.wf(), - page_ptr.is_used_and_primary(*old(local)), - old(local).page_organization.pages[page_ptr.page_id@].is_used == true, - ensures - local.wf(), - page_ptr.is_used_and_primary(*local), - old(local).page_organization == local.page_organization, - common_preserves(*old(local), *local), - old(local).thread_token == local.thread_token, - { - if force || page_ptr.get_ref(Tracked(&*local)).xthread_free.atomic.load() != 0 { - page_thread_free_collect(page_ptr, Tracked(&mut *local)); - } - let ghost old_local = *local; - page_get_mut_inner!(page_ptr, local, page_inner => { +pub fn page_free_collect(page_ptr: PagePtr, force: bool, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf(), + page_ptr.wf(), + page_ptr.is_used_and_primary(*old(local)), + old(local).page_organization.pages[page_ptr.page_id@].is_used == true, + ensures + local.wf(), + page_ptr.is_used_and_primary(*local), + old(local).page_organization == local.page_organization, + common_preserves(*old(local), *local), + old(local).thread_token == local.thread_token, +{ + if force || page_ptr.get_ref(Tracked(&*local)).xthread_free.atomic.load() != 0 { + page_thread_free_collect(page_ptr, Tracked(&mut *local)); + } + let ghost old_local = *local; + page_get_mut_inner!(page_ptr, local, page_inner => { if !page_inner.local_free.is_empty() { if likely(page_inner.free.is_empty()) { // Move local_free to free @@ -18518,12 +18428,12 @@ mod alloc_generic { } } }); - proof { - preserves_mem_chunk_good(old_local, *local); - } + proof { + preserves_mem_chunk_good(old_local, *local); } +} - /* +/* static void _mi_page_thread_free_collect(mi_page_t* page) { mi_block_t* head; @@ -18561,24 +18471,24 @@ mod alloc_generic { } */ - fn page_thread_free_collect(page_ptr: PagePtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf(), - page_ptr.wf(), - page_ptr.is_used_and_primary(*old(local)), - ensures - local.wf(), - local.pages.dom() == 
old(local).pages.dom(), - page_ptr.is_used_and_primary(*local), - old(local).page_organization == local.page_organization, - common_preserves(*old(local), *local), - old(local).thread_token == local.thread_token, - { - let mut ll = page_ptr.get_ref(Tracked(&*local)).xthread_free.take(); - if ll.is_empty() { - return ; - } - page_get_mut_inner!(page_ptr, local, page_inner => { +fn page_thread_free_collect(page_ptr: PagePtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf(), + page_ptr.wf(), + page_ptr.is_used_and_primary(*old(local)), + ensures + local.wf(), + local.pages.dom() == old(local).pages.dom(), + page_ptr.is_used_and_primary(*local), + old(local).page_organization == local.page_organization, + common_preserves(*old(local), *local), + old(local).thread_token == local.thread_token, +{ + let mut ll = page_ptr.get_ref(Tracked(&*local)).xthread_free.take(); + if ll.is_empty() { + return ; + } + page_get_mut_inner!(page_ptr, local, page_inner => { bound_on_1_lists(Tracked(local.instance.clone()), Tracked(&local.thread_token), &mut ll); let count = page_inner.local_free.append(&mut ll); @@ -18590,12 +18500,12 @@ mod alloc_generic { page_inner.used = page_inner.used - count; }); - proof { - preserves_mem_chunk_good(*old(local), *local); - } + proof { + preserves_mem_chunk_good(*old(local), *local); } +} - /* +/* static mi_decl_noinline void mi_page_free_list_extend( mi_page_t* const page, const size_t bsize, const size_t extend, mi_stats_t* const stats) { MI_UNUSED(stats); @@ -18623,107 +18533,107 @@ mod alloc_generic { } */ - #[verifier::spinoff_prover] - fn page_free_list_extend( - page_ptr: PagePtr, - bsize: usize, - extend: usize, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf_main(), - page_ptr.wf(), - page_ptr.is_used_and_primary(*old(local)), - old(local).page_capacity(page_ptr.page_id@) + extend as int <= old(local).page_reserved( - page_ptr.page_id@, - ), - // TODO this should have a special case for huge-page handling: - bsize == old(local).page_inner(page_ptr.page_id@).xblock_size, - bsize % 8 == 0, - extend >= 1, - ensures - local.wf_main(), - page_ptr.is_used_and_primary(*local), - local.page_organization == old(local).page_organization, - common_preserves(*old(local), *local), - { - let ghost page_id = page_ptr.page_id@; - proof { - const_facts(); - let reserved = local.page_reserved(page_id); - let capacity = local.page_capacity(page_id); - let count = local.page_organization.pages[page_id].count.unwrap(); - local.page_organization.get_count_bound(page_id); - //assert(local.page_organization.pages.dom().contains(page_id)); - local.page_organization.used_offset0_has_count(page_id); - //assert(count + page_id.idx <= SLICES_PER_SEGMENT); - assert(capacity * bsize <= reserved * bsize) by (nonlinear_arith) - requires - capacity <= reserved, - bsize >= 0, - ; - assert((capacity + extend - 1) * bsize <= reserved * bsize) by (nonlinear_arith) - requires - capacity + extend - 1 <= reserved, - bsize >= 0, - ; - assert((capacity + extend) * bsize <= reserved * bsize) by (nonlinear_arith) - requires - capacity + extend <= reserved, - bsize >= 0, - ; - //assert(bsize == local.thread_token@.value.pages[page_id].block_size); - //assert(count == local.pages[page_id].count@.value.unwrap()); - } - let capacity = page_ptr.get_inner_ref(Tracked(&*local)).capacity; - let pag_start = calculate_page_start(page_ptr, bsize); - let start = calculate_page_block_at( - pag_start, - bsize, - capacity as usize, - Ghost(page_ptr.page_id@), - ); - //assert((capacity + 
extend) as usize as int == capacity + extend); - let x = capacity as usize + extend - 1; - let last = calculate_page_block_at(pag_start, bsize, x, Ghost(page_ptr.page_id@)); - let ghost rng_start = block_start_at(page_id, bsize as int, capacity as int); - let ghost rng_size = extend * bsize; - let ghost segment_id = page_id.segment_id; - let tracked mut seg = local.segments.tracked_remove(segment_id); - proof { - assert(extend * bsize >= 0) by (nonlinear_arith) - requires - extend >= 0, - bsize >= 0, - ; - segment_mem_has_reserved_range(*old(local), page_id, capacity + extend); - assert(seg.mem.pointsto_has_range(rng_start, rng_size)); - } - let tracked mut pt = seg.mem.take_points_to_range(rng_start, rng_size); - proof { - local.segments.tracked_insert(segment_id, seg); - } - let tracked mut thread_token = local.take_thread_token(); - let tracked mut checked_token = local.take_checked_token(); - let ghost mut cap_nat; - let ghost mut extend_nat; - //assert(page_inner.wf(page_ptr.page_id@, - // local.thread_token@.value.pages.index(page_ptr.page_id@), - // local.instance)); - proof { - cap_nat = capacity as nat; - extend_nat = extend as nat; - let reserved = local.page_reserved(page_id); - // PAPER CUT: this kind of assert is flaky - sub_distribute(reserved as int - capacity as int, extend as int, bsize as int); - assert((reserved as int - capacity as int) * bsize as int >= extend as int * bsize as int) - by (nonlinear_arith) - requires - (reserved as int - capacity as int) >= extend, - {} - assert((capacity as int) * (bsize as int) + (extend as int - 1) * (bsize as int) == ( - capacity as int + extend as int - 1) * (bsize as int)) by (nonlinear_arith); - /*assert(capacity as int + extend as int - 1 +#[verifier::spinoff_prover] +fn page_free_list_extend( + page_ptr: PagePtr, + bsize: usize, + extend: usize, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf_main(), + page_ptr.wf(), + page_ptr.is_used_and_primary(*old(local)), + old(local).page_capacity(page_ptr.page_id@) + extend as int <= old(local).page_reserved( + page_ptr.page_id@, + ), + // TODO this should have a special case for huge-page handling: + bsize == old(local).page_inner(page_ptr.page_id@).xblock_size, + bsize % 8 == 0, + extend >= 1, + ensures + local.wf_main(), + page_ptr.is_used_and_primary(*local), + local.page_organization == old(local).page_organization, + common_preserves(*old(local), *local), +{ + let ghost page_id = page_ptr.page_id@; + proof { + const_facts(); + let reserved = local.page_reserved(page_id); + let capacity = local.page_capacity(page_id); + let count = local.page_organization.pages[page_id].count.unwrap(); + local.page_organization.get_count_bound(page_id); + //assert(local.page_organization.pages.dom().contains(page_id)); + local.page_organization.used_offset0_has_count(page_id); + //assert(count + page_id.idx <= SLICES_PER_SEGMENT); + assert(capacity * bsize <= reserved * bsize) by (nonlinear_arith) + requires + capacity <= reserved, + bsize >= 0, + ; + assert((capacity + extend - 1) * bsize <= reserved * bsize) by (nonlinear_arith) + requires + capacity + extend - 1 <= reserved, + bsize >= 0, + ; + assert((capacity + extend) * bsize <= reserved * bsize) by (nonlinear_arith) + requires + capacity + extend <= reserved, + bsize >= 0, + ; + //assert(bsize == local.thread_token@.value.pages[page_id].block_size); + //assert(count == local.pages[page_id].count@.value.unwrap()); + } + let capacity = page_ptr.get_inner_ref(Tracked(&*local)).capacity; + let pag_start = 
calculate_page_start(page_ptr, bsize); + let start = calculate_page_block_at( + pag_start, + bsize, + capacity as usize, + Ghost(page_ptr.page_id@), + ); + //assert((capacity + extend) as usize as int == capacity + extend); + let x = capacity as usize + extend - 1; + let last = calculate_page_block_at(pag_start, bsize, x, Ghost(page_ptr.page_id@)); + let ghost rng_start = block_start_at(page_id, bsize as int, capacity as int); + let ghost rng_size = extend * bsize; + let ghost segment_id = page_id.segment_id; + let tracked mut seg = local.segments.tracked_remove(segment_id); + proof { + assert(extend * bsize >= 0) by (nonlinear_arith) + requires + extend >= 0, + bsize >= 0, + ; + segment_mem_has_reserved_range(*old(local), page_id, capacity + extend); + assert(seg.mem.pointsto_has_range(rng_start, rng_size)); + } + let tracked mut pt = seg.mem.take_points_to_range(rng_start, rng_size); + proof { + local.segments.tracked_insert(segment_id, seg); + } + let tracked mut thread_token = local.take_thread_token(); + let tracked mut checked_token = local.take_checked_token(); + let ghost mut cap_nat; + let ghost mut extend_nat; + //assert(page_inner.wf(page_ptr.page_id@, + // local.thread_token@.value.pages.index(page_ptr.page_id@), + // local.instance)); + proof { + cap_nat = capacity as nat; + extend_nat = extend as nat; + let reserved = local.page_reserved(page_id); + // PAPER CUT: this kind of assert is flaky + sub_distribute(reserved as int - capacity as int, extend as int, bsize as int); + assert((reserved as int - capacity as int) * bsize as int >= extend as int * bsize as int) + by (nonlinear_arith) + requires + (reserved as int - capacity as int) >= extend, + {} + assert((capacity as int) * (bsize as int) + (extend as int - 1) * (bsize as int) == ( + capacity as int + extend as int - 1) * (bsize as int)) by (nonlinear_arith); + /*assert(capacity as int + extend as int - 1 == capacity as int + (extend as int - 1)); assert(start.id() == pag_start.id() + capacity as int * bsize as int); assert(last.id() == pag_start.id() + (x as int) * bsize as int); @@ -18741,131 +18651,131 @@ mod alloc_generic { (capacity as int + extend as int - 1) * (bsize as int) - (capacity as int) * (bsize as int)); assert(last.id() == start.id() + ((extend as int - 1) * bsize as int));*/ - block_start_at_diff(page_ptr.page_id@, bsize as nat, cap_nat, cap_nat + extend_nat); - let page_id = page_ptr.page_id@; - let block_size = bsize as nat; - let ts = thread_token@.value; - assert forall|i: nat| - cap_nat <= i < cap_nat + extend_nat implies Mim::State::okay_to_add_block( - ts, - page_id, - i, - block_size, - ) by { - let slice_id = PageId { - segment_id: page_id.segment_id, - idx: BlockId::get_slice_idx(page_id, i, block_size), - }; - start_offset_le_slice_size(bsize as int); - assert(i * block_size >= 0) by (nonlinear_arith) - requires - i >= 0, - block_size >= 0, - ; - let reserved = local.page_reserved(page_id); - let capacity = local.page_capacity(page_id); - assert(i * block_size < reserved * block_size) by (nonlinear_arith) - requires - i < reserved, - block_size > 0, - ; - //assert(page_id.idx <= slice_id.idx); - let count = local.page_organization.pages[page_id].count.unwrap(); - //assert(slice_id.idx < page_id.idx + count); - local.page_organization.get_count_bound(page_id); - //assert(page_id.idx + count <= SLICES_PER_SEGMENT); - local.page_organization.get_offset_for_something_in_used_range(page_id, slice_id); - //assert(local.page_organization.pages.dom().contains(slice_id)); - 
//assert(local.page_organization.pages[slice_id].is_used); - //assert(local.page_organization.pages[slice_id].offset.is_some()); - //assert(local.page_organization.pages[slice_id].offset.unwrap() - // == slice_id.idx - page_id.idx); - //assert(ts.pages.dom().contains(slice_id)); - } - } - let tracked ( - Tracked(_thread_token), - Tracked(block_tokens), - Ghost(_s), - Tracked(_checked_token), - ) = local.instance.page_mk_block_tokens(// params - - local.thread_id, - page_ptr.page_id@, - cap_nat as nat, - cap_nat as nat + extend_nat as nat, - bsize as nat, - // input ghost state - thread_token, - checked_token, - ); - proof { - local.thread_token = _thread_token; - local.checked_token = _checked_token; - } - let tracked mut block_tokens = Map::tracked_map_keys( - block_tokens, - Map::::new( - |i: int| cap_nat <= i < cap_nat + extend_nat, - |i: int| - BlockId { - page_id: page_ptr.page_id@, - idx: i as nat, - slice_idx: BlockId::get_slice_idx(page_ptr.page_id@, i as nat, bsize as nat), - block_size: bsize as nat, - }, - ), - ); - // TODO - proof { - assert(start.id() % 8 == 0) by { - block_ptr_aligned_to_word(); - crate::linked_list::size_of_node(); - segment_start_mult8(page_id.segment_id); - start_offset_le_slice_size(bsize as int); - //assert(segment_start(page_id.segment_id) % 8 == 0); - assert(page_start(page_id) % 8 == 0); - assert(start_offset(bsize as int) % 8 == 0); - assert(pag_start % 8 == 0); - mod_mul(capacity as int, bsize as int, 8); - //assert((capacity * bsize) % 8 == 0) by(nonlinear_arith) - // requires bsize % 8 == 0; - } - assert forall|i: int| cap_nat <= i < cap_nat + extend_nat implies is_block_ptr( - block_start(block_tokens.index(i)@.key), - block_tokens.index(i)@.key, - ) by { - let block_id = block_tokens.index(i)@.key; - let block_size = bsize as int; - reveal(is_block_ptr); - get_block_start_defn(block_id); - crate::linked_list::size_of_node(); - start_offset_le_slice_size(block_size); - //assert(block_size >= 8); - //assert(block_id.page_id == page_id); - //assert(block_id.block_size == block_size); - //assert(page_id.segment_id == segment_id); - let reserved = local.page_reserved(page_id); - let capacity = local.page_capacity(page_id); - assert(i * block_size <= reserved * block_size) by (nonlinear_arith) - requires - i <= reserved, - block_size >= 0, - ; - //assert(i * block_size <= capacity * block_size); - //assert(block_start_at(page_id, block_size, block_id.idx as int) > - // segment_start(segment_id)); - //assert(block_start_at(page_id, block_size, block_id.idx as int) <= - // segment_start(segment_id) + SEGMENT_SIZE as int); - //assert(segment_start(segment_id) + (block_id.slice_idx * SLICE_SIZE) - // <= block_start_at(page_id, block_size, block_id.idx as int)); - //assert(i * block_size < - // i * block_size / SLICE_SIZE as int * SLICE_SIZE + SLICE_SIZE); - //assert(block_start_at(page_id, block_size, block_id.idx as int) - // < segment_start(segment_id) + (block_id.slice_idx * SLICE_SIZE) + SLICE_SIZE); - } - } - page_get_mut_inner!(page_ptr, local, page_inner => { + block_start_at_diff(page_ptr.page_id@, bsize as nat, cap_nat, cap_nat + extend_nat); + let page_id = page_ptr.page_id@; + let block_size = bsize as nat; + let ts = thread_token@.value; + assert forall|i: nat| + cap_nat <= i < cap_nat + extend_nat implies Mim::State::okay_to_add_block( + ts, + page_id, + i, + block_size, + ) by { + let slice_id = PageId { + segment_id: page_id.segment_id, + idx: BlockId::get_slice_idx(page_id, i, block_size), + }; + start_offset_le_slice_size(bsize as int); + 
assert(i * block_size >= 0) by (nonlinear_arith) + requires + i >= 0, + block_size >= 0, + ; + let reserved = local.page_reserved(page_id); + let capacity = local.page_capacity(page_id); + assert(i * block_size < reserved * block_size) by (nonlinear_arith) + requires + i < reserved, + block_size > 0, + ; + //assert(page_id.idx <= slice_id.idx); + let count = local.page_organization.pages[page_id].count.unwrap(); + //assert(slice_id.idx < page_id.idx + count); + local.page_organization.get_count_bound(page_id); + //assert(page_id.idx + count <= SLICES_PER_SEGMENT); + local.page_organization.get_offset_for_something_in_used_range(page_id, slice_id); + //assert(local.page_organization.pages.dom().contains(slice_id)); + //assert(local.page_organization.pages[slice_id].is_used); + //assert(local.page_organization.pages[slice_id].offset.is_some()); + //assert(local.page_organization.pages[slice_id].offset.unwrap() + // == slice_id.idx - page_id.idx); + //assert(ts.pages.dom().contains(slice_id)); + } + } + let tracked ( + Tracked(_thread_token), + Tracked(block_tokens), + Ghost(_s), + Tracked(_checked_token), + ) = local.instance.page_mk_block_tokens( // params + + local.thread_id, + page_ptr.page_id@, + cap_nat as nat, + cap_nat as nat + extend_nat as nat, + bsize as nat, + // input ghost state + thread_token, + checked_token, + ); + proof { + local.thread_token = _thread_token; + local.checked_token = _checked_token; + } + let tracked mut block_tokens = Map::tracked_map_keys( + block_tokens, + Map::::new( + |i: int| cap_nat <= i < cap_nat + extend_nat, + |i: int| + BlockId { + page_id: page_ptr.page_id@, + idx: i as nat, + slice_idx: BlockId::get_slice_idx(page_ptr.page_id@, i as nat, bsize as nat), + block_size: bsize as nat, + }, + ), + ); + // TODO + proof { + assert(start.id() % 8 == 0) by { + block_ptr_aligned_to_word(); + crate::linked_list::size_of_node(); + segment_start_mult8(page_id.segment_id); + start_offset_le_slice_size(bsize as int); + //assert(segment_start(page_id.segment_id) % 8 == 0); + assert(page_start(page_id) % 8 == 0); + assert(start_offset(bsize as int) % 8 == 0); + assert(pag_start % 8 == 0); + mod_mul(capacity as int, bsize as int, 8); + //assert((capacity * bsize) % 8 == 0) by(nonlinear_arith) + // requires bsize % 8 == 0; + } + assert forall|i: int| cap_nat <= i < cap_nat + extend_nat implies is_block_ptr( + block_start(block_tokens.index(i)@.key), + block_tokens.index(i)@.key, + ) by { + let block_id = block_tokens.index(i)@.key; + let block_size = bsize as int; + reveal(is_block_ptr); + get_block_start_defn(block_id); + crate::linked_list::size_of_node(); + start_offset_le_slice_size(block_size); + //assert(block_size >= 8); + //assert(block_id.page_id == page_id); + //assert(block_id.block_size == block_size); + //assert(page_id.segment_id == segment_id); + let reserved = local.page_reserved(page_id); + let capacity = local.page_capacity(page_id); + assert(i * block_size <= reserved * block_size) by (nonlinear_arith) + requires + i <= reserved, + block_size >= 0, + ; + //assert(i * block_size <= capacity * block_size); + //assert(block_start_at(page_id, block_size, block_id.idx as int) > + // segment_start(segment_id)); + //assert(block_start_at(page_id, block_size, block_id.idx as int) <= + // segment_start(segment_id) + SEGMENT_SIZE as int); + //assert(segment_start(segment_id) + (block_id.slice_idx * SLICE_SIZE) + // <= block_start_at(page_id, block_size, block_id.idx as int)); + //assert(i * block_size < + // i * block_size / SLICE_SIZE as int * SLICE_SIZE + 
SLICE_SIZE); + //assert(block_start_at(page_id, block_size, block_id.idx as int) + // < segment_start(segment_id) + (block_id.slice_idx * SLICE_SIZE) + SLICE_SIZE); + } + } + page_get_mut_inner!(page_ptr, local, page_inner => { page_inner.free.prepend_contiguous_blocks( start, last, bsize, // ghost args: @@ -18878,8 +18788,8 @@ mod alloc_generic { // but it's easier to just do it here to preserve local.wf() page_inner.capacity = page_inner.capacity + extend as u16; }); - proof { - /*assert forall |pid| + proof { + /*assert forall |pid| #[trigger] local.pages.dom().contains(pid) && local.thread_token@.value.pages.dom().contains(pid) implies local.pages.index(pid).wf( @@ -18898,7 +18808,7 @@ mod alloc_generic { assert(local.pages.index(pid).wf(pid, local.thread_token@.value.pages.index(pid), local.instance)); } }*/ - /*let blocksize = bsize as int; + /*let blocksize = bsize as int; assert((capacity + extend) * blocksize == capacity * blocksize + extend * blocksize); assert(local.page_capacity(page_id) == capacity + extend); assert(block_start_at(page_id, bsize as int, capacity as int + extend as int) @@ -18911,15 +18821,15 @@ mod alloc_generic { page_start(page_id) + start_offset(old(local).block_size(page_id)) + local.page_capacity(page_id) * old(local).block_size(page_id));*/ - block_start_at_diff(page_id, bsize as nat, capacity as nat, (capacity + extend) as nat); - preserves_mem_chunk_good_on_transfer_to_capacity(*old(local), *local, page_id); - assert(local.mem_chunk_good(segment_id)); - preserves_mem_chunk_good_except(*old(local), *local, segment_id); - assert(local.wf_main()); - } + block_start_at_diff(page_id, bsize as nat, capacity as nat, (capacity + extend) as nat); + preserves_mem_chunk_good_on_transfer_to_capacity(*old(local), *local, page_id); + assert(local.mem_chunk_good(segment_id)); + preserves_mem_chunk_good_except(*old(local), *local, segment_id); + assert(local.wf_main()); } +} - /* +/* static void mi_page_extend_free(mi_heap_t* heap, mi_page_t* page, mi_tld_t* tld) { MI_UNUSED(tld); mi_assert_expensive(mi_page_is_valid_init(page)); @@ -18971,37 +18881,37 @@ mod alloc_generic { } */ - const MIN_EXTEND: usize = 4; +const MIN_EXTEND: usize = 4; - const MAX_EXTEND_SIZE: u32 = 4096; +const MAX_EXTEND_SIZE: u32 = 4096; - pub fn page_extend_free(page_ptr: PagePtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf_main(), - page_ptr.wf(), - old(local).is_used_primary(page_ptr.page_id@), - old(local).pages[page_ptr.page_id@].inner@.value.unwrap().xblock_size % 8 == 0, - ensures - local.wf_main(), - local.is_used_primary(page_ptr.page_id@), - local.page_organization == old(local).page_organization, - common_preserves(*old(local), *local), - { - let page_inner = page_ptr.get_inner_ref(Tracked(&*local)); - /*proof { +pub fn page_extend_free(page_ptr: PagePtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf_main(), + page_ptr.wf(), + old(local).is_used_primary(page_ptr.page_id@), + old(local).pages[page_ptr.page_id@].inner@.value.unwrap().xblock_size % 8 == 0, + ensures + local.wf_main(), + local.is_used_primary(page_ptr.page_id@), + local.page_organization == old(local).page_organization, + common_preserves(*old(local), *local), +{ + let page_inner = page_ptr.get_inner_ref(Tracked(&*local)); + /*proof { assert(page_inner.wf(page_ptr.page_id@, local.thread_token@.value.pages.index(page_ptr.page_id@), local.instance)); }*/ - let reserved = page_inner.reserved; - let capacity = page_inner.capacity; - if capacity >= reserved { - return ; - }// Calculate 
the block size - // TODO should have special handling for huge blocks - - let bsize: usize = page_ptr.get_inner_ref(Tracked(&*local)).xblock_size as usize; - /*proof { + let reserved = page_inner.reserved; + let capacity = page_inner.capacity; + if capacity >= reserved { + return ; + } // Calculate the block size + // TODO should have special handling for huge blocks + + let bsize: usize = page_ptr.get_inner_ref(Tracked(&*local)).xblock_size as usize; + /*proof { let ghost page_id = page_ptr.page_id@; assert(local.page_organization.pages.dom().contains(page_id)); assert(page_organization_matches_token_page( @@ -19010,89 +18920,89 @@ mod alloc_generic { assert(local.is_used_primary(page_id)); assert(bsize != 0); }*/ - // Calculate extend amount - let mut max_extend: usize = if bsize >= MAX_EXTEND_SIZE as usize { - MIN_EXTEND - } else { - (MAX_EXTEND_SIZE / bsize as u32) as usize - }; - if max_extend < MIN_EXTEND { - max_extend = MIN_EXTEND; - } - let mut extend: usize = (reserved - capacity) as usize; - if extend > max_extend { - extend = max_extend; - } - page_free_list_extend(page_ptr, bsize, extend, Tracked(local)); - // page capacity is modified in page_free_list_extend, no need to do it here + // Calculate extend amount + let mut max_extend: usize = if bsize >= MAX_EXTEND_SIZE as usize { + MIN_EXTEND + } else { + (MAX_EXTEND_SIZE / bsize as u32) as usize + }; + if max_extend < MIN_EXTEND { + max_extend = MIN_EXTEND; + } + let mut extend: usize = (reserved - capacity) as usize; + if extend > max_extend { + extend = max_extend; } + page_free_list_extend(page_ptr, bsize, extend, Tracked(local)); + // page capacity is modified in page_free_list_extend, no need to do it here +} - fn heap_delayed_free_partial(heap: HeapPtr, Tracked(local): Tracked<&mut Local>) -> (b: bool) - requires - old(local).wf(), - heap.wf(), - heap.is_in(*old(local)), - ensures +fn heap_delayed_free_partial(heap: HeapPtr, Tracked(local): Tracked<&mut Local>) -> (b: bool) + requires + old(local).wf(), + heap.wf(), + heap.is_in(*old(local)), + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + let mut ll = heap.get_ref(Tracked(&*local)).thread_delayed_free.take(); + let mut all_freed = true; + while !ll.is_empty() + invariant local.wf(), + heap.wf(), + heap.is_in(*local), + ll.wf(), common_preserves(*old(local), *local), + ll.instance() == local.instance, + ll.heap_id() == Some(local.thread_token@.value.heap_id), { - let mut ll = heap.get_ref(Tracked(&*local)).thread_delayed_free.take(); - let mut all_freed = true; - while !ll.is_empty() - invariant - local.wf(), - heap.wf(), - heap.is_in(*local), - ll.wf(), - common_preserves(*old(local), *local), - ll.instance() == local.instance, - ll.heap_id() == Some(local.thread_token@.value.heap_id), - { - let (ptr, Tracked(perm), Tracked(block)) = ll.pop_block(); - proof { - //assert(block@.value.heap_id == Some(local.thread_token@.value.heap_id)); - local.instance.block_in_heap_has_valid_page( - local.thread_token@.key, - block@.key, - &local.thread_token, - &block, - ); - local.instance.get_block_properties( - local.thread_token@.key, - block@.key, - &local.thread_token, - &block, - ); - //assert(valid_block_token(block, local.instance)); - } - let tracked dealloc_inner = MimDeallocInner { - mim_instance: local.instance.clone(), - mim_block: block, - ptr: ptr.id(), - }; - let (success, Tracked(p_opt), Tracked(d_opt)) = crate::free::free_delayed_block( + let (ptr, Tracked(perm), Tracked(block)) = ll.pop_block(); + proof { + //assert(block@.value.heap_id == 
Some(local.thread_token@.value.heap_id)); + local.instance.block_in_heap_has_valid_page( + local.thread_token@.key, + block@.key, + &local.thread_token, + &block, + ); + local.instance.get_block_properties( + local.thread_token@.key, + block@.key, + &local.thread_token, + &block, + ); + //assert(valid_block_token(block, local.instance)); + } + let tracked dealloc_inner = MimDeallocInner { + mim_instance: local.instance.clone(), + mim_block: block, + ptr: ptr.id(), + }; + let (success, Tracked(p_opt), Tracked(d_opt)) = crate::free::free_delayed_block( + ptr, + Tracked(perm), + Tracked(dealloc_inner), + Tracked(&mut *local), + ); + if !success { + all_freed = false; + let tracked perm = p_opt.tracked_unwrap(); + let tracked dealloc = d_opt.tracked_unwrap(); + let tracked block = dealloc.mim_block; + let ptr = PPtr::from_usize(ptr.to_usize()); + heap.get_ref(Tracked(&*local)).thread_delayed_free.atomic_insert_block( ptr, Tracked(perm), - Tracked(dealloc_inner), - Tracked(&mut *local), + Tracked(block), ); - if !success { - all_freed = false; - let tracked perm = p_opt.tracked_unwrap(); - let tracked dealloc = d_opt.tracked_unwrap(); - let tracked block = dealloc.mim_block; - let ptr = PPtr::from_usize(ptr.to_usize()); - heap.get_ref(Tracked(&*local)).thread_delayed_free.atomic_insert_block( - ptr, - Tracked(perm), - Tracked(block), - ); - } } - return all_freed; } + return all_freed; +} - } // verus! +} // verus! } mod free { @@ -19115,82 +19025,81 @@ mod free { verus! { - // The algorithm for `free` is this: - // - // 1. Given the ptr, compute the segment and page it is on. - // - // 2. Check the 'thread_id' on the page. If it matches the thread we're on, then - // this is a 'local' transition (the common case). - // Otherwise, it's a 'thread' transition. - // - // If it's a LOCAL transition: - // - // Update the local_free list. - // - // If it's a THREAD transition: - // - // Attempt to update the thread_free list by first reading the atomic, then performing - // a CAS (repeating if necessary). The thread_free contains both the linked_list pointer - // and a 'delay' state. - // - // If the 'delay' state is NOT in 'UseDelayedFree' (the usual case): - // - // Update the thread_free atomically by inserting the new block to the front of the list. - // - // If the 'delay' state is in 'UseDelayedFree' (the unusual case): - // - // Set 'delay' to Freeing - // Follow the heap pointer to access the Heap - // Atomically add to the delayed free list. - // Set 'delay' to NoDelaying - // - // (The purpose of setting the 'Freeing' state is to ensure that the Heap remains - // valid while we perform this operation.) - // - // (Also note that setting the 'Freeing' state does not prevent the next thread that - // comes along from adding to the thread_free list.) - pub fn free( - ptr: PPtr, - Tracked(user_perm): Tracked, - Tracked(user_dealloc): Tracked>, - Tracked(local): Tracked<&mut Local>, - )// According to the Linux man pages, `ptr` is allowed to be NULL, - // in which case no operation is performed. 
- - requires - old(local).wf(), - ptr.id() != 0 ==> user_dealloc.is_some(), - ptr.id() != 0 ==> user_dealloc.unwrap().wf(), - ptr.id() != 0 ==> user_perm.is_range(ptr.id(), user_dealloc.unwrap().size), - ptr.id() != 0 ==> ptr.id() == user_dealloc.unwrap().ptr(), - ptr.id() != 0 ==> old(local).instance == user_dealloc.unwrap().instance(), - ensures - local.wf(), - local.instance == old(local).instance, - forall|heap: HeapPtr| heap.is_in(*old(local)) ==> heap.is_in(*local), - { - if ptr.to_usize() == 0 { - return ; - } - let tracked user_dealloc = user_dealloc.tracked_unwrap(); - let tracked dealloc; - let tracked perm; - proof { - let tracked (x, y) = user_dealloc.into_internal(user_perm); - dealloc = x; - perm = y; - } - // Calculate the pointer to the segment this block is in. - let segment_ptr = calculate_segment_ptr_from_block(ptr, Ghost(dealloc.block_id())); - let tracked segment_shared_access: &SegmentSharedAccess = - dealloc.mim_instance.alloc_guards_segment_shared_access( - dealloc.block_id(), - &dealloc.mim_block, - ); - let segment: &SegmentHeader = segment_ptr.borrow(Tracked(&segment_shared_access.points_to)); - // Determine if this operation is thread local or not - let segment_thread_id_u64 = - my_atomic_with_ghost!( +// The algorithm for `free` is this: +// +// 1. Given the ptr, compute the segment and page it is on. +// +// 2. Check the 'thread_id' on the page. If it matches the thread we're on, then +// this is a 'local' transition (the common case). +// Otherwise, it's a 'thread' transition. +// +// If it's a LOCAL transition: +// +// Update the local_free list. +// +// If it's a THREAD transition: +// +// Attempt to update the thread_free list by first reading the atomic, then performing +// a CAS (repeating if necessary). The thread_free contains both the linked_list pointer +// and a 'delay' state. +// +// If the 'delay' state is NOT in 'UseDelayedFree' (the usual case): +// +// Update the thread_free atomically by inserting the new block to the front of the list. +// +// If the 'delay' state is in 'UseDelayedFree' (the unusual case): +// +// Set 'delay' to Freeing +// Follow the heap pointer to access the Heap +// Atomically add to the delayed free list. +// Set 'delay' to NoDelaying +// +// (The purpose of setting the 'Freeing' state is to ensure that the Heap remains +// valid while we perform this operation.) +// +// (Also note that setting the 'Freeing' state does not prevent the next thread that +// comes along from adding to the thread_free list.) 
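[Editorial note: the comment block above describes the lock-free thread_free protocol that the verified `free` path implements. As a rough illustration only, here is a minimal sketch in plain Rust (no Verus ghost state) of the usual-case CAS loop it describes. The names `Block`, `push_thread_free`, `DELAY_MASK`, and `USE_DELAYED_FREE`, and the 2-bit packing of the delay state into the same word as the list head, are assumptions made for this sketch; the verified code uses its own `masked_ptr_delay_*` helpers and tracked tokens.]

use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative packing: low two bits of the thread_free word hold the delay
// state, the remaining bits hold the (4-byte-aligned) list head pointer.
const DELAY_MASK: usize = 0b11;
const USE_DELAYED_FREE: usize = 0b01;

// One node of the intrusive free list: the first word of a freed block
// stores a pointer to the next freed block.
struct Block {
    next: AtomicUsize,
}

// Try to push `block` onto the page's thread_free list. Returns false in the
// unusual UseDelayedFree case, where the caller must instead set the state to
// Freeing, push onto the heap's delayed free list, and then clear the state.
fn push_thread_free(thread_free: &AtomicUsize, block: &Block) -> bool {
    loop {
        let mask = thread_free.load(Ordering::SeqCst);
        if mask & DELAY_MASK == USE_DELAYED_FREE {
            // Unusual case: hand off to the delayed-free protocol.
            return false;
        }
        // Usual case: link the block in front of the current head, keeping
        // the delay bits unchanged.
        block.next.store(mask & !DELAY_MASK, Ordering::SeqCst);
        let new_mask = (block as *const Block as usize) | (mask & DELAY_MASK);
        if thread_free
            .compare_exchange_weak(mask, new_mask, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
        {
            return true;
        }
        // CAS failed: another thread updated the list or the delay state;
        // reload and retry.
    }
}

[Keeping the delay state and the list head in one atomic word is what makes the scheme sound: a single compare_exchange_weak validates both at once, so a block can never be pushed while the page has moved into the UseDelayedFree state behind the freeing thread's back.]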
+pub fn free( + ptr: PPtr, + Tracked(user_perm): Tracked, + Tracked(user_dealloc): Tracked>, + Tracked(local): Tracked<&mut Local>, +) // According to the Linux man pages, `ptr` is allowed to be NULL,// in which case no operation is performed.FORMATTER_NOT_INLINE_MARKER + + requires + old(local).wf(), + ptr.id() != 0 ==> user_dealloc.is_some(), + ptr.id() != 0 ==> user_dealloc.unwrap().wf(), + ptr.id() != 0 ==> user_perm.is_range(ptr.id(), user_dealloc.unwrap().size), + ptr.id() != 0 ==> ptr.id() == user_dealloc.unwrap().ptr(), + ptr.id() != 0 ==> old(local).instance == user_dealloc.unwrap().instance(), + ensures + local.wf(), + local.instance == old(local).instance, + forall|heap: HeapPtr| heap.is_in(*old(local)) ==> heap.is_in(*local), +{ + if ptr.to_usize() == 0 { + return ; + } + let tracked user_dealloc = user_dealloc.tracked_unwrap(); + let tracked dealloc; + let tracked perm; + proof { + let tracked (x, y) = user_dealloc.into_internal(user_perm); + dealloc = x; + perm = y; + } + // Calculate the pointer to the segment this block is in. + let segment_ptr = calculate_segment_ptr_from_block(ptr, Ghost(dealloc.block_id())); + let tracked segment_shared_access: &SegmentSharedAccess = + dealloc.mim_instance.alloc_guards_segment_shared_access( + dealloc.block_id(), + &dealloc.mim_block, + ); + let segment: &SegmentHeader = segment_ptr.borrow(Tracked(&segment_shared_access.points_to)); + // Determine if this operation is thread local or not + let segment_thread_id_u64 = + my_atomic_with_ghost!( &segment.thread_id => load(); returning thread_id_u64; ghost g => { @@ -19205,34 +19114,34 @@ mod free { } } ); - let (thread_id, Tracked(is_thread)) = crate::thread::thread_id(); - proof { - local.is_thread.agrees(is_thread); - } - let is_local = thread_id.thread_id == segment_thread_id_u64; - // Calculate the pointer to the PageHeader for the *slice* that this block is in. - // Remember this might not be the "main" PageHeader for this Page. - let slice_page_ptr = calculate_slice_page_ptr_from_block( - ptr, - segment_ptr, - Ghost(dealloc.block_id()), - ); - let tracked page_slice_shared_access: &PageSharedAccess = - dealloc.mim_instance.alloc_guards_page_slice_shared_access( - dealloc.block_id(), - &dealloc.mim_block, - ); - let slice_page: &Page = slice_page_ptr.borrow(Tracked(&page_slice_shared_access.points_to)); - // Use the 'offset' to calculate a pointer to the main PageHeader for this page. - let offset = slice_page.offset; - let page_ptr = calculate_page_ptr_subtract_offset( - slice_page_ptr, - offset, - Ghost(dealloc.block_id().page_id_for_slice()), - Ghost(dealloc.block_id().page_id), - ); - assert(is_page_ptr(page_ptr.id(), dealloc.block_id().page_id)); - /* + let (thread_id, Tracked(is_thread)) = crate::thread::thread_id(); + proof { + local.is_thread.agrees(is_thread); + } + let is_local = thread_id.thread_id == segment_thread_id_u64; + // Calculate the pointer to the PageHeader for the *slice* that this block is in. + // Remember this might not be the "main" PageHeader for this Page. + let slice_page_ptr = calculate_slice_page_ptr_from_block( + ptr, + segment_ptr, + Ghost(dealloc.block_id()), + ); + let tracked page_slice_shared_access: &PageSharedAccess = + dealloc.mim_instance.alloc_guards_page_slice_shared_access( + dealloc.block_id(), + &dealloc.mim_block, + ); + let slice_page: &Page = slice_page_ptr.borrow(Tracked(&page_slice_shared_access.points_to)); + // Use the 'offset' to calculate a pointer to the main PageHeader for this page. 
+ let offset = slice_page.offset; + let page_ptr = calculate_page_ptr_subtract_offset( + slice_page_ptr, + offset, + Ghost(dealloc.block_id().page_id_for_slice()), + Ghost(dealloc.block_id().page_id), + ); + assert(is_page_ptr(page_ptr.id(), dealloc.block_id().page_id)); + /* let tracked page_shared_access: &PageSharedAccess; proof { page_shared_access = dealloc.mim_instance.alloc_guards_page_shared_access( @@ -19240,20 +19149,20 @@ mod free { } let page: &Page = page_ptr.borrow(Tracked(&page_shared_access.points_to)); */ - let ghost page_id = dealloc.block_id().page_id; - let page = PagePtr { page_ptr, page_id: Ghost(page_id) }; - assert(page_ptr.id() != 0) by { - is_page_ptr_nonzero(page_ptr.id(), page_id); - }// Case based on whether this is thread local or not - - if likely(is_local) { - assert(local.pages.dom().contains(page_id)); - assert(page.is_in(*local)); - assert(page.wf()); - assert(local.is_used_primary(page_id)); - if likely(page.get_inner_ref(Tracked(&*local)).not_full_nor_aligned()) { - let used; - page_get_mut_inner!(page, local, page_inner => { + let ghost page_id = dealloc.block_id().page_id; + let page = PagePtr { page_ptr, page_id: Ghost(page_id) }; + assert(page_ptr.id() != 0) by { + is_page_ptr_nonzero(page_ptr.id(), page_id); + } // Case based on whether this is thread local or not + + if likely(is_local) { + assert(local.pages.dom().contains(page_id)); + assert(page.is_in(*local)); + assert(page.wf()); + assert(local.is_used_primary(page_id)); + if likely(page.get_inner_ref(Tracked(&*local)).not_full_nor_aligned()) { + let used; + page_get_mut_inner!(page, local, page_inner => { let tracked mim_block = dealloc.mim_block; proof { @@ -19269,96 +19178,96 @@ mod free { used = page_inner.used - 1; page_inner.used = used; }); - proof { - crate::os_mem_util::preserves_mem_chunk_good(*old(local), *local); - assert(local.wf()); - } - if unlikely(used == 0) { - crate::page::page_retire(page, Tracked(&mut *local)); - } - } else { - free_generic( - segment_ptr, - page, - true, - ptr, - Tracked(perm), - Tracked(dealloc), - Tracked(&mut *local), - ); + proof { + crate::os_mem_util::preserves_mem_chunk_good(*old(local), *local); + assert(local.wf()); + } + if unlikely(used == 0) { + crate::page::page_retire(page, Tracked(&mut *local)); } } else { free_generic( segment_ptr, page, - false, + true, ptr, Tracked(perm), Tracked(dealloc), Tracked(&mut *local), ); } + } else { + free_generic( + segment_ptr, + page, + false, + ptr, + Tracked(perm), + Tracked(dealloc), + Tracked(&mut *local), + ); } +} - fn free_generic( - segment: PPtr, - page: PagePtr, - is_local: bool, - p: PPtr, - Tracked(perm): Tracked, - Tracked(dealloc): Tracked, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf(), - dealloc.wf(), - perm.is_range(p.id(), dealloc.block_id().block_size as int), - p.id() == dealloc.ptr, - old(local).instance == dealloc.mim_instance, - page.wf(), - is_local ==> page.is_in(*old(local)), - is_local ==> old(local).is_used_primary(page.page_id@), - is_local ==> old(local).thread_token@.value.pages[page.page_id@].block_size - == dealloc.block_id().block_size, - page.page_id@ == dealloc.block_id().page_id, - ensures - local.wf(), - common_preserves(*old(local), *local), - { - // this has_aligned check could be a data race?? 
- //if page.get_inner_ref(Tracked(&*local)).get_has_aligned() { - // todo(); - //} - free_block(page, is_local, p, Tracked(perm), Tracked(dealloc), Tracked(&mut *local)); - } +fn free_generic( + segment: PPtr, + page: PagePtr, + is_local: bool, + p: PPtr, + Tracked(perm): Tracked, + Tracked(dealloc): Tracked, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf(), + dealloc.wf(), + perm.is_range(p.id(), dealloc.block_id().block_size as int), + p.id() == dealloc.ptr, + old(local).instance == dealloc.mim_instance, + page.wf(), + is_local ==> page.is_in(*old(local)), + is_local ==> old(local).is_used_primary(page.page_id@), + is_local ==> old(local).thread_token@.value.pages[page.page_id@].block_size + == dealloc.block_id().block_size, + page.page_id@ == dealloc.block_id().page_id, + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + // this has_aligned check could be a data race?? + //if page.get_inner_ref(Tracked(&*local)).get_has_aligned() { + // todo(); + //} + free_block(page, is_local, p, Tracked(perm), Tracked(dealloc), Tracked(&mut *local)); +} - fn free_block( - page: PagePtr, - is_local: bool, - ptr: PPtr, - Tracked(perm): Tracked, - Tracked(dealloc): Tracked, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf(), - dealloc.wf(), - perm.is_range(ptr.id(), dealloc.block_id().block_size as int), - ptr.id() == dealloc.ptr, - old(local).instance == dealloc.mim_instance, - page.wf(), - is_local ==> page.is_in(*old(local)), - is_local ==> old(local).is_used_primary(page.page_id@), - is_local ==> old(local).thread_token@.value.pages[page.page_id@].block_size - == dealloc.block_id().block_size, - page.page_id@ == dealloc.block_id().page_id, - ensures - local.wf(), - common_preserves(*old(local), *local), - { - if likely(is_local) { - let used; - page_get_mut_inner!(page, local, page_inner => { +fn free_block( + page: PagePtr, + is_local: bool, + ptr: PPtr, + Tracked(perm): Tracked, + Tracked(dealloc): Tracked, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf(), + dealloc.wf(), + perm.is_range(ptr.id(), dealloc.block_id().block_size as int), + ptr.id() == dealloc.ptr, + old(local).instance == dealloc.mim_instance, + page.wf(), + is_local ==> page.is_in(*old(local)), + is_local ==> old(local).is_used_primary(page.page_id@), + is_local ==> old(local).thread_token@.value.pages[page.page_id@].block_size + == dealloc.block_id().block_size, + page.page_id@ == dealloc.block_id().page_id, + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + if likely(is_local) { + let used; + page_get_mut_inner!(page, local, page_inner => { let tracked mim_block = dealloc.mim_block; proof { @@ -19374,110 +19283,110 @@ mod free { used = page_inner.used - 1; page_inner.used = used; }); - proof { - crate::os_mem_util::preserves_mem_chunk_good(*old(local), *local); - assert(local.wf()); - } - if unlikely(used == 0) { - crate::page::page_retire(page, Tracked(&mut *local)); - } else if unlikely(page.get_inner_ref(Tracked(&*local)).get_in_full()) { - crate::page::page_unfull(page, Tracked(&mut *local)); - } - } else { - free_block_mt(page, ptr, Tracked(perm), Tracked(dealloc), Tracked(&mut *local)); + proof { + crate::os_mem_util::preserves_mem_chunk_good(*old(local), *local); + assert(local.wf()); } + if unlikely(used == 0) { + crate::page::page_retire(page, Tracked(&mut *local)); + } else if unlikely(page.get_inner_ref(Tracked(&*local)).get_in_full()) { + crate::page::page_unfull(page, Tracked(&mut *local)); + } + } else { + 
free_block_mt(page, ptr, Tracked(perm), Tracked(dealloc), Tracked(&mut *local)); } +} - fn free_block_mt( - page: PagePtr, - ptr: PPtr, - Tracked(perm): Tracked, - Tracked(dealloc): Tracked, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf(), +fn free_block_mt( + page: PagePtr, + ptr: PPtr, + Tracked(perm): Tracked, + Tracked(dealloc): Tracked, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf(), + dealloc.wf(), + perm.is_range(ptr.id(), dealloc.block_id().block_size as int), + ptr.id() == dealloc.ptr, + old(local).instance == dealloc.mim_instance, + page.page_id@ == dealloc.block_id().page_id, + page.wf(), + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + // Based on _mi_free_block_mt + // TODO check the segment kind + let tracked mut perm = perm; + let tracked mut delay_actor_token_opt: Option = None; + let tracked MimDeallocInner { mim_block, mim_instance, .. } = dealloc; + let tracked mut mim_block_opt = Some(mim_block); + let ptr = PPtr::::from_usize(ptr.to_usize()); + let mut use_delayed; + loop + invariant dealloc.wf(), + mim_block_opt == Some(dealloc.mim_block), + mim_instance == dealloc.mim_instance, + mim_instance == local.instance, perm.is_range(ptr.id(), dealloc.block_id().block_size as int), ptr.id() == dealloc.ptr, - old(local).instance == dealloc.mim_instance, - page.page_id@ == dealloc.block_id().page_id, - page.wf(), - ensures + is_page_ptr(page.page_ptr.id(), dealloc.block_id().page_id), local.wf(), common_preserves(*old(local), *local), + //*page == + // dealloc.mim_block@.value.page_shared_access.points_to@.value.get_Some_0(), + //ensures + // use_delayed ==> (match delay_actor_token_opt { + // None => false, + // Some(tok) => tok@.instance == dealloc.mim_instance + // && tok@.key == dealloc.block_id().page_id + // }), { - // Based on _mi_free_block_mt - // TODO check the segment kind - let tracked mut perm = perm; - let tracked mut delay_actor_token_opt: Option = None; - let tracked MimDeallocInner { mim_block, mim_instance, .. 
} = dealloc; - let tracked mut mim_block_opt = Some(mim_block); - let ptr = PPtr::::from_usize(ptr.to_usize()); - let mut use_delayed; - loop - invariant - dealloc.wf(), - mim_block_opt == Some(dealloc.mim_block), - mim_instance == dealloc.mim_instance, - mim_instance == local.instance, - perm.is_range(ptr.id(), dealloc.block_id().block_size as int), - ptr.id() == dealloc.ptr, - is_page_ptr(page.page_ptr.id(), dealloc.block_id().page_id), - local.wf(), - common_preserves(*old(local), *local), - //*page == - // dealloc.mim_block@.value.page_shared_access.points_to@.value.get_Some_0(), - //ensures - // use_delayed ==> (match delay_actor_token_opt { - // None => false, - // Some(tok) => tok@.instance == dealloc.mim_instance - // && tok@.key == dealloc.block_id().page_id - // }), - { - let tracked page_shared_access: &PageSharedAccess = - mim_instance.alloc_guards_page_shared_access( - dealloc.block_id(), - mim_block_opt.tracked_borrow(), - ); - let pag: &Page = page.page_ptr.borrow(Tracked(&page_shared_access.points_to)); - let ghost mut next_ptr; - let ghost mut delay; - let mask = - my_atomic_with_ghost!(&pag.xthread_free.atomic => load(); ghost g => { + let tracked page_shared_access: &PageSharedAccess = + mim_instance.alloc_guards_page_shared_access( + dealloc.block_id(), + mim_block_opt.tracked_borrow(), + ); + let pag: &Page = page.page_ptr.borrow(Tracked(&page_shared_access.points_to)); + let ghost mut next_ptr; + let ghost mut delay; + let mask = + my_atomic_with_ghost!(&pag.xthread_free.atomic => load(); ghost g => { pag.xthread_free.emp_inst.borrow().agree(pag.xthread_free.emp.borrow(), &g.0); next_ptr = g.1.unwrap().1.ptr(); delay = g.1.unwrap().0.view().value; // TODO fix macro syntax in atomic_with_ghost }); - use_delayed = masked_ptr_delay_get_is_use_delayed(mask, Ghost(delay), Ghost(next_ptr)); - let mask1; - let tracked mut ptr_mem = None; - let tracked mut raw_mem = None; - if unlikely(use_delayed) { - mask1 = masked_ptr_delay_set_freeing(mask, Ghost(delay), Ghost(next_ptr)); - } else { - proof { - block_size_ge_word(); - block_ptr_aligned_to_word(); - is_block_ptr_mult4(ptr.id(), dealloc.block_id()); - } - // *ptr = mask.next_ptr - let (ptr_mem0, raw_mem0) = LL::block_write_ptr( - ptr, - Tracked(perm), - masked_ptr_delay_get_ptr(mask, Ghost(delay), Ghost(next_ptr)), - ); - proof { - perm = PointsToRaw::empty(); - ptr_mem = Some(ptr_mem0.get()); - raw_mem = Some(raw_mem0.get()); - } - // mask1 = mask (set next_ptr to ptr) - mask1 = masked_ptr_delay_set_ptr(mask, ptr, Ghost(delay), Ghost(next_ptr)); + use_delayed = masked_ptr_delay_get_is_use_delayed(mask, Ghost(delay), Ghost(next_ptr)); + let mask1; + let tracked mut ptr_mem = None; + let tracked mut raw_mem = None; + if unlikely(use_delayed) { + mask1 = masked_ptr_delay_set_freeing(mask, Ghost(delay), Ghost(next_ptr)); + } else { + proof { + block_size_ge_word(); + block_ptr_aligned_to_word(); + is_block_ptr_mult4(ptr.id(), dealloc.block_id()); } - assert(pag.xthread_free.instance == mim_instance); - let cas_result = - my_atomic_with_ghost!( + // *ptr = mask.next_ptr + let (ptr_mem0, raw_mem0) = LL::block_write_ptr( + ptr, + Tracked(perm), + masked_ptr_delay_get_ptr(mask, Ghost(delay), Ghost(next_ptr)), + ); + proof { + perm = PointsToRaw::empty(); + ptr_mem = Some(ptr_mem0.get()); + raw_mem = Some(raw_mem0.get()); + } + // mask1 = mask (set next_ptr to ptr) + mask1 = masked_ptr_delay_set_ptr(mask, ptr, Ghost(delay), Ghost(next_ptr)); + } + assert(pag.xthread_free.instance == mim_instance); + let cas_result = + 
my_atomic_with_ghost!( &pag.xthread_free.atomic => compare_exchange_weak(mask, mask1); update v_old -> v_new; returning cas_result; @@ -19535,21 +19444,21 @@ mod free { assert(v_new as int == ghost_ll.ptr().id() + delay_token@.value.to_int()); assert(ghost_ll.ptr().id() % 4 == 0); }); - match cas_result { - Result::Err(_) => {}, - Result::Ok(_) => { - if unlikely(use_delayed) { - // Lookup the heap ptr - let tracked mut delay_actor_token; - let ghost mut heap_id; - let tracked page_shared_access: &PageSharedAccess = - mim_instance.alloc_guards_page_shared_access( - dealloc.block_id(), - mim_block_opt.tracked_borrow(), - ); - let pag: &Page = page.page_ptr.borrow(Tracked(&page_shared_access.points_to)); - let heap_ptr_int = - my_atomic_with_ghost!( + match cas_result { + Result::Err(_) => {}, + Result::Ok(_) => { + if unlikely(use_delayed) { + // Lookup the heap ptr + let tracked mut delay_actor_token; + let ghost mut heap_id; + let tracked page_shared_access: &PageSharedAccess = + mim_instance.alloc_guards_page_shared_access( + dealloc.block_id(), + mim_block_opt.tracked_borrow(), + ); + let pag: &Page = page.page_ptr.borrow(Tracked(&page_shared_access.points_to)); + let heap_ptr_int = + my_atomic_with_ghost!( &pag.xheap.atomic => load(); ghost g => { @@ -19567,37 +19476,37 @@ mod free { delay_actor_token = tok; heap_id = g.1.unwrap().view().value; }); - let heap_ptr = PPtr::::from_usize(heap_ptr_int); - let tracked heap_shared_access: &HeapSharedAccess; - proof { - heap_shared_access = - mim_instance.delay_guards_heap_shared_access( - dealloc.block_id().page_id, - &delay_actor_token, - ); - assert(heap_shared_access.wf2(heap_id, mim_instance)); - } - let heap: &Heap = heap_ptr.borrow(Tracked(&heap_shared_access.points_to)); - let tracked mim_block = mim_block_opt.tracked_unwrap(); - let tracked mim_block = local.instance.block_set_heap_id( - mim_block@.key, - mim_block, - &delay_actor_token, - ); - heap.thread_delayed_free.atomic_insert_block( - ptr, - Tracked(perm), - Tracked(mim_block), - ); - let tracked page_shared_access: &PageSharedAccess = - mim_instance.delay_guards_page_shared_access( + let heap_ptr = PPtr::::from_usize(heap_ptr_int); + let tracked heap_shared_access: &HeapSharedAccess; + proof { + heap_shared_access = + mim_instance.delay_guards_heap_shared_access( dealloc.block_id().page_id, &delay_actor_token, ); - let pag: &Page = page.page_ptr.borrow(Tracked(&page_shared_access.points_to)); - //pag.xthread_free.exit_delaying_state(Tracked(delay_actor_token)); - // have to inline this bc of lifetimes - my_atomic_with_ghost!( + assert(heap_shared_access.wf2(heap_id, mim_instance)); + } + let heap: &Heap = heap_ptr.borrow(Tracked(&heap_shared_access.points_to)); + let tracked mim_block = mim_block_opt.tracked_unwrap(); + let tracked mim_block = local.instance.block_set_heap_id( + mim_block@.key, + mim_block, + &delay_actor_token, + ); + heap.thread_delayed_free.atomic_insert_block( + ptr, + Tracked(perm), + Tracked(mim_block), + ); + let tracked page_shared_access: &PageSharedAccess = + mim_instance.delay_guards_page_shared_access( + dealloc.block_id().page_id, + &delay_actor_token, + ); + let pag: &Page = page.page_ptr.borrow(Tracked(&page_shared_access.points_to)); + //pag.xthread_free.exit_delaying_state(Tracked(delay_actor_token)); + // have to inline this bc of lifetimes + my_atomic_with_ghost!( &pag.xthread_free.atomic => fetch_xor(3); update v_old -> v_new; ghost g => { @@ -19620,78 +19529,78 @@ mod free { by (bit_vector); } ); - } - return ; - }, - } + } + return ; + }, 
} } +} - pub fn free_delayed_block( - ptr: PPtr, - Tracked(perm): Tracked, - Tracked(dealloc): Tracked, - Tracked(local): Tracked<&mut Local>, - ) -> (res: (bool, Tracked>, Tracked>)) - requires - old(local).wf(), - dealloc.wf(), - perm.is_range(ptr.id(), dealloc.block_id().block_size as int), - ptr.id() == dealloc.ptr, - old(local).instance == dealloc.mim_instance, - dealloc.mim_block@.value.heap_id == Some(old(local).thread_token@.value.heap_id), - ensures - local.wf(), - common_preserves(*old(local), *local), - !res.0 ==> res.1@ == Some(perm), - !res.0 ==> res.2@ == Some(dealloc), - { - let ghost block_id = dealloc.mim_block@.key; - let segment = crate::layout::calculate_segment_ptr_from_block(ptr, Ghost(block_id)); - let slice_page_ptr = crate::layout::calculate_slice_page_ptr_from_block( - ptr, - segment, - Ghost(block_id), - ); - let tracked page_slice_shared_access: &PageSharedAccess = - local.instance.alloc_guards_page_slice_shared_access(block_id, &dealloc.mim_block); - let slice_page: &Page = slice_page_ptr.borrow(Tracked(&page_slice_shared_access.points_to)); - let offset = slice_page.offset; - let page_ptr = crate::layout::calculate_page_ptr_subtract_offset( - slice_page_ptr, - offset, - Ghost(block_id.page_id_for_slice()), - Ghost(block_id.page_id), +pub fn free_delayed_block( + ptr: PPtr, + Tracked(perm): Tracked, + Tracked(dealloc): Tracked, + Tracked(local): Tracked<&mut Local>, +) -> (res: (bool, Tracked>, Tracked>)) + requires + old(local).wf(), + dealloc.wf(), + perm.is_range(ptr.id(), dealloc.block_id().block_size as int), + ptr.id() == dealloc.ptr, + old(local).instance == dealloc.mim_instance, + dealloc.mim_block@.value.heap_id == Some(old(local).thread_token@.value.heap_id), + ensures + local.wf(), + common_preserves(*old(local), *local), + !res.0 ==> res.1@ == Some(perm), + !res.0 ==> res.2@ == Some(dealloc), +{ + let ghost block_id = dealloc.mim_block@.key; + let segment = crate::layout::calculate_segment_ptr_from_block(ptr, Ghost(block_id)); + let slice_page_ptr = crate::layout::calculate_slice_page_ptr_from_block( + ptr, + segment, + Ghost(block_id), + ); + let tracked page_slice_shared_access: &PageSharedAccess = + local.instance.alloc_guards_page_slice_shared_access(block_id, &dealloc.mim_block); + let slice_page: &Page = slice_page_ptr.borrow(Tracked(&page_slice_shared_access.points_to)); + let offset = slice_page.offset; + let page_ptr = crate::layout::calculate_page_ptr_subtract_offset( + slice_page_ptr, + offset, + Ghost(block_id.page_id_for_slice()), + Ghost(block_id.page_id), + ); + assert(crate::layout::is_page_ptr(page_ptr.id(), block_id.page_id)); + let ghost page_id = dealloc.block_id().page_id; + assert(page_ptr.id() != 0) by { + is_page_ptr_nonzero(page_ptr.id(), page_id); + } + let page = PagePtr { page_ptr: page_ptr, page_id: Ghost(block_id.page_id) }; + proof { + local.instance.block_in_heap_has_valid_page( + local.thread_token@.key, + dealloc.mim_block@.key, + &local.thread_token, + &dealloc.mim_block, ); - assert(crate::layout::is_page_ptr(page_ptr.id(), block_id.page_id)); - let ghost page_id = dealloc.block_id().page_id; - assert(page_ptr.id() != 0) by { - is_page_ptr_nonzero(page_ptr.id(), page_id); - } - let page = PagePtr { page_ptr: page_ptr, page_id: Ghost(block_id.page_id) }; - proof { - local.instance.block_in_heap_has_valid_page( - local.thread_token@.key, - dealloc.mim_block@.key, - &local.thread_token, - &dealloc.mim_block, - ); - } - assert(page.is_in(*local)); - assert(page.is_used_and_primary(*local)); - 
assert(local.thread_token@.value.pages[page.page_id@].block_size - == dealloc.block_id().block_size); - if !crate::page::page_try_use_delayed_free(page, 0, false, Tracked(&*local)) { - return (false, Tracked(Some(perm)), Tracked(Some(dealloc))); - } - crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); - assert(local.thread_token@.value.pages[page.page_id@].block_size - == dealloc.block_id().block_size); - crate::free::free_block(page, true, ptr, Tracked(perm), Tracked(dealloc), Tracked(&mut *local)); - return (true, Tracked(None), Tracked(None)); } + assert(page.is_in(*local)); + assert(page.is_used_and_primary(*local)); + assert(local.thread_token@.value.pages[page.page_id@].block_size + == dealloc.block_id().block_size); + if !crate::page::page_try_use_delayed_free(page, 0, false, Tracked(&*local)) { + return (false, Tracked(Some(perm)), Tracked(Some(dealloc))); + } + crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); + assert(local.thread_token@.value.pages[page.page_id@].block_size + == dealloc.block_id().block_size); + crate::free::free_block(page, true, ptr, Tracked(perm), Tracked(dealloc), Tracked(&mut *local)); + return (true, Tracked(None), Tracked(None)); +} - } // verus! +} // verus! } mod realloc { @@ -19714,7 +19623,7 @@ mod realloc { verus! { - /* +/* #[inline(always)] fn usable_size(p: PPtr, Tracked(user_perm): Tracked<&ptr::PointsToRaw>, @@ -19778,7 +19687,7 @@ mod realloc { } */ - } // verus! +} // verus! } mod segment { @@ -19814,7 +19723,7 @@ mod segment { verus! { - /* +/* mi_page_t* _mi_segment_page_alloc(mi_heap_t* heap, size_t block_size, size_t page_alignment, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_page_t* page; if mi_unlikely(page_alignment > MI_ALIGNMENT_MAX) { @@ -19840,55 +19749,55 @@ mod segment { return page; } */ - pub open spec fn good_count_for_block_size(block_size: int, count: int) -> bool { - count * SLICE_SIZE < block_size * 0x10000 - } +pub open spec fn good_count_for_block_size(block_size: int, count: int) -> bool { + count * SLICE_SIZE < block_size * 0x10000 +} - pub fn segment_page_alloc( - heap: HeapPtr, - block_size: usize, - page_alignment: usize, - tld: TldPtr, - Tracked(local): Tracked<&mut Local>, - ) -> (page_ptr: PagePtr) - requires - old(local).wf(), - tld.wf(), - tld.is_in(*old(local)), - heap.wf(), - heap.is_in(*old(local)), - 2 <= block_size, - ensures - local.wf_main(), - common_preserves(*old(local), *local), - (page_ptr.page_ptr.id() != 0 ==> page_ptr.wf() && page_ptr.is_in(*local) - && local.page_organization.popped == Popped::Ready(page_ptr.page_id@, true) - && page_init_is_committed(page_ptr.page_id@, *local) && good_count_for_block_size( - block_size as int, - local.page_organization.pages[page_ptr.page_id@].count.unwrap() as int, - )), - page_ptr.page_ptr.id() == 0 ==> local.wf(), - { - proof { - const_facts(); - } - if unlikely(page_alignment > ALIGNMENT_MAX as usize) { - todo(); - } - if block_size <= SMALL_OBJ_SIZE_MAX as usize { - segments_page_alloc(heap, block_size, block_size, tld, Tracked(&mut *local)) - } else if block_size <= MEDIUM_OBJ_SIZE_MAX as usize { - segments_page_alloc(heap, MEDIUM_PAGE_SIZE as usize, block_size, tld, Tracked(&mut *local)) - } else if block_size <= LARGE_OBJ_SIZE_MAX as usize { - segments_page_alloc(heap, block_size, block_size, tld, Tracked(&mut *local)) - } else { - todo(); - loop { - } +pub fn segment_page_alloc( + heap: HeapPtr, + block_size: usize, + page_alignment: usize, + tld: TldPtr, + Tracked(local): Tracked<&mut Local>, +) 
-> (page_ptr: PagePtr) + requires + old(local).wf(), + tld.wf(), + tld.is_in(*old(local)), + heap.wf(), + heap.is_in(*old(local)), + 2 <= block_size, + ensures + local.wf_main(), + common_preserves(*old(local), *local), + (page_ptr.page_ptr.id() != 0 ==> page_ptr.wf() && page_ptr.is_in(*local) + && local.page_organization.popped == Popped::Ready(page_ptr.page_id@, true) + && page_init_is_committed(page_ptr.page_id@, *local) && good_count_for_block_size( + block_size as int, + local.page_organization.pages[page_ptr.page_id@].count.unwrap() as int, + )), + page_ptr.page_ptr.id() == 0 ==> local.wf(), +{ + proof { + const_facts(); + } + if unlikely(page_alignment > ALIGNMENT_MAX as usize) { + todo(); + } + if block_size <= SMALL_OBJ_SIZE_MAX as usize { + segments_page_alloc(heap, block_size, block_size, tld, Tracked(&mut *local)) + } else if block_size <= MEDIUM_OBJ_SIZE_MAX as usize { + segments_page_alloc(heap, MEDIUM_PAGE_SIZE as usize, block_size, tld, Tracked(&mut *local)) + } else if block_size <= LARGE_OBJ_SIZE_MAX as usize { + segments_page_alloc(heap, block_size, block_size, tld, Tracked(&mut *local)) + } else { + todo(); + loop { } } +} - /* +/* static mi_page_t* mi_segments_page_alloc(mi_heap_t* heap, mi_page_kind_t page_kind, size_t required, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_assert_internal(required <= MI_LARGE_OBJ_SIZE_MAX && page_kind <= MI_PAGE_LARGE); @@ -19916,51 +19825,51 @@ mod segment { } */ - fn segments_page_alloc( - heap: HeapPtr, - required: usize, - block_size: usize, - tld: TldPtr, - Tracked(local): Tracked<&mut Local>, - ) -> (page_ptr: PagePtr) - requires - old(local).wf(), - tld.wf(), - tld.is_in(*old(local)), - heap.wf(), - heap.is_in(*old(local)), - 2 <= block_size <= LARGE_OBJ_SIZE_MAX, - 1 <= required <= LARGE_OBJ_SIZE_MAX, - (if block_size <= SMALL_OBJ_SIZE_MAX { - required == block_size - } else if block_size <= MEDIUM_OBJ_SIZE_MAX { - required == MEDIUM_PAGE_SIZE - } else { - required == block_size - }), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - (page_ptr.page_ptr.id() != 0 ==> page_ptr.wf() && page_ptr.is_in(*local) - && local.page_organization.popped == Popped::Ready(page_ptr.page_id@, true) - && page_init_is_committed(page_ptr.page_id@, *local) && good_count_for_block_size( - block_size as int, - local.page_organization.pages[page_ptr.page_id@].count.unwrap() as int, - )), - page_ptr.page_ptr.id() == 0 ==> local.wf(), - { - proof { - const_facts(); - } - let alignment: usize = if required > MEDIUM_PAGE_SIZE as usize { - MEDIUM_PAGE_SIZE as usize +fn segments_page_alloc( + heap: HeapPtr, + required: usize, + block_size: usize, + tld: TldPtr, + Tracked(local): Tracked<&mut Local>, +) -> (page_ptr: PagePtr) + requires + old(local).wf(), + tld.wf(), + tld.is_in(*old(local)), + heap.wf(), + heap.is_in(*old(local)), + 2 <= block_size <= LARGE_OBJ_SIZE_MAX, + 1 <= required <= LARGE_OBJ_SIZE_MAX, + (if block_size <= SMALL_OBJ_SIZE_MAX { + required == block_size + } else if block_size <= MEDIUM_OBJ_SIZE_MAX { + required == MEDIUM_PAGE_SIZE } else { - SLICE_SIZE as usize - }; - let page_size = align_up(required, alignment); - let slices_needed = page_size / SLICE_SIZE as usize; - proof { - /*let b = (block_size as int) <= (SMALL_OBJ_SIZE_MAX as int); + required == block_size + }), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + (page_ptr.page_ptr.id() != 0 ==> page_ptr.wf() && page_ptr.is_in(*local) + && local.page_organization.popped == Popped::Ready(page_ptr.page_id@, true) 
+ && page_init_is_committed(page_ptr.page_id@, *local) && good_count_for_block_size( + block_size as int, + local.page_organization.pages[page_ptr.page_id@].count.unwrap() as int, + )), + page_ptr.page_ptr.id() == 0 ==> local.wf(), +{ + proof { + const_facts(); + } + let alignment: usize = if required > MEDIUM_PAGE_SIZE as usize { + MEDIUM_PAGE_SIZE as usize + } else { + SLICE_SIZE as usize + }; + let page_size = align_up(required, alignment); + let slices_needed = page_size / SLICE_SIZE as usize; + proof { + /*let b = (block_size as int) <= (SMALL_OBJ_SIZE_MAX as int); if b { assert(alignment == SLICE_SIZE); assert(page_size == SLICE_SIZE); @@ -19970,66 +19879,66 @@ mod segment { } else { assert(page_size < block_size * 0x10000); }*/ - assert(good_count_for_block_size(block_size as int, slices_needed as int)); - } - proof { - assert(page_size == slices_needed * SLICE_SIZE as nat) by { - assert(MEDIUM_PAGE_SIZE as int % SLICE_SIZE as int == 0); - assert(SLICE_SIZE as int % SLICE_SIZE as int == 0); - assert(alignment as int % SLICE_SIZE as int == 0); - assert(page_size as int % alignment as int == 0); - mod_trans(page_size as int, alignment as int, SLICE_SIZE as int); - assert(page_size as int % SLICE_SIZE as int == 0); - } - assert(1 <= slices_needed <= SLICES_PER_SEGMENT); - } - let page_ptr = segments_page_find_and_allocate( + assert(good_count_for_block_size(block_size as int, slices_needed as int)); + } + proof { + assert(page_size == slices_needed * SLICE_SIZE as nat) by { + assert(MEDIUM_PAGE_SIZE as int % SLICE_SIZE as int == 0); + assert(SLICE_SIZE as int % SLICE_SIZE as int == 0); + assert(alignment as int % SLICE_SIZE as int == 0); + assert(page_size as int % alignment as int == 0); + mod_trans(page_size as int, alignment as int, SLICE_SIZE as int); + assert(page_size as int % SLICE_SIZE as int == 0); + } + assert(1 <= slices_needed <= SLICES_PER_SEGMENT); + } + let page_ptr = segments_page_find_and_allocate( + slices_needed, + tld, + Tracked(&mut *local), + Ghost(block_size as nat), + ); + if page_ptr.page_ptr.to_usize() == 0 { + let roa = segment_reclaim_or_alloc( + heap, slices_needed, + block_size, tld, Tracked(&mut *local), - Ghost(block_size as nat), ); - if page_ptr.page_ptr.to_usize() == 0 { - let roa = segment_reclaim_or_alloc( - heap, - slices_needed, - block_size, - tld, - Tracked(&mut *local), - ); - if roa.segment_ptr.to_usize() == 0 { - return PagePtr::null(); - } else { - return segments_page_alloc(heap, required, block_size, tld, Tracked(&mut *local)); - } + if roa.segment_ptr.to_usize() == 0 { + return PagePtr::null(); } else { - return page_ptr; + return segments_page_alloc(heap, required, block_size, tld, Tracked(&mut *local)); } + } else { + return page_ptr; } +} - fn segment_reclaim_or_alloc( - heap: HeapPtr, - needed_slices: usize, - block_size: usize, - tld: TldPtr, - Tracked(local): Tracked<&mut Local>, - ) -> (segment_ptr: SegmentPtr) - requires - old(local).wf(), - tld.wf(), - tld.is_in(*old(local)), - heap.wf(), - heap.is_in(*old(local)), - ensures - local.wf(), - common_preserves(*old(local), *local), - { - // TODO reclaiming - let arena_id = heap.get_arena_id(Tracked(&*local)); - segment_alloc(0, 0, arena_id, tld, Tracked(&mut *local)) - } +fn segment_reclaim_or_alloc( + heap: HeapPtr, + needed_slices: usize, + block_size: usize, + tld: TldPtr, + Tracked(local): Tracked<&mut Local>, +) -> (segment_ptr: SegmentPtr) + requires + old(local).wf(), + tld.wf(), + tld.is_in(*old(local)), + heap.wf(), + heap.is_in(*old(local)), + ensures + local.wf(), + 
common_preserves(*old(local), *local), +{ + // TODO reclaiming + let arena_id = heap.get_arena_id(Tracked(&*local)); + segment_alloc(0, 0, arena_id, tld, Tracked(&mut *local)) +} - /* +/* static mi_page_t* mi_segments_page_find_and_allocate(size_t slice_count, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld) { mi_assert_internal(slice_count*MI_SEGMENT_SLICE_SIZE <= MI_LARGE_OBJ_SIZE_MAX); // search from best fit up @@ -20065,42 +19974,71 @@ mod segment { } */ - #[verifier::spinoff_prover] - fn segments_page_find_and_allocate( - slice_count0: usize, - tld_ptr: TldPtr, - Tracked(local): Tracked<&mut Local>, - Ghost(block_size): Ghost, - ) -> (page_ptr: PagePtr) - requires - old(local).wf(), +#[verifier::spinoff_prover] +fn segments_page_find_and_allocate( + slice_count0: usize, + tld_ptr: TldPtr, + Tracked(local): Tracked<&mut Local>, + Ghost(block_size): Ghost, +) -> (page_ptr: PagePtr) + requires + old(local).wf(), + tld_ptr.wf(), + tld_ptr.is_in(*old(local)), + 1 <= slice_count0 <= SLICES_PER_SEGMENT, + ensures + local.wf_main(), + common_preserves(*old(local), *local), + (page_ptr.page_ptr.id() != 0 ==> page_ptr.wf() && page_ptr.is_in( + *local, + ) //&& allocated_block_tokens(blocks@, page_ptr.page_id@, block_size, n_blocks, local.instance) + && local.page_organization.popped == Popped::Ready(page_ptr.page_id@, true) + && page_init_is_committed(page_ptr.page_id@, *local) && (slice_count0 > 0 + ==> local.page_organization.pages[page_ptr.page_id@].count == Some( + slice_count0 as nat, + ))), + (page_ptr.page_ptr.id() == 0 ==> local.wf()), +{ + let mut sbin_idx = slice_bin(slice_count0); + let slice_count = if slice_count0 == 0 { + 1 + } else { + slice_count0 + }; + while sbin_idx <= SEGMENT_BIN_MAX + invariant + local.wf(), tld_ptr.wf(), - tld_ptr.is_in(*old(local)), - 1 <= slice_count0 <= SLICES_PER_SEGMENT, - ensures - local.wf_main(), + tld_ptr.is_in(*local), + slice_count > 0, + local.heap_id == old(local).heap_id, + slice_count == (if slice_count0 == 0 { + 1 + } else { + slice_count0 + }), common_preserves(*old(local), *local), - (page_ptr.page_ptr.id() != 0 ==> page_ptr.wf() && page_ptr.is_in( - *local, - )//&& allocated_block_tokens(blocks@, page_ptr.page_id@, block_size, n_blocks, local.instance) - && local.page_organization.popped == Popped::Ready(page_ptr.page_id@, true) - && page_init_is_committed(page_ptr.page_id@, *local) && (slice_count0 > 0 - ==> local.page_organization.pages[page_ptr.page_id@].count == Some( - slice_count0 as nat, - ))), - (page_ptr.page_ptr.id() == 0 ==> local.wf()), { - let mut sbin_idx = slice_bin(slice_count0); - let slice_count = if slice_count0 == 0 { - 1 - } else { - slice_count0 - }; - while sbin_idx <= SEGMENT_BIN_MAX + let mut slice_ptr = tld_ptr.tld_ptr.borrow( + Tracked(&local.tld), + ).segments.span_queue_headers[sbin_idx].first; + let ghost mut list_idx = 0int; + let ghost mut slice_page_id: Option = + local.page_organization.unused_dlist_headers[sbin_idx as int].first; + proof { + local.page_organization.first_is_in(sbin_idx as int); + } + while slice_ptr.to_usize() != 0 invariant local.wf(), tld_ptr.wf(), tld_ptr.is_in(*local), + is_page_ptr_opt(slice_ptr, slice_page_id), + slice_page_id.is_Some() ==> local.page_organization.valid_unused_page( + slice_page_id.get_Some_0(), + sbin_idx as int, + list_idx, + ), slice_count > 0, local.heap_id == old(local).heap_id, slice_count == (if slice_count0 == 0 { @@ -20110,52 +20048,23 @@ mod segment { }), common_preserves(*old(local), *local), { - let mut slice_ptr = tld_ptr.tld_ptr.borrow( - 
Tracked(&local.tld), - ).segments.span_queue_headers[sbin_idx].first; - let ghost mut list_idx = 0int; - let ghost mut slice_page_id: Option = - local.page_organization.unused_dlist_headers[sbin_idx as int].first; - proof { - local.page_organization.first_is_in(sbin_idx as int); - } - while slice_ptr.to_usize() != 0 - invariant - local.wf(), - tld_ptr.wf(), - tld_ptr.is_in(*local), - is_page_ptr_opt(slice_ptr, slice_page_id), - slice_page_id.is_Some() ==> local.page_organization.valid_unused_page( - slice_page_id.get_Some_0(), - sbin_idx as int, - list_idx, - ), - slice_count > 0, - local.heap_id == old(local).heap_id, - slice_count == (if slice_count0 == 0 { - 1 - } else { - slice_count0 - }), - common_preserves(*old(local), *local), - { - let slice = PagePtr { page_ptr: slice_ptr, page_id: Ghost(slice_page_id.get_Some_0()) }; - assert(slice.wf()); - let found_slice_count = slice.get_count(Tracked(&*local)) as usize; - if found_slice_count >= slice_count { - let segment = SegmentPtr::ptr_segment(slice); - assert(tld_ptr.is_in(*local)); - span_queue_delete( - tld_ptr, - sbin_idx, - slice, - Tracked(&mut *local), - Ghost(list_idx), - Ghost(found_slice_count as int), - ); - assert(tld_ptr.is_in(*local)); - if found_slice_count > slice_count { - /*proof { + let slice = PagePtr { page_ptr: slice_ptr, page_id: Ghost(slice_page_id.get_Some_0()) }; + assert(slice.wf()); + let found_slice_count = slice.get_count(Tracked(&*local)) as usize; + if found_slice_count >= slice_count { + let segment = SegmentPtr::ptr_segment(slice); + assert(tld_ptr.is_in(*local)); + span_queue_delete( + tld_ptr, + sbin_idx, + slice, + Tracked(&mut *local), + Ghost(list_idx), + Ghost(found_slice_count as int), + ); + assert(tld_ptr.is_in(*local)); + if found_slice_count > slice_count { + /*proof { let current_slice_count = found_slice_count; let target_slice_count = slice_count; assert((local).wf_main()); @@ -20169,105 +20078,104 @@ mod segment { assert(current_slice_count > target_slice_count); assert(target_slice_count > 0); }*/ - segment_slice_split( - slice, - found_slice_count, - slice_count, - tld_ptr, - Tracked(&mut *local), - ); - } - assert(tld_ptr.is_in(*local)); - let suc = segment_span_allocate( - segment, + segment_slice_split( slice, + found_slice_count, slice_count, tld_ptr, Tracked(&mut *local), ); - if !suc { - todo(); - }//assert(local.wf_main()); - //assert(slice.is_in(*local)); - //assert(allocated_block_tokens(block_tokens, slice.page_id@, block_size, n_blocks, local.instance)); - //assert(tld_ptr.is_in(*local)); - - return slice; - } - slice_ptr = slice.get_next(Tracked(&*local)); - proof { - local.page_organization.next_is_in( - slice_page_id.get_Some_0(), - sbin_idx as int, - list_idx, - ); - slice_page_id = - local.page_organization.pages[slice_page_id.get_Some_0()].dlist_entry.get_Some_0().next; - list_idx = list_idx + 1; } + assert(tld_ptr.is_in(*local)); + let suc = segment_span_allocate( + segment, + slice, + slice_count, + tld_ptr, + Tracked(&mut *local), + ); + if !suc { + todo(); + } //assert(local.wf_main()); + //assert(slice.is_in(*local)); + //assert(allocated_block_tokens(block_tokens, slice.page_id@, block_size, n_blocks, local.instance)); + //assert(tld_ptr.is_in(*local)); + + return slice; + } + slice_ptr = slice.get_next(Tracked(&*local)); + proof { + local.page_organization.next_is_in( + slice_page_id.get_Some_0(), + sbin_idx as int, + list_idx, + ); + slice_page_id = + local.page_organization.pages[slice_page_id.get_Some_0()].dlist_entry.get_Some_0().next; + list_idx = list_idx + 
1; } - sbin_idx = sbin_idx + 1; } - PagePtr::null() + sbin_idx = sbin_idx + 1; } + PagePtr::null() +} - #[verifier::spinoff_prover] - fn span_queue_delete( - tld_ptr: TldPtr, - sbin_idx: usize, - slice: PagePtr, - Tracked(local): Tracked<&mut Local>, - Ghost(list_idx): Ghost, - Ghost(count): Ghost, - ) - requires - old(local).wf_main(), - tld_ptr.wf(), - tld_ptr.is_in(*old(local)), - slice.wf(), - old(local).page_organization.valid_unused_page(slice.page_id@, sbin_idx as int, list_idx), - count == old(local).page_organization.pages[slice.page_id@].count.get_Some_0(), - (match old(local).page_organization.popped { - Popped::No => true, - Popped::SegmentFreeing(sid, idx) => slice.page_id@.segment_id == sid - && slice.page_id@.idx == idx, - _ => false, - }), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - local.page_organization.popped == (match old(local).page_organization.popped { - Popped::No => Popped::VeryUnready( - slice.page_id@.segment_id, - slice.page_id@.idx as int, - count, - false, - ), - Popped::SegmentFreeing(sid, idx) => Popped::SegmentFreeing(sid, idx + count), - _ => arbitrary(), - }), - local.page_organization.pages.dom().contains(slice.page_id@), - old(local).pages[slice.page_id@] == local.pages[slice.page_id@], - local.page_organization.pages[slice.page_id@].is_used - == false,//old(local).page_organization.pages[slice.page_id@] - // == local.page_organization.pages[slice.page_id@], +#[verifier::spinoff_prover] +fn span_queue_delete( + tld_ptr: TldPtr, + sbin_idx: usize, + slice: PagePtr, + Tracked(local): Tracked<&mut Local>, + Ghost(list_idx): Ghost, + Ghost(count): Ghost, +) + requires + old(local).wf_main(), + tld_ptr.wf(), + tld_ptr.is_in(*old(local)), + slice.wf(), + old(local).page_organization.valid_unused_page(slice.page_id@, sbin_idx as int, list_idx), + count == old(local).page_organization.pages[slice.page_id@].count.get_Some_0(), + (match old(local).page_organization.popped { + Popped::No => true, + Popped::SegmentFreeing(sid, idx) => slice.page_id@.segment_id == sid + && slice.page_id@.idx == idx, + _ => false, + }), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + local.page_organization.popped == (match old(local).page_organization.popped { + Popped::No => Popped::VeryUnready( + slice.page_id@.segment_id, + slice.page_id@.idx as int, + count, + false, + ), + Popped::SegmentFreeing(sid, idx) => Popped::SegmentFreeing(sid, idx + count), + _ => arbitrary(), + }), + local.page_organization.pages.dom().contains(slice.page_id@), + old(local).pages[slice.page_id@] == local.pages[slice.page_id@], + local.page_organization.pages[slice.page_id@].is_used + == false, //old(local).page_organization.pages[slice.page_id@]// == local.page_organization.pages[slice.page_id@],FORMATTER_NOT_INLINE_MARKER - { - let prev = slice.get_prev(Tracked(&*local)); - let next = slice.get_next(Tracked(&*local)); - let ghost mut next_state; - proof { - //assert(local.page_organization.pages.dom().contains(slice.page_id@)); - next_state = - PageOrg::take_step::take_page_from_unused_queue( - local.page_organization, - slice.page_id@, - sbin_idx as int, - list_idx, - ); - } - if prev.to_usize() == 0 { - tld_get_mut!(tld_ptr, local, tld => { +{ + let prev = slice.get_prev(Tracked(&*local)); + let next = slice.get_next(Tracked(&*local)); + let ghost mut next_state; + proof { + //assert(local.page_organization.pages.dom().contains(slice.page_id@)); + next_state = + PageOrg::take_step::take_page_from_unused_queue( + local.page_organization, + 
slice.page_id@, + sbin_idx as int, + list_idx, + ); + } + if prev.to_usize() == 0 { + tld_get_mut!(tld_ptr, local, tld => { let cq = tld.segments.span_queue_headers[sbin_idx]; tld.segments.span_queue_headers.set( sbin_idx, @@ -20276,16 +20184,16 @@ mod segment { .. cq }); }); - } else { - //assert(local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.is_Some()); - let prev_page_ptr = PagePtr { - page_ptr: prev, - page_id: Ghost( - local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.get_Some_0(), - ), - }; - //assert(prev_page_ptr.wf()); - /*assert(local.page_organization_valid()); + } else { + //assert(local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.is_Some()); + let prev_page_ptr = PagePtr { + page_ptr: prev, + page_id: Ghost( + local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.get_Some_0(), + ), + }; + //assert(prev_page_ptr.wf()); + /*assert(local.page_organization_valid()); assert(local.page_organization.pages.dom().contains(prev_page_ptr.page_id@)); assert(page_organization_pages_match_data( local.page_organization.pages[prev_page_ptr.page_id@], @@ -20297,12 +20205,12 @@ mod segment { assert(!local.page_organization.pages[prev_page_ptr.page_id@].is_used); assert(local.psa.dom().contains(prev_page_ptr.page_id@));*/ - unused_page_get_mut_next!(prev_page_ptr, local, n => { + unused_page_get_mut_next!(prev_page_ptr, local, n => { n = next; }); - } - if next.to_usize() == 0 { - tld_get_mut!(tld_ptr, local, tld => { + } + if next.to_usize() == 0 { + tld_get_mut!(tld_ptr, local, tld => { let cq = tld.segments.span_queue_headers[sbin_idx]; tld.segments.span_queue_headers.set( sbin_idx, @@ -20311,45 +20219,45 @@ mod segment { .. cq }); }); - } else { - let next_page_ptr = PagePtr { - page_ptr: next, - page_id: Ghost( - local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.get_Some_0(), - ), - }; - //assert(next_page_ptr.wf()); - //assert(local.psa.dom().contains(next_page_ptr.page_id@)); - unused_page_get_mut_prev!(next_page_ptr, local, p => { + } else { + let next_page_ptr = PagePtr { + page_ptr: next, + page_id: Ghost( + local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.get_Some_0(), + ), + }; + //assert(next_page_ptr.wf()); + //assert(local.psa.dom().contains(next_page_ptr.page_id@)); + unused_page_get_mut_prev!(next_page_ptr, local, p => { p = prev; }); - } - proof { - let old_state = local.page_organization; - local.page_organization = next_state; - if old( + } + proof { + let old_state = local.page_organization; + local.page_organization = next_state; + if old( + local, + ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.is_Some() && old( + local, + ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.is_Some() { + let old_p = old( local, - ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.is_Some() && old( + ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.get_Some_0(); + let old_n = old( local, - ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.is_Some() { - let old_p = old( - local, - ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.get_Some_0(); - let old_n = old( - local, - ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.get_Some_0(); - let p = - local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.get_Some_0(); - let n = - 
local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.get_Some_0(); - //assert(old_p == p); - //assert(old_n == n); - //assert(page_organization_pages_match_data(old_state.pages[p], old(local).pages[p], old(local).psa[p], p, old_state.popped)); - //assert(old_state.pages[p].offset == local.page_organization.pages[p].offset); - //assert(page_organization_pages_match_data(local.page_organization.pages[p], local.pages[p], local.psa[p], p, local.page_organization.popped)); - //assert(page_organization_pages_match_data(local.page_organization.pages[n], local.pages[n], local.psa[n], n, local.page_organization.popped)); - //assert(page_organization_pages_match_data(local.page_organization.pages[slice.page_id@], local.pages[slice.page_id@], local.psa[slice.page_id@], slice.page_id@, local.page_organization.popped)); - /*let org_pages = local.page_organization.pages; + ).page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.get_Some_0(); + let p = + local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().prev.get_Some_0(); + let n = + local.page_organization.pages[slice.page_id@].dlist_entry.get_Some_0().next.get_Some_0(); + //assert(old_p == p); + //assert(old_n == n); + //assert(page_organization_pages_match_data(old_state.pages[p], old(local).pages[p], old(local).psa[p], p, old_state.popped)); + //assert(old_state.pages[p].offset == local.page_organization.pages[p].offset); + //assert(page_organization_pages_match_data(local.page_organization.pages[p], local.pages[p], local.psa[p], p, local.page_organization.popped)); + //assert(page_organization_pages_match_data(local.page_organization.pages[n], local.pages[n], local.psa[n], n, local.page_organization.popped)); + //assert(page_organization_pages_match_data(local.page_organization.pages[slice.page_id@], local.pages[slice.page_id@], local.psa[slice.page_id@], slice.page_id@, local.page_organization.popped)); + /*let org_pages = local.page_organization.pages; let pages = local.pages; let old_org_pages = old(local).page_organization.pages; @@ -20378,20 +20286,20 @@ mod segment { assert(page_organization_pages_match_data(org_pages[page_id], pages[page_id], local.psa[page_id], page_id, local.page_organization.popped)); } }*/ - } - let org_queues = local.page_organization.unused_dlist_headers; - let queues = local.tld@.value.get_Some_0().segments.span_queue_headers; - /*assert(is_page_ptr_opt(queues@[sbin_idx as int].first, org_queues[sbin_idx as int].first)); + } + let org_queues = local.page_organization.unused_dlist_headers; + let queues = local.tld@.value.get_Some_0().segments.span_queue_headers; + /*assert(is_page_ptr_opt(queues@[sbin_idx as int].first, org_queues[sbin_idx as int].first)); assert(is_page_ptr_opt(queues@[sbin_idx as int].last, org_queues[sbin_idx as int].last)); assert(page_organization_queues_match(org_queues, queues@)); assert_sets_equal!(local.page_organization.pages.dom(), local.pages.dom());*/ - preserves_mem_chunk_good(*old(local), *local); - //assert(local.wf_main()); - } + preserves_mem_chunk_good(*old(local), *local); + //assert(local.wf_main()); } +} - /* +/* static void mi_segment_slice_split(mi_segment_t* segment, mi_slice_t* slice, size_t slice_count, mi_segments_tld_t* tld) { mi_assert_internal(_mi_ptr_segment(slice) == segment); mi_assert_internal(slice->slice_count >= slice_count); @@ -20405,65 +20313,65 @@ mod segment { } */ - #[verifier::spinoff_prover] - fn segment_slice_split( - slice: PagePtr, - current_slice_count: usize, - target_slice_count: usize, - 
tld_ptr: TldPtr, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf_main(), - tld_ptr.wf(), - tld_ptr.is_in(*old(local)), - slice.wf(), - old(local).page_organization.popped == Popped::VeryUnready( - slice.page_id@.segment_id, - slice.page_id@.idx as int, - current_slice_count as int, - false, - ), - old(local).page_organization.pages.dom().contains(slice.page_id@), - //old(local).page_organization.pages[slice.page_id@].count.is_some(), - old(local).page_organization.pages[slice.page_id@].is_used == false, - SLICES_PER_SEGMENT >= current_slice_count > target_slice_count, - target_slice_count > 0, - ensures - local.wf_main(), - common_preserves(*old(local), *local), - slice.wf(), - local.page_organization.popped == Popped::VeryUnready( - slice.page_id@.segment_id, - slice.page_id@.idx as int, - target_slice_count as int, - false, - ), - local.page_organization.pages.dom().contains(slice.page_id@), - local.page_organization.pages[slice.page_id@].is_used == false, - { - proof { - local.page_organization.get_count_bound_very_unready(); - //assert(local.page_organization.pages[slice.page_id@].count == Some(current_slice_coun - //assert(slice.page_id@.idx + current_slice_count <= SLICES_PER_SEGMENT + 1); - //assert(slice.page_id@.idx + target_slice_count <= SLICES_PER_SEGMENT); - } - let next_slice = slice.add_offset(target_slice_count); - //let count_being_returned = target_slice_count - current_slice_count; - let bin_idx = slice_bin(current_slice_count - target_slice_count); - let ghost mut next_state; - proof { - next_state = - PageOrg::take_step::split_page( - local.page_organization, - slice.page_id@, - current_slice_count as int, - target_slice_count as int, - bin_idx as int, - ); - } - let first_in_queue; - tld_get_mut!(tld_ptr, local, tld => { +#[verifier::spinoff_prover] +fn segment_slice_split( + slice: PagePtr, + current_slice_count: usize, + target_slice_count: usize, + tld_ptr: TldPtr, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf_main(), + tld_ptr.wf(), + tld_ptr.is_in(*old(local)), + slice.wf(), + old(local).page_organization.popped == Popped::VeryUnready( + slice.page_id@.segment_id, + slice.page_id@.idx as int, + current_slice_count as int, + false, + ), + old(local).page_organization.pages.dom().contains(slice.page_id@), + //old(local).page_organization.pages[slice.page_id@].count.is_some(), + old(local).page_organization.pages[slice.page_id@].is_used == false, + SLICES_PER_SEGMENT >= current_slice_count > target_slice_count, + target_slice_count > 0, + ensures + local.wf_main(), + common_preserves(*old(local), *local), + slice.wf(), + local.page_organization.popped == Popped::VeryUnready( + slice.page_id@.segment_id, + slice.page_id@.idx as int, + target_slice_count as int, + false, + ), + local.page_organization.pages.dom().contains(slice.page_id@), + local.page_organization.pages[slice.page_id@].is_used == false, +{ + proof { + local.page_organization.get_count_bound_very_unready(); + //assert(local.page_organization.pages[slice.page_id@].count == Some(current_slice_coun + //assert(slice.page_id@.idx + current_slice_count <= SLICES_PER_SEGMENT + 1); + //assert(slice.page_id@.idx + target_slice_count <= SLICES_PER_SEGMENT); + } + let next_slice = slice.add_offset(target_slice_count); + //let count_being_returned = target_slice_count - current_slice_count; + let bin_idx = slice_bin(current_slice_count - target_slice_count); + let ghost mut next_state; + proof { + next_state = + PageOrg::take_step::split_page( + local.page_organization, 
+ slice.page_id@, + current_slice_count as int, + target_slice_count as int, + bin_idx as int, + ); + } + let first_in_queue; + tld_get_mut!(tld_ptr, local, tld => { let mut cq = tld.segments.span_queue_headers[bin_idx]; first_in_queue = cq.first; @@ -20474,89 +20382,89 @@ mod segment { tld.segments.span_queue_headers.set(bin_idx, cq); }); - if first_in_queue.to_usize() != 0 { - let first_in_queue_ptr = PagePtr { - page_ptr: first_in_queue, - page_id: Ghost( - local.page_organization.unused_dlist_headers[bin_idx as int].first.get_Some_0(), - ), - }; - unused_page_get_mut_prev!(first_in_queue_ptr, local, p => { + if first_in_queue.to_usize() != 0 { + let first_in_queue_ptr = PagePtr { + page_ptr: first_in_queue, + page_id: Ghost( + local.page_organization.unused_dlist_headers[bin_idx as int].first.get_Some_0(), + ), + }; + unused_page_get_mut_prev!(first_in_queue_ptr, local, p => { p = next_slice.page_ptr; }); - } - unused_page_get_mut_count!(slice, local, c => { + } + unused_page_get_mut_count!(slice, local, c => { c = target_slice_count as u32; }); - unused_page_get_mut_inner!(next_slice, local, inner => { + unused_page_get_mut_inner!(next_slice, local, inner => { inner.xblock_size = 0; }); - unused_page_get_mut_prev!(next_slice, local, p => { + unused_page_get_mut_prev!(next_slice, local, p => { p = PPtr::from_usize(0); }); - unused_page_get_mut_next!(next_slice, local, n => { + unused_page_get_mut_next!(next_slice, local, n => { n = first_in_queue; }); - unused_page_get_mut_count!(next_slice, local, c => { + unused_page_get_mut_count!(next_slice, local, c => { c = (current_slice_count - target_slice_count) as u32; }); - unused_page_get_mut!(next_slice, local, page => { + unused_page_get_mut!(next_slice, local, page => { page.offset = 0; }); - proof { - const_facts(); - } - if current_slice_count > target_slice_count + 1 { - let last_slice = slice.add_offset(current_slice_count - 1); - unused_page_get_mut_inner!(last_slice, local, inner => { + proof { + const_facts(); + } + if current_slice_count > target_slice_count + 1 { + let last_slice = slice.add_offset(current_slice_count - 1); + unused_page_get_mut_inner!(last_slice, local, inner => { inner.xblock_size = 0; }); - unused_page_get_mut_count!(last_slice, local, c => { + unused_page_get_mut_count!(last_slice, local, c => { c = (current_slice_count - target_slice_count) as u32; }); - unused_page_get_mut!(last_slice, local, page => { + unused_page_get_mut!(last_slice, local, page => { //assert(0 <= (current_slice_count - target_slice_count) as u32 <= 512); //assert(SIZEOF_PAGE_HEADER == 32); assert(SIZEOF_PAGE_HEADER as u32 == 80); - //assert((current_slice_count - target_slice_count) as u32 * (SIZEOF_PAGE_HEADER as u32) - // == (current_slice_count - target_slice_count) as u32 * 32); - page.offset = (current_slice_count - target_slice_count - 1) as u32 - * (SIZEOF_PAGE_HEADER as u32); - }); - } - proof { - local.page_organization = next_state; - let page_id = slice.page_id@; - let next_id = next_slice.page_id@; - let last_page_id = PageId { - idx: (page_id.idx + current_slice_count - 1) as nat, - ..page_id - }; - let old_org_pages = old(local).page_organization.pages; - let old_pages = old(local).pages; - let old_psa = old(local).psa; - let org_pages = local.page_organization.pages; - let pages = local.pages; - local.psa = local.psa.union_prefer_right(local.unused_pages); - let psa = local.psa; - let old_org_queues = old(local).page_organization.unused_dlist_headers; - let old_queues = 
old(local).tld@.value.get_Some_0().segments.span_queue_headers; - //assert(page_organization_pages_match_data(org_pages[slice.page_id@], pages[slice.page_id@], psa[slice.page_id@], slice.page_id@, local.page_organization.popped)); - //assert(page_organization_pages_match_data(org_pages[next_slice.page_id@], pages[next_slice.page_id@], psa[next_slice.page_id@], next_slice.page_id@, local.page_organization.popped)); - /*if current_slice_count > target_slice_count + 1 { + //assert((current_slice_count - target_slice_count) as u32 * (SIZEOF_PAGE_HEADER as u32) + // == (current_slice_count - target_slice_count) as u32 * 32); + page.offset = (current_slice_count - target_slice_count - 1) as u32 + * (SIZEOF_PAGE_HEADER as u32); + }); + } + proof { + local.page_organization = next_state; + let page_id = slice.page_id@; + let next_id = next_slice.page_id@; + let last_page_id = PageId { + idx: (page_id.idx + current_slice_count - 1) as nat, + ..page_id + }; + let old_org_pages = old(local).page_organization.pages; + let old_pages = old(local).pages; + let old_psa = old(local).psa; + let org_pages = local.page_organization.pages; + let pages = local.pages; + local.psa = local.psa.union_prefer_right(local.unused_pages); + let psa = local.psa; + let old_org_queues = old(local).page_organization.unused_dlist_headers; + let old_queues = old(local).tld@.value.get_Some_0().segments.span_queue_headers; + //assert(page_organization_pages_match_data(org_pages[slice.page_id@], pages[slice.page_id@], psa[slice.page_id@], slice.page_id@, local.page_organization.popped)); + //assert(page_organization_pages_match_data(org_pages[next_slice.page_id@], pages[next_slice.page_id@], psa[next_slice.page_id@], next_slice.page_id@, local.page_organization.popped)); + /*if current_slice_count > target_slice_count + 1 { assert(last_page_id != next_id); assert(last_page_id != page_id); assert(page_organization_pages_match_data(org_pages[last_page_id], pages[last_page_id], psa[last_page_id], last_page_id, local.page_organization.popped)); } else { assert(page_organization_pages_match_data(org_pages[last_page_id], pages[last_page_id], psa[last_page_id], last_page_id, local.page_organization.popped)); }*/ - /*if first_in_queue.id() != 0 { + /*if first_in_queue.id() != 0 { let first_page_id = local.page_organization.unused_dlist_headers[bin_idx as int].first.get_Some_0(); assert(page_organization_pages_match_data(org_pages[first_page_id], pages[first_page_id], psa[first_page_id])); }*/ - //let last_slice = slice.add_offset(current_slice_count - 1); - //assert(page_organization_pages_match_data(org_pages[last_slice.page_id@], pages[last_slice.page_id@], psa[last_slice.page_id@])); - /*assert forall |pid| #[trigger] org_pages.dom().contains(pid) implies + //let last_slice = slice.add_offset(current_slice_count - 1); + //assert(page_organization_pages_match_data(org_pages[last_slice.page_id@], pages[last_slice.page_id@], psa[last_slice.page_id@])); + /*assert forall |pid| #[trigger] org_pages.dom().contains(pid) implies page_organization_pages_match_data(org_pages[pid], pages[pid], psa[pid], pid, local.page_organization.popped) by { let first_id = old(local).page_organization.unused_dlist_headers[bin_idx as int].first.get_Some_0(); @@ -20588,7 +20496,7 @@ mod segment { assert(page_organization_pages_match_data(org_pages[pid], pages[pid], psa[pid], pid, local.page_organization.popped)); } }*/ - /*assert forall |page_id: PageId| (#[trigger] local.page_organization.pages.dom().contains(page_id) && + /*assert forall |page_id: PageId| 
(#[trigger] local.page_organization.pages.dom().contains(page_id) && !local.page_organization.pages[page_id].is_used) <==> local.unused_pages.dom().contains(page_id) by { if (local.page_organization.pages.dom().contains(page_id) && !local.page_organization.pages[page_id].is_used) { @@ -20598,15 +20506,15 @@ mod segment { assert(local.page_organization.pages.dom().contains(page_id) && !local.page_organization.pages[page_id].is_used); } }*/ - //assert(forall |page_id: PageId| #[trigger] local.unused_pages.dom().contains(page_id) ==> - // local.unused_pages[page_id] == local.psa[page_id]); - //assert(local.page_organization_valid()); - preserves_mem_chunk_good(*old(local), *local); - //assert(local.wf_main()); - } + //assert(forall |page_id: PageId| #[trigger] local.unused_pages.dom().contains(page_id) ==> + // local.unused_pages[page_id] == local.psa[page_id]); + //assert(local.page_organization_valid()); + preserves_mem_chunk_good(*old(local), *local); + //assert(local.wf_main()); } +} - /* +/* spec fn allocated_block_tokens(m: Map, page_id: PageId, block_size: nat, n_blocks: nat, instance: Mim::Instance) -> bool { &&& (forall |block_id: BlockId| block_id.page_id == page_id @@ -20625,92 +20533,93 @@ mod segment { } */ - #[verifier::spinoff_prover] - fn segment_span_allocate( - segment: SegmentPtr, - slice: PagePtr, - slice_count: usize, - tld_ptr: TldPtr, - Tracked(local): Tracked<&mut Local>, - ) -> (success: bool) - requires - old(local).wf_main(), - slice.wf(), - segment.wf(), - segment.segment_id == slice.page_id@.segment_id, - segment.is_in(*old(local)), - old(local).page_organization.popped == Popped::VeryUnready( - slice.page_id@.segment_id, - slice.page_id@.idx as int, - slice_count as int, - false, - ) || (old(local).page_organization.popped == Popped::SegmentCreating( - slice.page_id@.segment_id, - ) && slice.page_id@.idx == 0 && slice_count < SLICES_PER_SEGMENT), - old(local).page_organization.pages.dom().contains(slice.page_id@), - old(local).page_organization.pages[slice.page_id@].is_used == false, - SLICES_PER_SEGMENT >= slice_count > 0, - ensures - local.wf_main(), - success ==> old(local).page_organization.popped.is_VeryUnready() - ==> local.page_organization.popped == Popped::Ready(slice.page_id@, true), - success ==> old(local).page_organization.popped.is_SegmentCreating() - ==> local.page_organization.popped == Popped::VeryUnready( - slice.page_id@.segment_id, +#[verifier::spinoff_prover] +fn segment_span_allocate( + segment: SegmentPtr, + slice: PagePtr, + slice_count: usize, + tld_ptr: TldPtr, + Tracked(local): Tracked<&mut Local>, +) -> (success: bool) + requires + old(local).wf_main(), + slice.wf(), + segment.wf(), + segment.segment_id == slice.page_id@.segment_id, + segment.is_in(*old(local)), + old(local).page_organization.popped == Popped::VeryUnready( + slice.page_id@.segment_id, + slice.page_id@.idx as int, + slice_count as int, + false, + ) || (old(local).page_organization.popped == Popped::SegmentCreating( + slice.page_id@.segment_id, + ) && slice.page_id@.idx == 0 && slice_count < SLICES_PER_SEGMENT), + old(local).page_organization.pages.dom().contains(slice.page_id@), + old(local).page_organization.pages[slice.page_id@].is_used == false, + SLICES_PER_SEGMENT >= slice_count > 0, + ensures + local.wf_main(), + success ==> old(local).page_organization.popped.is_VeryUnready() + ==> local.page_organization.popped == Popped::Ready(slice.page_id@, true), + success ==> old(local).page_organization.popped.is_SegmentCreating() + ==> local.page_organization.popped == 
Popped::VeryUnready( + slice.page_id@.segment_id, + slice_count as int, + SLICES_PER_SEGMENT - slice_count as int, + true, + ), + success ==> local.page_organization.pages.dom().contains(slice.page_id@), + success ==> local.page_organization.pages[slice.page_id@].count == Some(slice_count as nat), + success ==> page_init_is_committed(slice.page_id@, *local), + common_preserves(*old(local), *local), + segment.is_in(*local), +{ + let ghost mut next_state; + proof { + const_facts(); + if local.page_organization.popped.is_VeryUnready() { + next_state = PageOrg::take_step::allocate_popped(local.page_organization); + } else { + next_state = + PageOrg::take_step::forget_about_first_page( + local.page_organization, slice_count as int, - SLICES_PER_SEGMENT - slice_count as int, - true, - ), - success ==> local.page_organization.pages.dom().contains(slice.page_id@), - success ==> local.page_organization.pages[slice.page_id@].count == Some(slice_count as nat), - success ==> page_init_is_committed(slice.page_id@, *local), - common_preserves(*old(local), *local), - segment.is_in(*local), - { - let ghost mut next_state; - proof { - const_facts(); - if local.page_organization.popped.is_VeryUnready() { - next_state = PageOrg::take_step::allocate_popped(local.page_organization); - } else { - next_state = - PageOrg::take_step::forget_about_first_page( - local.page_organization, - slice_count as int, - ); - } - } - let p = segment_page_start_from_slice(segment, slice, 0); - //assert(slice_count * SLICE_SIZE <= SLICES_PER_SEGMENT * SLICE_SIZE); - if !segment_ensure_committed( - segment, - p, - slice_count * SLICE_SIZE as usize, - Tracked(&mut *local), - ) { - return false; - } - let ghost old_local = *local; - let ghost first_page_id = slice.page_id@; - //assert(local.page_organization.pages.dom().contains(slice.page_id@)); - let ghost range = first_page_id.range_from(0, slice_count as int); - assert forall|pid| range.contains(pid) implies #[trigger] - local.unused_pages.dom().contains(pid) by { - assert(local.pages.dom().contains(pid)); - } - let tracked mut first_psa = local.unused_pages.tracked_remove(first_page_id); - let mut page = slice.page_ptr.take(Tracked(&mut first_psa.points_to)); - page.offset = 0; - slice.page_ptr.put(Tracked(&mut first_psa.points_to), page); - proof { - local.unused_pages.tracked_insert(first_page_id, first_psa); + ); } - unused_page_get_mut_count!(slice, local, count => { + } + let p = segment_page_start_from_slice(segment, slice, 0); + //assert(slice_count * SLICE_SIZE <= SLICES_PER_SEGMENT * SLICE_SIZE); + if !segment_ensure_committed( + segment, + p, + slice_count * SLICE_SIZE as usize, + Tracked(&mut *local), + ) { + return false; + } + let ghost old_local = *local; + let ghost first_page_id = slice.page_id@; + //assert(local.page_organization.pages.dom().contains(slice.page_id@)); + let ghost range = first_page_id.range_from(0, slice_count as int); + assert forall|pid| range.contains(pid) implies #[trigger] local.unused_pages.dom().contains( + pid, + ) by { + assert(local.pages.dom().contains(pid)); + } + let tracked mut first_psa = local.unused_pages.tracked_remove(first_page_id); + let mut page = slice.page_ptr.take(Tracked(&mut first_psa.points_to)); + page.offset = 0; + slice.page_ptr.put(Tracked(&mut first_psa.points_to), page); + proof { + local.unused_pages.tracked_insert(first_page_id, first_psa); + } + unused_page_get_mut_count!(slice, local, count => { // this is usually already set. 
I think the one case where it actually needs to // be set is when initializing the segment. count = slice_count as u32; }); - unused_page_get_mut_inner!(slice, local, inner => { + unused_page_get_mut_inner!(slice, local, inner => { // Not entirely sure what the rationale for setting to bsize to this value is. // In normal operation, we're going to set the block_size to something else soon. // If we are currently setting up page 0 as part of segment initialization, @@ -20719,61 +20628,59 @@ mod segment { inner.xblock_size = if bsize >= HUGE_BLOCK_SIZE as usize { HUGE_BLOCK_SIZE } else { bsize as u32 }; //assert(inner.xblock_size != 0); }); - // Set up the remaining pages - let mut i: usize = 1; - let ghost local_snapshot = *local; - let extra = slice_count - 1; - while i <= extra - invariant - 1 <= i <= extra + 1, - first_page_id.idx + extra < SLICES_PER_SEGMENT, - local == (Local { unused_pages: local.unused_pages, ..local_snapshot }), - local.unused_pages.dom() == local_snapshot.unused_pages.dom(), - slice.wf(), - slice.page_id == first_page_id, - forall|page_id| - #[trigger] - first_page_id.range_from(1, extra + 1).contains(page_id) - ==> local.unused_pages.dom().contains(page_id) && ( - local.unused_pages.dom().contains(page_id) - ==> local.unused_pages[page_id].points_to@.value.is_some() && is_page_ptr( - local.unused_pages[page_id].points_to@.pptr, - page_id, - )), - forall|page_id| - #[trigger] - local.unused_pages.dom().contains(page_id) ==> (if first_page_id.range_from( - 1, - i as int, - ).contains(page_id) { - psa_differ_only_in_offset( - local.unused_pages[page_id], - local_snapshot.unused_pages[page_id], - ) && local.unused_pages[page_id].points_to@.value.unwrap().offset == ( - page_id.idx - first_page_id.idx) * SIZEOF_PAGE_HEADER - } else { - local.unused_pages[page_id] == local_snapshot.unused_pages[page_id] - }), - { - proof { - const_facts(); - } - let ghost prelocal = *local; - let this_slice = slice.add_offset(i); - let ghost this_page_id = PageId { idx: (first_page_id.idx + i) as nat, ..first_page_id }; - assert(first_page_id.range_from(1, extra + 1).contains(this_page_id)); - //assert(is_page_ptr(local.unused_pages[this_page_id].points_to@.pptr, this_page_id)); - //assert(i * SIZEOF_PAGE_HEADER <= SLICES_PER_SEGMENT * SIZEOF_PAGE_HEADER); - let tracked mut this_psa = local.unused_pages.tracked_remove(this_page_id); - let mut page = this_slice.page_ptr.take(Tracked(&mut this_psa.points_to)); - page.offset = i as u32 * SIZEOF_PAGE_HEADER as u32; - this_slice.page_ptr.put(Tracked(&mut this_psa.points_to), page); - proof { - local.unused_pages.tracked_insert(this_page_id, this_psa); - assert_sets_equal!(local.unused_pages.dom() == prelocal.unused_pages.dom()); - } - i = i + 1; - /*proof { + // Set up the remaining pages + let mut i: usize = 1; + let ghost local_snapshot = *local; + let extra = slice_count - 1; + while i <= extra + invariant + 1 <= i <= extra + 1, + first_page_id.idx + extra < SLICES_PER_SEGMENT, + local == (Local { unused_pages: local.unused_pages, ..local_snapshot }), + local.unused_pages.dom() == local_snapshot.unused_pages.dom(), + slice.wf(), + slice.page_id == first_page_id, + forall|page_id| #[trigger] + first_page_id.range_from(1, extra + 1).contains(page_id) + ==> local.unused_pages.dom().contains(page_id) && ( + local.unused_pages.dom().contains(page_id) + ==> local.unused_pages[page_id].points_to@.value.is_some() && is_page_ptr( + local.unused_pages[page_id].points_to@.pptr, + page_id, + )), + forall|page_id| #[trigger] + 
local.unused_pages.dom().contains(page_id) ==> (if first_page_id.range_from( + 1, + i as int, + ).contains(page_id) { + psa_differ_only_in_offset( + local.unused_pages[page_id], + local_snapshot.unused_pages[page_id], + ) && local.unused_pages[page_id].points_to@.value.unwrap().offset == ( + page_id.idx - first_page_id.idx) * SIZEOF_PAGE_HEADER + } else { + local.unused_pages[page_id] == local_snapshot.unused_pages[page_id] + }), + { + proof { + const_facts(); + } + let ghost prelocal = *local; + let this_slice = slice.add_offset(i); + let ghost this_page_id = PageId { idx: (first_page_id.idx + i) as nat, ..first_page_id }; + assert(first_page_id.range_from(1, extra + 1).contains(this_page_id)); + //assert(is_page_ptr(local.unused_pages[this_page_id].points_to@.pptr, this_page_id)); + //assert(i * SIZEOF_PAGE_HEADER <= SLICES_PER_SEGMENT * SIZEOF_PAGE_HEADER); + let tracked mut this_psa = local.unused_pages.tracked_remove(this_page_id); + let mut page = this_slice.page_ptr.take(Tracked(&mut this_psa.points_to)); + page.offset = i as u32 * SIZEOF_PAGE_HEADER as u32; + this_slice.page_ptr.put(Tracked(&mut this_psa.points_to), page); + proof { + local.unused_pages.tracked_insert(this_page_id, this_psa); + assert_sets_equal!(local.unused_pages.dom() == prelocal.unused_pages.dom()); + } + i = i + 1; + /*proof { assert forall |page_id| #[trigger] local.unused_pages.dom().contains(page_id) implies ( @@ -20806,20 +20713,20 @@ mod segment { } } }*/ - } - unused_page_get_mut_inner!(slice, local, inner => { + } + unused_page_get_mut_inner!(slice, local, inner => { inner.set_is_reset(false); inner.set_is_committed(false); }); - segment_get_mut_main2!(segment, local, main2 => { + segment_get_mut_main2!(segment, local, main2 => { main2.used = main2.used + 1; }); - proof { - let old_po = local.page_organization; - local.page_organization = next_state; - local.psa = local.psa.union_prefer_right(local.unused_pages); - preserves_mem_chunk_good(old_local, *local); - /*if old_po.popped.is_VeryUnready() { + proof { + let old_po = local.page_organization; + local.page_organization = next_state; + local.psa = local.psa.union_prefer_right(local.unused_pages); + preserves_mem_chunk_good(old_local, *local); + /*if old_po.popped.is_VeryUnready() { assert(local.page_organization.pages[first_page_id].page_header_kind.is_none()); assert(page_organization_pages_match_data(local.page_organization.pages[first_page_id], local.pages[first_page_id], local.psa[first_page_id], first_page_id, local.page_organization.popped)); assert(page_organization_pages_match(local.page_organization.pages, local.pages, local.psa, local.page_organization.popped)); @@ -20846,11 +20753,11 @@ mod segment { assert(local.page_organization_valid()); } assert(local.wf_main());*/ - } - return true; } + return true; +} - /* +/* static mi_segment_t* mi_segment_reclaim_or_alloc(mi_heap_t* heap, size_t needed_slices, size_t block_size, mi_segments_tld_t* tld, mi_os_tld_t* os_tld) { mi_assert_internal(block_size < MI_HUGE_BLOCK_SIZE); @@ -20873,308 +20780,306 @@ mod segment { } */ - // segment_reclaim_or_alloc - // -> segment_alloc - // -> segment_os_alloc - // -> arena_alloc_aligned - //static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) - // For normal pages, required == 0 - // For huge pages, required == ? 
- #[verifier::spinoff_prover] - fn segment_alloc( - required: usize, - page_alignment: usize, - req_arena_id: ArenaId, - tld: TldPtr, - Tracked(local): Tracked<&mut Local>,// os_tld, - // huge_page, - ) -> (segment_ptr: SegmentPtr) - requires - old(local).wf(), - tld.wf(), - tld.is_in(*old(local)), - required == 0, // only handling non-huge-pages for now +// segment_reclaim_or_alloc +// -> segment_alloc +// -> segment_os_alloc +// -> arena_alloc_aligned +//static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi_arena_id_t req_arena_id, mi_segments_tld_t* tld, mi_os_tld_t* os_tld, mi_page_t** huge_page) +// For normal pages, required == 0 +// For huge pages, required == ? +#[verifier::spinoff_prover] +fn segment_alloc( + required: usize, + page_alignment: usize, + req_arena_id: ArenaId, + tld: TldPtr, + Tracked(local): Tracked<&mut Local>, // os_tld, + // huge_page, +) -> (segment_ptr: SegmentPtr) + requires + old(local).wf(), + tld.wf(), + tld.is_in(*old(local)), + required == 0, // only handling non-huge-pages for now - ensures - local.wf(), - common_preserves(*old(local), *local), - { - proof { - const_facts(); - } - let (segment_slices, pre_size, info_slices) = segment_calculate_slices(required); - let eager_delay = (current_thread_count() > 1 && tld.get_segments_count(Tracked(&*local)) - < option_eager_commit_delay() as usize); - let eager = !eager_delay && option_eager_commit(); - let commit = eager || (required > 0); - let is_zero = false; - let mut commit_mask = CommitMask::empty(); - let mut decommit_mask = CommitMask::empty(); - let ( - pre_segment_ptr, - new_psegment_slices, - new_ppre_size, - new_pinfo_slices, - is_zero, - pcommit, - memid, - mem_large, - is_pinned, - align_offset, - Tracked(mem_chunk), - ) = segment_os_alloc( - required, - page_alignment, - eager_delay, - req_arena_id, - segment_slices, - pre_size, - info_slices, - &mut commit_mask, - &mut decommit_mask, - commit, - tld, - Tracked(&mut *local), - ); - let ghost local_snap1 = *local; - if pre_segment_ptr.is_null() { - return pre_segment_ptr; - } - let tracked thread_state_tok = local.take_thread_token(); - let ghost pre_segment_id = pre_segment_ptr.segment_id@; - let ghost segment_state = SegmentState { shared_access: arbitrary(), is_enabled: false }; - let tracked (Tracked(thread_state_tok), Ghost(tos), Tracked(thread_of_segment_tok)) = - local.instance.create_segment_mk_tokens( - local.thread_id, - pre_segment_id, - segment_state, - thread_state_tok, - ); - let ghost segment_id = Mim::State::mk_fresh_segment_id(tos, pre_segment_id); - let segment_ptr = SegmentPtr { - segment_ptr: pre_segment_ptr.segment_ptr, + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + proof { + const_facts(); + } + let (segment_slices, pre_size, info_slices) = segment_calculate_slices(required); + let eager_delay = (current_thread_count() > 1 && tld.get_segments_count(Tracked(&*local)) + < option_eager_commit_delay() as usize); + let eager = !eager_delay && option_eager_commit(); + let commit = eager || (required > 0); + let is_zero = false; + let mut commit_mask = CommitMask::empty(); + let mut decommit_mask = CommitMask::empty(); + let ( + pre_segment_ptr, + new_psegment_slices, + new_ppre_size, + new_pinfo_slices, + is_zero, + pcommit, + memid, + mem_large, + is_pinned, + align_offset, + Tracked(mem_chunk), + ) = segment_os_alloc( + required, + page_alignment, + eager_delay, + req_arena_id, + segment_slices, + pre_size, + info_slices, + &mut commit_mask, + &mut decommit_mask, + commit, + 
tld, + Tracked(&mut *local), + ); + let ghost local_snap1 = *local; + if pre_segment_ptr.is_null() { + return pre_segment_ptr; + } + let tracked thread_state_tok = local.take_thread_token(); + let ghost pre_segment_id = pre_segment_ptr.segment_id@; + let ghost segment_state = SegmentState { shared_access: arbitrary(), is_enabled: false }; + let tracked (Tracked(thread_state_tok), Ghost(tos), Tracked(thread_of_segment_tok)) = + local.instance.create_segment_mk_tokens( + local.thread_id, + pre_segment_id, + segment_state, + thread_state_tok, + ); + let ghost segment_id = Mim::State::mk_fresh_segment_id(tos, pre_segment_id); + let segment_ptr = SegmentPtr { + segment_ptr: pre_segment_ptr.segment_ptr, + segment_id: Ghost(segment_id), + }; + proof { + local.thread_token = thread_state_tok; + const_facts(); + segment_start_eq(segment_id, pre_segment_id); + //assert(commit_mask.bytes(segment_id) == commit_mask.bytes(pre_segment_id)); + } + // the C version skips this step if the bytes are all zeroed by the OS + // We would need a complex transmute operation to do the same thing + let tracked seg_header_points_to_raw = mem_chunk.take_points_to_range( + segment_start(segment_id), + SIZEOF_SEGMENT_HEADER as int, + ); + //assert(SIZEOF_SEGMENT_HEADER == vstd::layout::size_of::()); + proof { + segment_start_mult8(segment_id); + } + //assert(segment_start(segment_id) % vstd::layout::align_of::() as int == 0); + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + let tracked mut seg_header_points_to = seg_header_points_to_raw.into_typed::( + segment_start(segment_id), + ); + let allow_decommit = option_allow_decommit() && !is_pinned && !mem_large; + let (pcell_main, Tracked(pointsto_main)) = PCell::new( + SegmentHeaderMain { + memid: memid, + mem_is_pinned: is_pinned, + mem_is_large: mem_large, + mem_is_committed: commit_mask.is_full(), + mem_alignment: page_alignment, + mem_align_offset: align_offset, + allow_decommit: allow_decommit, + decommit_expire: 0, + decommit_mask: if allow_decommit { + decommit_mask + } else { + CommitMask::empty() + }, + commit_mask: commit_mask, + }, + ); + let (pcell_main2, Tracked(pointsto_main2)) = PCell::new( + SegmentHeaderMain2 { + next: PPtr::from_usize(0), + abandoned: 0, + abandoned_visits: 0, + used: 0, + cookie: 0, + segment_slices: 0, + segment_info_slices: 0, + kind: if required == 0 { + SegmentKind::Normal + } else { + SegmentKind::Huge + }, + slice_entries: 0, + }, + ); + let (cur_thread_id, Tracked(is_thread)) = crate::thread::thread_id(); + proof { + local.is_thread.agrees(is_thread); + } + segment_ptr.segment_ptr.put( + Tracked(&mut seg_header_points_to), + SegmentHeader { + main: pcell_main, + abandoned_next: 0, + main2: pcell_main2, + thread_id: AtomicU64::new( + Ghost((Ghost(local.instance), Ghost(segment_id))), + cur_thread_id.thread_id, + Tracked(thread_of_segment_tok), + ), + instance: Ghost(local.instance), segment_id: Ghost(segment_id), - }; + }, + ); + //assert(segment_ptr.segment_ptr.id() + SEGMENT_SIZE < usize::MAX); + let mut i: usize = 0; + let mut cur_page_ptr = PPtr::from_usize( + segment_ptr.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER, + ); + //assert(i * SIZEOF_PAGE_HEADER == 0); + let ghost old_mem_chunk = mem_chunk; + let tracked mut psa_map = Map::::tracked_empty(); + let tracked mut pla_map = Map::::tracked_empty(); + while i <= SLICES_PER_SEGMENT as usize + invariant + mem_chunk.os == old_mem_chunk.os, + mem_chunk.wf(), + //mem_chunk.pointsto_has_range(segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i * 
SIZEOF_PAGE_HEADER, + // COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER)), + set_int_range( + segment_start(segment_id) + SIZEOF_SEGMENT_HEADER, + segment_start(segment_id) + COMMIT_SIZE, + ) <= old_mem_chunk.points_to@.dom(), + mem_chunk.points_to@.dom() =~= old_mem_chunk.points_to@.dom() - set_int_range( + segment_start(segment_id), + segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER, + ), + cur_page_ptr.id() == segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i + * SIZEOF_PAGE_HEADER, + segment_ptr.segment_ptr.id() + SEGMENT_SIZE < usize::MAX, + segment_ptr.wf(), + segment_ptr.segment_id@ == segment_id, + i <= SLICES_PER_SEGMENT + 1, + forall|page_id: PageId| #[trigger] + psa_map.dom().contains(page_id) ==> page_id.segment_id == segment_id && 0 + <= page_id.idx < i, + forall|page_id: PageId| #[trigger] + pla_map.dom().contains(page_id) ==> page_id.segment_id == segment_id && 0 + <= page_id.idx < i, + forall|page_id: PageId| + #![trigger psa_map.dom().contains(page_id)] + #![trigger psa_map.index(page_id)] + #![trigger pla_map.dom().contains(page_id)] + #![trigger pla_map.index(page_id)] + { + page_id.segment_id == segment_id && 0 <= page_id.idx < i ==> { + &&& psa_map.dom().contains(page_id) + &&& pla_map.dom().contains(page_id) + &&& pla_map[page_id].inner@.value.is_some() + &&& pla_map[page_id].count@.value.is_some() + &&& pla_map[page_id].prev@.value.is_some() + &&& pla_map[page_id].next@.value.is_some() + &&& pla_map[page_id].inner@.value.unwrap().zeroed() + &&& pla_map[page_id].count@.value.unwrap() == 0 + &&& pla_map[page_id].prev@.value.unwrap().id() == 0 + &&& pla_map[page_id].next@.value.unwrap().id() == 0 + &&& is_page_ptr(psa_map[page_id].points_to@.pptr, page_id) + &&& psa_map[page_id].points_to@.value.is_some() + &&& psa_map[page_id].points_to@.value.unwrap().count.id() + == pla_map[page_id].count@.pcell + &&& psa_map[page_id].points_to@.value.unwrap().inner.id() + == pla_map[page_id].inner@.pcell + &&& psa_map[page_id].points_to@.value.unwrap().prev.id() + == pla_map[page_id].prev@.pcell + &&& psa_map[page_id].points_to@.value.unwrap().next.id() + == pla_map[page_id].next@.pcell + &&& psa_map[page_id].points_to@.value.unwrap().offset == 0 + &&& psa_map[page_id].points_to@.value.unwrap().xthread_free.is_empty() + &&& psa_map[page_id].points_to@.value.unwrap().xthread_free.wf() + &&& psa_map[page_id].points_to@.value.unwrap().xthread_free.instance + == local.instance + &&& psa_map[page_id].points_to@.value.unwrap().xheap.is_empty() + } + }, + { + let ghost page_id = PageId { segment_id, idx: i as nat }; proof { - local.thread_token = thread_state_tok; const_facts(); - segment_start_eq(segment_id, pre_segment_id); - //assert(commit_mask.bytes(segment_id) == commit_mask.bytes(pre_segment_id)); - } - // the C version skips this step if the bytes are all zeroed by the OS - // We would need a complex transmute operation to do the same thing - let tracked seg_header_points_to_raw = mem_chunk.take_points_to_range( - segment_start(segment_id), - SIZEOF_SEGMENT_HEADER as int, - ); - //assert(SIZEOF_SEGMENT_HEADER == vstd::layout::size_of::()); - proof { + //assert(SIZEOF_PAGE_HEADER as int == vstd::layout::size_of::()); segment_start_mult8(segment_id); - } - //assert(segment_start(segment_id) % vstd::layout::align_of::() as int == 0); - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - let tracked mut seg_header_points_to = seg_header_points_to_raw.into_typed::( - segment_start(segment_id), + //assert(cur_page_ptr.id() % 
vstd::layout::align_of::() as int == 0); + assert(COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + SLICES_PER_SEGMENT * SIZEOF_PAGE_HEADER) + <= COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER)) + by (nonlinear_arith) + requires + i <= SLICES_PER_SEGMENT, + ; + //assert(SIZEOF_PAGE_HEADER as int <= + // COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER)); + assert(i * SIZEOF_PAGE_HEADER + SIZEOF_PAGE_HEADER == (i + 1) * SIZEOF_PAGE_HEADER) + by (nonlinear_arith); + //assert(SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER < SEGMENT_SIZE); + //assert(is_page_ptr(cur_page_ptr.id(), page_id)); + } + let ghost phstart = segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i + * SIZEOF_PAGE_HEADER; + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + let tracked page_header_points_to_raw = mem_chunk.take_points_to_range( + phstart, + SIZEOF_PAGE_HEADER as int, ); - let allow_decommit = option_allow_decommit() && !is_pinned && !mem_large; - let (pcell_main, Tracked(pointsto_main)) = PCell::new( - SegmentHeaderMain { - memid: memid, - mem_is_pinned: is_pinned, - mem_is_large: mem_large, - mem_is_committed: commit_mask.is_full(), - mem_alignment: page_alignment, - mem_align_offset: align_offset, - allow_decommit: allow_decommit, - decommit_expire: 0, - decommit_mask: if allow_decommit { - decommit_mask - } else { - CommitMask::empty() - }, - commit_mask: commit_mask, - }, + let tracked mut page_header_points_to = page_header_points_to_raw.into_typed::( + phstart, ); - let (pcell_main2, Tracked(pointsto_main2)) = PCell::new( - SegmentHeaderMain2 { - next: PPtr::from_usize(0), - abandoned: 0, - abandoned_visits: 0, + let (pcell_count, Tracked(pointsto_count)) = PCell::new(0); + let (pcell_inner, Tracked(pointsto_inner)) = PCell::new( + PageInner { + flags0: 0, + capacity: 0, + reserved: 0, + flags1: 0, + flags2: 0, + free: LL::empty(), used: 0, - cookie: 0, - segment_slices: 0, - segment_info_slices: 0, - kind: if required == 0 { - SegmentKind::Normal - } else { - SegmentKind::Huge - }, - slice_entries: 0, + xblock_size: 0, + local_free: LL::empty(), }, ); - let (cur_thread_id, Tracked(is_thread)) = crate::thread::thread_id(); + let (pcell_prev, Tracked(pointsto_prev)) = PCell::new(PPtr::from_usize(0)); + let (pcell_next, Tracked(pointsto_next)) = PCell::new(PPtr::from_usize(0)); + let page = Page { + count: pcell_count, + offset: 0, + inner: pcell_inner, + xthread_free: ThreadLLWithDelayBits::empty(Tracked(local.instance.clone())), + xheap: AtomicHeapPtr::empty(), + prev: pcell_prev, + next: pcell_next, + padding: 0, + }; + let tracked pla = PageLocalAccess { + count: pointsto_count, + inner: pointsto_inner, + prev: pointsto_prev, + next: pointsto_next, + }; + cur_page_ptr.put(Tracked(&mut page_header_points_to), page); + let tracked psa = PageSharedAccess { points_to: page_header_points_to }; proof { - local.is_thread.agrees(is_thread); - } - segment_ptr.segment_ptr.put( - Tracked(&mut seg_header_points_to), - SegmentHeader { - main: pcell_main, - abandoned_next: 0, - main2: pcell_main2, - thread_id: AtomicU64::new( - Ghost((Ghost(local.instance), Ghost(segment_id))), - cur_thread_id.thread_id, - Tracked(thread_of_segment_tok), - ), - instance: Ghost(local.instance), - segment_id: Ghost(segment_id), - }, - ); - //assert(segment_ptr.segment_ptr.id() + SEGMENT_SIZE < usize::MAX); - let mut i: usize = 0; - let mut cur_page_ptr = PPtr::from_usize( - segment_ptr.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER, - ); - //assert(i * SIZEOF_PAGE_HEADER == 0); - let ghost 
old_mem_chunk = mem_chunk; - let tracked mut psa_map = Map::::tracked_empty(); - let tracked mut pla_map = Map::::tracked_empty(); - while i <= SLICES_PER_SEGMENT as usize - invariant - mem_chunk.os == old_mem_chunk.os, - mem_chunk.wf(), - //mem_chunk.pointsto_has_range(segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER, - // COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER)), - set_int_range( - segment_start(segment_id) + SIZEOF_SEGMENT_HEADER, - segment_start(segment_id) + COMMIT_SIZE, - ) <= old_mem_chunk.points_to@.dom(), - mem_chunk.points_to@.dom() =~= old_mem_chunk.points_to@.dom() - set_int_range( - segment_start(segment_id), - segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER, - ), - cur_page_ptr.id() == segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i - * SIZEOF_PAGE_HEADER, - segment_ptr.segment_ptr.id() + SEGMENT_SIZE < usize::MAX, - segment_ptr.wf(), - segment_ptr.segment_id@ == segment_id, - i <= SLICES_PER_SEGMENT + 1, - forall|page_id: PageId| - #[trigger] - psa_map.dom().contains(page_id) ==> page_id.segment_id == segment_id && 0 - <= page_id.idx < i, - forall|page_id: PageId| - #[trigger] - pla_map.dom().contains(page_id) ==> page_id.segment_id == segment_id && 0 - <= page_id.idx < i, - forall|page_id: PageId| - #![trigger psa_map.dom().contains(page_id)] - #![trigger psa_map.index(page_id)] - #![trigger pla_map.dom().contains(page_id)] - #![trigger pla_map.index(page_id)] - { - page_id.segment_id == segment_id && 0 <= page_id.idx < i ==> { - &&& psa_map.dom().contains(page_id) - &&& pla_map.dom().contains(page_id) - &&& pla_map[page_id].inner@.value.is_some() - &&& pla_map[page_id].count@.value.is_some() - &&& pla_map[page_id].prev@.value.is_some() - &&& pla_map[page_id].next@.value.is_some() - &&& pla_map[page_id].inner@.value.unwrap().zeroed() - &&& pla_map[page_id].count@.value.unwrap() == 0 - &&& pla_map[page_id].prev@.value.unwrap().id() == 0 - &&& pla_map[page_id].next@.value.unwrap().id() == 0 - &&& is_page_ptr(psa_map[page_id].points_to@.pptr, page_id) - &&& psa_map[page_id].points_to@.value.is_some() - &&& psa_map[page_id].points_to@.value.unwrap().count.id() - == pla_map[page_id].count@.pcell - &&& psa_map[page_id].points_to@.value.unwrap().inner.id() - == pla_map[page_id].inner@.pcell - &&& psa_map[page_id].points_to@.value.unwrap().prev.id() - == pla_map[page_id].prev@.pcell - &&& psa_map[page_id].points_to@.value.unwrap().next.id() - == pla_map[page_id].next@.pcell - &&& psa_map[page_id].points_to@.value.unwrap().offset == 0 - &&& psa_map[page_id].points_to@.value.unwrap().xthread_free.is_empty() - &&& psa_map[page_id].points_to@.value.unwrap().xthread_free.wf() - &&& psa_map[page_id].points_to@.value.unwrap().xthread_free.instance - == local.instance - &&& psa_map[page_id].points_to@.value.unwrap().xheap.is_empty() - } - }, - { - let ghost page_id = PageId { segment_id, idx: i as nat }; - proof { - const_facts(); - //assert(SIZEOF_PAGE_HEADER as int == vstd::layout::size_of::()); - segment_start_mult8(segment_id); - //assert(cur_page_ptr.id() % vstd::layout::align_of::() as int == 0); - assert(COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + SLICES_PER_SEGMENT * SIZEOF_PAGE_HEADER) - <= COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER)) - by (nonlinear_arith) - requires - i <= SLICES_PER_SEGMENT, - ; - //assert(SIZEOF_PAGE_HEADER as int <= - // COMMIT_SIZE - (SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER)); - assert(i * SIZEOF_PAGE_HEADER + SIZEOF_PAGE_HEADER == (i + 1) * 
SIZEOF_PAGE_HEADER) - by (nonlinear_arith); - //assert(SIZEOF_SEGMENT_HEADER + i * SIZEOF_PAGE_HEADER < SEGMENT_SIZE); - //assert(is_page_ptr(cur_page_ptr.id(), page_id)); - } - let ghost phstart = segment_start(segment_id) + SIZEOF_SEGMENT_HEADER + i - * SIZEOF_PAGE_HEADER; - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - let tracked page_header_points_to_raw = mem_chunk.take_points_to_range( - phstart, - SIZEOF_PAGE_HEADER as int, - ); - let tracked mut page_header_points_to = page_header_points_to_raw.into_typed::( - phstart, - ); - let (pcell_count, Tracked(pointsto_count)) = PCell::new(0); - let (pcell_inner, Tracked(pointsto_inner)) = PCell::new( - PageInner { - flags0: 0, - capacity: 0, - reserved: 0, - flags1: 0, - flags2: 0, - free: LL::empty(), - used: 0, - xblock_size: 0, - local_free: LL::empty(), - }, - ); - let (pcell_prev, Tracked(pointsto_prev)) = PCell::new(PPtr::from_usize(0)); - let (pcell_next, Tracked(pointsto_next)) = PCell::new(PPtr::from_usize(0)); - let page = Page { - count: pcell_count, - offset: 0, - inner: pcell_inner, - xthread_free: ThreadLLWithDelayBits::empty(Tracked(local.instance.clone())), - xheap: AtomicHeapPtr::empty(), - prev: pcell_prev, - next: pcell_next, - padding: 0, - }; - let tracked pla = PageLocalAccess { - count: pointsto_count, - inner: pointsto_inner, - prev: pointsto_prev, - next: pointsto_next, - }; - cur_page_ptr.put(Tracked(&mut page_header_points_to), page); - let tracked psa = PageSharedAccess { points_to: page_header_points_to }; - proof { - psa_map.tracked_insert(page_id, psa); - pla_map.tracked_insert(page_id, pla); - } - //assert(cur_page_ptr.id() + SIZEOF_PAGE_HEADER <= usize::MAX); - i = i + 1; - cur_page_ptr = PPtr::from_usize(cur_page_ptr.to_usize() + SIZEOF_PAGE_HEADER); - /*assert(psa_map.dom().contains(page_id)); + psa_map.tracked_insert(page_id, psa); + pla_map.tracked_insert(page_id, pla); + } + //assert(cur_page_ptr.id() + SIZEOF_PAGE_HEADER <= usize::MAX); + i = i + 1; + cur_page_ptr = PPtr::from_usize(cur_page_ptr.to_usize() + SIZEOF_PAGE_HEADER); + /*assert(psa_map.dom().contains(page_id)); assert( pla_map.dom().contains(page_id)); assert( pla_map[page_id].inner@.value.is_some()); assert( pla_map[page_id].count@.value.is_some()); @@ -21194,33 +21099,33 @@ mod segment { assert( psa_map[page_id].points_to@.value.unwrap().offset == 0); assert( psa_map[page_id].points_to@.value.unwrap().xthread_free.is_empty()); assert( psa_map[page_id].points_to@.value.unwrap().xheap.is_empty());*/ - } - proof { - local.unused_pages.tracked_union_prefer_right(psa_map); - local.pages.tracked_union_prefer_right(pla_map); - local.psa = local.psa.union_prefer_right(psa_map); - let tracked ssa = SegmentSharedAccess { points_to: seg_header_points_to }; - let tracked sla = SegmentLocalAccess { - mem: mem_chunk, - main: pointsto_main, - main2: pointsto_main2, - }; - local.segments.tracked_insert(segment_id, sla); - let tracked thread_state_tok = local.take_thread_token(); - let tracked thread_state_tok = local.instance.segment_enable( - local.thread_id, - segment_id, - ssa, - thread_state_tok, - ssa, - ); - local.thread_token = thread_state_tok; - ////////// Set up pages and stuff - local.page_organization = PageOrg::take_step::create_segment( - local.page_organization, - segment_id, - ); - /*assert forall |page_id| + } + proof { + local.unused_pages.tracked_union_prefer_right(psa_map); + local.pages.tracked_union_prefer_right(pla_map); + local.psa = local.psa.union_prefer_right(psa_map); + let tracked ssa = 
SegmentSharedAccess { points_to: seg_header_points_to }; + let tracked sla = SegmentLocalAccess { + mem: mem_chunk, + main: pointsto_main, + main2: pointsto_main2, + }; + local.segments.tracked_insert(segment_id, sla); + let tracked thread_state_tok = local.take_thread_token(); + let tracked thread_state_tok = local.instance.segment_enable( + local.thread_id, + segment_id, + ssa, + thread_state_tok, + ssa, + ); + local.thread_token = thread_state_tok; + ////////// Set up pages and stuff + local.page_organization = PageOrg::take_step::create_segment( + local.page_organization, + segment_id, + ); + /*assert forall |page_id| #[trigger] local.pages.dom().contains(page_id) && local.unused_pages.dom().contains(page_id) implies local.pages.index(page_id).wf_unused(page_id, local.unused_pages[page_id], local.page_organization.popped) @@ -21234,14 +21139,14 @@ mod segment { assert(local.pages.index(page_id).wf_unused(page_id, local.unused_pages[page_id], local.page_organization.popped)); } }*/ - //assert(i == SLICES_PER_SEGMENT + 1); - //assert(local.segments[segment_id].points_to@.value.unwrap().thread_id.wf( - // local.instance, segment_id)); - /*assert(local.segments[segment_id].wf(segment_id, + //assert(i == SLICES_PER_SEGMENT + 1); + //assert(local.segments[segment_id].points_to@.value.unwrap().thread_id.wf( + // local.instance, segment_id)); + /*assert(local.segments[segment_id].wf(segment_id, local.thread_token@.value.segments.index(segment_id), local.instance));*/ - assert(local.thread_token@.value.segments.dom() =~= local.segments.dom()); - /*let org_pages = local.page_organization.pages; + assert(local.thread_token@.value.segments.dom() =~= local.segments.dom()); + /*let org_pages = local.page_organization.pages; let pages = local.pages; let psa = local.psa; assert forall |page_id| #[trigger] org_pages.dom().contains(page_id) implies @@ -21253,290 +21158,290 @@ mod segment { assert(page_organization_pages_match_data(org_pages[page_id], pages[page_id], local.psa[page_id], page_id, local.page_organization.popped)); } }*/ - /*assert(page_organization_pages_match(local.page_organization.pages, + /*assert(page_organization_pages_match(local.page_organization.pages, local.pages, local.psa, local.page_organization.popped)); assert(local.page_organization_valid());*/ - preserves_mem_chunk_good_except(local_snap1, *local, segment_id); - assert(mem_chunk_good1( - local.segments[segment_id].mem, - segment_id, - local.commit_mask(segment_id).bytes(segment_id), - local.decommit_mask(segment_id).bytes(segment_id), - local.segment_pages_range_total(segment_id), - local.segment_pages_used_total(segment_id), - )) by { - reveal(CommitMask::bytes); - empty_segment_pages_used_total(*local, segment_id); - }//assert(local.mem_chunk_good(segment_id)); - //assert(local.wf_main()); - - } - let first_slice = PagePtr { - page_ptr: PPtr::from_usize(segment_ptr.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER), - page_id: Ghost(PageId { segment_id, idx: 0 }), - }; - //assert(first_slice.wf()); - let success = segment_span_allocate(segment_ptr, first_slice, 1, tld, Tracked(&mut *local)); - if !success { - todo(); // TODO actually we don't need this check cause we can't fail - }//assert(local.wf_main()); - /*let all_page_headers_points_to_raw = mem_chunk.take_points_to_range( + preserves_mem_chunk_good_except(local_snap1, *local, segment_id); + assert(mem_chunk_good1( + local.segments[segment_id].mem, + segment_id, + local.commit_mask(segment_id).bytes(segment_id), + local.decommit_mask(segment_id).bytes(segment_id), + 
local.segment_pages_range_total(segment_id), + local.segment_pages_used_total(segment_id), + )) by { + reveal(CommitMask::bytes); + empty_segment_pages_used_total(*local, segment_id); + } //assert(local.mem_chunk_good(segment_id)); + //assert(local.wf_main()); + + } + let first_slice = PagePtr { + page_ptr: PPtr::from_usize(segment_ptr.segment_ptr.to_usize() + SIZEOF_SEGMENT_HEADER), + page_id: Ghost(PageId { segment_id, idx: 0 }), + }; + //assert(first_slice.wf()); + let success = segment_span_allocate(segment_ptr, first_slice, 1, tld, Tracked(&mut *local)); + if !success { + todo(); // TODO actually we don't need this check cause we can't fail + } //assert(local.wf_main()); + /*let all_page_headers_points_to_raw = mem_chunk.take_points_to_range( segment_start(segment_id) + SIZEOF_SEGMENT_HEADER, (NUM_SLICES + 1) * SIZEOF_PAGE_HEADER, );*/ - let ghost local_snap = *local; - let ghost next_state = PageOrg::take_step::forget_about_first_page2(local.page_organization); - segment_get_mut_main2!(segment_ptr, local, main2 => { - main2.used = main2.used - 1; - }); - proof { - local.page_organization = next_state; - local.psa = local.psa.union_prefer_right(local.unused_pages); - preserves_mem_chunk_good(local_snap, *local); - //assert(local.wf_main()); - } - if required == 0 { - segment_span_free( - segment_ptr, - 1, - SLICES_PER_SEGMENT as usize - 1, - false, - tld, - Tracked(&mut *local), - ); - } else { - todo(); - } - return segment_ptr; - } - - #[verifier::spinoff_prover] - fn segment_os_alloc( - required: usize, - page_alignment: usize, - eager_delay: bool, - req_arena_id: ArenaId, - psegment_slices: usize, - pre_size: usize, - pinfo_slices: usize, - pcommit_mask: &mut CommitMask, - pdecommit_mask: &mut CommitMask, - request_commit: bool, - tld: TldPtr, - Tracked(local): Tracked<&mut Local>,// outparams - // segment_ptr: SegmentPtr, - // new_psegment_slices: usize - // new_ppre_size: usize - // new_pinfo_slices: usize, - // is_zero: bool, - // pcommit: bool, - // memid: MemId, - // mem_large: bool, - // is_pinned: bool, - // align_offset: usize, - ) -> (res: ( - SegmentPtr, - usize, - usize, - usize, - bool, - bool, - MemId, - bool, - bool, - usize, - Tracked, - )) - requires - psegment_slices as int * SLICE_SIZE as int <= usize::MAX, - pinfo_slices == 1, - psegment_slices >= 1, - old(local).wf(), - tld.wf(), - tld.is_in(*old(local)), - psegment_slices == SLICES_PER_SEGMENT, - ensures - local.wf(), - common_preserves(*old(local), *local), - local.page_organization == old(local).page_organization, - pdecommit_mask == old(pdecommit_mask), // this is only modified if segment cache is used - ({ - let ( - segment_ptr, - new_psegment_slices, - new_ppre_size, - new_pinfo_slices, - is_zero, - pcommit, - mem_id, - mem_large, - is_pinned, - align_offset, - mem_chunk, - ) = res; - { - &&& (segment_ptr.segment_ptr.id() != 0 ==> { - &&& segment_ptr.wf() - &&& mem_chunk@.wf() - &&& mem_chunk@.os_exact_range(segment_ptr.segment_ptr.id(), SEGMENT_SIZE as int) - &&& set_int_range( - segment_start(segment_ptr.segment_id@), - segment_start(segment_ptr.segment_id@) + COMMIT_SIZE, - ).subset_of(pcommit_mask.bytes(segment_ptr.segment_id@)) - &&& pcommit_mask.bytes(segment_ptr.segment_id@).subset_of( - mem_chunk@.os_rw_bytes(), - ) - &&& mem_chunk@.os_rw_bytes().subset_of(mem_chunk@.points_to@.dom()) - }) - } - }), - { - proof { - const_facts(); - } - let mut mem_large = !eager_delay; - let mut is_pinned = false; - let mut mem_id: usize = 0; - let mut align_offset: usize = 0; - let mut alignment: usize = 
SEGMENT_ALIGN as usize; - let mut is_zero = false; - let mut pcommit = request_commit; - let mut psegment_slices = psegment_slices; - let mut pinfo_slices = pinfo_slices; - let mut pre_size = pre_size; - let tracked mut mem = MemChunk::empty(); - if page_alignment > 0 { - /* + let ghost local_snap = *local; + let ghost next_state = PageOrg::take_step::forget_about_first_page2(local.page_organization); + segment_get_mut_main2!(segment_ptr, local, main2 => { + main2.used = main2.used - 1; + }); + proof { + local.page_organization = next_state; + local.psa = local.psa.union_prefer_right(local.unused_pages); + preserves_mem_chunk_good(local_snap, *local); + //assert(local.wf_main()); + } + if required == 0 { + segment_span_free( + segment_ptr, + 1, + SLICES_PER_SEGMENT as usize - 1, + false, + tld, + Tracked(&mut *local), + ); + } else { + todo(); + } + return segment_ptr; +} + +#[verifier::spinoff_prover] +fn segment_os_alloc( + required: usize, + page_alignment: usize, + eager_delay: bool, + req_arena_id: ArenaId, + psegment_slices: usize, + pre_size: usize, + pinfo_slices: usize, + pcommit_mask: &mut CommitMask, + pdecommit_mask: &mut CommitMask, + request_commit: bool, + tld: TldPtr, + Tracked(local): Tracked<&mut Local>, // outparams + // segment_ptr: SegmentPtr, + // new_psegment_slices: usize + // new_ppre_size: usize + // new_pinfo_slices: usize, + // is_zero: bool, + // pcommit: bool, + // memid: MemId, + // mem_large: bool, + // is_pinned: bool, + // align_offset: usize, +) -> (res: ( + SegmentPtr, + usize, + usize, + usize, + bool, + bool, + MemId, + bool, + bool, + usize, + Tracked, +)) + requires + psegment_slices as int * SLICE_SIZE as int <= usize::MAX, + pinfo_slices == 1, + psegment_slices >= 1, + old(local).wf(), + tld.wf(), + tld.is_in(*old(local)), + psegment_slices == SLICES_PER_SEGMENT, + ensures + local.wf(), + common_preserves(*old(local), *local), + local.page_organization == old(local).page_organization, + pdecommit_mask == old(pdecommit_mask), // this is only modified if segment cache is used + ({ + let ( + segment_ptr, + new_psegment_slices, + new_ppre_size, + new_pinfo_slices, + is_zero, + pcommit, + mem_id, + mem_large, + is_pinned, + align_offset, + mem_chunk, + ) = res; + { + &&& (segment_ptr.segment_ptr.id() != 0 ==> { + &&& segment_ptr.wf() + &&& mem_chunk@.wf() + &&& mem_chunk@.os_exact_range(segment_ptr.segment_ptr.id(), SEGMENT_SIZE as int) + &&& set_int_range( + segment_start(segment_ptr.segment_id@), + segment_start(segment_ptr.segment_id@) + COMMIT_SIZE, + ).subset_of(pcommit_mask.bytes(segment_ptr.segment_id@)) + &&& pcommit_mask.bytes(segment_ptr.segment_id@).subset_of( + mem_chunk@.os_rw_bytes(), + ) + &&& mem_chunk@.os_rw_bytes().subset_of(mem_chunk@.points_to@.dom()) + }) + } + }), +{ + proof { + const_facts(); + } + let mut mem_large = !eager_delay; + let mut is_pinned = false; + let mut mem_id: usize = 0; + let mut align_offset: usize = 0; + let mut alignment: usize = SEGMENT_ALIGN as usize; + let mut is_zero = false; + let mut pcommit = request_commit; + let mut psegment_slices = psegment_slices; + let mut pinfo_slices = pinfo_slices; + let mut pre_size = pre_size; + let tracked mut mem = MemChunk::empty(); + if page_alignment > 0 { + /* assert(page_alignment >= SEGMENT_ALIGN); alignment = page_alignment; let info_size = pinfo_sizes * SLICE_SIZE; align_offset = align_up(info_size, SEGMENT_ALIGN); */ - todo(); - } - let segment_size = psegment_slices * SLICE_SIZE as usize; - let mut segment = SegmentPtr::null(); - if page_alignment == 0 { - // 
TODO get from cache if possible + todo(); + } + let segment_size = psegment_slices * SLICE_SIZE as usize; + let mut segment = SegmentPtr::null(); + if page_alignment == 0 { + // TODO get from cache if possible + } + if segment.is_null() { + let (_segment, Tracked(_mem), commit, _large, _is_pinned, _is_zero, _mem_id) = + arena_alloc_aligned( + segment_size, + alignment, + align_offset, + request_commit, + mem_large, + req_arena_id, + ); + segment = + SegmentPtr { + segment_ptr: PPtr::from_usize(_segment), + segment_id: Ghost(mk_segment_id(_segment as int)), + }; + mem_id = _mem_id; + mem_large = _large; + is_zero = _is_zero; + is_pinned = _is_pinned; + pcommit = commit; + proof { + mem = _mem; + //assert(segment.wf()); } if segment.is_null() { - let (_segment, Tracked(_mem), commit, _large, _is_pinned, _is_zero, _mem_id) = - arena_alloc_aligned( - segment_size, - alignment, - align_offset, - request_commit, + return ( + segment, + psegment_slices, + pre_size, + pinfo_slices, + is_zero, + pcommit, + mem_id, mem_large, - req_arena_id, - ); - segment = - SegmentPtr { - segment_ptr: PPtr::from_usize(_segment), - segment_id: Ghost(mk_segment_id(_segment as int)), - }; - mem_id = _mem_id; - mem_large = _large; - is_zero = _is_zero; - is_pinned = _is_pinned; - pcommit = commit; - proof { - mem = _mem; - //assert(segment.wf()); - } - if segment.is_null() { - return ( - segment, - psegment_slices, - pre_size, - pinfo_slices, - is_zero, - pcommit, - mem_id, - mem_large, - is_pinned, - align_offset, - Tracked(MemChunk::empty()), - ) - } - if pcommit { - pcommit_mask.create_full(); - } else { - pcommit_mask.create_empty(); - } - } - let commit_needed = pinfo_slices; - let mut commit_needed_mask = CommitMask::empty(); - commit_needed_mask.create(0, commit_needed); - if !pcommit_mask.all_set(&commit_needed_mask) { - //assert(commit_needed as int * COMMIT_SIZE as int <= segment_size); - let (success, is_zero) = crate::os_commit::os_commit( - segment.segment_ptr.to_usize(), - commit_needed * COMMIT_SIZE as usize, - Tracked(&mut mem), + is_pinned, + align_offset, + Tracked(MemChunk::empty()), + ) + } + if pcommit { + pcommit_mask.create_full(); + } else { + pcommit_mask.create_empty(); + } + } + let commit_needed = pinfo_slices; + let mut commit_needed_mask = CommitMask::empty(); + commit_needed_mask.create(0, commit_needed); + if !pcommit_mask.all_set(&commit_needed_mask) { + //assert(commit_needed as int * COMMIT_SIZE as int <= segment_size); + let (success, is_zero) = crate::os_commit::os_commit( + segment.segment_ptr.to_usize(), + commit_needed * COMMIT_SIZE as usize, + Tracked(&mut mem), + ); + if !success { + return ( + SegmentPtr::null(), + 0, + 0, + 0, + false, + false, + 0, + false, + false, + 0, + Tracked(MemChunk::empty()), ); - if !success { - return ( - SegmentPtr::null(), - 0, - 0, - 0, - false, - false, - 0, - false, - false, - 0, - Tracked(MemChunk::empty()), - ); - } - pcommit_mask.set(&commit_needed_mask); - }// note: segment metadata is set by the caller - // TODO what does _mi_segment_map_allocated_at do? + } + pcommit_mask.set(&commit_needed_mask); + } // note: segment metadata is set by the caller + // TODO what does _mi_segment_map_allocated_at do? 
- proof { - /*assert(segment.wf()); + proof { + /*assert(segment.wf()); assert(mem.wf()); assert(mem.os_exact_range(segment.segment_ptr.id(), SEGMENT_SIZE as int));*/ - assert(set_int_range( - segment_start(segment.segment_id@), - segment_start(segment.segment_id@) + COMMIT_SIZE, - ).subset_of(pcommit_mask.bytes(segment.segment_id@))) by { - reveal(CommitMask::bytes); - } - assert(pcommit_mask.bytes(segment.segment_id@).subset_of(mem.os_rw_bytes())) by { - reveal(CommitMask::bytes); - } - assert(mem.os_rw_bytes().subset_of(mem.points_to@.dom())); - } - return ( - segment, - psegment_slices, - pre_size, - pinfo_slices, - is_zero, - pcommit, - mem_id, - mem_large, - is_pinned, - align_offset, - Tracked(mem), - ); - } + assert(set_int_range( + segment_start(segment.segment_id@), + segment_start(segment.segment_id@) + COMMIT_SIZE, + ).subset_of(pcommit_mask.bytes(segment.segment_id@))) by { + reveal(CommitMask::bytes); + } + assert(pcommit_mask.bytes(segment.segment_id@).subset_of(mem.os_rw_bytes())) by { + reveal(CommitMask::bytes); + } + assert(mem.os_rw_bytes().subset_of(mem.points_to@.dom())); + } + return ( + segment, + psegment_slices, + pre_size, + pinfo_slices, + is_zero, + pcommit, + mem_id, + mem_large, + is_pinned, + align_offset, + Tracked(mem), + ); +} - fn segment_free(segment: SegmentPtr, force: bool, tld: TldPtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf(), - tld.wf(), - tld.is_in(*old(local)), - segment.wf(), - segment.is_in(*old(local)), - ensures - local.wf(), - common_preserves(*old(local), *local), - { - todo(); - /* +fn segment_free(segment: SegmentPtr, force: bool, tld: TldPtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf(), + tld.wf(), + tld.is_in(*old(local)), + segment.wf(), + segment.is_in(*old(local)), + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + todo(); + /* proof { let next_state = PageOrg::take_step::segment_freeing_start(local.page_organization, segment.segment_id@); local.page_organization = next_state; @@ -21579,21 +21484,21 @@ mod segment { // mi_segment_os_free(segment, tld); */ - } +} - fn segment_os_free(segment: SegmentPtr, tld: TldPtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf_main(), - segment.wf(), - segment.is_in(*old(local)), - tld.wf(), - tld.is_in(*old(local)), - { - // TODO segment_map_freed_at(segment); - //let size = segment_size(segment, Tracked(&*local)) as isize; - //segments_track_size(-size, tld, Tracked(&mut *local)); - todo(); - /* +fn segment_os_free(segment: SegmentPtr, tld: TldPtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf_main(), + segment.wf(), + segment.is_in(*old(local)), + tld.wf(), + tld.is_in(*old(local)), +{ + // TODO segment_map_freed_at(segment); + //let size = segment_size(segment, Tracked(&*local)) as isize; + //segments_track_size(-size, tld, Tracked(&mut *local)); + todo(); + /* let skip_cache_push = size != SEGMENT_SIZE || segment.get_mem_align_offset(Tracked(&*local)) != 0 || segment.is_kind_huge(Tracked(&*local)); @@ -21604,122 +21509,122 @@ mod segment { // !_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os)) } */ +} + +// segment_slices = # of slices in the segment +// pre_size = size of the pages that contain the segment metadata +// info_slices = # of slices needed to contain the pages of the segment metadata +fn segment_calculate_slices(required: usize) -> (res: (usize, usize, usize)) + requires + 
required == 0, + ensures + ({ + let (num_slices, pre_size, info_slices) = res; + required == 0 ==> num_slices == SLICES_PER_SEGMENT && pre_size + == crate::os_mem::page_size() && info_slices == 1 + }), +{ + proof { + const_facts(); } + let page_size = crate::os_mem::get_page_size(); + let i_size = align_up(SIZEOF_SEGMENT_HEADER, page_size); + let guardsize = 0; + let pre_size = i_size; + let j_size = align_up(i_size + guardsize, SLICE_SIZE as usize); + let info_slices = j_size / SLICE_SIZE as usize; + let segment_size = if required == 0 { + SEGMENT_SIZE as usize + } else { + align_up(required + j_size + guardsize, SLICE_SIZE as usize) + }; + let num_slices = segment_size / SLICE_SIZE as usize; + (num_slices, pre_size, info_slices) +} - // segment_slices = # of slices in the segment - // pre_size = size of the pages that contain the segment metadata - // info_slices = # of slices needed to contain the pages of the segment metadata - fn segment_calculate_slices(required: usize) -> (res: (usize, usize, usize)) - requires - required == 0, - ensures - ({ - let (num_slices, pre_size, info_slices) = res; - required == 0 ==> num_slices == SLICES_PER_SEGMENT && pre_size - == crate::os_mem::page_size() && info_slices == 1 - }), - { - proof { - const_facts(); - } - let page_size = crate::os_mem::get_page_size(); - let i_size = align_up(SIZEOF_SEGMENT_HEADER, page_size); - let guardsize = 0; - let pre_size = i_size; - let j_size = align_up(i_size + guardsize, SLICE_SIZE as usize); - let info_slices = j_size / SLICE_SIZE as usize; - let segment_size = if required == 0 { - SEGMENT_SIZE as usize +#[verifier::spinoff_prover] +fn segment_span_free( + segment_ptr: SegmentPtr, + slice_index: usize, + slice_count: usize, + allow_decommit: bool, + tld_ptr: TldPtr, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf_main(), + tld_ptr.wf(), + tld_ptr.is_in(*old(local)), + segment_ptr.wf(), + segment_ptr.is_in(*old(local)), + 0 <= slice_index, + slice_index + slice_count <= SLICES_PER_SEGMENT, + old(local).page_organization.popped == Popped::VeryUnready( + segment_ptr.segment_id@, + slice_index as int, + slice_count as int, + old(local).page_organization.popped.get_VeryUnready_3(), + ), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + segment_ptr.is_in(*local), + local.page_organization.popped == if old( + local, + ).page_organization.popped.get_VeryUnready_3() { + Popped::ExtraCount(segment_ptr.segment_id@) } else { - align_up(required + j_size + guardsize, SLICE_SIZE as usize) - }; - let num_slices = segment_size / SLICE_SIZE as usize; - (num_slices, pre_size, info_slices) + Popped::No + }, + local.pages.dom() =~= old(local).pages.dom(), +{ + let bin_idx = slice_bin(slice_count); + proof { + const_facts(); } - - #[verifier::spinoff_prover] - fn segment_span_free( - segment_ptr: SegmentPtr, - slice_index: usize, - slice_count: usize, - allow_decommit: bool, - tld_ptr: TldPtr, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf_main(), - tld_ptr.wf(), - tld_ptr.is_in(*old(local)), - segment_ptr.wf(), - segment_ptr.is_in(*old(local)), - 0 <= slice_index, - slice_index + slice_count <= SLICES_PER_SEGMENT, - old(local).page_organization.popped == Popped::VeryUnready( - segment_ptr.segment_id@, - slice_index as int, - slice_count as int, - old(local).page_organization.popped.get_VeryUnready_3(), - ), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - segment_ptr.is_in(*local), - local.page_organization.popped == if old( - local, - 
).page_organization.popped.get_VeryUnready_3() { - Popped::ExtraCount(segment_ptr.segment_id@) - } else { - Popped::No - }, - local.pages.dom() =~= old(local).pages.dom(), - { - let bin_idx = slice_bin(slice_count); - proof { - const_facts(); - } - let ghost mut next_state; - proof { - //assert(valid_sbin_idx(bin_idx as int)); - next_state = - PageOrg::take_step::free_to_unused_queue(local.page_organization, bin_idx as int); - } - let slice = segment_ptr.get_page_header_ptr(slice_index); - unused_page_get_mut_count!(slice, local, c => { + let ghost mut next_state; + proof { + //assert(valid_sbin_idx(bin_idx as int)); + next_state = + PageOrg::take_step::free_to_unused_queue(local.page_organization, bin_idx as int); + } + let slice = segment_ptr.get_page_header_ptr(slice_index); + unused_page_get_mut_count!(slice, local, c => { c = slice_count as u32; }); - unused_page_get_mut!(slice, local, page => { + unused_page_get_mut!(slice, local, page => { page.offset = 0; }); - if slice_count > 1 { - let last = segment_ptr.get_page_header_ptr(slice_index + slice_count - 1); - unused_page_get_mut!(last, local, page => { + if slice_count > 1 { + let last = segment_ptr.get_page_header_ptr(slice_index + slice_count - 1); + unused_page_get_mut!(last, local, page => { //assert(SIZEOF_PAGE_HEADER as u32 == 80); //assert(slice_count as u32 == slice_count); page.offset = (slice_count as u32 - 1) * SIZEOF_PAGE_HEADER as u32; }); - } - proof { - //assert(SLICE_SIZE as usize == 65536); - local.psa = local.psa.union_prefer_right(local.unused_pages); - preserves_mem_chunk_good(*old(local), *local); - //assert(local.page_organization_valid()); - //assert(local.wf_main()); - very_unready_range_okay_to_decommit(*local); - //assert(slice_index * SLICE_SIZE + slice_count * SLICE_SIZE - // == (slice_index + slice_count) * SLICE_SIZE); - } - if allow_decommit { - segment_perhaps_decommit( - segment_ptr, - slice.slice_start(), - slice_count * SLICE_SIZE as usize, - Tracked(&mut *local), - ); - }//assert(local.wf_main()); + } + proof { + //assert(SLICE_SIZE as usize == 65536); + local.psa = local.psa.union_prefer_right(local.unused_pages); + preserves_mem_chunk_good(*old(local), *local); + //assert(local.page_organization_valid()); + //assert(local.wf_main()); + very_unready_range_okay_to_decommit(*local); + //assert(slice_index * SLICE_SIZE + slice_count * SLICE_SIZE + // == (slice_index + slice_count) * SLICE_SIZE); + } + if allow_decommit { + segment_perhaps_decommit( + segment_ptr, + slice.slice_start(), + slice_count * SLICE_SIZE as usize, + Tracked(&mut *local), + ); + } //assert(local.wf_main()); - let ghost local_snap = *local; - let first_in_queue; - tld_get_mut!(tld_ptr, local, tld => { + let ghost local_snap = *local; + let first_in_queue; + tld_get_mut!(tld_ptr, local, tld => { let mut cq = tld.segments.span_queue_headers[bin_idx]; first_in_queue = cq.first; @@ -21730,33 +21635,33 @@ mod segment { tld.segments.span_queue_headers.set(bin_idx, cq); }); - if first_in_queue.to_usize() != 0 { - let first_in_queue_ptr = PagePtr { - page_ptr: first_in_queue, - page_id: Ghost( - local.page_organization.unused_dlist_headers[bin_idx as int].first.get_Some_0(), - ), - }; - unused_page_get_mut_prev!(first_in_queue_ptr, local, p => { + if first_in_queue.to_usize() != 0 { + let first_in_queue_ptr = PagePtr { + page_ptr: first_in_queue, + page_id: Ghost( + local.page_organization.unused_dlist_headers[bin_idx as int].first.get_Some_0(), + ), + }; + unused_page_get_mut_prev!(first_in_queue_ptr, local, p => { p = 
slice.page_ptr; }); - } - unused_page_get_mut_prev!(slice, local, p => { + } + unused_page_get_mut_prev!(slice, local, p => { p = PPtr::from_usize(0); }); - unused_page_get_mut_next!(slice, local, n => { + unused_page_get_mut_next!(slice, local, n => { n = first_in_queue; }); - unused_page_get_mut_inner!(slice, local, inner => { + unused_page_get_mut_inner!(slice, local, inner => { inner.xblock_size = 0; }); - proof { - let old_state = local.page_organization; - local.page_organization = next_state; - local.psa = local.psa.union_prefer_right(local.unused_pages); - assert_sets_equal!(local.page_organization.pages.dom(), local.pages.dom()); - preserves_mem_chunk_good(local_snap, *local); - /* + proof { + let old_state = local.page_organization; + local.page_organization = next_state; + local.psa = local.psa.union_prefer_right(local.unused_pages); + assert_sets_equal!(local.page_organization.pages.dom(), local.pages.dom()); + preserves_mem_chunk_good(local_snap, *local); + /* let org_pages = local.page_organization.pages; let pages = local.pages; let psa = local.psa; @@ -21791,69 +21696,69 @@ mod segment { } } */ - assert(local.wf_main()); - } + assert(local.wf_main()); } +} - pub fn segment_page_free( - page: PagePtr, - force: bool, - tld: TldPtr, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf_main(), - tld.wf(), - tld.is_in(*old(local)), - page.wf(), - page.is_in(*old(local)), - old(local).page_organization.popped == Popped::Used(page.page_id@, true), - old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, - ensures - local.wf(), - common_preserves(*old(local), *local), - { - let segment = SegmentPtr::ptr_segment(page); - segment_page_clear(page, tld, Tracked(&mut *local)); - let used = segment.get_used(Tracked(&*local)); - if used == 0 { - segment_free(segment, force, tld, Tracked(&mut *local)); - } else if used == segment.get_abandoned(Tracked(&*local)) { - todo(); - } +pub fn segment_page_free( + page: PagePtr, + force: bool, + tld: TldPtr, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf_main(), + tld.wf(), + tld.is_in(*old(local)), + page.wf(), + page.is_in(*old(local)), + old(local).page_organization.popped == Popped::Used(page.page_id@, true), + old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + let segment = SegmentPtr::ptr_segment(page); + segment_page_clear(page, tld, Tracked(&mut *local)); + let used = segment.get_used(Tracked(&*local)); + if used == 0 { + segment_free(segment, force, tld, Tracked(&mut *local)); + } else if used == segment.get_abandoned(Tracked(&*local)) { + todo(); } +} - #[verifier::spinoff_prover] - fn segment_page_clear(page: PagePtr, tld: TldPtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf_main(), - tld.wf(), - tld.is_in(*old(local)), - page.wf(), - page.is_in(*old(local)), - old(local).page_organization.popped == Popped::Used(page.page_id@, true), - old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, - ensures - local.wf(), - page.is_in(*local), - common_preserves(*old(local), *local), - { - let ghost page_id = page.page_id@; - let ghost next_state = PageOrg::take_step::set_range_to_not_used(local.page_organization); - let ghost n_slices = local.page_organization.pages[page_id].count.unwrap(); - //assert(page.is_used_and_primary(*local)); - //assert(local.thread_token@.value.pages.dom().contains(page_id)); - let ghost page_state = local.thread_token@.value.pages[page_id]; - let segment = 
SegmentPtr::ptr_segment(page); - let mem_is_pinned = segment.get_mem_is_pinned(Tracked(&*local)); - let is_reset = page.get_inner_ref(Tracked(&*local)).get_is_reset(); - let option_page_reset = option_page_reset(); - if !mem_is_pinned && !is_reset && option_page_reset { - todo(); - } - let tracked block_tokens; - let tracked block_pt; - page_get_mut_inner!(page, local, inner => { +#[verifier::spinoff_prover] +fn segment_page_clear(page: PagePtr, tld: TldPtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf_main(), + tld.wf(), + tld.is_in(*old(local)), + page.wf(), + page.is_in(*old(local)), + old(local).page_organization.popped == Popped::Used(page.page_id@, true), + old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, + ensures + local.wf(), + page.is_in(*local), + common_preserves(*old(local), *local), +{ + let ghost page_id = page.page_id@; + let ghost next_state = PageOrg::take_step::set_range_to_not_used(local.page_organization); + let ghost n_slices = local.page_organization.pages[page_id].count.unwrap(); + //assert(page.is_used_and_primary(*local)); + //assert(local.thread_token@.value.pages.dom().contains(page_id)); + let ghost page_state = local.thread_token@.value.pages[page_id]; + let segment = SegmentPtr::ptr_segment(page); + let mem_is_pinned = segment.get_mem_is_pinned(Tracked(&*local)); + let is_reset = page.get_inner_ref(Tracked(&*local)).get_is_reset(); + let option_page_reset = option_page_reset(); + if !mem_is_pinned && !is_reset && option_page_reset { + todo(); + } + let tracked block_tokens; + let tracked block_pt; + page_get_mut_inner!(page, local, inner => { inner.set_is_zero_init(false); inner.capacity = 0; inner.reserved = 0; @@ -21869,65 +21774,65 @@ mod segment { page_state.num_blocks as int); proof { block_tokens = _block_tokens; block_pt = _block_pt; } }); - let tracked psa_map; - proof { - let tracked thread_state_tok = local.take_thread_token(); - let block_state_map = Map::new( - |block_id: BlockId| block_tokens.dom().contains(block_id), - |block_id: BlockId| block_tokens[block_id]@.value, - ); - assert(block_state_map.dom() =~= block_tokens.dom()); - let tracked thread_state_tok = local.instance.page_destroy_block_tokens( - local.thread_id, - page_id, - block_state_map, - thread_state_tok, - block_tokens, - ); - assert forall|pid: PageId| - page_id.range_from(0, n_slices as int).contains( - pid, - ) implies thread_state_tok@.value.pages.dom().contains(pid) by { - assert(pid.segment_id == page_id.segment_id); - assert(page_id.idx <= pid.idx < page_id.idx + n_slices); - assert(local.page_organization.pages.dom().contains(pid)); - assert(local.page_organization.pages[pid].is_used); - } - local.thread_token = thread_state_tok; - } - let tracked checked_tok = local.take_checked_token(); - let tracked perm = &local.instance.thread_local_state_guards_page( + let tracked psa_map; + proof { + let tracked thread_state_tok = local.take_thread_token(); + let block_state_map = Map::new( + |block_id: BlockId| block_tokens.dom().contains(block_id), + |block_id: BlockId| block_tokens[block_id]@.value, + ); + assert(block_state_map.dom() =~= block_tokens.dom()); + let tracked thread_state_tok = local.instance.page_destroy_block_tokens( local.thread_id, - page.page_id@, - &local.thread_token, - ).points_to; - let Tracked(checked_tok) = page.page_ptr.borrow(Tracked(perm)).xthread_free.check_is_good( - Tracked(&local.thread_token), - Tracked(checked_tok), + page_id, + block_state_map, + thread_state_tok, + block_tokens, ); - proof { - let tracked 
thread_state_tok = local.take_thread_token(); - let tracked (Tracked(thread_state_tok), Tracked(_psa_map)) = local.instance.page_disable( - local.thread_id, - page_id, - n_slices, - thread_state_tok, - &checked_tok, - ); - local.thread_token = thread_state_tok; - local.checked_token = checked_tok; - psa_map = _psa_map; - local.unused_pages.tracked_union_prefer_right(psa_map); - } - let tracked delay_token; - let tracked heap_of_page_token; - unused_page_get_mut!(page, local, page => { + assert forall|pid: PageId| + page_id.range_from(0, n_slices as int).contains( + pid, + ) implies thread_state_tok@.value.pages.dom().contains(pid) by { + assert(pid.segment_id == page_id.segment_id); + assert(page_id.idx <= pid.idx < page_id.idx + n_slices); + assert(local.page_organization.pages.dom().contains(pid)); + assert(local.page_organization.pages[pid].is_used); + } + local.thread_token = thread_state_tok; + } + let tracked checked_tok = local.take_checked_token(); + let tracked perm = &local.instance.thread_local_state_guards_page( + local.thread_id, + page.page_id@, + &local.thread_token, + ).points_to; + let Tracked(checked_tok) = page.page_ptr.borrow(Tracked(perm)).xthread_free.check_is_good( + Tracked(&local.thread_token), + Tracked(checked_tok), + ); + proof { + let tracked thread_state_tok = local.take_thread_token(); + let tracked (Tracked(thread_state_tok), Tracked(_psa_map)) = local.instance.page_disable( + local.thread_id, + page_id, + n_slices, + thread_state_tok, + &checked_tok, + ); + local.thread_token = thread_state_tok; + local.checked_token = checked_tok; + psa_map = _psa_map; + local.unused_pages.tracked_union_prefer_right(psa_map); + } + let tracked delay_token; + let tracked heap_of_page_token; + unused_page_get_mut!(page, local, page => { let Tracked(_delay_token) = page.xthread_free.disable(); let Tracked(_heap_of_page_token) = page.xheap.disable(); proof { delay_token = _delay_token; heap_of_page_token = _heap_of_page_token; } }); - /* + /* used_page_get_mut_prev!(page, local, p => { p = PPtr::from_usize(0); }); @@ -21935,8 +21840,8 @@ mod segment { n = PPtr::from_usize(0); }); */ - proof { - /*assert forall |pid: PageId| + proof { + /*assert forall |pid: PageId| page_id.range_from(0, n_slices as int).contains(pid) && page_id != pid implies local.thread_token@.value.pages[pid].offset != 0 by { @@ -21945,27 +21850,27 @@ mod segment { //assert(local.page_organization.pages[pid].offset.is_some()); //assert(local.page_organization.pages[pid].offset.unwrap() != 0); }*/ - local.psa = local.psa.union_prefer_right(local.unused_pages); - let segment_id = page_id.segment_id; - let tracked mut seg = local.segments.tracked_remove(segment_id); - seg.mem.give_points_to_range(block_pt); - local.segments.tracked_insert(segment_id, seg); - local.page_organization = next_state; - let tracked thread_state_tok = local.take_thread_token(); - //assert(delay_token@.key == page_id); - //assert(heap_of_page_token@.key == page_id); - let tracked thread_tok = local.instance.page_destroy_tokens( - local.thread_id, - page_id, - n_slices, - thread_state_tok, - delay_token, - heap_of_page_token, - ); - local.thread_token = thread_tok; - preserves_mem_chunk_good_on_transfer_back(*old(local), *local, page_id); - preserves_mem_chunk_good_except(*old(local), *local, segment_id); - /*assert forall |pid| + local.psa = local.psa.union_prefer_right(local.unused_pages); + let segment_id = page_id.segment_id; + let tracked mut seg = local.segments.tracked_remove(segment_id); + 
seg.mem.give_points_to_range(block_pt); + local.segments.tracked_insert(segment_id, seg); + local.page_organization = next_state; + let tracked thread_state_tok = local.take_thread_token(); + //assert(delay_token@.key == page_id); + //assert(heap_of_page_token@.key == page_id); + let tracked thread_tok = local.instance.page_destroy_tokens( + local.thread_id, + page_id, + n_slices, + thread_state_tok, + delay_token, + heap_of_page_token, + ); + local.thread_token = thread_tok; + preserves_mem_chunk_good_on_transfer_back(*old(local), *local, page_id); + preserves_mem_chunk_good_except(*old(local), *local, segment_id); + /*assert forall |pid| #[trigger] local.pages.dom().contains(pid) implies ((local.unused_pages.dom().contains(pid) <==> !local.thread_token@.value.pages.dom().contains(pid))) @@ -21985,7 +21890,7 @@ mod segment { assert(local.thread_token@.value.pages.dom().contains(pid)); } }*/ - /*let org_pages = local.page_organization.pages; + /*let org_pages = local.page_organization.pages; let pages = local.pages; let psa = local.psa; @@ -22023,102 +21928,102 @@ mod segment { assert(page_organization_pages_match(local.page_organization.pages, local.pages, local.psa)); assert(local.page_organization_valid());*/ - assert(local.wf_main()); - } - segment_span_free_coalesce(page, tld, Tracked(&mut *local)); - let ghost local_snap = *local; - let ghost next_state = PageOrg::take_step::clear_ec(local.page_organization); - segment_get_mut_main2!(segment, local, main2 => { + assert(local.wf_main()); + } + segment_span_free_coalesce(page, tld, Tracked(&mut *local)); + let ghost local_snap = *local; + let ghost next_state = PageOrg::take_step::clear_ec(local.page_organization); + segment_get_mut_main2!(segment, local, main2 => { main2.used = main2.used - 1; }); - proof { - local.page_organization = next_state; - local.psa = local.psa.union_prefer_right(local.unused_pages); - preserves_mem_chunk_good(local_snap, *local); - //assert(local.wf_main()); - } - proof { - preserves_mem_chunk_good(local_snap, *local); - //assert(local.wf()); - } + proof { + local.page_organization = next_state; + local.psa = local.psa.union_prefer_right(local.unused_pages); + preserves_mem_chunk_good(local_snap, *local); + //assert(local.wf_main()); + } + proof { + preserves_mem_chunk_good(local_snap, *local); + //assert(local.wf()); } +} - fn segment_span_free_coalesce(slice: PagePtr, tld: TldPtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf_main(), - tld.wf(), - tld.is_in(*old(local)), - slice.wf(), - slice.is_in(*old(local)), - match old(local).page_organization.popped { - Popped::VeryUnready(sid, idx, c, _) => slice.page_id@.segment_id == sid - && slice.page_id@.idx == idx && c == old( - local, - ).pages[slice.page_id@].count@.value.unwrap(), - _ => false, +fn segment_span_free_coalesce(slice: PagePtr, tld: TldPtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf_main(), + tld.wf(), + tld.is_in(*old(local)), + slice.wf(), + slice.is_in(*old(local)), + match old(local).page_organization.popped { + Popped::VeryUnready(sid, idx, c, _) => slice.page_id@.segment_id == sid + && slice.page_id@.idx == idx && c == old( + local, + ).pages[slice.page_id@].count@.value.unwrap(), + _ => false, + }, + ensures + local.wf_main(), + slice.is_in(*local), + common_preserves(*old(local), *local), + local.page_organization.popped == (match old(local).page_organization.popped { + Popped::VeryUnready(_, _, _, b) => { + if b { + Popped::ExtraCount(slice.page_id@.segment_id) + } else { + Popped::No + } }, - 
ensures - local.wf_main(), - slice.is_in(*local), - common_preserves(*old(local), *local), - local.page_organization.popped == (match old(local).page_organization.popped { - Popped::VeryUnready(_, _, _, b) => { - if b { - Popped::ExtraCount(slice.page_id@.segment_id) - } else { - Popped::No - } - }, - _ => arbitrary(), - }), - { - let segment = SegmentPtr::ptr_segment(slice); - let is_abandoned = segment.is_abandoned(Tracked(&*local)); - if is_abandoned { - todo(); - } - let kind = segment.get_segment_kind(Tracked(&*local)); - if matches!(kind, SegmentKind::Huge) { - todo(); - } - let mut slice_count = slice.get_count(Tracked(&*local)); - proof { - local.page_organization.get_count_bound_very_unready(); - //assert(slice_count == local.page_organization.pages[slice.page_id@].count.unwrap()); - const_facts(); - } - //// Merge with the 'after' page - let (page, less_than_end) = slice.add_offset_and_check(slice_count as usize, segment); - proof { - if less_than_end { - local.page_organization.valid_page_after(); //slice.page_id@, page.page_id@); - } + _ => arbitrary(), + }), +{ + let segment = SegmentPtr::ptr_segment(slice); + let is_abandoned = segment.is_abandoned(Tracked(&*local)); + if is_abandoned { + todo(); + } + let kind = segment.get_segment_kind(Tracked(&*local)); + if matches!(kind, SegmentKind::Huge) { + todo(); + } + let mut slice_count = slice.get_count(Tracked(&*local)); + proof { + local.page_organization.get_count_bound_very_unready(); + //assert(slice_count == local.page_organization.pages[slice.page_id@].count.unwrap()); + const_facts(); + } + //// Merge with the 'after' page + let (page, less_than_end) = slice.add_offset_and_check(slice_count as usize, segment); + proof { + if less_than_end { + local.page_organization.valid_page_after(); //slice.page_id@, page.page_id@); } - if less_than_end && page.get_inner_ref(Tracked(&*local)).xblock_size == 0 { - let ghost page_id = page.page_id@; - let ghost local_snap = *local; - let ghost next_state = PageOrg::take_step::merge_with_after(local.page_organization); - let prev_ptr = page.get_prev(Tracked(&*local)); - let next_ptr = page.get_next(Tracked(&*local)); - let ghost prev_page_id = - local.page_organization.pages[page_id].dlist_entry.unwrap().prev.unwrap(); - let prev = PagePtr { page_ptr: prev_ptr, page_id: Ghost(prev_page_id) }; - let ghost next_page_id = - local.page_organization.pages[page_id].dlist_entry.unwrap().next.unwrap(); - let next = PagePtr { page_ptr: next_ptr, page_id: Ghost(next_page_id) }; - let n_count = page.get_count(Tracked(&*local)); - let sbin_idx = slice_bin(n_count as usize); - if prev_ptr.to_usize() != 0 { - unused_page_get_mut_next!(prev, local, n => { + } + if less_than_end && page.get_inner_ref(Tracked(&*local)).xblock_size == 0 { + let ghost page_id = page.page_id@; + let ghost local_snap = *local; + let ghost next_state = PageOrg::take_step::merge_with_after(local.page_organization); + let prev_ptr = page.get_prev(Tracked(&*local)); + let next_ptr = page.get_next(Tracked(&*local)); + let ghost prev_page_id = + local.page_organization.pages[page_id].dlist_entry.unwrap().prev.unwrap(); + let prev = PagePtr { page_ptr: prev_ptr, page_id: Ghost(prev_page_id) }; + let ghost next_page_id = + local.page_organization.pages[page_id].dlist_entry.unwrap().next.unwrap(); + let next = PagePtr { page_ptr: next_ptr, page_id: Ghost(next_page_id) }; + let n_count = page.get_count(Tracked(&*local)); + let sbin_idx = slice_bin(n_count as usize); + if prev_ptr.to_usize() != 0 { + unused_page_get_mut_next!(prev, 
local, n => { n = next_ptr; }); - } - if next_ptr.to_usize() != 0 { - unused_page_get_mut_prev!(next, local, p => { + } + if next_ptr.to_usize() != 0 { + unused_page_get_mut_prev!(next, local, p => { p = prev_ptr; }); - } - tld_get_mut!(tld, local, tld => { + } + tld_get_mut!(tld, local, tld => { let mut cq = tld.segments.span_queue_headers[sbin_idx]; if prev_ptr.to_usize() == 0 { @@ -22130,11 +22035,11 @@ mod segment { tld.segments.span_queue_headers.set(sbin_idx, cq); }); - slice_count += n_count; - proof { - //assert(!local.page_organization.pages[page_id].is_used); - local.page_organization = next_state; - /*let local1 = local_snap; + slice_count += n_count; + proof { + //assert(!local.page_organization.pages[page_id].is_used); + local.page_organization = next_state; + /*let local1 = local_snap; let local2 = *local; assert forall |page_id| local1.is_used_primary(page_id) implies local2.is_used_primary(page_id) @@ -22147,131 +22052,131 @@ mod segment { assert(local1.page_reserved(page_id) <= local2.page_reserved(page_id)); assert(local1.block_size(page_id) == local2.block_size(page_id)); }*/ - preserves_mem_chunk_good(local_snap, *local); - //assert(page_organization_queues_match(local.page_organization.unused_dlist_headers, - // local.tld@.value.get_Some_0().segments.span_queue_headers@)); - //assert(local.page_organization_valid()); - //assert(local.wf_main()); - } + preserves_mem_chunk_good(local_snap, *local); + //assert(page_organization_queues_match(local.page_organization.unused_dlist_headers, + // local.tld@.value.get_Some_0().segments.span_queue_headers@)); + //assert(local.page_organization_valid()); + //assert(local.wf_main()); } - assert(local.wf_main()); - //// Merge with the 'before' page - // Had to factor this out for timeout-related reasons :\ - let (slice, slice_count) = segment_span_free_coalesce_before( - segment, - slice, - tld, - Tracked(&mut *local), - slice_count, - ); - segment_span_free( - segment, - slice.get_index(), - slice_count as usize, - true, - tld, - Tracked(&mut *local), - ); } + assert(local.wf_main()); + //// Merge with the 'before' page + // Had to factor this out for timeout-related reasons :\ + let (slice, slice_count) = segment_span_free_coalesce_before( + segment, + slice, + tld, + Tracked(&mut *local), + slice_count, + ); + segment_span_free( + segment, + slice.get_index(), + slice_count as usize, + true, + tld, + Tracked(&mut *local), + ); +} - #[inline(always)] - #[verifier::spinoff_prover] - fn segment_span_free_coalesce_before( - segment: SegmentPtr, - slice: PagePtr, - tld: TldPtr, - Tracked(local): Tracked<&mut Local>, - slice_count: u32, - ) -> (res: (PagePtr, u32)) - requires - old(local).wf_main(), - tld.wf(), - tld.is_in(*old(local)), - segment.wf(), - segment.segment_id@ == slice.page_id@.segment_id, - slice.wf(), - slice.is_in(*old(local)), - old(local).page_organization.popped == Popped::VeryUnready( +#[inline(always)] +#[verifier::spinoff_prover] +fn segment_span_free_coalesce_before( + segment: SegmentPtr, + slice: PagePtr, + tld: TldPtr, + Tracked(local): Tracked<&mut Local>, + slice_count: u32, +) -> (res: (PagePtr, u32)) + requires + old(local).wf_main(), + tld.wf(), + tld.is_in(*old(local)), + segment.wf(), + segment.segment_id@ == slice.page_id@.segment_id, + slice.wf(), + slice.is_in(*old(local)), + old(local).page_organization.popped == Popped::VeryUnready( + slice.page_id@.segment_id, + slice.page_id@.idx as int, + slice_count as int, + old(local).page_organization.popped.get_VeryUnready_3(), + ), + ensures + 
local.wf_main(), + common_preserves(*old(local), *local), + slice.is_in(*local), + slice.page_id@.segment_id == res.0.page_id@.segment_id, + ({ + let (slice, slice_count) = res; + slice.wf() && local.page_organization.popped == Popped::VeryUnready( slice.page_id@.segment_id, slice.page_id@.idx as int, slice_count as int, old(local).page_organization.popped.get_VeryUnready_3(), - ), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - slice.is_in(*local), - slice.page_id@.segment_id == res.0.page_id@.segment_id, - ({ - let (slice, slice_count) = res; - slice.wf() && local.page_organization.popped == Popped::VeryUnready( - slice.page_id@.segment_id, - slice.page_id@.idx as int, - slice_count as int, - old(local).page_organization.popped.get_VeryUnready_3(), - ) && slice.page_id@.idx + slice_count <= SLICES_PER_SEGMENT - }), - { + ) && slice.page_id@.idx + slice_count <= SLICES_PER_SEGMENT + }), +{ + proof { + const_facts(); + } + let ghost orig_id = slice.page_id@; + let mut slice = slice; + let mut slice_count = slice_count; + if slice.is_gt_0th_slice(segment) { proof { - const_facts(); - } - let ghost orig_id = slice.page_id@; - let mut slice = slice; - let mut slice_count = slice_count; - if slice.is_gt_0th_slice(segment) { - proof { - /*assert(local.page_organization.popped == Popped::VeryUnready( + /*assert(local.page_organization.popped == Popped::VeryUnready( slice.page_id@.segment_id, slice.page_id@.idx as int, slice_count as int, local.page_organization.popped.get_VeryUnready_3()));*/ - local.page_organization.valid_page_before(); - } - let last = slice.sub_offset(1); - //assert(local.page_organization.pages.dom().contains(last.page_id@)); - let offset = last.get_ref(Tracked(&*local)).offset; // multiplied by SIZEOF_PAGE_HEADER - //assert(local.page_organization.pages[last.page_id@].offset.is_some()); - let ghost o = local.page_organization.pages[last.page_id@].offset.unwrap(); - //assert(last.page_id@.idx - o >= 0); - let ghost page_id = PageId { - segment_id: last.page_id@.segment_id, - idx: (last.page_id@.idx - o) as nat, - }; - let page_ptr = calculate_page_ptr_subtract_offset( - last.page_ptr, - offset, - Ghost(last.page_id@), - Ghost(page_id), - ); - let page = PagePtr { page_ptr, page_id: Ghost(page_id) }; - proof { - is_page_ptr_nonzero(page_ptr.id(), page_id); - //assert(page.wf()); - } - if page.get_inner_ref(Tracked(&*local)).xblock_size == 0 { - let ghost local_snap = *local; - let ghost next_state = PageOrg::take_step::merge_with_before(local.page_organization); - let prev_ptr = page.get_prev(Tracked(&*local)); - let next_ptr = page.get_next(Tracked(&*local)); - let ghost prev_page_id = - local.page_organization.pages[page_id].dlist_entry.unwrap().prev.unwrap(); - let prev = PagePtr { page_ptr: prev_ptr, page_id: Ghost(prev_page_id) }; - let ghost next_page_id = - local.page_organization.pages[page_id].dlist_entry.unwrap().next.unwrap(); - let next = PagePtr { page_ptr: next_ptr, page_id: Ghost(next_page_id) }; - let n_count = page.get_count(Tracked(&*local)); - let sbin_idx = slice_bin(n_count as usize); - if prev_ptr.to_usize() != 0 { - unused_page_get_mut_next!(prev, local, n => { + local.page_organization.valid_page_before(); + } + let last = slice.sub_offset(1); + //assert(local.page_organization.pages.dom().contains(last.page_id@)); + let offset = last.get_ref(Tracked(&*local)).offset; // multiplied by SIZEOF_PAGE_HEADER + //assert(local.page_organization.pages[last.page_id@].offset.is_some()); + let ghost o = 
local.page_organization.pages[last.page_id@].offset.unwrap(); + //assert(last.page_id@.idx - o >= 0); + let ghost page_id = PageId { + segment_id: last.page_id@.segment_id, + idx: (last.page_id@.idx - o) as nat, + }; + let page_ptr = calculate_page_ptr_subtract_offset( + last.page_ptr, + offset, + Ghost(last.page_id@), + Ghost(page_id), + ); + let page = PagePtr { page_ptr, page_id: Ghost(page_id) }; + proof { + is_page_ptr_nonzero(page_ptr.id(), page_id); + //assert(page.wf()); + } + if page.get_inner_ref(Tracked(&*local)).xblock_size == 0 { + let ghost local_snap = *local; + let ghost next_state = PageOrg::take_step::merge_with_before(local.page_organization); + let prev_ptr = page.get_prev(Tracked(&*local)); + let next_ptr = page.get_next(Tracked(&*local)); + let ghost prev_page_id = + local.page_organization.pages[page_id].dlist_entry.unwrap().prev.unwrap(); + let prev = PagePtr { page_ptr: prev_ptr, page_id: Ghost(prev_page_id) }; + let ghost next_page_id = + local.page_organization.pages[page_id].dlist_entry.unwrap().next.unwrap(); + let next = PagePtr { page_ptr: next_ptr, page_id: Ghost(next_page_id) }; + let n_count = page.get_count(Tracked(&*local)); + let sbin_idx = slice_bin(n_count as usize); + if prev_ptr.to_usize() != 0 { + unused_page_get_mut_next!(prev, local, n => { n = next_ptr; }); - } - if next_ptr.to_usize() != 0 { - unused_page_get_mut_prev!(next, local, p => { + } + if next_ptr.to_usize() != 0 { + unused_page_get_mut_prev!(next, local, p => { p = prev_ptr; }); - } - tld_get_mut!(tld, local, tld => { + } + tld_get_mut!(tld, local, tld => { let mut cq = tld.segments.span_queue_headers[sbin_idx]; if prev_ptr.to_usize() == 0 { @@ -22283,46 +22188,46 @@ mod segment { tld.segments.span_queue_headers.set(sbin_idx, cq); }); - slice_count += n_count; - slice = page; - proof { - //assert(n_count == local.page_organization.pages[page_id].count.unwrap()); - //assert(!local.page_organization.pages[page_id].is_used); - local.page_organization = next_state; - preserves_mem_chunk_good(local_snap, *local); - //assert(page_organization_queues_match(local.page_organization.unused_dlist_headers, - // local.tld@.value.get_Some_0().segments.span_queue_headers)); - //assert(local.page_organization_valid()); - //let slice_page_id = slice.page_id@; - //assert( - // local.pages.index(slice_page_id).wf_unused(slice_page_id, local.unused_pages[slice_page_id], local.page_organization.popped, local.instance) - //); - //assert( - // old(local).pages.index(orig_id).wf_unused(orig_id, old(local).unused_pages[orig_id], old(local).page_organization.popped, local.instance) - //); - //assert(local.pages.index(orig_id).inner@.value.unwrap().zeroed_except_block_size()); - //assert( - // local.pages.index(orig_id).wf_unused(orig_id, local.unused_pages[orig_id], local.page_organization.popped, local.instance) - //); - //assert(local.wf_main()); - /*assert(slice.wf()); + slice_count += n_count; + slice = page; + proof { + //assert(n_count == local.page_organization.pages[page_id].count.unwrap()); + //assert(!local.page_organization.pages[page_id].is_used); + local.page_organization = next_state; + preserves_mem_chunk_good(local_snap, *local); + //assert(page_organization_queues_match(local.page_organization.unused_dlist_headers, + // local.tld@.value.get_Some_0().segments.span_queue_headers)); + //assert(local.page_organization_valid()); + //let slice_page_id = slice.page_id@; + //assert( + // local.pages.index(slice_page_id).wf_unused(slice_page_id, local.unused_pages[slice_page_id], 
local.page_organization.popped, local.instance) + //); + //assert( + // old(local).pages.index(orig_id).wf_unused(orig_id, old(local).unused_pages[orig_id], old(local).page_organization.popped, local.instance) + //); + //assert(local.pages.index(orig_id).inner@.value.unwrap().zeroed_except_block_size()); + //assert( + // local.pages.index(orig_id).wf_unused(orig_id, local.unused_pages[orig_id], local.page_organization.popped, local.instance) + //); + //assert(local.wf_main()); + /*assert(slice.wf()); assert(local.page_organization.popped.is_VeryUnready()); assert(local.page_organization.popped.get_VeryUnready_1() == slice.page_id@.idx as int); assert(local.page_organization.popped.get_VeryUnready_2() == slice_count as int); assert(local.page_organization.popped == Popped::VeryUnready(slice.page_id@.segment_id, slice.page_id@.idx as int, slice_count as int));*/ - } } } - proof { - local.page_organization.get_count_bound_very_unready(); - //assert(slice.page_id@.idx + slice_count <= SLICES_PER_SEGMENT); - } - (slice, slice_count) } + proof { + local.page_organization.get_count_bound_very_unready(); + //assert(slice.page_id@.idx + slice_count <= SLICES_PER_SEGMENT); + } + (slice, slice_count) +} - } // verus! +} // verus! } mod commit_segment { @@ -22342,372 +22247,371 @@ mod commit_segment { verus! { - fn clock_now() -> i64 { - let t = clock_gettime_monotonic(); - t.tv_sec.wrapping_mul(1000).wrapping_add((((t.tv_nsec as u64) / 1000000) as i64)) - } +fn clock_now() -> i64 { + let t = clock_gettime_monotonic(); + t.tv_sec.wrapping_mul(1000).wrapping_add((((t.tv_nsec as u64) / 1000000) as i64)) +} - // Should not be called for huge segments, I think? TODO can probably optimize out some checks - fn segment_commit_mask( - segment_ptr: usize, - conservative: bool, - p: usize, - size: usize, - cm: &mut CommitMask, - ) -> (res: (usize, usize)) // start_p, full_size - requires - segment_ptr as int % SEGMENT_SIZE as int == 0, - segment_ptr + SEGMENT_SIZE <= usize::MAX, - p >= segment_ptr, - p + size <= segment_ptr + SEGMENT_SIZE, - old(cm)@ == Set::::empty(), - ensures - ({ - let (start_p, full_size) = res; - { - (cm@ == Set::::empty() ==> !conservative ==> size == 0) && (cm@ != Set::< - int, - >::empty() ==> (conservative ==> p <= start_p <= start_p + full_size <= p + size) - && (!conservative ==> start_p <= p <= p + size <= start_p + full_size) - && start_p >= segment_ptr && start_p + full_size <= segment_ptr - + SEGMENT_SIZE//&& (!conservative ==> set_int_range((p - segment_ptr) / COMMIT_SIZE as int, - // (((p + size - 1 - segment_ptr as int) / COMMIT_SIZE as int) + 1)).subset_of(cm@)) - //&& (conservative ==> cm@ <= set_int_range((p - segment_ptr) / COMMIT_SIZE as int, - // (((p + size - 1 - segment_ptr as int) / COMMIT_SIZE as int) + 1))) - && start_p as int % COMMIT_SIZE as int == 0 && full_size as int - % COMMIT_SIZE as int == 0 && cm@ =~= set_int_range( - (start_p - segment_ptr) / COMMIT_SIZE as int, - (((start_p + full_size - segment_ptr) / COMMIT_SIZE as int)), - )) && (!conservative ==> forall|i| - #[trigger] - cm@.contains(i) ==> start_p <= segment_ptr + i * SLICE_SIZE && start_p - + full_size >= segment_ptr + (i + 1) - * SLICE_SIZE)//&& start_p as int % SLICE_SIZE as int == 0 - //&& full_size as int % SLICE_SIZE as int == 0 +// Should not be called for huge segments, I think? 
TODO can probably optimize out some checks +fn segment_commit_mask( + segment_ptr: usize, + conservative: bool, + p: usize, + size: usize, + cm: &mut CommitMask, +) -> (res: (usize, usize)) // start_p, full_size + requires + segment_ptr as int % SEGMENT_SIZE as int == 0, + segment_ptr + SEGMENT_SIZE <= usize::MAX, + p >= segment_ptr, + p + size <= segment_ptr + SEGMENT_SIZE, + old(cm)@ == Set::::empty(), + ensures + ({ + let (start_p, full_size) = res; + { + (cm@ == Set::::empty() ==> !conservative ==> size == 0) && (cm@ != Set::< + int, + >::empty() ==> (conservative ==> p <= start_p <= start_p + full_size <= p + size) + && (!conservative ==> start_p <= p <= p + size <= start_p + full_size) + && start_p >= segment_ptr && start_p + full_size <= segment_ptr + + SEGMENT_SIZE //&& (!conservative ==> set_int_range((p - segment_ptr) / COMMIT_SIZE as int, + // (((p + size - 1 - segment_ptr as int) / COMMIT_SIZE as int) + 1)).subset_of(cm@)) + //&& (conservative ==> cm@ <= set_int_range((p - segment_ptr) / COMMIT_SIZE as int, + // (((p + size - 1 - segment_ptr as int) / COMMIT_SIZE as int) + 1))) + && start_p as int % COMMIT_SIZE as int == 0 && full_size as int + % COMMIT_SIZE as int == 0 && cm@ =~= set_int_range( + (start_p - segment_ptr) / COMMIT_SIZE as int, + (((start_p + full_size - segment_ptr) / COMMIT_SIZE as int)), + )) && (!conservative ==> forall|i| #[trigger] + cm@.contains(i) ==> start_p <= segment_ptr + i * SLICE_SIZE && start_p + + full_size >= segment_ptr + (i + 1) + * SLICE_SIZE) //&& start_p as int % SLICE_SIZE as int == 0 + //&& full_size as int % SLICE_SIZE as int == 0 - } - }), - { - proof { - const_facts(); - } - if size == 0 || size > SEGMENT_SIZE as usize { - return (0, 0); - } - let segstart: usize = SLICE_SIZE as usize; - let segsize: usize = SEGMENT_SIZE as usize; - if p >= segment_ptr + segsize { - return (0, 0); - } - let pstart: usize = p - segment_ptr; - let mut start: usize; - let mut end: usize; + } + }), +{ + proof { + const_facts(); + } + if size == 0 || size > SEGMENT_SIZE as usize { + return (0, 0); + } + let segstart: usize = SLICE_SIZE as usize; + let segsize: usize = SEGMENT_SIZE as usize; + if p >= segment_ptr + segsize { + return (0, 0); + } + let pstart: usize = p - segment_ptr; + let mut start: usize; + let mut end: usize; + if conservative { + start = align_up(pstart, COMMIT_SIZE as usize); + end = align_down(pstart + size, COMMIT_SIZE as usize); + } else { + start = align_down(pstart, COMMIT_SIZE as usize); + end = align_up(pstart + size, COMMIT_SIZE as usize); + } + if pstart >= segstart && start < segstart { + start = segstart; + } + if end > segsize { + end = segsize; + } + let start_p = segment_ptr + start; + let full_size = if end > start { + end - start + } else { + 0 + }; + if full_size == 0 { + return (start_p, full_size); + } + let bitidx = start / COMMIT_SIZE as usize; + let bitcount = full_size / COMMIT_SIZE as usize; + cm.create(bitidx, bitcount); + proof { if conservative { - start = align_up(pstart, COMMIT_SIZE as usize); - end = align_down(pstart + size, COMMIT_SIZE as usize); - } else { - start = align_down(pstart, COMMIT_SIZE as usize); - end = align_up(pstart + size, COMMIT_SIZE as usize); - } - if pstart >= segstart && start < segstart { - start = segstart; - } - if end > segsize { - end = segsize; - } - let start_p = segment_ptr + start; - let full_size = if end > start { - end - start + assert(p <= start_p); + assert(start_p + full_size <= p + size); } else { - 0 - }; - if full_size == 0 { - return (start_p, full_size); + 
assert(start_p <= p); + assert(start_p + full_size == segment_ptr + end); + assert(p + size == segment_ptr + pstart + size); + assert(end >= pstart + size); + assert(p + size <= start_p + full_size); + assert((p - segment_ptr) / COMMIT_SIZE as int >= bitidx); + assert((((p + size - 1 - segment_ptr as int) / COMMIT_SIZE as int) + 1) <= bitidx + + bitcount); } - let bitidx = start / COMMIT_SIZE as usize; - let bitcount = full_size / COMMIT_SIZE as usize; - cm.create(bitidx, bitcount); - proof { - if conservative { - assert(p <= start_p); - assert(start_p + full_size <= p + size); - } else { - assert(start_p <= p); - assert(start_p + full_size == segment_ptr + end); - assert(p + size == segment_ptr + pstart + size); - assert(end >= pstart + size); - assert(p + size <= start_p + full_size); - assert((p - segment_ptr) / COMMIT_SIZE as int >= bitidx); - assert((((p + size - 1 - segment_ptr as int) / COMMIT_SIZE as int) + 1) <= bitidx - + bitcount); - } - if full_size > 0 { - assert(cm@.contains(bitidx as int)); - } + if full_size > 0 { + assert(cm@.contains(bitidx as int)); } - return (start_p, full_size); } + return (start_p, full_size); +} - #[verifier::spinoff_prover] - fn segment_commitx( - segment: SegmentPtr, - commit: bool, - p: usize, - size: usize, - Tracked(local): Tracked<&mut Local>, - ) -> (success: bool) - requires - old(local).wf_main(), - segment.wf(), - segment.is_in(*old(local)), - p >= segment.segment_ptr.id(), - p + size <= segment.segment_ptr.id() + SEGMENT_SIZE, - // !commit ==> old(local).segments[segment.segment_id@] - // .mem.os_has_range_read_write(p as int, size as int), - // !commit ==> old(local).segments[segment.segment_id@] - // .mem.pointsto_has_range(p as int, size as int), - !commit ==> set_int_range(p as int, p + size) <= old(local).decommit_mask( - segment.segment_id@, - ).bytes(segment.segment_id@), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - commit ==> success ==> local.segments[segment.segment_id@].mem.os_has_range_read_write( - p as int, - size as int, - ), - commit ==> success ==> set_int_range(p as int, p + size) <= local.commit_mask( - segment.segment_id@, - ).bytes(segment.segment_id@) - local.decommit_mask(segment.segment_id@).bytes( - segment.segment_id@, - ), - local.page_organization == old(local).page_organization, - local.pages == old(local).pages, - local.psa == old(local).psa, - { - let ghost sid = segment.segment_id@; - proof { - segment_id_divis(segment); - local.instance.thread_local_state_guards_segment( - local.thread_id, - segment.segment_id@, - &local.thread_token, - ).points_to.is_nonnull(); - const_facts(); - decommit_subset_of_pointsto(*local, sid); - } - let mut mask: CommitMask = CommitMask::empty(); - let (start, full_size) = segment_commit_mask( - segment.segment_ptr.to_usize(), - !commit, - p, - size, - &mut mask, - ); - if mask.is_empty() || full_size == 0 { - return true; +#[verifier::spinoff_prover] +fn segment_commitx( + segment: SegmentPtr, + commit: bool, + p: usize, + size: usize, + Tracked(local): Tracked<&mut Local>, +) -> (success: bool) + requires + old(local).wf_main(), + segment.wf(), + segment.is_in(*old(local)), + p >= segment.segment_ptr.id(), + p + size <= segment.segment_ptr.id() + SEGMENT_SIZE, + // !commit ==> old(local).segments[segment.segment_id@] + // .mem.os_has_range_read_write(p as int, size as int), + // !commit ==> old(local).segments[segment.segment_id@] + // .mem.pointsto_has_range(p as int, size as int), + !commit ==> set_int_range(p as int, p + size) <= 
old(local).decommit_mask( + segment.segment_id@, + ).bytes(segment.segment_id@), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + commit ==> success ==> local.segments[segment.segment_id@].mem.os_has_range_read_write( + p as int, + size as int, + ), + commit ==> success ==> set_int_range(p as int, p + size) <= local.commit_mask( + segment.segment_id@, + ).bytes(segment.segment_id@) - local.decommit_mask(segment.segment_id@).bytes( + segment.segment_id@, + ), + local.page_organization == old(local).page_organization, + local.pages == old(local).pages, + local.psa == old(local).psa, +{ + let ghost sid = segment.segment_id@; + proof { + segment_id_divis(segment); + local.instance.thread_local_state_guards_segment( + local.thread_id, + segment.segment_id@, + &local.thread_token, + ).points_to.is_nonnull(); + const_facts(); + decommit_subset_of_pointsto(*local, sid); + } + let mut mask: CommitMask = CommitMask::empty(); + let (start, full_size) = segment_commit_mask( + segment.segment_ptr.to_usize(), + !commit, + p, + size, + &mut mask, + ); + if mask.is_empty() || full_size == 0 { + return true; + } + if commit && !segment.get_commit_mask(Tracked(&*local)).all_set(&mask) { + proof { + let ghost sid = segment.segment_id@; + assert(local.mem_chunk_good(sid)); + assert(segment_start(sid) <= start); + assert(start + full_size <= segment_start(sid) + SEGMENT_SIZE); + //assert(local.segments[sid].mem.os_exact_range( + // segment_start(sid), SEGMENT_SIZE as int)); } - if commit && !segment.get_commit_mask(Tracked(&*local)).all_set(&mask) { - proof { - let ghost sid = segment.segment_id@; - assert(local.mem_chunk_good(sid)); - assert(segment_start(sid) <= start); - assert(start + full_size <= segment_start(sid) + SEGMENT_SIZE); - //assert(local.segments[sid].mem.os_exact_range( - // segment_start(sid), SEGMENT_SIZE as int)); - } - let mut is_zero = false; - let mut cmask = CommitMask::empty(); - segment.get_commit_mask(Tracked(&*local)).create_intersect(&mask, &mut cmask); - let success; - segment_get_mut_local!(segment, local, l => { + let mut is_zero = false; + let mut cmask = CommitMask::empty(); + segment.get_commit_mask(Tracked(&*local)).create_intersect(&mask, &mut cmask); + let success; + segment_get_mut_local!(segment, local, l => { let (_success, _is_zero) = crate::os_commit::os_commit(start, full_size, Tracked(&mut l.mem)); success = _success; }); - if (!success) { - proof { - preserves_mem_chunk_good_on_commit(*old(local), *local, sid); - assert(local.mem_chunk_good(sid)); - assert forall|sid1| - sid1 != sid && old(local).mem_chunk_good(sid1) implies local.mem_chunk_good( - sid1, - ) by { - preserves_mem_chunk_good_on_commit(*old(local), *local, sid1); - } - assert(local.wf_main()); + if (!success) { + proof { + preserves_mem_chunk_good_on_commit(*old(local), *local, sid); + assert(local.mem_chunk_good(sid)); + assert forall|sid1| + sid1 != sid && old(local).mem_chunk_good(sid1) implies local.mem_chunk_good( + sid1, + ) by { + preserves_mem_chunk_good_on_commit(*old(local), *local, sid1); } - return false; + assert(local.wf_main()); } - segment_get_mut_main!(segment, local, main => { + return false; + } + segment_get_mut_main!(segment, local, main => { main.commit_mask.set(&mask); }); - } else if !commit && segment.get_commit_mask(Tracked(&*local)).any_set(&mask) { - let mut cmask = CommitMask::empty(); - segment.get_commit_mask(Tracked(&*local)).create_intersect(&mask, &mut cmask); - if segment.get_allow_decommit(Tracked(&*local)) { - segment_get_mut_local!(segment, 
local, l => { + } else if !commit && segment.get_commit_mask(Tracked(&*local)).any_set(&mask) { + let mut cmask = CommitMask::empty(); + segment.get_commit_mask(Tracked(&*local)).create_intersect(&mask, &mut cmask); + if segment.get_allow_decommit(Tracked(&*local)) { + segment_get_mut_local!(segment, local, l => { crate::os_commit::os_decommit(start, full_size, Tracked(&mut l.mem)); }); - } - segment_get_mut_main!(segment, local, main => { + } + segment_get_mut_main!(segment, local, main => { main.commit_mask.clear(&mask); }); - } - if commit && segment.get_main_ref(Tracked(&*local)).decommit_mask.any_set(&mask) { - segment_get_mut_main!(segment, local, main => { + } + if commit && segment.get_main_ref(Tracked(&*local)).decommit_mask.any_set(&mask) { + segment_get_mut_main!(segment, local, main => { main.decommit_expire = clock_now().wrapping_add(option_decommit_delay()); }); - } - segment_get_mut_main!(segment, local, main => { + } + segment_get_mut_main!(segment, local, main => { main.decommit_mask.clear(&mask); }); - proof { - let cm = local.segments[sid].main@.value.unwrap().commit_mask@; - let old_cm = old(local).segments[sid].main@.value.unwrap().commit_mask@; - if commit { + proof { + let cm = local.segments[sid].main@.value.unwrap().commit_mask@; + let old_cm = old(local).segments[sid].main@.value.unwrap().commit_mask@; + if commit { + reveal(CommitMask::bytes); + preserves_mem_chunk_good_on_commit_with_mask_set(*old(local), *local, sid); + assert(local.mem_chunk_good(sid)); + assert forall|sid1| + sid1 != sid && old(local).mem_chunk_good(sid1) implies local.mem_chunk_good( + sid1, + ) by { + preserves_mem_chunk_good_on_commit(*old(local), *local, sid1); + } + assert(local.wf_main()); + assert forall|j: int| + set_int_range(p as int, p + size).contains(j) implies local.commit_mask(sid).bytes( + sid, + ).contains(j) by { + assert(segment_start(sid) == segment.segment_ptr.id()); + let k = (j - segment_start(sid)) / COMMIT_SIZE as int; + assert(mask@.contains(k)); + } + } else { + assert forall|sid1| + sid1 != sid && old(local).mem_chunk_good(sid1) implies local.mem_chunk_good( + sid1, + ) by { + preserves_mem_chunk_good_on_commit_with_mask_set(*old(local), *local, sid1); + } + let local1 = *old(local); + let local2 = *local; + assert(local2.commit_mask(sid).bytes(sid) =~= local1.commit_mask(sid).bytes(sid) - ( + local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid))) by { + reveal(CommitMask::bytes); + } + assert(local2.decommit_mask(sid).bytes(sid) <= local1.decommit_mask(sid).bytes(sid)) + by { + reveal(CommitMask::bytes); + } + assert((local1.segments[sid].mem.os_rw_bytes() - local2.segments[sid].mem.os_rw_bytes()) + <= (local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid))) + by { reveal(CommitMask::bytes); - preserves_mem_chunk_good_on_commit_with_mask_set(*old(local), *local, sid); - assert(local.mem_chunk_good(sid)); - assert forall|sid1| - sid1 != sid && old(local).mem_chunk_good(sid1) implies local.mem_chunk_good( - sid1, - ) by { - preserves_mem_chunk_good_on_commit(*old(local), *local, sid1); - } - assert(local.wf_main()); - assert forall|j: int| - set_int_range(p as int, p + size).contains(j) implies local.commit_mask(sid).bytes( - sid, - ).contains(j) by { - assert(segment_start(sid) == segment.segment_ptr.id()); - let k = (j - segment_start(sid)) / COMMIT_SIZE as int; - assert(mask@.contains(k)); - } - } else { - assert forall|sid1| - sid1 != sid && old(local).mem_chunk_good(sid1) implies local.mem_chunk_good( - sid1, - ) by 
{ - preserves_mem_chunk_good_on_commit_with_mask_set(*old(local), *local, sid1); - } - let local1 = *old(local); - let local2 = *local; - assert(local2.commit_mask(sid).bytes(sid) =~= local1.commit_mask(sid).bytes(sid) - ( - local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid))) by { - reveal(CommitMask::bytes); - } - assert(local2.decommit_mask(sid).bytes(sid) <= local1.decommit_mask(sid).bytes(sid)) - by { - reveal(CommitMask::bytes); - } - assert((local1.segments[sid].mem.os_rw_bytes() - local2.segments[sid].mem.os_rw_bytes()) - <= (local1.decommit_mask(sid).bytes(sid) - local2.decommit_mask(sid).bytes(sid))) - by { - reveal(CommitMask::bytes); - } - preserves_mem_chunk_good_on_decommit(*old(local), *local, sid); - assert(local.mem_chunk_good(sid)); - assert(local.wf_main()); } + preserves_mem_chunk_good_on_decommit(*old(local), *local, sid); + assert(local.mem_chunk_good(sid)); + assert(local.wf_main()); } - return true; } + return true; +} - pub fn segment_ensure_committed( - segment: SegmentPtr, - p: usize, - size: usize, - Tracked(local): Tracked<&mut Local>, - ) -> (success: bool) - requires - old(local).wf_main(), - segment.wf(), - segment.is_in(*old(local)), - p >= segment.segment_ptr.id(), - p + size <= segment.segment_ptr.id() + SEGMENT_SIZE, - ensures - local.wf_main(), - common_preserves(*old(local), *local), - success ==> set_int_range(p as int, p + size) <= local.commit_mask( - segment.segment_id@, - ).bytes(segment.segment_id@) - local.decommit_mask(segment.segment_id@).bytes( - segment.segment_id@, - ), - local.page_organization == old(local).page_organization, - { - if segment.get_commit_mask(Tracked(&*local)).is_full() && segment.get_decommit_mask( - Tracked(&*local), - ).is_empty() { - proof { - //assert forall |j: int| set_int_range(p as int, p + size).contains(j) - // implies local.commit_mask(segment.segment_id@).bytes(segment.segment_id@).contains(j) - //by { - const_facts(); - reveal(CommitMask::bytes); - //} - } - return true; +pub fn segment_ensure_committed( + segment: SegmentPtr, + p: usize, + size: usize, + Tracked(local): Tracked<&mut Local>, +) -> (success: bool) + requires + old(local).wf_main(), + segment.wf(), + segment.is_in(*old(local)), + p >= segment.segment_ptr.id(), + p + size <= segment.segment_ptr.id() + SEGMENT_SIZE, + ensures + local.wf_main(), + common_preserves(*old(local), *local), + success ==> set_int_range(p as int, p + size) <= local.commit_mask( + segment.segment_id@, + ).bytes(segment.segment_id@) - local.decommit_mask(segment.segment_id@).bytes( + segment.segment_id@, + ), + local.page_organization == old(local).page_organization, +{ + if segment.get_commit_mask(Tracked(&*local)).is_full() && segment.get_decommit_mask( + Tracked(&*local), + ).is_empty() { + proof { + //assert forall |j: int| set_int_range(p as int, p + size).contains(j) + // implies local.commit_mask(segment.segment_id@).bytes(segment.segment_id@).contains(j) + //by { + const_facts(); + reveal(CommitMask::bytes); + //} } - segment_commitx(segment, true, p, size, Tracked(local)) + return true; } + segment_commitx(segment, true, p, size, Tracked(local)) +} - pub fn segment_perhaps_decommit( - segment: SegmentPtr, - p: usize, - size: usize, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf_main(), - segment.wf(), - segment.is_in(*old(local)), - p >= segment.segment_ptr.id(), - p + size <= segment.segment_ptr.id() + SEGMENT_SIZE, - set_int_range(p as int, p + size).disjoint( - segment_info_range(segment.segment_id@) + 
old(local).segment_pages_used_total( - segment.segment_id@, - ), +pub fn segment_perhaps_decommit( + segment: SegmentPtr, + p: usize, + size: usize, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf_main(), + segment.wf(), + segment.is_in(*old(local)), + p >= segment.segment_ptr.id(), + p + size <= segment.segment_ptr.id() + SEGMENT_SIZE, + set_int_range(p as int, p + size).disjoint( + segment_info_range(segment.segment_id@) + old(local).segment_pages_used_total( + segment.segment_id@, ), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - local.page_organization == old(local).page_organization, - local.pages == old(local).pages, - local.psa == old(local).psa, - { - if !segment.get_allow_decommit(Tracked(&*local)) { + ), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + local.page_organization == old(local).page_organization, + local.pages == old(local).pages, + local.psa == old(local).psa, +{ + if !segment.get_allow_decommit(Tracked(&*local)) { + return ; + } + if option_decommit_delay() == 0 { + todo(); + } else { + proof { + segment_id_divis(segment); + } + let mut mask: CommitMask = CommitMask::empty(); + let (start, full_size) = segment_commit_mask( + segment.segment_ptr.to_usize(), + true, + p, + size, + &mut mask, + ); + if mask.is_empty() || full_size == 0 { return ; } - if option_decommit_delay() == 0 { - todo(); - } else { - proof { - segment_id_divis(segment); - } - let mut mask: CommitMask = CommitMask::empty(); - let (start, full_size) = segment_commit_mask( - segment.segment_ptr.to_usize(), - true, - p, - size, - &mut mask, - ); - if mask.is_empty() || full_size == 0 { - return ; - } - let mut cmask = CommitMask::empty(); - segment_get_mut_main!(segment, local, main => { + let mut cmask = CommitMask::empty(); + segment_get_mut_main!(segment, local, main => { main.commit_mask.create_intersect(&mask, &mut cmask); main.decommit_mask.set(&cmask); }); - proof { - const_facts(); - reveal(CommitMask::bytes); - let segment_id = segment.segment_id@; - segment_start_mult_commit_size(segment_id); - assert(segment.segment_ptr.id() % COMMIT_SIZE as int == 0); - /*assert forall |addr| mask.bytes(segment_id).contains(addr) + proof { + const_facts(); + reveal(CommitMask::bytes); + let segment_id = segment.segment_id@; + segment_start_mult_commit_size(segment_id); + assert(segment.segment_ptr.id() % COMMIT_SIZE as int == 0); + /*assert forall |addr| mask.bytes(segment_id).contains(addr) implies set_int_range(p as int, p + size).contains(addr) by { assert(mask@.contains((addr - segment.segment_ptr.id()) / COMMIT_SIZE as int)); @@ -22717,123 +22621,123 @@ mod commit_segment { assert(addr >= p); assert(addr < p + size); }*/ - assert(mask.bytes(segment_id) <= set_int_range(p as int, p + size)); - assert(cmask.bytes(segment_id) <= set_int_range(p as int, p + size)); - assert(local.decommit_mask(segment_id).bytes(segment_id) =~= old(local).decommit_mask( - segment_id, - ).bytes(segment_id) + cmask.bytes(segment_id)); - assert(old(local).mem_chunk_good(segment_id)); - preserve_totals(*old(local), *local, segment_id); - //assert(local.segment_pages_used_total(segment_id) - // =~= old(local).segment_pages_used_total(segment_id)); - //assert(local.segment_pages_range_total(segment_id) - // =~= old(local).segment_pages_range_total(segment_id)); - preserves_mem_chunk_good_except(*old(local), *local, segment.segment_id@); - assert(mem_chunk_good1( - local.segments[segment_id].mem, - segment_id, - local.commit_mask(segment_id).bytes(segment_id), 
- local.decommit_mask(segment_id).bytes(segment_id), - local.segment_pages_range_total(segment_id), - local.segment_pages_used_total(segment_id), - )); - assert(local.mem_chunk_good(segment.segment_id@)); - assert(local.wf_main()); - } - let ghost local_snap = *local; - let now = clock_now(); - if segment.get_decommit_expire(Tracked(&*local)) == 0 { - segment_get_mut_main!(segment, local, main => { + assert(mask.bytes(segment_id) <= set_int_range(p as int, p + size)); + assert(cmask.bytes(segment_id) <= set_int_range(p as int, p + size)); + assert(local.decommit_mask(segment_id).bytes(segment_id) =~= old(local).decommit_mask( + segment_id, + ).bytes(segment_id) + cmask.bytes(segment_id)); + assert(old(local).mem_chunk_good(segment_id)); + preserve_totals(*old(local), *local, segment_id); + //assert(local.segment_pages_used_total(segment_id) + // =~= old(local).segment_pages_used_total(segment_id)); + //assert(local.segment_pages_range_total(segment_id) + // =~= old(local).segment_pages_range_total(segment_id)); + preserves_mem_chunk_good_except(*old(local), *local, segment.segment_id@); + assert(mem_chunk_good1( + local.segments[segment_id].mem, + segment_id, + local.commit_mask(segment_id).bytes(segment_id), + local.decommit_mask(segment_id).bytes(segment_id), + local.segment_pages_range_total(segment_id), + local.segment_pages_used_total(segment_id), + )); + assert(local.mem_chunk_good(segment.segment_id@)); + assert(local.wf_main()); + } + let ghost local_snap = *local; + let now = clock_now(); + if segment.get_decommit_expire(Tracked(&*local)) == 0 { + segment_get_mut_main!(segment, local, main => { main.decommit_expire = now.wrapping_add(option_decommit_delay()); }); - proof { - preserves_mem_chunk_good(local_snap, *local); - } - } else if segment.get_decommit_expire(Tracked(&*local)) <= now { - let ded = option_decommit_extend_delay(); - if segment.get_decommit_expire(Tracked(&*local)).wrapping_add( - option_decommit_extend_delay(), - ) <= now { - segment_delayed_decommit(segment, true, Tracked(&mut *local)); - } else { - segment_get_mut_main!(segment, local, main => { + proof { + preserves_mem_chunk_good(local_snap, *local); + } + } else if segment.get_decommit_expire(Tracked(&*local)) <= now { + let ded = option_decommit_extend_delay(); + if segment.get_decommit_expire(Tracked(&*local)).wrapping_add( + option_decommit_extend_delay(), + ) <= now { + segment_delayed_decommit(segment, true, Tracked(&mut *local)); + } else { + segment_get_mut_main!(segment, local, main => { main.decommit_expire = now.wrapping_add(option_decommit_extend_delay()); }); - proof { - preserves_mem_chunk_good(local_snap, *local); - } + proof { + preserves_mem_chunk_good(local_snap, *local); } - } else { - segment_get_mut_main!(segment, local, main => { + } + } else { + segment_get_mut_main!(segment, local, main => { main.decommit_expire = main.decommit_expire.wrapping_add(option_decommit_extend_delay()); }); - proof { - preserves_mem_chunk_good(local_snap, *local); - } + proof { + preserves_mem_chunk_good(local_snap, *local); } } } +} - pub fn segment_delayed_decommit( - segment: SegmentPtr, - force: bool, - Tracked(local): Tracked<&mut Local>, - ) - requires - old(local).wf_main(), +pub fn segment_delayed_decommit( + segment: SegmentPtr, + force: bool, + Tracked(local): Tracked<&mut Local>, +) + requires + old(local).wf_main(), + segment.wf(), + segment.is_in(*old(local)), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + local.page_organization == old(local).page_organization, + 
local.pages == old(local).pages, + local.psa == old(local).psa, +{ + if !segment.get_allow_decommit(Tracked(&*local)) || segment.get_decommit_mask( + Tracked(&*local), + ).is_empty() { + return ; + } + let now = clock_now(); + if !force && now < segment.get_decommit_expire(Tracked(&*local)) { + return ; + } + proof { + const_facts(); + } + let mut idx = 0; + loop + invariant + local.wf_main(), segment.wf(), - segment.is_in(*old(local)), - ensures + segment.is_in(*local), + 0 <= idx < COMMIT_MASK_BITS, + invariant_ensures local.wf_main(), common_preserves(*old(local), *local), local.page_organization == old(local).page_organization, local.pages == old(local).pages, local.psa == old(local).psa, { - if !segment.get_allow_decommit(Tracked(&*local)) || segment.get_decommit_mask( - Tracked(&*local), - ).is_empty() { - return ; - } - let now = clock_now(); - if !force && now < segment.get_decommit_expire(Tracked(&*local)) { - return ; - } proof { const_facts(); + reveal(CommitMask::bytes); } - let mut idx = 0; - loop - invariant - local.wf_main(), - segment.wf(), - segment.is_in(*local), - 0 <= idx < COMMIT_MASK_BITS, - invariant_ensures - local.wf_main(), - common_preserves(*old(local), *local), - local.page_organization == old(local).page_organization, - local.pages == old(local).pages, - local.psa == old(local).psa, - { - proof { - const_facts(); - reveal(CommitMask::bytes); - } - let mask = segment.get_decommit_mask(Tracked(&*local)); - let (next_idx, count) = mask.next_run(idx); - if count == 0 { - break ; - } - idx = next_idx; - let p = segment.segment_ptr.to_usize() + idx * COMMIT_SIZE as usize; - let size = count * COMMIT_SIZE as usize; - segment_commitx(segment, false, p, size, Tracked(&mut *local)); + let mask = segment.get_decommit_mask(Tracked(&*local)); + let (next_idx, count) = mask.next_run(idx); + if count == 0 { + break ; } + idx = next_idx; + let p = segment.segment_ptr.to_usize() + idx * COMMIT_SIZE as usize; + let size = count * COMMIT_SIZE as usize; + segment_commitx(segment, false, p, size, Tracked(&mut *local)); } +} - } // verus! +} // verus! } mod os_commit { @@ -22849,67 +22753,67 @@ mod os_commit { verus! 
{ - pub fn os_commit(addr: usize, size: usize, Tracked(mem): Tracked<&mut MemChunk>) -> (res: ( - bool, - bool, - )) - requires - old(mem).wf(), - old(mem).os_has_range(addr as int, size as int), - addr as int % page_size() == 0, - size as int % page_size() == 0, - addr != 0, - addr + size <= usize::MAX,//old(mem).has_pointsto_for_all_read_write(), +pub fn os_commit(addr: usize, size: usize, Tracked(mem): Tracked<&mut MemChunk>) -> (res: ( + bool, + bool, +)) + requires + old(mem).wf(), + old(mem).os_has_range(addr as int, size as int), + addr as int % page_size() == 0, + size as int % page_size() == 0, + addr != 0, + addr + size <= usize::MAX, //old(mem).has_pointsto_for_all_read_write(), - ensures - ({ - let (success, is_zero) = res; - mem.wf()//&& mem.has_pointsto_for_all_read_write() - //&& (success ==> mem.os_has_range_read_write(addr as int, size as int)) - && mem.has_new_pointsto(&*old(mem)) && mem.os.dom() == old(mem).os.dom() && (success - ==> mem.os_has_range_read_write(addr as int, size as int)) - }), - { - os_commitx(addr, size, true, false, Tracked(&mut *mem)) - } + ensures + ({ + let (success, is_zero) = res; + mem.wf() //&& mem.has_pointsto_for_all_read_write() + //&& (success ==> mem.os_has_range_read_write(addr as int, size as int)) + && mem.has_new_pointsto(&*old(mem)) && mem.os.dom() == old(mem).os.dom() && (success + ==> mem.os_has_range_read_write(addr as int, size as int)) + }), +{ + os_commitx(addr, size, true, false, Tracked(&mut *mem)) +} - pub fn os_decommit(addr: usize, size: usize, Tracked(mem): Tracked<&mut MemChunk>) -> (success: - bool) - requires - old(mem).wf(), - old(mem).os_has_range(addr as int, size as int), - old(mem).pointsto_has_range(addr as int, size as int), - addr as int % page_size() == 0, - size as int % page_size() == 0, - addr != 0, - addr + size <= usize::MAX, - ensures - mem.wf(), - mem.os.dom() =~= old(mem).os.dom(), - mem.points_to@.dom().subset_of(old(mem).points_to@.dom()), - mem.os_rw_bytes().subset_of(old(mem).os_rw_bytes()), - old(mem).points_to@.dom() - mem.points_to@.dom() =~= old(mem).os_rw_bytes() - - mem.os_rw_bytes(), - old(mem).os_rw_bytes() - mem.os_rw_bytes() <= set_int_range(addr as int, addr + size), - { - let tracked mut t = mem.split(addr as int, size as int); - let ghost t1 = t; - let (success, _) = os_commitx(addr, size, false, true, Tracked(&mut t)); - proof { - mem.join(t); - assert(t.os_rw_bytes().subset_of(t1.os_rw_bytes())); - assert forall|p| mem.os_rw_bytes().contains(p) implies old(mem).os_rw_bytes().contains( - p, - ) by { - if addr <= p < addr + size { - assert(t1.os_rw_bytes().contains(p)); - assert(t.os_rw_bytes().contains(p)); - assert(old(mem).os_rw_bytes().contains(p)); - } else { - assert(old(mem).os_rw_bytes().contains(p)); - } +pub fn os_decommit(addr: usize, size: usize, Tracked(mem): Tracked<&mut MemChunk>) -> (success: + bool) + requires + old(mem).wf(), + old(mem).os_has_range(addr as int, size as int), + old(mem).pointsto_has_range(addr as int, size as int), + addr as int % page_size() == 0, + size as int % page_size() == 0, + addr != 0, + addr + size <= usize::MAX, + ensures + mem.wf(), + mem.os.dom() =~= old(mem).os.dom(), + mem.points_to@.dom().subset_of(old(mem).points_to@.dom()), + mem.os_rw_bytes().subset_of(old(mem).os_rw_bytes()), + old(mem).points_to@.dom() - mem.points_to@.dom() =~= old(mem).os_rw_bytes() + - mem.os_rw_bytes(), + old(mem).os_rw_bytes() - mem.os_rw_bytes() <= set_int_range(addr as int, addr + size), +{ + let tracked mut t = mem.split(addr as int, size as int); + let 
ghost t1 = t; + let (success, _) = os_commitx(addr, size, false, true, Tracked(&mut t)); + proof { + mem.join(t); + assert(t.os_rw_bytes().subset_of(t1.os_rw_bytes())); + assert forall|p| mem.os_rw_bytes().contains(p) implies old(mem).os_rw_bytes().contains( + p, + ) by { + if addr <= p < addr + size { + assert(t1.os_rw_bytes().contains(p)); + assert(t.os_rw_bytes().contains(p)); + assert(old(mem).os_rw_bytes().contains(p)); + } else { + assert(old(mem).os_rw_bytes().contains(p)); } - assert_sets_equal!(old(mem).points_to@.dom() - mem.points_to@.dom(), + } + assert_sets_equal!(old(mem).points_to@.dom() - mem.points_to@.dom(), old(mem).os_rw_bytes() - mem.os_rw_bytes(), p => { @@ -22932,94 +22836,94 @@ mod os_commit { } } }); - assert(mem.os_rw_bytes().subset_of(old(mem).os_rw_bytes())); - } - success + assert(mem.os_rw_bytes().subset_of(old(mem).os_rw_bytes())); + } + success +} + +fn os_page_align_areax(conservative: bool, addr: usize, size: usize) -> (res: (usize, usize)) + requires + addr as int % page_size() == 0, + size as int % page_size() == 0, + addr != 0, + addr + size <= usize::MAX, + ensures + ({ + let (start, csize) = res; + start as int % page_size() == 0 && csize as int % page_size() == 0 && (size != 0 + ==> start == addr) && (size != 0 ==> csize == size) && (size == 0 ==> start == 0 + && csize == 0) + }), +{ + if size == 0 || addr == 0 { + return (0, 0); } - - fn os_page_align_areax(conservative: bool, addr: usize, size: usize) -> (res: (usize, usize)) - requires - addr as int % page_size() == 0, - size as int % page_size() == 0, - addr != 0, - addr + size <= usize::MAX, - ensures - ({ - let (start, csize) = res; - start as int % page_size() == 0 && csize as int % page_size() == 0 && (size != 0 - ==> start == addr) && (size != 0 ==> csize == size) && (size == 0 ==> start == 0 - && csize == 0) - }), - { - if size == 0 || addr == 0 { - return (0, 0); - } - let start = if conservative { - align_up(addr, get_page_size()) - } else { - align_down(addr, get_page_size()) - }; - let end = if conservative { - align_down(addr + size, get_page_size()) - } else { - align_up(addr + size, get_page_size()) - }; - let diff = end - start; - if diff <= 0 { - return (0, 0); - } - (start, diff) + let start = if conservative { + align_up(addr, get_page_size()) + } else { + align_down(addr, get_page_size()) + }; + let end = if conservative { + align_down(addr + size, get_page_size()) + } else { + align_up(addr + size, get_page_size()) + }; + let diff = end - start; + if diff <= 0 { + return (0, 0); } + (start, diff) +} - fn os_commitx( - addr: usize, - size: usize, - commit: bool, - conservative: bool, - Tracked(mem): Tracked<&mut MemChunk>, - ) -> (res: (bool, bool)) - requires - old(mem).wf(), - old(mem).os_has_range(addr as int, size as int), - addr as int % page_size() == 0, - size as int % page_size() == 0, - addr != 0, - addr + size <= usize::MAX, - !commit ==> old(mem).pointsto_has_range(addr as int, size as int), - ensures - mem.wf(), - mem.os.dom() =~= old(mem).os.dom(), - commit ==> mem.has_new_pointsto(&*old(mem)), - commit ==> res.0 ==> mem.os_has_range_read_write(addr as int, size as int), - !commit ==> mem.points_to@.dom().subset_of(old(mem).points_to@.dom()), - !commit ==> mem.os_rw_bytes().subset_of(old(mem).os_rw_bytes()), - !commit ==> old(mem).points_to@.dom() - mem.points_to@.dom() =~= old(mem).os_rw_bytes() - - mem.os_rw_bytes(), - { - let is_zero = false; - let (start, csize) = os_page_align_areax(conservative, addr, size); - if csize == 0 { - return (true, is_zero); - } - 
let err = 0; - let p = PPtr::from_usize(start); - let tracked weird_extra = mem.take_points_to_set(mem.points_to@.dom() - mem.os_rw_bytes()); - let tracked mut exact_mem = mem.split(addr as int, size as int); - let ghost em = exact_mem; +fn os_commitx( + addr: usize, + size: usize, + commit: bool, + conservative: bool, + Tracked(mem): Tracked<&mut MemChunk>, +) -> (res: (bool, bool)) + requires + old(mem).wf(), + old(mem).os_has_range(addr as int, size as int), + addr as int % page_size() == 0, + size as int % page_size() == 0, + addr != 0, + addr + size <= usize::MAX, + !commit ==> old(mem).pointsto_has_range(addr as int, size as int), + ensures + mem.wf(), + mem.os.dom() =~= old(mem).os.dom(), + commit ==> mem.has_new_pointsto(&*old(mem)), + commit ==> res.0 ==> mem.os_has_range_read_write(addr as int, size as int), + !commit ==> mem.points_to@.dom().subset_of(old(mem).points_to@.dom()), + !commit ==> mem.os_rw_bytes().subset_of(old(mem).os_rw_bytes()), + !commit ==> old(mem).points_to@.dom() - mem.points_to@.dom() =~= old(mem).os_rw_bytes() + - mem.os_rw_bytes(), +{ + let is_zero = false; + let (start, csize) = os_page_align_areax(conservative, addr, size); + if csize == 0 { + return (true, is_zero); + } + let err = 0; + let p = PPtr::from_usize(start); + let tracked weird_extra = mem.take_points_to_set(mem.points_to@.dom() - mem.os_rw_bytes()); + let tracked mut exact_mem = mem.split(addr as int, size as int); + let ghost em = exact_mem; + if commit { + mprotect_prot_read_write(p, csize, Tracked(&mut exact_mem)); + } else { + // TODO madvise? + mprotect_prot_none(p, csize, Tracked(&mut exact_mem)); + } + proof { + mem.join(exact_mem); + mem.give_points_to_range(weird_extra); + //assert( mem.os.dom() == old(mem).os.dom(), if commit { - mprotect_prot_read_write(p, csize, Tracked(&mut exact_mem)); - } else { - // TODO madvise? - mprotect_prot_none(p, csize, Tracked(&mut exact_mem)); } - proof { - mem.join(exact_mem); - mem.give_points_to_range(weird_extra); - //assert( mem.os.dom() == old(mem).os.dom(), - if commit { - } - if !commit { - /*assert(em.points_to@.dom() + if !commit { + /*assert(em.points_to@.dom() =~= set_int_range(addr as int, addr + size as int)); assert(em.points_to@.dom() - exact_mem.points_to@.dom() =~= set_int_range(addr as int, addr + size as int)); @@ -23035,14 +22939,14 @@ mod os_commit { assert(old(mem).os_rw_bytes() - mem.os_rw_bytes() =~= set_int_range(addr as int, addr + size as int)); */ - } - assert(mem.os.dom() =~= old(mem).os.dom()); } - // TODO bubble up error instead of panicking - return (true, is_zero); + assert(mem.os.dom() =~= old(mem).os.dom()); } + // TODO bubble up error instead of panicking + return (true, is_zero); +} - } // verus! +} // verus! } mod os_alloc { @@ -23055,39 +22959,39 @@ mod os_alloc { verus! 
{ - pub fn os_alloc_aligned_offset( - size: usize, - alignment: usize, - offset: usize, - request_commit: bool, - allow_large: bool, - ) -> (res: (usize, bool, Tracked)) - requires - alignment + page_size() <= usize::MAX, - size as int % page_size() == 0, - size == SEGMENT_SIZE, - alignment as int % page_size() == 0, - ensures - ({ - let (addr, is_large, mem) = res; - addr != 0 ==> (mem@.wf() && mem@.os_has_range(addr as int, size as int) && addr + size - <= usize::MAX && (request_commit ==> mem@.os_has_range_read_write( - addr as int, - size as int, - )) && (request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( - !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int)) && ( - alignment != 0 ==> (addr + offset) % alignment as int == 0)) - }), - { - if offset > SEGMENT_SIZE as usize { - return (0, allow_large, Tracked(MemChunk::empty())); - } - if offset == 0 { - return os_alloc_aligned(size, alignment, request_commit, allow_large); - } else { - todo(); - loop { - }/* +pub fn os_alloc_aligned_offset( + size: usize, + alignment: usize, + offset: usize, + request_commit: bool, + allow_large: bool, +) -> (res: (usize, bool, Tracked)) + requires + alignment + page_size() <= usize::MAX, + size as int % page_size() == 0, + size == SEGMENT_SIZE, + alignment as int % page_size() == 0, + ensures + ({ + let (addr, is_large, mem) = res; + addr != 0 ==> (mem@.wf() && mem@.os_has_range(addr as int, size as int) && addr + size + <= usize::MAX && (request_commit ==> mem@.os_has_range_read_write( + addr as int, + size as int, + )) && (request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( + !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int)) && ( + alignment != 0 ==> (addr + offset) % alignment as int == 0)) + }), +{ + if offset > SEGMENT_SIZE as usize { + return (0, allow_large, Tracked(MemChunk::empty())); + } + if offset == 0 { + return os_alloc_aligned(size, alignment, request_commit, allow_large); + } else { + todo(); + loop { + }/* let extra = align_up(offset, alignment) - offset; let oversize = size + extra; @@ -23102,304 +23006,304 @@ mod os_alloc { } */ - } - } - - pub fn os_good_alloc_size(size: usize) -> (res: usize) - requires - size as int % page_size() == 0, - ensures - res as int % page_size() == 0, - res >= size, - size == SEGMENT_SIZE ==> res == SEGMENT_SIZE, - { - let kib = 1024; - let mib = 1024 * 1024; - let align_size = if size < 512 * kib { - get_page_size() - } else if size < 2 * mib { - 64 * kib - } else if size < 8 * mib { - 256 * kib - } else if size < 32 * mib { - mib - } else { - 4 * mib - }; - if unlikely(size >= usize::MAX - align_size) { - size - } else { - let x = align_up(size, align_size); - proof { - const_facts(); - mod_trans(x as int, align_size as int, page_size()); - if size <= SEGMENT_SIZE { - assert((size + page_size() - 1) / page_size() <= 8192); - assert((size + page_size() - 1) / page_size() * page_size() <= SEGMENT_SIZE); - } - } - return x; - } } +} - pub fn os_alloc_aligned( - size: usize, - alignment: usize, - request_commit: bool, - allow_large: bool, - ) -> (res: (usize, bool, Tracked)) - requires - alignment + page_size() <= usize::MAX, - size == SEGMENT_SIZE, - size as int % page_size() == 0, - alignment as int % page_size() == 0, - ensures - ({ - let (addr, is_large, mem) = res; - addr != 0 ==> (mem@.wf() && mem@.os_has_range(addr as int, size as int) && addr + size - <= usize::MAX && (request_commit ==> mem@.os_has_range_read_write( - addr as int, - size as int, - 
)) && (request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( - !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int)) && ( - alignment != 0 ==> addr % alignment == 0)) - }), - { - if size == 0 { - return (0, allow_large, Tracked(MemChunk::empty())); - } - let size1 = os_good_alloc_size(size); - let alignment1 = align_up(alignment, get_page_size()); +pub fn os_good_alloc_size(size: usize) -> (res: usize) + requires + size as int % page_size() == 0, + ensures + res as int % page_size() == 0, + res >= size, + size == SEGMENT_SIZE ==> res == SEGMENT_SIZE, +{ + let kib = 1024; + let mib = 1024 * 1024; + let align_size = if size < 512 * kib { + get_page_size() + } else if size < 2 * mib { + 64 * kib + } else if size < 8 * mib { + 256 * kib + } else if size < 32 * mib { + mib + } else { + 4 * mib + }; + if unlikely(size >= usize::MAX - align_size) { + size + } else { + let x = align_up(size, align_size); proof { - assert(alignment1 == alignment); - assert(size1 >= size); const_facts(); + mod_trans(x as int, align_size as int, page_size()); + if size <= SEGMENT_SIZE { + assert((size + page_size() - 1) / page_size() <= 8192); + assert((size + page_size() - 1) / page_size() * page_size() <= SEGMENT_SIZE); + } } - os_mem_alloc_aligned(size1, alignment1, request_commit, allow_large) + return x; } +} - pub fn os_mem_alloc_aligned( - size: usize, - alignment: usize, - request_commit: bool, - allow_large: bool, - ) -> (res: (usize, bool, Tracked)) - requires - size as int % page_size() == 0, - size <= SEGMENT_SIZE, - alignment as int % page_size() == 0, - ensures - ({ - let (addr, is_large, mem) = res; - addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size - <= usize::MAX && (request_commit ==> mem@.os_has_range_read_write( - addr as int, - size as int, - )) && (request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( - !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int)) && ( - alignment != 0 ==> addr % alignment == 0)) - }), - { - let mut allow_large = allow_large; - if !request_commit { - allow_large = false; - } - if (!(alignment >= get_page_size() && ((alignment & (alignment - 1)) == 0))) { - return (0, allow_large, Tracked(MemChunk::empty())); - } - let (p, is_large, Tracked(mem)) = os_mem_alloc(size, alignment, request_commit, allow_large); - if p == 0 { - return (p, is_large, Tracked(mem)); - } - if p % alignment != 0 { - todo(); - } - (p, is_large, Tracked(mem)) +pub fn os_alloc_aligned( + size: usize, + alignment: usize, + request_commit: bool, + allow_large: bool, +) -> (res: (usize, bool, Tracked)) + requires + alignment + page_size() <= usize::MAX, + size == SEGMENT_SIZE, + size as int % page_size() == 0, + alignment as int % page_size() == 0, + ensures + ({ + let (addr, is_large, mem) = res; + addr != 0 ==> (mem@.wf() && mem@.os_has_range(addr as int, size as int) && addr + size + <= usize::MAX && (request_commit ==> mem@.os_has_range_read_write( + addr as int, + size as int, + )) && (request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( + !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int)) && ( + alignment != 0 ==> addr % alignment == 0)) + }), +{ + if size == 0 { + return (0, allow_large, Tracked(MemChunk::empty())); + } + let size1 = os_good_alloc_size(size); + let alignment1 = align_up(alignment, get_page_size()); + proof { + assert(alignment1 == alignment); + assert(size1 >= size); + const_facts(); } + 
os_mem_alloc_aligned(size1, alignment1, request_commit, allow_large) +} - fn os_mem_alloc(size: usize, try_alignment: usize, request_commit: bool, allow_large: bool) -> (res: - (usize, bool, Tracked)) - requires - size as int % page_size() == 0, - size <= SEGMENT_SIZE, - try_alignment == 1 || try_alignment as int % page_size() == 0, - ensures - ({ - let (addr, is_large, mem) = res; - addr != 0 ==> (mem@.wf() && addr + size <= usize::MAX && mem@.os_exact_range( - addr as int, - size as int, - ) && (request_commit ==> mem@.os_has_range_read_write(addr as int, size as int)) && ( - request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( - !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int))) - }), - { - if size == 0 { - return (0, allow_large, Tracked(MemChunk::empty())); - } - let mut allow_large = allow_large; - if !request_commit { - allow_large = false; - } - let mut try_alignment = try_alignment; - if try_alignment == 0 { - try_alignment = 1; - } - unix_mmap(0, size, try_alignment, request_commit, false, allow_large) +pub fn os_mem_alloc_aligned( + size: usize, + alignment: usize, + request_commit: bool, + allow_large: bool, +) -> (res: (usize, bool, Tracked)) + requires + size as int % page_size() == 0, + size <= SEGMENT_SIZE, + alignment as int % page_size() == 0, + ensures + ({ + let (addr, is_large, mem) = res; + addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size + <= usize::MAX && (request_commit ==> mem@.os_has_range_read_write( + addr as int, + size as int, + )) && (request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( + !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int)) && ( + alignment != 0 ==> addr % alignment == 0)) + }), +{ + let mut allow_large = allow_large; + if !request_commit { + allow_large = false; + } + if (!(alignment >= get_page_size() && ((alignment & (alignment - 1)) == 0))) { + return (0, allow_large, Tracked(MemChunk::empty())); } + let (p, is_large, Tracked(mem)) = os_mem_alloc(size, alignment, request_commit, allow_large); + if p == 0 { + return (p, is_large, Tracked(mem)); + } + if p % alignment != 0 { + todo(); + } + (p, is_large, Tracked(mem)) +} - fn use_large_os_page(size: usize, alignment: usize) -> bool { - false +fn os_mem_alloc(size: usize, try_alignment: usize, request_commit: bool, allow_large: bool) -> (res: + (usize, bool, Tracked)) + requires + size as int % page_size() == 0, + size <= SEGMENT_SIZE, + try_alignment == 1 || try_alignment as int % page_size() == 0, + ensures + ({ + let (addr, is_large, mem) = res; + addr != 0 ==> (mem@.wf() && addr + size <= usize::MAX && mem@.os_exact_range( + addr as int, + size as int, + ) && (request_commit ==> mem@.os_has_range_read_write(addr as int, size as int)) && ( + request_commit ==> mem@.pointsto_has_range(addr as int, size as int)) && ( + !request_commit ==> mem@.os_has_range_no_read_write(addr as int, size as int))) + }), +{ + if size == 0 { + return (0, allow_large, Tracked(MemChunk::empty())); } + let mut allow_large = allow_large; + if !request_commit { + allow_large = false; + } + let mut try_alignment = try_alignment; + if try_alignment == 0 { + try_alignment = 1; + } + unix_mmap(0, size, try_alignment, request_commit, false, allow_large) +} - fn unix_mmap( - addr: usize, - size: usize, - try_alignment: usize, - prot_rw: bool, - large_only: bool, - allow_large: bool, - ) -> (res: (usize, bool, Tracked)) - requires - addr as int % page_size() == 0, - size as int % 
page_size() == 0, - size <= SEGMENT_SIZE, - try_alignment == 1 || try_alignment as int % page_size() == 0, - ensures - ({ - let (addr, is_large, mem) = res; - addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size - <= usize::MAX && (prot_rw ==> mem@.os_has_range_read_write( - addr as int, - size as int, - )) && (prot_rw ==> mem@.pointsto_has_range(addr as int, size as int)) && (!prot_rw - ==> mem@.os_has_range_no_read_write(addr as int, size as int))) - }), - { - let is_large = true; - if (large_only || use_large_os_page(size, try_alignment)) && allow_large { +fn use_large_os_page(size: usize, alignment: usize) -> bool { + false +} + +fn unix_mmap( + addr: usize, + size: usize, + try_alignment: usize, + prot_rw: bool, + large_only: bool, + allow_large: bool, +) -> (res: (usize, bool, Tracked)) + requires + addr as int % page_size() == 0, + size as int % page_size() == 0, + size <= SEGMENT_SIZE, + try_alignment == 1 || try_alignment as int % page_size() == 0, + ensures + ({ + let (addr, is_large, mem) = res; + addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size + <= usize::MAX && (prot_rw ==> mem@.os_has_range_read_write( + addr as int, + size as int, + )) && (prot_rw ==> mem@.pointsto_has_range(addr as int, size as int)) && (!prot_rw + ==> mem@.os_has_range_no_read_write(addr as int, size as int))) + }), +{ + let is_large = true; + if (large_only || use_large_os_page(size, try_alignment)) && allow_large { + todo(); + } + let is_large = false; + let (p, Tracked(mem)) = unix_mmapx(addr, size, try_alignment, prot_rw); + if p != 0 { + if allow_large && use_large_os_page(size, try_alignment) { todo(); } - let is_large = false; - let (p, Tracked(mem)) = unix_mmapx(addr, size, try_alignment, prot_rw); - if p != 0 { - if allow_large && use_large_os_page(size, try_alignment) { - todo(); - } - return (p, is_large, Tracked(mem)); - } else { - todo(); - loop { - } + return (p, is_large, Tracked(mem)); + } else { + todo(); + loop { } } +} - exec static ALIGNED_BASE: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0); +exec static ALIGNED_BASE: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0); - #[inline] - fn aligned_base_add(s: usize) -> usize { - ALIGNED_BASE.fetch_add(s, core::sync::atomic::Ordering::AcqRel) - } +#[inline] +fn aligned_base_add(s: usize) -> usize { + ALIGNED_BASE.fetch_add(s, core::sync::atomic::Ordering::AcqRel) +} - #[inline] - fn aligned_base_cas(s: usize, t: usize) { - let _ = ALIGNED_BASE.compare_exchange( - s, - t, - core::sync::atomic::Ordering::AcqRel, - core::sync::atomic::Ordering::Acquire, - ); - } +#[inline] +fn aligned_base_cas(s: usize, t: usize) { + let _ = ALIGNED_BASE.compare_exchange( + s, + t, + core::sync::atomic::Ordering::AcqRel, + core::sync::atomic::Ordering::Acquire, + ); +} - const HINT_BASE: usize = (2 as usize) << (40 as usize); +const HINT_BASE: usize = (2 as usize) << (40 as usize); - const HINT_AREA: usize = (4 as usize) << (40 as usize); +const HINT_AREA: usize = (4 as usize) << (40 as usize); - const HINT_MAX: usize = (30 as usize) << (40 as usize); +const HINT_MAX: usize = (30 as usize) << (40 as usize); - fn os_get_aligned_hint(try_alignment: usize, size: usize) -> (hint: usize) - requires - size <= SEGMENT_SIZE, - ensures - try_alignment != 0 ==> hint % try_alignment == 0, - try_alignment <= 1 ==> hint == 0, - { - proof { - const_facts(); - } - if try_alignment <= 1 || try_alignment > SEGMENT_SIZE as usize { - return 0; - } - let 
size = align_up(size, SEGMENT_SIZE as usize); - if size > 1024 * 1024 * 1024 { - return 0; - } - let mut hint = aligned_base_add(size); - if hint == 0 || hint > HINT_MAX { - let iinit = HINT_BASE; - //let r = heap_random_next(); - //let iinit = iinit + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA); - let expected = hint.wrapping_add(size); - aligned_base_cas(expected, iinit); - hint = aligned_base_add(size); - } - if hint % try_alignment != 0 { - return 0; - } - return hint; +fn os_get_aligned_hint(try_alignment: usize, size: usize) -> (hint: usize) + requires + size <= SEGMENT_SIZE, + ensures + try_alignment != 0 ==> hint % try_alignment == 0, + try_alignment <= 1 ==> hint == 0, +{ + proof { + const_facts(); } + if try_alignment <= 1 || try_alignment > SEGMENT_SIZE as usize { + return 0; + } + let size = align_up(size, SEGMENT_SIZE as usize); + if size > 1024 * 1024 * 1024 { + return 0; + } + let mut hint = aligned_base_add(size); + if hint == 0 || hint > HINT_MAX { + let iinit = HINT_BASE; + //let r = heap_random_next(); + //let iinit = iinit + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA); + let expected = hint.wrapping_add(size); + aligned_base_cas(expected, iinit); + hint = aligned_base_add(size); + } + if hint % try_alignment != 0 { + return 0; + } + return hint; +} - fn unix_mmapx(hint: usize, size: usize, try_alignment: usize, prot_rw: bool) -> (res: ( - usize, - Tracked, - )) - requires - hint as int % page_size() == 0, - size as int % page_size() == 0, - size <= SEGMENT_SIZE, - try_alignment > 1 ==> try_alignment as int % page_size() == 0, - ensures - ({ - let (addr, mem) = res; - addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size - <= usize::MAX && (prot_rw ==> mem@.os_has_range_read_write( - addr as int, - size as int, - )) && (prot_rw ==> mem@.pointsto_has_range(addr as int, size as int)) && (!prot_rw - ==> mem@.os_has_range_no_read_write(addr as int, size as int))) - }), - { - if hint == 0 && INTPTR_SIZE >= 8 { - let hint = os_get_aligned_hint(try_alignment, size); - proof { - const_facts(); - if try_alignment > 1 { - mod_trans(hint as int, try_alignment as int, page_size()); - } - } - if hint != 0 { - let (p, Tracked(mem)) = if prot_rw { - mmap_prot_read_write(hint, size) - } else { - mmap_prot_none(hint, size) - }; - if p != MAP_FAILED { - return (p, Tracked(mem)); - } +fn unix_mmapx(hint: usize, size: usize, try_alignment: usize, prot_rw: bool) -> (res: ( + usize, + Tracked, +)) + requires + hint as int % page_size() == 0, + size as int % page_size() == 0, + size <= SEGMENT_SIZE, + try_alignment > 1 ==> try_alignment as int % page_size() == 0, + ensures + ({ + let (addr, mem) = res; + addr != 0 ==> (mem@.wf() && mem@.os_exact_range(addr as int, size as int) && addr + size + <= usize::MAX && (prot_rw ==> mem@.os_has_range_read_write( + addr as int, + size as int, + )) && (prot_rw ==> mem@.pointsto_has_range(addr as int, size as int)) && (!prot_rw + ==> mem@.os_has_range_no_read_write(addr as int, size as int))) + }), +{ + if hint == 0 && INTPTR_SIZE >= 8 { + let hint = os_get_aligned_hint(try_alignment, size); + proof { + const_facts(); + if try_alignment > 1 { + mod_trans(hint as int, try_alignment as int, page_size()); } } - let (p, Tracked(mem)) = if prot_rw { - mmap_prot_read_write(hint, size) - } else { - mmap_prot_none(hint, size) - }; - if p != MAP_FAILED { - return (p, Tracked(mem)); + if hint != 0 { + let (p, Tracked(mem)) = if prot_rw { + mmap_prot_read_write(hint, size) + } else { + mmap_prot_none(hint, 
size) + }; + if p != MAP_FAILED { + return (p, Tracked(mem)); + } } - return (0, Tracked(mem)); } + let (p, Tracked(mem)) = if prot_rw { + mmap_prot_read_write(hint, size) + } else { + mmap_prot_none(hint, size) + }; + if p != MAP_FAILED { + return (p, Tracked(mem)); + } + return (0, Tracked(mem)); +} - } // verus! +} // verus! } mod page { @@ -23430,343 +23334,343 @@ mod page { verus! { - pub fn find_page( - heap_ptr: HeapPtr, - size: usize, - huge_alignment: usize, - Tracked(local): Tracked<&mut Local>, - ) -> (page: PagePtr) - requires - old(local).wf(), - heap_ptr.wf(), - heap_ptr.is_in(*old(local)), - ensures - local.wf(), - common_preserves(*old(local), *local), - page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( - *local, - ), - page.page_ptr.id() != 0 ==> local.pages.index( - page.page_id@, - ).inner@.value.unwrap().xblock_size >= size, - { - proof { - const_facts(); - } - let req_size = size; - if unlikely(req_size > MEDIUM_OBJ_SIZE_MAX as usize || huge_alignment > 0) { - if unlikely(req_size > MAX_ALLOC_SIZE) { - return PagePtr::null(); - } else { - todo(); - loop { - } - } +pub fn find_page( + heap_ptr: HeapPtr, + size: usize, + huge_alignment: usize, + Tracked(local): Tracked<&mut Local>, +) -> (page: PagePtr) + requires + old(local).wf(), + heap_ptr.wf(), + heap_ptr.is_in(*old(local)), + ensures + local.wf(), + common_preserves(*old(local), *local), + page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( + *local, + ), + page.page_ptr.id() != 0 ==> local.pages.index( + page.page_id@, + ).inner@.value.unwrap().xblock_size >= size, +{ + proof { + const_facts(); + } + let req_size = size; + if unlikely(req_size > MEDIUM_OBJ_SIZE_MAX as usize || huge_alignment > 0) { + if unlikely(req_size > MAX_ALLOC_SIZE) { + return PagePtr::null(); } else { - return find_free_page(heap_ptr, size, Tracked(&mut *local)); + todo(); + loop { + } } + } else { + return find_free_page(heap_ptr, size, Tracked(&mut *local)); } - - fn find_free_page(heap_ptr: HeapPtr, size: usize, Tracked(local): Tracked<&mut Local>) -> (page: - PagePtr) - requires - old(local).wf(), - heap_ptr.wf(), - heap_ptr.is_in(*old(local)), - size <= MEDIUM_OBJ_SIZE_MAX, - ensures - local.wf(), - common_preserves(*old(local), *local), - page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( - *local, - ), - page.page_ptr.id() != 0 ==> local.pages.index( - page.page_id@, - ).inner@.value.unwrap().xblock_size >= size, - { - proof { - const_facts(); - } - let pq = bin(size) as usize; - proof { - local.page_organization.used_first_is_in(pq as int); - crate::bin_sizes::bin_size_result(size); - } - let mut page = PagePtr { - page_ptr: heap_ptr.get_pages(Tracked(&*local))[pq].first, - page_id: Ghost(local.page_organization.used_dlist_headers[pq as int].first.get_Some_0()), - }; - if page.page_ptr.to_usize() != 0 { - crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); - if !page.get_inner_ref(Tracked(&*local)).free.is_empty() { - return page; - } +} + +fn find_free_page(heap_ptr: HeapPtr, size: usize, Tracked(local): Tracked<&mut Local>) -> (page: + PagePtr) + requires + old(local).wf(), + heap_ptr.wf(), + heap_ptr.is_in(*old(local)), + size <= MEDIUM_OBJ_SIZE_MAX, + ensures + local.wf(), + common_preserves(*old(local), *local), + page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( + *local, + ), + page.page_ptr.id() != 0 ==> local.pages.index( + page.page_id@, + 
).inner@.value.unwrap().xblock_size >= size, +{ + proof { + const_facts(); + } + let pq = bin(size) as usize; + proof { + local.page_organization.used_first_is_in(pq as int); + crate::bin_sizes::bin_size_result(size); + } + let mut page = PagePtr { + page_ptr: heap_ptr.get_pages(Tracked(&*local))[pq].first, + page_id: Ghost(local.page_organization.used_dlist_headers[pq as int].first.get_Some_0()), + }; + if page.page_ptr.to_usize() != 0 { + crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); + if !page.get_inner_ref(Tracked(&*local)).free.is_empty() { + return page; } - page_queue_find_free_ex(heap_ptr, pq, true, Tracked(&mut *local)) } + page_queue_find_free_ex(heap_ptr, pq, true, Tracked(&mut *local)) +} - fn page_queue_find_free_ex( - heap_ptr: HeapPtr, - pq: usize, - first_try: bool, - Tracked(local): Tracked<&mut Local>, - ) -> (page: PagePtr) - requires - old(local).wf(), - heap_ptr.wf(), - heap_ptr.is_in(*old(local)), - valid_bin_idx(pq as int), - size_of_bin(pq as int) <= MEDIUM_OBJ_SIZE_MAX, - ensures +fn page_queue_find_free_ex( + heap_ptr: HeapPtr, + pq: usize, + first_try: bool, + Tracked(local): Tracked<&mut Local>, +) -> (page: PagePtr) + requires + old(local).wf(), + heap_ptr.wf(), + heap_ptr.is_in(*old(local)), + valid_bin_idx(pq as int), + size_of_bin(pq as int) <= MEDIUM_OBJ_SIZE_MAX, + ensures + local.wf(), + common_preserves(*old(local), *local), + page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( + *local, + ), + page.page_ptr.id() != 0 ==> local.pages.index( + page.page_id@, + ).inner@.value.unwrap().xblock_size == size_of_bin(pq as int), +{ + let mut page = PagePtr { + page_ptr: heap_ptr.get_pages(Tracked(&*local))[pq].first, + page_id: Ghost(local.page_organization.used_dlist_headers[pq as int].first.get_Some_0()), + }; + let ghost mut list_idx = 0; + proof { + local.page_organization.used_first_is_in(pq as int); + } + loop + invariant_ensures local.wf(), + heap_ptr.wf(), + heap_ptr.is_in(*local), common_preserves(*old(local), *local), - page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( - *local, - ), - page.page_ptr.id() != 0 ==> local.pages.index( + 0 <= pq <= BIN_HUGE, + size_of_bin(pq as int) <= MEDIUM_OBJ_SIZE_MAX, + page.page_ptr.id() != 0 ==> page.wf() && local.page_organization.valid_used_page( page.page_id@, - ).inner@.value.unwrap().xblock_size == size_of_bin(pq as int), + pq as int, + list_idx, + ), { - let mut page = PagePtr { - page_ptr: heap_ptr.get_pages(Tracked(&*local))[pq].first, - page_id: Ghost(local.page_organization.used_dlist_headers[pq as int].first.get_Some_0()), - }; - let ghost mut list_idx = 0; - proof { - local.page_organization.used_first_is_in(pq as int); + if page.page_ptr.to_usize() == 0 { + break ; } - loop - invariant_ensures - local.wf(), - heap_ptr.wf(), - heap_ptr.is_in(*local), - common_preserves(*old(local), *local), - 0 <= pq <= BIN_HUGE, - size_of_bin(pq as int) <= MEDIUM_OBJ_SIZE_MAX, - page.page_ptr.id() != 0 ==> page.wf() && local.page_organization.valid_used_page( - page.page_id@, - pq as int, - list_idx, - ), - { - if page.page_ptr.to_usize() == 0 { - break ; - } - let next_ptr = page.get_next(Tracked(&*local)); - let ghost page_id = page.page_id@; - let ghost next_id = - local.page_organization.pages[page_id].dlist_entry.unwrap().next.unwrap(); - proof { - /*assert(local.page_organization.pages.dom().contains(page_id)); + let next_ptr = page.get_next(Tracked(&*local)); + let ghost page_id = page.page_id@; + let ghost 
next_id = + local.page_organization.pages[page_id].dlist_entry.unwrap().next.unwrap(); + proof { + /*assert(local.page_organization.pages.dom().contains(page_id)); assert(page_organization_pages_match_data(local.page_organization.pages[page_id], local.pages[page_id], local.psa[page_id])); assert(is_page_ptr_opt(next_ptr, local.page_organization.pages[page_id].dlist_entry.unwrap().next)); if next_ptr.id() != 0 { assert(local.page_organization.pages[page_id].dlist_entry.unwrap().next.is_some()); assert(is_page_ptr(next_ptr.id(), next_id)); }*/ - local.page_organization.used_next_is_in(page.page_id@, pq as int, list_idx); - size_of_bin_mult_word_size(pq as int); - } - crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); - if !page.get_inner_ref(Tracked(&*local)).free.is_empty() { - break ; - } - if page.get_inner_ref(Tracked(&*local)).capacity < page.get_inner_ref( - Tracked(&*local), - ).reserved { - //let tld_ptr = heap_ptr.get_ref(Tracked(&*local)).tld_ptr; - //assert(local.is_used_primary(page.page_id@)); - crate::alloc_generic::page_extend_free(page, Tracked(&mut *local)); - break ; - } - page_to_full(page, heap_ptr, pq, Tracked(&mut *local), Ghost(list_idx), Ghost(next_id)); - page = PagePtr { page_ptr: next_ptr, page_id: Ghost(next_id) }; - proof { - //list_idx = list_idx + 1; - /*if next_ptr.id() != 0 { + local.page_organization.used_next_is_in(page.page_id@, pq as int, list_idx); + size_of_bin_mult_word_size(pq as int); + } + crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); + if !page.get_inner_ref(Tracked(&*local)).free.is_empty() { + break ; + } + if page.get_inner_ref(Tracked(&*local)).capacity < page.get_inner_ref( + Tracked(&*local), + ).reserved { + //let tld_ptr = heap_ptr.get_ref(Tracked(&*local)).tld_ptr; + //assert(local.is_used_primary(page.page_id@)); + crate::alloc_generic::page_extend_free(page, Tracked(&mut *local)); + break ; + } + page_to_full(page, heap_ptr, pq, Tracked(&mut *local), Ghost(list_idx), Ghost(next_id)); + page = PagePtr { page_ptr: next_ptr, page_id: Ghost(next_id) }; + proof { + //list_idx = list_idx + 1; + /*if next_ptr.id() != 0 { assert(page.wf()); assert(local.page_organization.valid_used_page(page.page_id@, pq as int, list_idx)); }*/ - } } - if page.page_ptr.to_usize() == 0 { - let page = page_fresh(heap_ptr, pq, Tracked(&mut *local)); - if page.page_ptr.to_usize() == 0 && first_try { - return page_queue_find_free_ex(heap_ptr, pq, false, Tracked(&mut *local)) - } else { - return page; - } + } + if page.page_ptr.to_usize() == 0 { + let page = page_fresh(heap_ptr, pq, Tracked(&mut *local)); + if page.page_ptr.to_usize() == 0 && first_try { + return page_queue_find_free_ex(heap_ptr, pq, false, Tracked(&mut *local)) } else { - let ghost old_local = *local; - page_get_mut_inner!(page, local, inner => { + return page; + } + } else { + let ghost old_local = *local; + page_get_mut_inner!(page, local, inner => { inner.set_retire_expire(0); }); - proof { - preserves_mem_chunk_good(old_local, *local); - } - return page; + proof { + preserves_mem_chunk_good(old_local, *local); } + return page; } +} - fn page_fresh(heap_ptr: HeapPtr, pq: usize, Tracked(local): Tracked<&mut Local>) -> (page: PagePtr) - requires - old(local).wf(), - heap_ptr.wf(), - heap_ptr.is_in(*old(local)), - valid_bin_idx(pq as int), - size_of_bin(pq as int) <= MEDIUM_OBJ_SIZE_MAX, - ensures - local.wf(), - common_preserves(*old(local), *local), - page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( - 
*local, - ), - page.page_ptr.id() != 0 ==> local.pages.index( - page.page_id@, - ).inner@.value.unwrap().xblock_size == size_of_bin(pq as int), - { - proof { - size_of_bin_bounds(pq as int); - } - let block_size = heap_ptr.get_pages(Tracked(&*local))[pq].block_size; - page_fresh_alloc(heap_ptr, pq, block_size, 0, Tracked(&mut *local)) +fn page_fresh(heap_ptr: HeapPtr, pq: usize, Tracked(local): Tracked<&mut Local>) -> (page: PagePtr) + requires + old(local).wf(), + heap_ptr.wf(), + heap_ptr.is_in(*old(local)), + valid_bin_idx(pq as int), + size_of_bin(pq as int) <= MEDIUM_OBJ_SIZE_MAX, + ensures + local.wf(), + common_preserves(*old(local), *local), + page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( + *local, + ), + page.page_ptr.id() != 0 ==> local.pages.index( + page.page_id@, + ).inner@.value.unwrap().xblock_size == size_of_bin(pq as int), +{ + proof { + size_of_bin_bounds(pq as int); } + let block_size = heap_ptr.get_pages(Tracked(&*local))[pq].block_size; + page_fresh_alloc(heap_ptr, pq, block_size, 0, Tracked(&mut *local)) +} - fn page_fresh_alloc( - heap_ptr: HeapPtr, - pq: usize, - block_size: usize, - page_alignment: usize, - Tracked(local): Tracked<&mut Local>, - ) -> (page: PagePtr) - requires - old(local).wf(), - heap_ptr.wf(), - heap_ptr.is_in(*old(local)), - 2 <= block_size, - valid_bin_idx(pq as int), - block_size == size_of_bin(pq as int), - block_size <= MEDIUM_OBJ_SIZE_MAX, - ensures - local.wf(), - common_preserves(*old(local), *local), - page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( - *local, - ), - page.page_ptr.id() != 0 ==> local.pages.index( - page.page_id@, - ).inner@.value.unwrap().xblock_size == block_size, - { - let tld_ptr = heap_ptr.get_ref(Tracked(&*local)).tld_ptr; - let page_ptr = crate::segment::segment_page_alloc( - heap_ptr, - block_size, - page_alignment, - tld_ptr, - Tracked(&mut *local), - ); - if page_ptr.page_ptr.to_usize() == 0 { - return page_ptr; - } - let full_block_size: usize = block_size; // TODO handle pq == NULL or huge pages - let tld_ptr = heap_ptr.get_ref(Tracked(&*local)).tld_ptr; - proof { - smallest_bin_fitting_size_size_of_bin(pq as int); - size_of_bin_mult_word_size(pq as int); - if pq != BIN_HUGE { - size_of_bin_bounds_not_huge(pq as int); - } - lemma_bin_sizes_constants(); - } - page_init(heap_ptr, page_ptr, full_block_size, tld_ptr, Tracked(&mut *local), Ghost(pq as int)); - page_queue_push(heap_ptr, pq, page_ptr, Tracked(&mut *local)); +fn page_fresh_alloc( + heap_ptr: HeapPtr, + pq: usize, + block_size: usize, + page_alignment: usize, + Tracked(local): Tracked<&mut Local>, +) -> (page: PagePtr) + requires + old(local).wf(), + heap_ptr.wf(), + heap_ptr.is_in(*old(local)), + 2 <= block_size, + valid_bin_idx(pq as int), + block_size == size_of_bin(pq as int), + block_size <= MEDIUM_OBJ_SIZE_MAX, + ensures + local.wf(), + common_preserves(*old(local), *local), + page.page_ptr.id() != 0 ==> page.wf() && page.is_in(*local) && page.is_used_and_primary( + *local, + ), + page.page_ptr.id() != 0 ==> local.pages.index( + page.page_id@, + ).inner@.value.unwrap().xblock_size == block_size, +{ + let tld_ptr = heap_ptr.get_ref(Tracked(&*local)).tld_ptr; + let page_ptr = crate::segment::segment_page_alloc( + heap_ptr, + block_size, + page_alignment, + tld_ptr, + Tracked(&mut *local), + ); + if page_ptr.page_ptr.to_usize() == 0 { return page_ptr; } - - // READY --> USED - fn page_init( - heap_ptr: HeapPtr, - page_ptr: PagePtr, - block_size: usize, - tld_ptr: 
TldPtr, - Tracked(local): Tracked<&mut Local>, - Ghost(pq): Ghost, - ) - requires - old(local).wf_main(), - heap_ptr.wf(), - heap_ptr.is_in(*old(local)), - page_ptr.wf(), - page_ptr.is_in(*old(local)), - old(local).page_organization.popped == Popped::Ready(page_ptr.page_id@, true), - block_size != 0, - block_size % 8 == 0, - block_size <= u32::MAX, - valid_bin_idx(pq), - size_of_bin(pq) == block_size, - //old(local).page_organization[page_ptr.page_id@].block_size == Some(block_ - //old(local).page_inner(page_ptr.page_id@).xblock_size == block_size - //old(local).segments[page_ptr.page_id@.segment_id] - // .mem.committed_pointsto_has_range( - // segment_start(page_ptr.page_id@.segment_id) + page_ptr.page_id@.idx * SLICE_SIZE, - // local.page_organization.pages[page_ptr.page_id@].count.unwrap() * SLIZE_SIZE), - page_init_is_committed(page_ptr.page_id@, *old(local)), - good_count_for_block_size( - block_size as int, - old(local).page_organization.pages[page_ptr.page_id@].count.unwrap() as int, - ), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - page_ptr.is_used(*local), - local.page_organization.popped == Popped::Used(page_ptr.page_id@, true), - local.page_organization.pages[page_ptr.page_id@].page_header_kind == Some( - PageHeaderKind::Normal(pq as int, block_size as int), - ), - { - let ghost mut next_state; - proof { - next_state = - PageOrg::take_step::set_range_to_used( - local.page_organization, - PageHeaderKind::Normal(pq as int, block_size as int), - ); - } - let ghost page_id = page_ptr.page_id@; - let ghost n_slices = local.page_organization.pages[page_id].count.unwrap(); - let ghost n_blocks = n_slices * SLICE_SIZE / block_size as int; - let ghost range = page_id.range_from(0, n_slices as int); - assert forall|pid| range.contains(pid) implies local.unused_pages.dom().contains(pid) by { - assert(local.page_organization.pages.dom().contains(pid)); - assert(local.page_organization.pages[pid].is_used == false); + let full_block_size: usize = block_size; // TODO handle pq == NULL or huge pages + let tld_ptr = heap_ptr.get_ref(Tracked(&*local)).tld_ptr; + proof { + smallest_bin_fitting_size_size_of_bin(pq as int); + size_of_bin_mult_word_size(pq as int); + if pq != BIN_HUGE { + size_of_bin_bounds_not_huge(pq as int); } - let ghost new_page_state_map = Map::new( - |pid: PageId| range.contains(pid), - |pid: PageId| - PageState { - offset: pid.idx - page_id.idx, - block_size: block_size as nat, - num_blocks: 0, - shared_access: arbitrary(), - is_enabled: false, - }, - ); - assert(n_slices > 0); - assert(range.contains(page_id)); - let count = page_ptr.get_count(Tracked(&*local)); - let tracked thread_token = local.take_thread_token(); - let tracked (Tracked(thread_token), Tracked(delay_token), Tracked(heap_of_page_token)) = - local.instance.create_page_mk_tokens(// params + lemma_bin_sizes_constants(); + } + page_init(heap_ptr, page_ptr, full_block_size, tld_ptr, Tracked(&mut *local), Ghost(pq as int)); + page_queue_push(heap_ptr, pq, page_ptr, Tracked(&mut *local)); + return page_ptr; +} - local.thread_id, - page_id, - n_slices as nat, - block_size as nat, - new_page_state_map, - // input ghost state - thread_token, +// READY --> USED +fn page_init( + heap_ptr: HeapPtr, + page_ptr: PagePtr, + block_size: usize, + tld_ptr: TldPtr, + Tracked(local): Tracked<&mut Local>, + Ghost(pq): Ghost, +) + requires + old(local).wf_main(), + heap_ptr.wf(), + heap_ptr.is_in(*old(local)), + page_ptr.wf(), + page_ptr.is_in(*old(local)), + old(local).page_organization.popped == 
Popped::Ready(page_ptr.page_id@, true), + block_size != 0, + block_size % 8 == 0, + block_size <= u32::MAX, + valid_bin_idx(pq), + size_of_bin(pq) == block_size, + //old(local).page_organization[page_ptr.page_id@].block_size == Some(block_ + //old(local).page_inner(page_ptr.page_id@).xblock_size == block_size + //old(local).segments[page_ptr.page_id@.segment_id] + // .mem.committed_pointsto_has_range( + // segment_start(page_ptr.page_id@.segment_id) + page_ptr.page_id@.idx * SLICE_SIZE, + // local.page_organization.pages[page_ptr.page_id@].count.unwrap() * SLIZE_SIZE), + page_init_is_committed(page_ptr.page_id@, *old(local)), + good_count_for_block_size( + block_size as int, + old(local).page_organization.pages[page_ptr.page_id@].count.unwrap() as int, + ), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + page_ptr.is_used(*local), + local.page_organization.popped == Popped::Used(page_ptr.page_id@, true), + local.page_organization.pages[page_ptr.page_id@].page_header_kind == Some( + PageHeaderKind::Normal(pq as int, block_size as int), + ), +{ + let ghost mut next_state; + proof { + next_state = + PageOrg::take_step::set_range_to_used( + local.page_organization, + PageHeaderKind::Normal(pq as int, block_size as int), ); - unused_page_get_mut!(page_ptr, local, page => { + } + let ghost page_id = page_ptr.page_id@; + let ghost n_slices = local.page_organization.pages[page_id].count.unwrap(); + let ghost n_blocks = n_slices * SLICE_SIZE / block_size as int; + let ghost range = page_id.range_from(0, n_slices as int); + assert forall|pid| range.contains(pid) implies local.unused_pages.dom().contains(pid) by { + assert(local.page_organization.pages.dom().contains(pid)); + assert(local.page_organization.pages[pid].is_used == false); + } + let ghost new_page_state_map = Map::new( + |pid: PageId| range.contains(pid), + |pid: PageId| + PageState { + offset: pid.idx - page_id.idx, + block_size: block_size as nat, + num_blocks: 0, + shared_access: arbitrary(), + is_enabled: false, + }, + ); + assert(n_slices > 0); + assert(range.contains(page_id)); + let count = page_ptr.get_count(Tracked(&*local)); + let tracked thread_token = local.take_thread_token(); + let tracked (Tracked(thread_token), Tracked(delay_token), Tracked(heap_of_page_token)) = + local.instance.create_page_mk_tokens( // params + + local.thread_id, + page_id, + n_slices as nat, + block_size as nat, + new_page_state_map, + // input ghost state + thread_token, + ); + unused_page_get_mut!(page_ptr, local, page => { let tracked (Tracked(emp_inst), Tracked(emp_x), Tracked(emp_y)) = BoolAgree::Instance::initialize(false); let ghost g = (Ghost(local.instance), Ghost(page_ptr.page_id@), Tracked(emp_x), Tracked(emp_inst)); page.xheap = AtomicHeapPtr { @@ -23776,7 +23680,7 @@ mod page { Tracked(local.instance.clone()), Tracked(delay_token)); //assert(page.xheap.wf(local.instance, page_ptr.page_id@)); }); - unused_page_get_mut_inner!(page_ptr, local, inner => { + unused_page_get_mut_inner!(page_ptr, local, inner => { proof { const_facts(); //assert(block_size as u32 == block_size); @@ -23829,21 +23733,21 @@ mod page { assert(page_size as int / block_size as int * block_size as int <= page_size) by(nonlinear_arith) requires page_size >= 0, block_size > 0; }); - proof { - let tracked new_psa_map = local.unused_pages.tracked_remove_keys(range); - let ghost new_page_state_map2 = Map::new( - |pid: PageId| range.contains(pid), - |pid: PageId| - PageState { - //offset: pid.idx - page_id.idx, - //block_size: block_size as nat, - 
//num_blocks: 0, - is_enabled: true, - shared_access: new_psa_map[pid], - ..thread_token@.value.pages[pid] - }, - ); - /*assert forall |pid: PageId| #[trigger] new_page_state_map2.dom().contains(pid) implies + proof { + let tracked new_psa_map = local.unused_pages.tracked_remove_keys(range); + let ghost new_page_state_map2 = Map::new( + |pid: PageId| range.contains(pid), + |pid: PageId| + PageState { + //offset: pid.idx - page_id.idx, + //block_size: block_size as nat, + //num_blocks: 0, + is_enabled: true, + shared_access: new_psa_map[pid], + ..thread_token@.value.pages[pid] + }, + ); + /*assert forall |pid: PageId| #[trigger] new_page_state_map2.dom().contains(pid) implies new_page_state_map2[pid] == PageState { is_enabled: true, shared_access: new_psa_map[pid], @@ -23870,21 +23774,21 @@ mod page { assert(a.is_enabled == llama.is_enabled); assert(a == llama); }*/ - let tracked thread_token = local.instance.page_enable(// params + let tracked thread_token = local.instance.page_enable( // params - local.thread_id, - page_id, - n_slices as nat, - new_page_state_map2, - new_psa_map, - // input ghost state - thread_token, - new_psa_map, - ); - local.thread_token = thread_token; - local.page_organization = next_state; - local.psa = local.psa.insert(page_id, new_psa_map[page_id]); - /*assert forall |pid| + local.thread_id, + page_id, + n_slices as nat, + new_page_state_map2, + new_psa_map, + // input ghost state + thread_token, + new_psa_map, + ); + local.thread_token = thread_token; + local.page_organization = next_state; + local.psa = local.psa.insert(page_id, new_psa_map[page_id]); + /*assert forall |pid| #[trigger] local.pages.dom().contains(pid) && local.thread_token@.value.pages.dom().contains(pid) implies local.pages.index(pid).wf( @@ -23909,7 +23813,7 @@ mod page { assert(local.pages.index(pid).wf(pid, local.thread_token@.value.pages.index(pid), local.instance)); } }*/ - /*assert forall |segment_id| + /*assert forall |segment_id| #[trigger] local.segments.dom().contains(segment_id) ==> local.segments[segment_id].wf( segment_id, @@ -23929,10 +23833,10 @@ mod page { assert(local.mem_chunk_good(segment_id)); } }*/ - //let org_pages = local.page_organization.pages; - //let pages = local.pages; - //let psa = local.psa; - /*assert forall |pid| #[trigger] org_pages.dom().contains(pid) implies + //let org_pages = local.page_organization.pages; + //let pages = local.pages; + //let psa = local.psa; + /*assert forall |pid| #[trigger] org_pages.dom().contains(pid) implies page_organization_pages_match_data(org_pages[pid], pages[pid], psa[pid], pid, local.page_organization.popped) by { if pid == page_id { @@ -23941,47 +23845,47 @@ mod page { assert(page_organization_pages_match_data(org_pages[pid], pages[pid], psa[pid], pid, local.page_organization.popped)); } }*/ - preserves_mem_chunk_good_except(*old(local), *local, page_id.segment_id); - preserves_mem_chunk_on_set_used(*old(local), *local, page_id); - /*assert(page_organization_pages_match(local.page_organization.pages, + preserves_mem_chunk_good_except(*old(local), *local, page_id.segment_id); + preserves_mem_chunk_on_set_used(*old(local), *local, page_id); + /*assert(page_organization_pages_match(local.page_organization.pages, local.pages, local.psa, local.page_organization.popped)); assert(local.page_organization_valid()); assert(local.wf_main());*/ - } - //assert(local.is_used_primary(page_ptr.page_id@)); - crate::alloc_generic::page_extend_free(page_ptr, Tracked(&mut *local)) } + //assert(local.is_used_primary(page_ptr.page_id@)); + 
crate::alloc_generic::page_extend_free(page_ptr, Tracked(&mut *local)) +} - fn page_queue_of(page: PagePtr, Tracked(local): Tracked<&Local>) -> (res: ( - HeapPtr, - usize, - Ghost, - )) - requires - local.wf(), - page.wf(), - page.is_in(*local), - page.is_used_and_primary(*local), - ensures - ({ - let (heap, pq, list_idx) = res; - { - &&& heap.wf() - &&& heap.is_in(*local) - &&& (valid_bin_idx(pq as int) || pq == BIN_FULL) - &&& local.page_organization.valid_used_page(page.page_id@, pq as int, list_idx@) - } - }), - { - let is_in_full = page.get_inner_ref(Tracked(&*local)).get_in_full(); - let ghost mut list_idx; - proof { - if is_in_full { - list_idx = local.page_organization.marked_full_is_in(page.page_id@); - //assert(local.page_organization.valid_used_page(page.page_id@, bin as int, list_idx)); - } else { - list_idx = local.page_organization.marked_unfull_is_in(page.page_id@); - /*smallest_bin_fitting_size_size_of_bin(bin as int); +fn page_queue_of(page: PagePtr, Tracked(local): Tracked<&Local>) -> (res: ( + HeapPtr, + usize, + Ghost, +)) + requires + local.wf(), + page.wf(), + page.is_in(*local), + page.is_used_and_primary(*local), + ensures + ({ + let (heap, pq, list_idx) = res; + { + &&& heap.wf() + &&& heap.is_in(*local) + &&& (valid_bin_idx(pq as int) || pq == BIN_FULL) + &&& local.page_organization.valid_used_page(page.page_id@, pq as int, list_idx@) + } + }), +{ + let is_in_full = page.get_inner_ref(Tracked(&*local)).get_in_full(); + let ghost mut list_idx; + proof { + if is_in_full { + list_idx = local.page_organization.marked_full_is_in(page.page_id@); + //assert(local.page_organization.valid_used_page(page.page_id@, bin as int, list_idx)); + } else { + list_idx = local.page_organization.marked_unfull_is_in(page.page_id@); + /*smallest_bin_fitting_size_size_of_bin(bin as int); assert(local.block_size(page.page_id@) == local.page_organization.pages[page.page_id@].page_header_kind.unwrap().get_Normal_1()); assert(bin == smallest_bin_fitting_size( @@ -23990,215 +23894,215 @@ mod page { size_of_bin()); assert(bin == local.page_organization.pages[page.page_id@].page_header_kind.unwrap().get_Normal_0()); assert(local.page_organization.valid_used_page(page.page_id@, bin as int, list_idx));*/ - } - const_facts(); } - let bin = if is_in_full { - BIN_FULL as usize - } else { - bin(page.get_inner_ref(Tracked(&*local)).xblock_size as usize) as usize - }; - let heap = page.get_heap(Tracked(&*local)); - (heap, bin, Ghost(list_idx)) - } - - const MAX_RETIRE_SIZE: u32 = MEDIUM_OBJ_SIZE_MAX as u32; - - pub fn page_retire(page: PagePtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf(), - page.wf(), - page.is_in(*old(local)), - page.is_used_and_primary(*old(local)), - old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, - ensures - local.wf(), - common_preserves(*old(local), *local), - { - let (heap, pq, Ghost(list_idx)) = page_queue_of(page, Tracked(&*local)); - if likely( - page.get_inner_ref(Tracked(&*local)).xblock_size <= MAX_RETIRE_SIZE && !(heap.get_pages( - Tracked(&*local), - )[pq].block_size > MEDIUM_OBJ_SIZE_MAX as usize), - ) { - if heap.get_pages(Tracked(&*local))[pq].last.to_usize() == page.page_ptr.to_usize() - && heap.get_pages(Tracked(&*local))[pq].first.to_usize() == page.page_ptr.to_usize() { - let RETIRE_CYCLES = 8; - page_get_mut_inner!(page, local, inner => { - let xb = inner.xblock_size as u64; - inner.set_retire_expire(1 + (if xb <= SMALL_OBJ_SIZE_MAX { RETIRE_CYCLES } else { RETIRE_CYCLES/4 })); - }); - if pq < 
heap.get_page_retired_min(Tracked(&*local)) { - heap.set_page_retired_min(Tracked(&mut *local), pq); - } - if pq > heap.get_page_retired_max(Tracked(&*local)) { - heap.set_page_retired_max(Tracked(&mut *local), pq); - } - proof { - preserves_mem_chunk_good(*old(local), *local); - } - return ; - } - } - page_free(page, pq, false, Tracked(&mut *local), Ghost(list_idx)); - } - - fn page_free( - page: PagePtr, - pq: usize, - force: bool, - Tracked(local): Tracked<&mut Local>, - Ghost(list_idx): Ghost, - ) - requires - old(local).wf(), - page.wf(), - page.is_in(*old(local)), - page.is_used_and_primary(*old(local)), - old(local).page_organization.valid_used_page(page.page_id@, pq as int, list_idx), - old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, - ensures - local.wf(), - common_preserves(*old(local), *local), - { - page_get_mut_inner!(page, local, inner => { - inner.set_has_aligned(false); - }); - proof { - preserves_mem_chunk_good(*old(local), *local); - } - let heap = page.get_heap(Tracked(&*local)); - page_queue_remove(heap, pq, page, Tracked(&mut *local), Ghost(list_idx), Ghost(arbitrary())); - let tld = heap.get_ref(Tracked(&*local)).tld_ptr; - crate::segment::segment_page_free(page, force, tld, Tracked(&mut *local)); - } - - fn page_to_full( - page: PagePtr, - heap: HeapPtr, - pq: usize, - Tracked(local): Tracked<&mut Local>, - Ghost(list_idx): Ghost, - Ghost(next_id): Ghost, - ) - requires - old(local).wf(), - page.wf(), - page.is_in(*old(local)), - heap.wf(), - heap.is_in(*old(local)), - page.is_used_and_primary(*old(local)), - valid_bin_idx(pq as int), - old(local).page_organization.valid_used_page(page.page_id@, pq as int, list_idx), - ensures - local.wf(), - common_preserves(*old(local), *local), - old(local).page_organization.valid_used_page(next_id, pq as int, list_idx + 1) - ==> local.page_organization.valid_used_page(next_id, pq as int, list_idx), - { - page_queue_enqueue_from( - heap, - BIN_FULL as usize, - pq, - page, - Tracked(&mut *local), - Ghost(list_idx), - Ghost(next_id), - ); - crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); - } - - pub fn page_unfull(page: PagePtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf(), - page.wf(), - page.is_in(*old(local)), - page.is_used_and_primary(*old(local)), - old(local).pages[page.page_id@].inner@.value.unwrap().in_full(), - ensures - local.wf(), - common_preserves(*old(local), *local), - { - let heap = page.get_heap(Tracked(&*local)); - proof { - local.page_organization.marked_full_is_in(page.page_id@); - const_facts(); + const_facts(); + } + let bin = if is_in_full { + BIN_FULL as usize + } else { + bin(page.get_inner_ref(Tracked(&*local)).xblock_size as usize) as usize + }; + let heap = page.get_heap(Tracked(&*local)); + (heap, bin, Ghost(list_idx)) +} + +const MAX_RETIRE_SIZE: u32 = MEDIUM_OBJ_SIZE_MAX as u32; + +pub fn page_retire(page: PagePtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf(), + page.wf(), + page.is_in(*old(local)), + page.is_used_and_primary(*old(local)), + old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + let (heap, pq, Ghost(list_idx)) = page_queue_of(page, Tracked(&*local)); + if likely( + page.get_inner_ref(Tracked(&*local)).xblock_size <= MAX_RETIRE_SIZE && !(heap.get_pages( + Tracked(&*local), + )[pq].block_size > MEDIUM_OBJ_SIZE_MAX as usize), + ) { + if heap.get_pages(Tracked(&*local))[pq].last.to_usize() == page.page_ptr.to_usize() + && 
heap.get_pages(Tracked(&*local))[pq].first.to_usize() == page.page_ptr.to_usize() { + let RETIRE_CYCLES = 8; + page_get_mut_inner!(page, local, inner => { + let xb = inner.xblock_size as u64; + inner.set_retire_expire(1 + (if xb <= SMALL_OBJ_SIZE_MAX { RETIRE_CYCLES } else { RETIRE_CYCLES/4 })); + }); + if pq < heap.get_page_retired_min(Tracked(&*local)) { + heap.set_page_retired_min(Tracked(&mut *local), pq); + } + if pq > heap.get_page_retired_max(Tracked(&*local)) { + heap.set_page_retired_max(Tracked(&mut *local), pq); + } + proof { + preserves_mem_chunk_good(*old(local), *local); + } + return ; } - let pq = bin(page.get_inner_ref(Tracked(&mut *local)).xblock_size as usize); - let ghost list_idx = local.page_organization.marked_full_is_in(page.page_id@); - page_queue_enqueue_from( - heap, - pq as usize, - BIN_FULL as usize, - page, - Tracked(&mut *local), - Ghost(list_idx), - Ghost(arbitrary()), - ); } + page_free(page, pq, false, Tracked(&mut *local), Ghost(list_idx)); +} - fn page_queue_enqueue_from( - heap: HeapPtr, - to: usize, - from: usize, - page: PagePtr, - Tracked(local): Tracked<&mut Local>, - Ghost(list_idx): Ghost, - Ghost(next_id): Ghost, - ) - requires - old(local).wf(), - page.wf(), - page.is_in(*old(local)), - heap.wf(), - heap.is_in(*old(local)), - page.is_used_and_primary(*old(local)), - old(local).page_organization.valid_used_page(page.page_id@, from as int, list_idx), - (valid_bin_idx(from as int) && to == BIN_FULL) || (match old( - local, - ).page_organization.pages[page.page_id@].page_header_kind { - Some(PageHeaderKind::Normal(b, bsize)) => from == BIN_FULL && to == b, - //&& valid_bin_idx(to as int) - //&& bsize == size_of_bin(to as int), - None => false, - }), - ensures - local.wf(), - common_preserves(*old(local), *local), - old(local).page_organization.valid_used_page(next_id, from as int, list_idx + 1) - ==> local.page_organization.valid_used_page(next_id, from as int, list_idx), - page.is_used_and_primary(*local), - { - page_queue_remove(heap, from, page, Tracked(&mut *local), Ghost(list_idx), Ghost(next_id)); - page_queue_push_back( - heap, - to, - page, - Tracked(&mut *local), - Ghost(next_id), - Ghost(from as int), - Ghost(list_idx), - ); +fn page_free( + page: PagePtr, + pq: usize, + force: bool, + Tracked(local): Tracked<&mut Local>, + Ghost(list_idx): Ghost, +) + requires + old(local).wf(), + page.wf(), + page.is_in(*old(local)), + page.is_used_and_primary(*old(local)), + old(local).page_organization.valid_used_page(page.page_id@, pq as int, list_idx), + old(local).pages[page.page_id@].inner@.value.unwrap().used == 0, + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + page_get_mut_inner!(page, local, inner => { + inner.set_has_aligned(false); + }); + proof { + preserves_mem_chunk_good(*old(local), *local); } + let heap = page.get_heap(Tracked(&*local)); + page_queue_remove(heap, pq, page, Tracked(&mut *local), Ghost(list_idx), Ghost(arbitrary())); + let tld = heap.get_ref(Tracked(&*local)).tld_ptr; + crate::segment::segment_page_free(page, force, tld, Tracked(&mut *local)); +} - pub fn page_try_use_delayed_free( - page: PagePtr, - delay: usize, - override_never: bool, - Tracked(local): Tracked<&Local>, - ) -> bool - requires - local.wf(), - page.wf(), - page.is_in(*local), - page.is_used_and_primary(*local), - delay == 0, - !override_never, - { - page.get_ref(Tracked(&*local)).xthread_free.try_use_delayed_free(delay, override_never) +fn page_to_full( + page: PagePtr, + heap: HeapPtr, + pq: usize, + Tracked(local): Tracked<&mut 
Local>, + Ghost(list_idx): Ghost, + Ghost(next_id): Ghost, +) + requires + old(local).wf(), + page.wf(), + page.is_in(*old(local)), + heap.wf(), + heap.is_in(*old(local)), + page.is_used_and_primary(*old(local)), + valid_bin_idx(pq as int), + old(local).page_organization.valid_used_page(page.page_id@, pq as int, list_idx), + ensures + local.wf(), + common_preserves(*old(local), *local), + old(local).page_organization.valid_used_page(next_id, pq as int, list_idx + 1) + ==> local.page_organization.valid_used_page(next_id, pq as int, list_idx), +{ + page_queue_enqueue_from( + heap, + BIN_FULL as usize, + pq, + page, + Tracked(&mut *local), + Ghost(list_idx), + Ghost(next_id), + ); + crate::alloc_generic::page_free_collect(page, false, Tracked(&mut *local)); +} + +pub fn page_unfull(page: PagePtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf(), + page.wf(), + page.is_in(*old(local)), + page.is_used_and_primary(*old(local)), + old(local).pages[page.page_id@].inner@.value.unwrap().in_full(), + ensures + local.wf(), + common_preserves(*old(local), *local), +{ + let heap = page.get_heap(Tracked(&*local)); + proof { + local.page_organization.marked_full_is_in(page.page_id@); + const_facts(); } + let pq = bin(page.get_inner_ref(Tracked(&mut *local)).xblock_size as usize); + let ghost list_idx = local.page_organization.marked_full_is_in(page.page_id@); + page_queue_enqueue_from( + heap, + pq as usize, + BIN_FULL as usize, + page, + Tracked(&mut *local), + Ghost(list_idx), + Ghost(arbitrary()), + ); +} + +fn page_queue_enqueue_from( + heap: HeapPtr, + to: usize, + from: usize, + page: PagePtr, + Tracked(local): Tracked<&mut Local>, + Ghost(list_idx): Ghost, + Ghost(next_id): Ghost, +) + requires + old(local).wf(), + page.wf(), + page.is_in(*old(local)), + heap.wf(), + heap.is_in(*old(local)), + page.is_used_and_primary(*old(local)), + old(local).page_organization.valid_used_page(page.page_id@, from as int, list_idx), + (valid_bin_idx(from as int) && to == BIN_FULL) || (match old( + local, + ).page_organization.pages[page.page_id@].page_header_kind { + Some(PageHeaderKind::Normal(b, bsize)) => from == BIN_FULL && to == b, + //&& valid_bin_idx(to as int) + //&& bsize == size_of_bin(to as int), + None => false, + }), + ensures + local.wf(), + common_preserves(*old(local), *local), + old(local).page_organization.valid_used_page(next_id, from as int, list_idx + 1) + ==> local.page_organization.valid_used_page(next_id, from as int, list_idx), + page.is_used_and_primary(*local), +{ + page_queue_remove(heap, from, page, Tracked(&mut *local), Ghost(list_idx), Ghost(next_id)); + page_queue_push_back( + heap, + to, + page, + Tracked(&mut *local), + Ghost(next_id), + Ghost(from as int), + Ghost(list_idx), + ); +} + +pub fn page_try_use_delayed_free( + page: PagePtr, + delay: usize, + override_never: bool, + Tracked(local): Tracked<&Local>, +) -> bool + requires + local.wf(), + page.wf(), + page.is_in(*local), + page.is_used_and_primary(*local), + delay == 0, + !override_never, +{ + page.get_ref(Tracked(&*local)).xthread_free.try_use_delayed_free(delay, override_never) +} - } // verus! +} // verus! } mod queues { @@ -24228,79 +24132,79 @@ mod queues { verus! 
{ - #[verifier::spinoff_prover] - pub fn page_queue_remove( - heap: HeapPtr, - pq: usize, - page: PagePtr, - Tracked(local): Tracked<&mut Local>, - Ghost(list_idx): Ghost, - Ghost(next_id): Ghost, - ) - requires - old(local).wf(), - page.wf(), - page.is_in(*old(local)), - heap.wf(), - heap.is_in(*old(local)), - page.is_used_and_primary(*old(local)), - //valid_bin_idx(pq as int) || pq == BIN_FULL, - //old(local).page_organization.pages[page.page_id@].page_header_kind == - // Some(PageHeaderKind::Normal(crate::bin_sizes::size_of_bin(pq as int) as int)), - old(local).page_organization.valid_used_page(page.page_id@, pq as int, list_idx), - ensures - local.wf_main(), - common_preserves(*old(local), *local), - page.is_in(*local), - local.page_organization.popped == Popped::Used(page.page_id@, true), - local.page_organization.pages[page.page_id@].page_header_kind == old( - local, - ).page_organization.pages[page.page_id@].page_header_kind, - local.tld_id == old(local).tld_id, - old(local).page_organization.valid_used_page(next_id, pq as int, list_idx + 1) - ==> local.page_organization.valid_used_page(next_id, pq as int, list_idx), - old(local).pages[page.page_id@].inner@.value.unwrap().used - == local.pages[page.page_id@].inner@.value.unwrap().used, - { - let ghost mut next_state; - let ghost page_id = page.page_id@; - proof { - next_state = - PageOrg::take_step::out_of_used_list(local.page_organization, page_id, pq as int, list_idx); - holds_on_present_value(*local, pq as int); - if old(local).page_organization.valid_used_page(next_id, pq as int, list_idx + 1) { - PageOrg::State::preserved_by_out_of_used_list( - local.page_organization, - next_state, - page_id, - pq as int, - list_idx, - next_id, - ); - } +#[verifier::spinoff_prover] +pub fn page_queue_remove( + heap: HeapPtr, + pq: usize, + page: PagePtr, + Tracked(local): Tracked<&mut Local>, + Ghost(list_idx): Ghost, + Ghost(next_id): Ghost, +) + requires + old(local).wf(), + page.wf(), + page.is_in(*old(local)), + heap.wf(), + heap.is_in(*old(local)), + page.is_used_and_primary(*old(local)), + //valid_bin_idx(pq as int) || pq == BIN_FULL, + //old(local).page_organization.pages[page.page_id@].page_header_kind == + // Some(PageHeaderKind::Normal(crate::bin_sizes::size_of_bin(pq as int) as int)), + old(local).page_organization.valid_used_page(page.page_id@, pq as int, list_idx), + ensures + local.wf_main(), + common_preserves(*old(local), *local), + page.is_in(*local), + local.page_organization.popped == Popped::Used(page.page_id@, true), + local.page_organization.pages[page.page_id@].page_header_kind == old( + local, + ).page_organization.pages[page.page_id@].page_header_kind, + local.tld_id == old(local).tld_id, + old(local).page_organization.valid_used_page(next_id, pq as int, list_idx + 1) + ==> local.page_organization.valid_used_page(next_id, pq as int, list_idx), + old(local).pages[page.page_id@].inner@.value.unwrap().used + == local.pages[page.page_id@].inner@.value.unwrap().used, +{ + let ghost mut next_state; + let ghost page_id = page.page_id@; + proof { + next_state = + PageOrg::take_step::out_of_used_list(local.page_organization, page_id, pq as int, list_idx); + holds_on_present_value(*local, pq as int); + if old(local).page_organization.valid_used_page(next_id, pq as int, list_idx + 1) { + PageOrg::State::preserved_by_out_of_used_list( + local.page_organization, + next_state, + page_id, + pq as int, + list_idx, + next_id, + ); } - let prev = page.get_prev(Tracked(&*local)); - let next = page.get_next(Tracked(&*local)); - let ghost 
prev_id = local.page_organization.pages[page_id].dlist_entry.unwrap().prev; - let ghost next_id = local.page_organization.pages[page_id].dlist_entry.unwrap().next; - if prev.to_usize() != 0 { - let prev = PagePtr { page_ptr: prev, page_id: Ghost(prev_id.get_Some_0()) }; - //assert(prev.wf()); - //assert(prev.is_in(*local)); - used_page_get_mut_next!(prev, local, n => { + } + let prev = page.get_prev(Tracked(&*local)); + let next = page.get_next(Tracked(&*local)); + let ghost prev_id = local.page_organization.pages[page_id].dlist_entry.unwrap().prev; + let ghost next_id = local.page_organization.pages[page_id].dlist_entry.unwrap().next; + if prev.to_usize() != 0 { + let prev = PagePtr { page_ptr: prev, page_id: Ghost(prev_id.get_Some_0()) }; + //assert(prev.wf()); + //assert(prev.is_in(*local)); + used_page_get_mut_next!(prev, local, n => { n = next; }); - } - if next.to_usize() != 0 { - let next = PagePtr { page_ptr: next, page_id: Ghost(next_id.get_Some_0()) }; - //assert(next.wf()); - //assert(next.is_in(*local)); - used_page_get_mut_prev!(next, local, p => { + } + if next.to_usize() != 0 { + let next = PagePtr { page_ptr: next, page_id: Ghost(next_id.get_Some_0()) }; + //assert(next.wf()); + //assert(next.is_in(*local)); + used_page_get_mut_prev!(next, local, p => { p = prev; }); - } - let ghost mut old_val; - heap_get_pages!(heap, local, pages => { + } + let ghost mut old_val; + heap_get_pages!(heap, local, pages => { let mut cq = pages[pq]; proof { old_val = cq.first.id(); } @@ -24314,95 +24218,93 @@ mod queues { pages.set(pq, cq); }); - proof { - local.page_organization = next_state; - preserves_mem_chunk_good(*old(local), *local); - //assert(local.wf_basic()); - } - let ghost local_snap = *local; - if prev.to_usize() == 0 { - heap_queue_first_update(heap, pq, Tracked(&mut *local), Ghost(old_val)); - } - let c = heap.get_page_count(Tracked(&*local)); - heap.set_page_count(Tracked(&mut *local), c.wrapping_sub(1)); - // These shouldn't be necessary: - // page->next = NULL; - // page->prev = NULL; - // mi_page_set_in_full(page, false) - proof { + proof { + local.page_organization = next_state; + preserves_mem_chunk_good(*old(local), *local); + //assert(local.wf_basic()); + } + let ghost local_snap = *local; + if prev.to_usize() == 0 { + heap_queue_first_update(heap, pq, Tracked(&mut *local), Ghost(old_val)); + } + let c = heap.get_page_count(Tracked(&*local)); + heap.set_page_count(Tracked(&mut *local), c.wrapping_sub(1)); + // These shouldn't be necessary: + // page->next = NULL; + // page->prev = NULL; + // mi_page_set_in_full(page, false) + proof { + let pfd = local.heap.pages_free_direct@.value.unwrap()@; + let emp = local.page_empty_global@.s.points_to@.pptr; + let pages = local.heap.pages@.value.unwrap()@; + if pq != BIN_FULL { + let opfd = local_snap.heap.pages_free_direct@.value.unwrap()@; let pfd = local.heap.pages_free_direct@.value.unwrap()@; - let emp = local.page_empty_global@.s.points_to@.pptr; let pages = local.heap.pages@.value.unwrap()@; - if pq != BIN_FULL { - let opfd = local_snap.heap.pages_free_direct@.value.unwrap()@; - let pfd = local.heap.pages_free_direct@.value.unwrap()@; - let pages = local.heap.pages@.value.unwrap()@; - let emp = local.page_empty_global@.s.points_to@.pptr; - let i = pfd_lower(pq as int) as int; - let j = pfd_upper(pq as int) as int + 1; - assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp, - ) by { - if i <= 
wsize < j { - idx_in_range_has_bin_size(pq as int, wsize); - //assert(smallest_bin_fitting_size(wsize * INTPTR_SIZE) == pq); - //assert(pages_free_direct_match((pfd[wsize]).id(), - // pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - // emp)); - } else { - //assert(opfd[wsize] == pfd[wsize]); - let sbfs = smallest_bin_fitting_size(wsize * INTPTR_SIZE); - bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); - //assert(0 <= sbfs < BIN_FULL); - idx_out_of_range_has_different_bin_size(pq as int, wsize); - //assert(sbfs != pq); - //assert(pages[sbfs].first == local_snap.heap.pages@.value.unwrap()@[sbfs].first); - //assert(pages[sbfs].first == old(local).heap.pages@.value.unwrap()@[sbfs].first); - /*assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), + let emp = local.page_empty_global@.s.points_to@.pptr; + let i = pfd_lower(pq as int) as int; + let j = pfd_upper(pq as int) as int + 1; + assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( + (#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp, + ) by { + if i <= wsize < j { + idx_in_range_has_bin_size(pq as int, wsize); + //assert(smallest_bin_fitting_size(wsize * INTPTR_SIZE) == pq); + //assert(pages_free_direct_match((pfd[wsize]).id(), + // pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + // emp)); + } else { + //assert(opfd[wsize] == pfd[wsize]); + let sbfs = smallest_bin_fitting_size(wsize * INTPTR_SIZE); + bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); + //assert(0 <= sbfs < BIN_FULL); + idx_out_of_range_has_different_bin_size(pq as int, wsize); + //assert(sbfs != pq); + //assert(pages[sbfs].first == local_snap.heap.pages@.value.unwrap()@[sbfs].first); + //assert(pages[sbfs].first == old(local).heap.pages@.value.unwrap()@[sbfs].first); + /*assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), emp));*/ - } - }//assert(pages_free_direct_is_correct(pfd, pages, emp)); - - } else { - //let old_pfd = old(local).heap.pages_free_direct@.value.unwrap()@; - //let old_pages = old(local).heap.pages@.value.unwrap()@; - //let old_emp = old(local).page_empty_global@.s.points_to@.pptr; - //assert(pages_free_direct_is_correct(old_pfd, old_pages, old_emp)); - let pfd = local.heap.pages_free_direct@.value.unwrap()@; - let pages = local.heap.pages@.value.unwrap()@; - let emp = local.page_empty_global@.s.points_to@.pptr; - //assert(pfd == old_pfd); - //assert(pages == old_pages); - //assert(emp == old_emp); - assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp, - ) by { - //let snap_pages = local_snap.heap.pages@.value.unwrap()@; - //let snap_pages1 = local_snap1.heap.pages@.value.unwrap()@; - //let snap_pages2 = local_snap2.heap.pages@.value.unwrap()@; - //let t = smallest_bin_fitting_size(wsize * INTPTR_SIZE); - bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); - //assert(0 <= t < pages.len()); - //assert(t != BIN_FULL); - //assert(t != pq); - //assert(old_pages[t] == snap_pages[t]); - //assert(snap_pages[t] == pages[t]); - //assert(pages_free_direct_match( - // (#[trigger] old_pfd[wsize]).id(), - // old_pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - // old_emp)); - }//assert(pages_free_direct_is_correct(pfd, pages, emp)); + } + } //assert(pages_free_direct_is_correct(pfd, pages, emp)); - } 
- preserves_mem_chunk_good(local_snap, *local); - /*let org_pages = local.page_organization.pages; + } else { + //let old_pfd = old(local).heap.pages_free_direct@.value.unwrap()@; + //let old_pages = old(local).heap.pages@.value.unwrap()@; + //let old_emp = old(local).page_empty_global@.s.points_to@.pptr; + //assert(pages_free_direct_is_correct(old_pfd, old_pages, old_emp)); + let pfd = local.heap.pages_free_direct@.value.unwrap()@; + let pages = local.heap.pages@.value.unwrap()@; + let emp = local.page_empty_global@.s.points_to@.pptr; + //assert(pfd == old_pfd); + //assert(pages == old_pages); + //assert(emp == old_emp); + assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( + (#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp, + ) by { + //let snap_pages = local_snap.heap.pages@.value.unwrap()@; + //let snap_pages1 = local_snap1.heap.pages@.value.unwrap()@; + //let snap_pages2 = local_snap2.heap.pages@.value.unwrap()@; + //let t = smallest_bin_fitting_size(wsize * INTPTR_SIZE); + bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); + //assert(0 <= t < pages.len()); + //assert(t != BIN_FULL); + //assert(t != pq); + //assert(old_pages[t] == snap_pages[t]); + //assert(snap_pages[t] == pages[t]); + //assert(pages_free_direct_match( + // (#[trigger] old_pfd[wsize]).id(), + // old_pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + // old_emp)); + } //assert(pages_free_direct_is_correct(pfd, pages, emp)); + + } + preserves_mem_chunk_good(local_snap, *local); + /*let org_pages = local.page_organization.pages; assert forall |pid| #[trigger] org_pages.dom().contains(pid) implies page_organization_pages_match_data(org_pages[pid], local.pages[pid], local.psa[pid]) by { @@ -24417,47 +24319,47 @@ mod queues { } } assert(page_organization_pages_match(local.page_organization.pages, local.pages, local.psa));*/ - //assert(local.page_organization_valid()); - //assert(local.wf_main()); - } + //assert(local.page_organization_valid()); + //assert(local.wf_main()); } +} - #[verifier::spinoff_prover] - pub fn page_queue_push(heap: HeapPtr, pq: usize, page: PagePtr, Tracked(local): Tracked<&mut Local>) - requires - old(local).wf_main(), - pq == BIN_FULL || valid_bin_idx(pq as int), - old(local).page_organization.popped == Popped::Used(page.page_id@, true), - (match old(local).page_organization.pages[page.page_id@].page_header_kind.unwrap() { - PageHeaderKind::Normal(b, bsize) => { - (pq == BIN_FULL || pq as int == b) && valid_bin_idx(b as int) && bsize - == crate::bin_sizes::size_of_bin(b) && bsize <= MEDIUM_OBJ_SIZE_MAX - }, - }), - heap.wf(), - heap.is_in(*old(local)), - page.wf(), - ensures - local.wf(), - common_preserves(*old(local), *local), - page.wf(), - page.is_in(*local), - page.is_used_and_primary(*local), - local.pages.index(page.page_id@).inner@.value.unwrap().xblock_size == old( - local, - ).pages.index(page.page_id@).inner@.value.unwrap().xblock_size, - local.tld_id == old(local).tld_id, - { - let ghost mut next_state; - proof { - next_state = PageOrg::take_step::into_used_list(local.page_organization, pq as int); - holds_on_present_value(*local, pq as int); - } - page_get_mut_inner!(page, local, inner => { +#[verifier::spinoff_prover] +pub fn page_queue_push(heap: HeapPtr, pq: usize, page: PagePtr, Tracked(local): Tracked<&mut Local>) + requires + old(local).wf_main(), + pq == BIN_FULL || valid_bin_idx(pq as int), + old(local).page_organization.popped == Popped::Used(page.page_id@, true), + (match 
old(local).page_organization.pages[page.page_id@].page_header_kind.unwrap() { + PageHeaderKind::Normal(b, bsize) => { + (pq == BIN_FULL || pq as int == b) && valid_bin_idx(b as int) && bsize + == crate::bin_sizes::size_of_bin(b) && bsize <= MEDIUM_OBJ_SIZE_MAX + }, + }), + heap.wf(), + heap.is_in(*old(local)), + page.wf(), + ensures + local.wf(), + common_preserves(*old(local), *local), + page.wf(), + page.is_in(*local), + page.is_used_and_primary(*local), + local.pages.index(page.page_id@).inner@.value.unwrap().xblock_size == old( + local, + ).pages.index(page.page_id@).inner@.value.unwrap().xblock_size, + local.tld_id == old(local).tld_id, +{ + let ghost mut next_state; + proof { + next_state = PageOrg::take_step::into_used_list(local.page_organization, pq as int); + holds_on_present_value(*local, pq as int); + } + page_get_mut_inner!(page, local, inner => { inner.set_in_full(pq == BIN_FULL as usize); }); - let first_in_queue; - heap_get_pages!(heap, local, pages => { + let first_in_queue; + heap_get_pages!(heap, local, pages => { let mut cq = pages[pq]; first_in_queue = cq.first; @@ -24468,30 +24370,30 @@ mod queues { pages.set(pq, cq); }); - if first_in_queue.to_usize() != 0 { - let first_in_queue_ptr = PagePtr { - page_ptr: first_in_queue, - page_id: Ghost( - local.page_organization.used_dlist_headers[pq as int].first.get_Some_0(), - ), - }; - //assert(first_in_queue_ptr.wf()); - //assert(first_in_queue_ptr.is_in(*old(local))); - used_page_get_mut_prev!(first_in_queue_ptr, local, p => { + if first_in_queue.to_usize() != 0 { + let first_in_queue_ptr = PagePtr { + page_ptr: first_in_queue, + page_id: Ghost( + local.page_organization.used_dlist_headers[pq as int].first.get_Some_0(), + ), + }; + //assert(first_in_queue_ptr.wf()); + //assert(first_in_queue_ptr.is_in(*old(local))); + used_page_get_mut_prev!(first_in_queue_ptr, local, p => { p = page.page_ptr; }); - } - used_page_get_mut_prev!(page, local, p => { + } + used_page_get_mut_prev!(page, local, p => { p = PPtr::from_usize(0); }); - used_page_get_mut_next!(page, local, n => { + used_page_get_mut_next!(page, local, n => { n = first_in_queue; }); - proof { - local.page_organization = next_state; - preserves_mem_chunk_good(*old(local), *local); - //crate::os_mem_util::mem_chunk_good_preserved(old(local).page_organization, local.page_organization); - /* + proof { + local.page_organization = next_state; + preserves_mem_chunk_good(*old(local), *local); + //crate::os_mem_util::mem_chunk_good_preserved(old(local).page_organization, local.page_organization); + /* let queues = local.heap.pages@.value.unwrap(); let org_queues = local.page_organization.used_dlist_headers; assert forall |i: int| 0 <= i < org_queues.len() implies @@ -24505,15 +24407,185 @@ mod queues { assert(is_page_ptr_opt(queues@[i].first, org_queues[i].first)); } } - */ - //assert(local.wf_basic()); - //assert(local.mem_chunk_good(page.page_id@.segment_id)); - } - let ghost local_snap = *local; - heap_queue_first_update(heap, pq, Tracked(&mut *local), Ghost(first_in_queue.id())); - let c = heap.get_page_count(Tracked(&*local)); - heap.set_page_count(Tracked(&mut *local), c.wrapping_add(1)); - proof { + */ + //assert(local.wf_basic()); + //assert(local.mem_chunk_good(page.page_id@.segment_id)); + } + let ghost local_snap = *local; + heap_queue_first_update(heap, pq, Tracked(&mut *local), Ghost(first_in_queue.id())); + let c = heap.get_page_count(Tracked(&*local)); + heap.set_page_count(Tracked(&mut *local), c.wrapping_add(1)); + proof { + if pq != BIN_FULL { + let opfd = 
local_snap.heap.pages_free_direct@.value.unwrap()@; + let pfd = local.heap.pages_free_direct@.value.unwrap()@; + let pages = local.heap.pages@.value.unwrap()@; + let emp = local.page_empty_global@.s.points_to@.pptr; + let i = pfd_lower(pq as int) as int; + let j = pfd_upper(pq as int) as int + 1; + assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( + (#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp, + ) by { + if i <= wsize < j { + //assert(pfd[wsize].id() != 0); + idx_in_range_has_bin_size(pq as int, wsize); + /*assert(smallest_bin_fitting_size(wsize * INTPTR_SIZE) == pq); + assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp));*/ + } else { + //assert(opfd[wsize] == pfd[wsize]); + let sbfs = smallest_bin_fitting_size(wsize * INTPTR_SIZE); + bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); + //assert(0 <= sbfs < BIN_FULL); + idx_out_of_range_has_different_bin_size(pq as int, wsize); + /*assert(sbfs != pq); + assert(pages[sbfs].first == local_snap.heap.pages@.value.unwrap()@[sbfs].first); + assert(pages[sbfs].first == old(local).heap.pages@.value.unwrap()@[sbfs].first); + assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp));*/ + } + } //assert(pages_free_direct_is_correct(pfd, pages, emp)); + + } else { + //let old_pfd = old(local).heap.pages_free_direct@.value.unwrap()@; + //let old_pages = old(local).heap.pages@.value.unwrap()@; + //let old_emp = old(local).page_empty_global@.s.points_to@.pptr; + //assert(pages_free_direct_is_correct(old_pfd, old_pages, old_emp)); + let pfd = local.heap.pages_free_direct@.value.unwrap()@; + let pages = local.heap.pages@.value.unwrap()@; + let emp = local.page_empty_global@.s.points_to@.pptr; + //assert(pfd == old_pfd); + //assert(pages == old_pages); + //assert(emp == old_emp); + assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( + (#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp, + ) by { + //let snap_pages = local_snap.heap.pages@.value.unwrap()@; + //let snap_pages1 = local_snap1.heap.pages@.value.unwrap()@; + //let snap_pages2 = local_snap2.heap.pages@.value.unwrap()@; + //let t = smallest_bin_fitting_size(wsize * INTPTR_SIZE); + bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); + //assert(0 <= t < pages.len()); + //assert(t != BIN_FULL); + //assert(t != pq); + //assert(old_pages[t] == snap_pages[t]); + //assert(snap_pages[t] == pages[t]); + //assert(pages_free_direct_match( + // (#[trigger] old_pfd[wsize]).id(), + // old_pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + // old_emp)); + } //assert(pages_free_direct_is_correct(pfd, pages, emp)); + + } + preserves_mem_chunk_good(local_snap, *local); + //assert(local.wf_main()); + //assert(local.wf()); + } +} + +#[verifier::spinoff_prover] +pub fn page_queue_push_back( + heap: HeapPtr, + pq: usize, + page: PagePtr, + Tracked(local): Tracked<&mut Local>, + Ghost(other_id): Ghost, + Ghost(other_pq): Ghost, + Ghost(other_list_idx): Ghost, +) + requires + old(local).wf_main(), + pq == BIN_FULL || valid_bin_idx(pq as int), + old(local).page_organization.popped == Popped::Used(page.page_id@, true), + (match old(local).page_organization.pages[page.page_id@].page_header_kind.unwrap() { + PageHeaderKind::Normal(b, bsize) => { + (pq == BIN_FULL || b == 
pq as int) && valid_bin_idx(b as int) && bsize + == crate::bin_sizes::size_of_bin(b) && bsize <= MEDIUM_OBJ_SIZE_MAX + }, + }), + heap.wf(), + heap.is_in(*old(local)), + page.wf(), + ensures + local.wf(), + common_preserves(*old(local), *local), + page.wf(), + page.is_in(*local), + page.is_used_and_primary(*local), + local.pages.index(page.page_id@).inner@.value.unwrap().xblock_size == old( + local, + ).pages.index(page.page_id@).inner@.value.unwrap().xblock_size, + local.tld_id == old(local).tld_id, + old(local).page_organization.valid_used_page(other_id, other_pq, other_list_idx) + ==> local.page_organization.valid_used_page(other_id, other_pq, other_list_idx), +{ + let ghost mut next_state; + proof { + next_state = PageOrg::take_step::into_used_list_back(local.page_organization, pq as int); + holds_on_present_value(*local, pq as int); + if local.page_organization.valid_used_page(other_id, other_pq, other_list_idx) { + PageOrg::State::preserved_by_into_used_list_back( + local.page_organization, + next_state, + pq as int, + other_id, + other_pq, + other_list_idx, + ); + } + } + page_get_mut_inner!(page, local, inner => { + inner.set_in_full(pq == BIN_FULL as usize); + }); + let last_in_queue; + heap_get_pages!(heap, local, pages => { + let mut cq = pages[pq]; + last_in_queue = cq.last; + + cq.last = page.page_ptr; + if last_in_queue.to_usize() == 0 { + cq.first = page.page_ptr; + } + + pages.set(pq, cq); + }); + used_page_get_mut_next!(page, local, n => { + n = PPtr::from_usize(0); + }); + used_page_get_mut_prev!(page, local, p => { + p = last_in_queue; + }); + if last_in_queue.to_usize() != 0 { + let last_in_queue_ptr = PagePtr { + page_ptr: last_in_queue, + page_id: Ghost(local.page_organization.used_dlist_headers[pq as int].last.get_Some_0()), + }; + //assert(last_in_queue_ptr.wf()); + //assert(last_in_queue_ptr.is_in(*old(local))); + used_page_get_mut_next!(last_in_queue_ptr, local, n => { + n = page.page_ptr; + }); + } + proof { + local.page_organization = next_state; + preserves_mem_chunk_good(*old(local), *local); + //assert(local.wf_basic()); + //assert(local.mem_chunk_good(page.page_id@.segment_id)); + } + let ghost local_snap = *local; + if last_in_queue.to_usize() == 0 { + heap_queue_first_update(heap, pq, Tracked(&mut *local), Ghost(0)); + } + let c = heap.get_page_count(Tracked(&*local)); + heap.set_page_count(Tracked(&mut *local), c.wrapping_add(1)); + proof { + if last_in_queue.id() == 0 { if pq != BIN_FULL { let opfd = local_snap.heap.pages_free_direct@.value.unwrap()@; let pfd = local.heap.pages_free_direct@.value.unwrap()@; @@ -24522,8 +24594,7 @@ mod queues { let i = pfd_lower(pq as int) as int; let j = pfd_upper(pq as int) as int + 1; assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), + (#[trigger] pfd[wsize]).id(), pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), emp, ) by { @@ -24531,9 +24602,9 @@ mod queues { //assert(pfd[wsize].id() != 0); idx_in_range_has_bin_size(pq as int, wsize); /*assert(smallest_bin_fitting_size(wsize * INTPTR_SIZE) == pq); - assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp));*/ + assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp));*/ } else { //assert(opfd[wsize] == pfd[wsize]); let sbfs = smallest_bin_fitting_size(wsize * INTPTR_SIZE); @@ -24541,13 +24612,13 @@ mod queues { //assert(0 <= sbfs < 
BIN_FULL); idx_out_of_range_has_different_bin_size(pq as int, wsize); /*assert(sbfs != pq); - assert(pages[sbfs].first == local_snap.heap.pages@.value.unwrap()@[sbfs].first); - assert(pages[sbfs].first == old(local).heap.pages@.value.unwrap()@[sbfs].first); - assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp));*/ + assert(pages[sbfs].first == local_snap.heap.pages@.value.unwrap()@[sbfs].first); + assert(pages[sbfs].first == old(local).heap.pages@.value.unwrap()@[sbfs].first); + assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp));*/ } - }//assert(pages_free_direct_is_correct(pfd, pages, emp)); + } //assert(pages_free_direct_is_correct(pfd, pages, emp)); } else { //let old_pfd = old(local).heap.pages_free_direct@.value.unwrap()@; @@ -24561,8 +24632,7 @@ mod queues { //assert(pages == old_pages); //assert(emp == old_emp); assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), + (#[trigger] pfd[wsize]).id(), pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), emp, ) by { @@ -24580,318 +24650,142 @@ mod queues { // (#[trigger] old_pfd[wsize]).id(), // old_pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), // old_emp)); - }//assert(pages_free_direct_is_correct(pfd, pages, emp)); - - } - preserves_mem_chunk_good(local_snap, *local); - //assert(local.wf_main()); - //assert(local.wf()); - } - } - - #[verifier::spinoff_prover] - pub fn page_queue_push_back( - heap: HeapPtr, - pq: usize, - page: PagePtr, - Tracked(local): Tracked<&mut Local>, - Ghost(other_id): Ghost, - Ghost(other_pq): Ghost, - Ghost(other_list_idx): Ghost, - ) - requires - old(local).wf_main(), - pq == BIN_FULL || valid_bin_idx(pq as int), - old(local).page_organization.popped == Popped::Used(page.page_id@, true), - (match old(local).page_organization.pages[page.page_id@].page_header_kind.unwrap() { - PageHeaderKind::Normal(b, bsize) => { - (pq == BIN_FULL || b == pq as int) && valid_bin_idx(b as int) && bsize - == crate::bin_sizes::size_of_bin(b) && bsize <= MEDIUM_OBJ_SIZE_MAX - }, - }), - heap.wf(), - heap.is_in(*old(local)), - page.wf(), - ensures - local.wf(), - common_preserves(*old(local), *local), - page.wf(), - page.is_in(*local), - page.is_used_and_primary(*local), - local.pages.index(page.page_id@).inner@.value.unwrap().xblock_size == old( - local, - ).pages.index(page.page_id@).inner@.value.unwrap().xblock_size, - local.tld_id == old(local).tld_id, - old(local).page_organization.valid_used_page(other_id, other_pq, other_list_idx) - ==> local.page_organization.valid_used_page(other_id, other_pq, other_list_idx), - { - let ghost mut next_state; - proof { - next_state = PageOrg::take_step::into_used_list_back(local.page_organization, pq as int); - holds_on_present_value(*local, pq as int); - if local.page_organization.valid_used_page(other_id, other_pq, other_list_idx) { - PageOrg::State::preserved_by_into_used_list_back( - local.page_organization, - next_state, - pq as int, - other_id, - other_pq, - other_list_idx, - ); - } - } - page_get_mut_inner!(page, local, inner => { - inner.set_in_full(pq == BIN_FULL as usize); - }); - let last_in_queue; - heap_get_pages!(heap, local, pages => { - let mut cq = pages[pq]; - last_in_queue = cq.last; + } //assert(pages_free_direct_is_correct(pfd, pages, emp)); - cq.last = page.page_ptr; - if last_in_queue.to_usize() == 0 { - cq.first 
= page.page_ptr; } + } else { + let pfd = local.heap.pages_free_direct@.value.unwrap()@; + let pages = local.heap.pages@.value.unwrap()@; + let emp = local.page_empty_global@.s.points_to@.pptr; + assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( + (#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp, + ) by { + bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); + } //assert(pages_free_direct_is_correct(pfd, pages, emp)); - pages.set(pq, cq); - }); - used_page_get_mut_next!(page, local, n => { - n = PPtr::from_usize(0); - }); - used_page_get_mut_prev!(page, local, p => { - p = last_in_queue; - }); - if last_in_queue.to_usize() != 0 { - let last_in_queue_ptr = PagePtr { - page_ptr: last_in_queue, - page_id: Ghost(local.page_organization.used_dlist_headers[pq as int].last.get_Some_0()), - }; - //assert(last_in_queue_ptr.wf()); - //assert(last_in_queue_ptr.is_in(*old(local))); - used_page_get_mut_next!(last_in_queue_ptr, local, n => { - n = page.page_ptr; - }); - } - proof { - local.page_organization = next_state; - preserves_mem_chunk_good(*old(local), *local); - //assert(local.wf_basic()); - //assert(local.mem_chunk_good(page.page_id@.segment_id)); - } - let ghost local_snap = *local; - if last_in_queue.to_usize() == 0 { - heap_queue_first_update(heap, pq, Tracked(&mut *local), Ghost(0)); - } - let c = heap.get_page_count(Tracked(&*local)); - heap.set_page_count(Tracked(&mut *local), c.wrapping_add(1)); - proof { - if last_in_queue.id() == 0 { - if pq != BIN_FULL { - let opfd = local_snap.heap.pages_free_direct@.value.unwrap()@; - let pfd = local.heap.pages_free_direct@.value.unwrap()@; - let pages = local.heap.pages@.value.unwrap()@; - let emp = local.page_empty_global@.s.points_to@.pptr; - let i = pfd_lower(pq as int) as int; - let j = pfd_upper(pq as int) as int + 1; - assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp, - ) by { - if i <= wsize < j { - //assert(pfd[wsize].id() != 0); - idx_in_range_has_bin_size(pq as int, wsize); - /*assert(smallest_bin_fitting_size(wsize * INTPTR_SIZE) == pq); - assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp));*/ - } else { - //assert(opfd[wsize] == pfd[wsize]); - let sbfs = smallest_bin_fitting_size(wsize * INTPTR_SIZE); - bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); - //assert(0 <= sbfs < BIN_FULL); - idx_out_of_range_has_different_bin_size(pq as int, wsize); - /*assert(sbfs != pq); - assert(pages[sbfs].first == local_snap.heap.pages@.value.unwrap()@[sbfs].first); - assert(pages[sbfs].first == old(local).heap.pages@.value.unwrap()@[sbfs].first); - assert(pages_free_direct_match((#[trigger] pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp));*/ - } - }//assert(pages_free_direct_is_correct(pfd, pages, emp)); - - } else { - //let old_pfd = old(local).heap.pages_free_direct@.value.unwrap()@; - //let old_pages = old(local).heap.pages@.value.unwrap()@; - //let old_emp = old(local).page_empty_global@.s.points_to@.pptr; - //assert(pages_free_direct_is_correct(old_pfd, old_pages, old_emp)); - let pfd = local.heap.pages_free_direct@.value.unwrap()@; - let pages = local.heap.pages@.value.unwrap()@; - let emp = local.page_empty_global@.s.points_to@.pptr; - //assert(pfd == old_pfd); - //assert(pages == 
old_pages); - //assert(emp == old_emp); - assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp, - ) by { - //let snap_pages = local_snap.heap.pages@.value.unwrap()@; - //let snap_pages1 = local_snap1.heap.pages@.value.unwrap()@; - //let snap_pages2 = local_snap2.heap.pages@.value.unwrap()@; - //let t = smallest_bin_fitting_size(wsize * INTPTR_SIZE); - bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); - //assert(0 <= t < pages.len()); - //assert(t != BIN_FULL); - //assert(t != pq); - //assert(old_pages[t] == snap_pages[t]); - //assert(snap_pages[t] == pages[t]); - //assert(pages_free_direct_match( - // (#[trigger] old_pfd[wsize]).id(), - // old_pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - // old_emp)); - }//assert(pages_free_direct_is_correct(pfd, pages, emp)); - - } - } else { - let pfd = local.heap.pages_free_direct@.value.unwrap()@; - let pages = local.heap.pages@.value.unwrap()@; - let emp = local.page_empty_global@.s.points_to@.pptr; - assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp, - ) by { - bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); - }//assert(pages_free_direct_is_correct(pfd, pages, emp)); - - } - preserves_mem_chunk_good(local_snap, *local); - //assert(local.wf_main()); - //assert(local.wf()); } + preserves_mem_chunk_good(local_snap, *local); + //assert(local.wf_main()); + //assert(local.wf()); } +} - //spec fn local_direct_no_change_needed(loc1: Local, loc2: Local, pq: int) -> bool { - //} - spec fn local_direct_update(loc1: Local, loc2: Local, i: int, j: int, pq: int) -> bool { - &&& loc2 == Local { heap: loc2.heap, ..loc1 } - &&& loc2.heap == HeapLocalAccess { pages_free_direct: loc2.heap.pages_free_direct, ..loc1.heap } - &&& loc1.heap.pages_free_direct@.pcell == loc2.heap.pages_free_direct@.pcell - &&& loc1.heap.pages_free_direct@.value.is_some() - &&& loc2.heap.pages_free_direct@.value.is_some() - &&& pfd_direct_update( - loc1.heap.pages_free_direct@.value.unwrap()@, - loc2.heap.pages_free_direct@.value.unwrap()@, - i, - j, - loc1.page_empty_global@.s.points_to@.pptr, - loc1.heap.pages@.value.unwrap()@[pq].first.id(), - ) - } +//spec fn local_direct_no_change_needed(loc1: Local, loc2: Local, pq: int) -> bool { +//} +spec fn local_direct_update(loc1: Local, loc2: Local, i: int, j: int, pq: int) -> bool { + &&& loc2 == Local { heap: loc2.heap, ..loc1 } + &&& loc2.heap == HeapLocalAccess { pages_free_direct: loc2.heap.pages_free_direct, ..loc1.heap } + &&& loc1.heap.pages_free_direct@.pcell == loc2.heap.pages_free_direct@.pcell + &&& loc1.heap.pages_free_direct@.value.is_some() + &&& loc2.heap.pages_free_direct@.value.is_some() + &&& pfd_direct_update( + loc1.heap.pages_free_direct@.value.unwrap()@, + loc2.heap.pages_free_direct@.value.unwrap()@, + i, + j, + loc1.page_empty_global@.s.points_to@.pptr, + loc1.heap.pages@.value.unwrap()@[pq].first.id(), + ) +} - spec fn pfd_direct_update( - pfd1: Seq>, - pfd2: Seq>, - i: int, - j: int, - emp: int, - p: int, - ) -> bool { - &&& pfd1.len() == pfd2.len() == PAGES_DIRECT - &&& (forall|k| - #![trigger(pfd1.index(k))] - #![trigger(pfd2.index(k))] - 0 <= k < pfd1.len() && !(i <= k < j) ==> pfd1[k] == pfd2[k]) - &&& (forall|k| - #![trigger pfd2.index(k)] - 0 <= k < pfd2.len() && i <= k < j ==> 
pages_free_direct_match(pfd2[k].id(), p, emp)) - } +spec fn pfd_direct_update( + pfd1: Seq>, + pfd2: Seq>, + i: int, + j: int, + emp: int, + p: int, +) -> bool { + &&& pfd1.len() == pfd2.len() == PAGES_DIRECT + &&& (forall|k| + #![trigger(pfd1.index(k))] + #![trigger(pfd2.index(k))] + 0 <= k < pfd1.len() && !(i <= k < j) ==> pfd1[k] == pfd2[k]) + &&& (forall|k| + #![trigger pfd2.index(k)] + 0 <= k < pfd2.len() && i <= k < j ==> pages_free_direct_match(pfd2[k].id(), p, emp)) +} - proof fn holds_on_present_value(local: Local, pq: int) - requires - local.wf_main(), - valid_bin_idx(pq as int) || pq == BIN_FULL, - ensures - pq != BIN_FULL ==> (forall|k: int| - k < PAGES_DIRECT && pfd_lower(pq as int) <= k <= pfd_upper(pq as int) - ==> pages_free_direct_match( - #[trigger] - local.heap.pages_free_direct@.value.unwrap()@[k].id(), - local.heap.pages@.value.unwrap()@[pq].first.id(), - local.page_empty_global@.s.points_to@.pptr, - )), - { - if pq != BIN_FULL { - assert forall|k: int| - k < PAGES_DIRECT && pfd_lower(pq as int) <= k <= pfd_upper( - pq as int, - ) implies pages_free_direct_match( - #[trigger] - local.heap.pages_free_direct@.value.unwrap()@[k].id(), +proof fn holds_on_present_value(local: Local, pq: int) + requires + local.wf_main(), + valid_bin_idx(pq as int) || pq == BIN_FULL, + ensures + pq != BIN_FULL ==> (forall|k: int| + k < PAGES_DIRECT && pfd_lower(pq as int) <= k <= pfd_upper(pq as int) + ==> pages_free_direct_match( + #[trigger] local.heap.pages_free_direct@.value.unwrap()@[k].id(), local.heap.pages@.value.unwrap()@[pq].first.id(), local.page_empty_global@.s.points_to@.pptr, - ) by { - //assert(0 <= k < local.heap.pages_free_direct@.value.unwrap()@.len()); - idx_in_range_has_bin_size(pq as int, k as int); - } + )), +{ + if pq != BIN_FULL { + assert forall|k: int| + k < PAGES_DIRECT && pfd_lower(pq as int) <= k <= pfd_upper( + pq as int, + ) implies pages_free_direct_match( + #[trigger] local.heap.pages_free_direct@.value.unwrap()@[k].id(), + local.heap.pages@.value.unwrap()@[pq].first.id(), + local.page_empty_global@.s.points_to@.pptr, + ) by { + //assert(0 <= k < local.heap.pages_free_direct@.value.unwrap()@.len()); + idx_in_range_has_bin_size(pq as int, k as int); } } +} - fn heap_queue_first_update( - heap: HeapPtr, - pq: usize, - Tracked(local): Tracked<&mut Local>, - Ghost(old_p): Ghost, - ) - requires - old(local).wf_basic(), - heap.wf(), - heap.is_in(*old(local)), - valid_bin_idx(pq as int) || pq == BIN_FULL, - pq != BIN_FULL ==> (forall|k: int| - k < PAGES_DIRECT && pfd_lower(pq as int) <= k <= pfd_upper(pq as int) - ==> pages_free_direct_match( - #[trigger] - old(local).heap.pages_free_direct@.value.unwrap()@[k].id(), - old_p, - old(local).page_empty_global@.s.points_to@.pptr, - )), - ensures - pq == BIN_FULL ==> *local == *old(local), - pq != BIN_FULL ==> local_direct_update( - *old(local), - *local, - pfd_lower(pq as int) as int, - pfd_upper(pq as int) as int + 1, - pq as int, - ), - { +fn heap_queue_first_update( + heap: HeapPtr, + pq: usize, + Tracked(local): Tracked<&mut Local>, + Ghost(old_p): Ghost, +) + requires + old(local).wf_basic(), + heap.wf(), + heap.is_in(*old(local)), + valid_bin_idx(pq as int) || pq == BIN_FULL, + pq != BIN_FULL ==> (forall|k: int| + k < PAGES_DIRECT && pfd_lower(pq as int) <= k <= pfd_upper(pq as int) + ==> pages_free_direct_match( + #[trigger] old(local).heap.pages_free_direct@.value.unwrap()@[k].id(), + old_p, + old(local).page_empty_global@.s.points_to@.pptr, + )), + ensures + pq == BIN_FULL ==> *local == *old(local), + pq != 
BIN_FULL ==> local_direct_update( + *old(local), + *local, + pfd_lower(pq as int) as int, + pfd_upper(pq as int) as int + 1, + pq as int, + ), +{ + proof { + const_facts(); + } + let size = heap.get_pages(Tracked(&*local))[pq].block_size; + if size > SMALL_SIZE_MAX { proof { - const_facts(); - } - let size = heap.get_pages(Tracked(&*local))[pq].block_size; - if size > SMALL_SIZE_MAX { - proof { - if pq != BIN_FULL { - out_of_small_range(pq as int); - assert(pfd_lower(pq as int) >= PAGES_DIRECT); - } + if pq != BIN_FULL { + out_of_small_range(pq as int); + assert(pfd_lower(pq as int) >= PAGES_DIRECT); } - return ; } - assert(pq != BIN_FULL); - let mut page_ptr = heap.get_pages(Tracked(&*local))[pq].first; - if page_ptr.to_usize() == 0 { - let (_page, Tracked(emp)) = heap.get_page_empty(Tracked(&*local)); - page_ptr = _page; - } - let idx = size / 8; - if heap.get_pages_free_direct(Tracked(&*local))[idx].to_usize() == page_ptr.to_usize() { - /*proof { + return ; + } + assert(pq != BIN_FULL); + let mut page_ptr = heap.get_pages(Tracked(&*local))[pq].first; + if page_ptr.to_usize() == 0 { + let (_page, Tracked(emp)) = heap.get_page_empty(Tracked(&*local)); + page_ptr = _page; + } + let idx = size / 8; + if heap.get_pages_free_direct(Tracked(&*local))[idx].to_usize() == page_ptr.to_usize() { + /*proof { let i = pfd_lower(pq as int) as int; let j = pfd_upper(pq as int) as int + 1; assert(idx == j - 1); @@ -24917,14 +24811,14 @@ mod queues { } assert(local_direct_update(loc1, loc2, i, j, pq)); }*/ - return ; - } - let start = if idx <= 1 { - 0 - } else { - let b = bin(size); - let prev = pq - 1; - /* + return ; + } + let start = if idx <= 1 { + 0 + } else { + let b = bin(size); + let prev = pq - 1; + /* // for large minimal alignment, need to do something here loop invariant @@ -24939,59 +24833,59 @@ mod queues { } prev = prev - 1; }*/ - let prev_block_size = heap.get_pages(Tracked(&*local))[prev].block_size; - proof { - const_facts(); - if prev != 0 { - size_of_bin_bounds_not_huge(prev as int); - assert(valid_bin_idx(prev as int)); - assert(prev_block_size == size_of_bin(prev as int)); - } - } - let s = 1 + prev_block_size / 8; - s//let t = if s > idx { idx } else { s }; - //t - - }; + let prev_block_size = heap.get_pages(Tracked(&*local))[prev].block_size; proof { - if idx <= 1 { - size_le_8_implies_idx_eq_1(pq as int); - assert(pq == 1); - assert(start == pfd_lower(pq as int)); - } else { - size_gt_8_implies_idx_gt_1(pq as int); - assert(pq > 1); - assert(start == pfd_lower(pq as int)); + const_facts(); + if prev != 0 { + size_of_bin_bounds_not_huge(prev as int); + assert(valid_bin_idx(prev as int)); + assert(prev_block_size == size_of_bin(prev as int)); } - assert(idx == pfd_upper(pq as int)); - pfd_lower_le_upper(pq as int); - assert(start <= idx); } - let mut sz = start; - while sz <= idx - invariant - local.wf_basic(), - heap.wf(), - heap.is_in(*local), - start <= sz <= idx + 1, - idx < PAGES_DIRECT, - local_direct_update(*old(local), *local, start as int, sz as int, pq as int), - page_ptr.id() != 0, - pages_free_direct_match( - page_ptr.id(), - old(local).heap.pages@.value.unwrap()@[pq as int].first.id(), - local.page_empty_global@.s.points_to@.pptr, - ), - { - let ghost prev_local = *local; - heap_get_pages_free_direct!(heap, local, pages_free_direct => { + let s = 1 + prev_block_size / 8; + s //let t = if s > idx { idx } else { s }; + //t + + }; + proof { + if idx <= 1 { + size_le_8_implies_idx_eq_1(pq as int); + assert(pq == 1); + assert(start == pfd_lower(pq as int)); + } else { + 
size_gt_8_implies_idx_gt_1(pq as int); + assert(pq > 1); + assert(start == pfd_lower(pq as int)); + } + assert(idx == pfd_upper(pq as int)); + pfd_lower_le_upper(pq as int); + assert(start <= idx); + } + let mut sz = start; + while sz <= idx + invariant + local.wf_basic(), + heap.wf(), + heap.is_in(*local), + start <= sz <= idx + 1, + idx < PAGES_DIRECT, + local_direct_update(*old(local), *local, start as int, sz as int, pq as int), + page_ptr.id() != 0, + pages_free_direct_match( + page_ptr.id(), + old(local).heap.pages@.value.unwrap()@[pq as int].first.id(), + local.page_empty_global@.s.points_to@.pptr, + ), + { + let ghost prev_local = *local; + heap_get_pages_free_direct!(heap, local, pages_free_direct => { pages_free_direct.set(sz, page_ptr); }); - sz += 1; - } + sz += 1; } +} - } // verus! +} // verus! } mod init { @@ -25025,160 +24919,159 @@ mod init { verus! { - pub tracked struct Global { - pub tracked instance: Mim::Instance, - pub tracked my_inst: Mim::my_inst, - } +pub tracked struct Global { + pub tracked instance: Mim::Instance, + pub tracked my_inst: Mim::my_inst, +} - impl Global { - pub closed spec fn wf(&self) -> bool { - self.my_inst@.instance == self.instance && self.my_inst@.value == self.instance - } +impl Global { + pub closed spec fn wf(&self) -> bool { + self.my_inst@.instance == self.instance && self.my_inst@.value == self.instance } +} - type RightToUseThread = Mim::right_to_use_thread; +type RightToUseThread = Mim::right_to_use_thread; - pub open spec fn wf_right_to_use_thread( - global: Global, - right: RightToUseThread, - tid: ThreadId, - ) -> bool { - right@.instance == global.instance && right@.key == tid - } +pub open spec fn wf_right_to_use_thread( + global: Global, + right: RightToUseThread, + tid: ThreadId, +) -> bool { + right@.instance == global.instance && right@.key == tid +} - /* +/* impl RightToUseThread { pub open spec fn wf(tid: ThreadId) { true } // TODO } */ - //impl Copy for Global { } - pub proof fn global_init() -> (tracked res: ( - Global, - Map, - )) // $line_count$Trusted$ - ensures // $line_count$Trusted$ - - res.0.wf(), // $line_count$Trusted$ - forall|tid: ThreadId| - #[trigger] - res.1.dom().contains(tid) // $line_count$Trusted$ - && wf_right_to_use_thread( - res.0, - res.1[tid], - tid, - ) // $line_count$Trusted$ - , - { - let tracked ( - Tracked(instance), - Tracked(right_to_set_inst), - _, - _, - Tracked(rights), - _, - _, - _, - _, - _, - _, - _, - _, - ) = Mim::Instance::initialize( - Map::tracked_empty(), - Map::tracked_empty(), - Map::tracked_empty(), - Map::tracked_empty(), - Map::tracked_empty(), - Map::tracked_empty(), - ); - let tracked my_inst = instance.set_inst(instance, right_to_set_inst.tracked_unwrap()); - (Global { instance, my_inst }, rights) - } - - pub fn heap_init( - Tracked(global): Tracked, // $line_count$Trusted$ - Tracked(right): Tracked, // $line_count$Trusted$ - Tracked(cur_thread): Tracked // $line_count$Trusted$ +//impl Copy for Global { } +pub proof fn global_init() -> (tracked res: ( + Global, + Map, +)) // $line_count$Trusted$ + ensures // $line_count$Trusted$ + + res.0.wf(), // $line_count$Trusted$ + forall|tid: ThreadId| #[trigger] + res.1.dom().contains(tid) // $line_count$Trusted$ + && wf_right_to_use_thread( + res.0, + res.1[tid], + tid, + ) // $line_count$Trusted$ , - ) -> (res: (HeapPtr, Tracked>)) // $line_count$Trusted$ - requires - wf_right_to_use_thread(global, right, cur_thread@), // $line_count$Trusted$ - global.wf(), // $line_count$Trusted$ +{ + let tracked ( + Tracked(instance), + 
Tracked(right_to_set_inst), + _, + _, + Tracked(rights), + _, + _, + _, + _, + _, + _, + _, + _, + ) = Mim::Instance::initialize( + Map::tracked_empty(), + Map::tracked_empty(), + Map::tracked_empty(), + Map::tracked_empty(), + Map::tracked_empty(), + Map::tracked_empty(), + ); + let tracked my_inst = instance.set_inst(instance, right_to_set_inst.tracked_unwrap()); + (Global { instance, my_inst }, rights) +} - ensures - ({ - let (heap, local_opt) = res; - { // $line_count$Trusted$ - heap.heap_ptr.id() != 0 ==> // $line_count$Trusted$ - local_opt@.is_some() // $line_count$Trusted$ - && local_opt@.unwrap().wf() // $line_count$Trusted$ - && heap.wf() // $line_count$Trusted$ - && heap.is_in(local_opt@.unwrap()) // $line_count$Trusted$ +pub fn heap_init( + Tracked(global): Tracked, // $line_count$Trusted$ + Tracked(right): Tracked, // $line_count$Trusted$ + Tracked(cur_thread): Tracked // $line_count$Trusted$ + , +) -> (res: (HeapPtr, Tracked>)) // $line_count$Trusted$ + requires + wf_right_to_use_thread(global, right, cur_thread@), // $line_count$Trusted$ + global.wf(), // $line_count$Trusted$ - } - }) // $line_count$Trusted$ - , + ensures + ({ + let (heap, local_opt) = res; + { // $line_count$Trusted$ + heap.heap_ptr.id() != 0 ==> // $line_count$Trusted$ + local_opt@.is_some() // $line_count$Trusted$ + && local_opt@.unwrap().wf() // $line_count$Trusted$ + && heap.wf() // $line_count$Trusted$ + && heap.is_in(local_opt@.unwrap()) // $line_count$Trusted$ + + } + }) // $line_count$Trusted$ + , +{ + increment_thread_count(); + // TODO use a cache for thread data + let (addr, Tracked(mem)) = thread_data_alloc(); + if addr == 0 { + return ( + HeapPtr { heap_ptr: PPtr::from_usize(0), heap_id: Ghost(arbitrary()) }, + Tracked(None), + ); + } + proof { + const_facts(); + assert(SIZEOF_HEAP == vstd::layout::size_of::()); + assert(SIZEOF_TLD == vstd::layout::size_of::()); + assert(addr as int % vstd::layout::align_of::() as int == 0); + assert((addr + SIZEOF_HEAP) as int % vstd::layout::align_of::() as int == 0); + } + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + let tracked points_to_heap_raw = mem.take_points_to_range(addr as int, SIZEOF_HEAP as int); + let tracked points_to_tld_raw = mem.take_points_to_range(addr + SIZEOF_HEAP, SIZEOF_TLD as int); + let tracked mut points_to_heap = points_to_heap_raw.into_typed(addr as int); + let tracked mut points_to_tld = points_to_tld_raw.into_typed(addr + SIZEOF_HEAP); + let heap_ptr = PPtr::::from_usize(addr); + let tld_ptr = PPtr::::from_usize(addr + SIZEOF_HEAP); + let tracked (_, _, Tracked(uniq_reservation_tok)) = global.instance.reserve_uniq_identifier(); + let heap = HeapPtr { + heap_ptr, + heap_id: Ghost(HeapId { id: heap_ptr.id() as nat, uniq: uniq_reservation_tok@.key.uniq }), + }; + let tld = TldPtr { tld_ptr, tld_id: Ghost(TldId { id: tld_ptr.id() as nat }) }; + let page_empty_stuff = init_empty_page_ptr(); + let EmptyPageStuff { ptr: page_empty_ptr, pfa: Tracked(page_empty_ptr_access) } = + page_empty_stuff; + let mut pages_free_direct = pages_free_direct_tmp(); + let mut pages = pages_tmp(); + let mut span_queue_headers = span_queue_headers_tmp(); + let mut i = 0; + while i < PAGES_DIRECT + invariant + 0 <= i <= PAGES_DIRECT, + forall|j: int| 0 <= j < i ==> pages_free_direct[j] == page_empty_ptr, { - increment_thread_count(); - // TODO use a cache for thread data - let (addr, Tracked(mem)) = thread_data_alloc(); - if addr == 0 { - return ( - HeapPtr { heap_ptr: 
PPtr::from_usize(0), heap_id: Ghost(arbitrary()) }, - Tracked(None), - ); - } - proof { - const_facts(); - assert(SIZEOF_HEAP == vstd::layout::size_of::()); - assert(SIZEOF_TLD == vstd::layout::size_of::()); - assert(addr as int % vstd::layout::align_of::() as int == 0); - assert((addr + SIZEOF_HEAP) as int % vstd::layout::align_of::() as int == 0); - } - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - let tracked points_to_heap_raw = mem.take_points_to_range(addr as int, SIZEOF_HEAP as int); - let tracked points_to_tld_raw = mem.take_points_to_range(addr + SIZEOF_HEAP, SIZEOF_TLD as int); - let tracked mut points_to_heap = points_to_heap_raw.into_typed(addr as int); - let tracked mut points_to_tld = points_to_tld_raw.into_typed(addr + SIZEOF_HEAP); - let heap_ptr = PPtr::::from_usize(addr); - let tld_ptr = PPtr::::from_usize(addr + SIZEOF_HEAP); - let tracked (_, _, Tracked(uniq_reservation_tok)) = global.instance.reserve_uniq_identifier(); - let heap = HeapPtr { - heap_ptr, - heap_id: Ghost(HeapId { id: heap_ptr.id() as nat, uniq: uniq_reservation_tok@.key.uniq }), - }; - let tld = TldPtr { tld_ptr, tld_id: Ghost(TldId { id: tld_ptr.id() as nat }) }; - let page_empty_stuff = init_empty_page_ptr(); - let EmptyPageStuff { ptr: page_empty_ptr, pfa: Tracked(page_empty_ptr_access) } = - page_empty_stuff; - let mut pages_free_direct = pages_free_direct_tmp(); - let mut pages = pages_tmp(); - let mut span_queue_headers = span_queue_headers_tmp(); - let mut i = 0; - while i < PAGES_DIRECT - invariant - 0 <= i <= PAGES_DIRECT, - forall|j: int| 0 <= j < i ==> pages_free_direct[j] == page_empty_ptr, - { - pages_free_direct.set(i, page_empty_ptr); - i = i + 1; - } - let mut i = 0; - while i < SEGMENT_BIN_MAX + 1 - invariant - 0 <= i <= SEGMENT_BIN_MAX + 1, - forall|j: int| - 0 <= j < i ==> (#[trigger] - span_queue_headers[j]).first.id() == 0 && span_queue_headers[j].last.id() == 0, - { - span_queue_headers.set( - i, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - ); - i = i + 1; - }/*let mut i = 0; + pages_free_direct.set(i, page_empty_ptr); + i = i + 1; + } + let mut i = 0; + while i < SEGMENT_BIN_MAX + 1 + invariant + 0 <= i <= SEGMENT_BIN_MAX + 1, + forall|j: int| + 0 <= j < i ==> (#[trigger] span_queue_headers[j]).first.id() == 0 + && span_queue_headers[j].last.id() == 0, + { + span_queue_headers.set( + i, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + ); + i = i + 1; + }/*let mut i = 0; while i < BIN_FULL + 1 invariant 0 <= i <= BIN_FULL + 1 pages.len() == i, @@ -25193,746 +25086,743 @@ mod init { }); }*/ - let (pages_free_direct_pcell, Tracked(pages_free_direct_pointsto)) = PCell::new( - pages_free_direct, - ); - let (pages_pcell, Tracked(pages_pointsto)) = PCell::new(pages); - let (page_count_pcell, Tracked(page_count_pointsto)) = PCell::new(0); - let (page_retired_min_pcell, Tracked(page_retired_min_pointsto)) = PCell::new(0); - let (page_retired_max_pcell, Tracked(page_retired_max_pointsto)) = PCell::new(0); - let (thread_id, Tracked(is_thread)) = crate::thread::thread_id(); - proof { - is_thread.agrees(cur_thread); - } - heap_ptr.put( - Tracked(&mut points_to_heap), - Heap { - tld_ptr: tld, - pages_free_direct: pages_free_direct_pcell, - pages: pages_pcell, - thread_delayed_free: ThreadLLSimple::empty( - Ghost(global.instance), - Ghost(heap.heap_id@), - ), - thread_id, - arena_id: 0, - page_count: page_count_pcell, - page_retired_min: 
page_retired_min_pcell, - page_retired_max: page_retired_max_pcell, - no_reclaim: false, - page_empty_ptr, - }, - ); - tld_ptr.put( - Tracked(&mut points_to_tld), - Tld { - heap_backing: heap_ptr, - segments: SegmentsTld { - span_queue_headers, - count: 0, - peak_count: 0, - current_size: 0, - peak_size: 0, - }, - }, - ); - let tracked heap_shared_access = HeapSharedAccess { points_to: points_to_heap }; - assert(global.instance == right@.instance); - assert(right@.key == thread_id); - let tracked (Tracked(thread_token), Tracked(checked_token)) = - global.instance.create_thread_mk_tokens( + let (pages_free_direct_pcell, Tracked(pages_free_direct_pointsto)) = PCell::new( + pages_free_direct, + ); + let (pages_pcell, Tracked(pages_pointsto)) = PCell::new(pages); + let (page_count_pcell, Tracked(page_count_pointsto)) = PCell::new(0); + let (page_retired_min_pcell, Tracked(page_retired_min_pointsto)) = PCell::new(0); + let (page_retired_max_pcell, Tracked(page_retired_max_pointsto)) = PCell::new(0); + let (thread_id, Tracked(is_thread)) = crate::thread::thread_id(); + proof { + is_thread.agrees(cur_thread); + } + heap_ptr.put( + Tracked(&mut points_to_heap), + Heap { + tld_ptr: tld, + pages_free_direct: pages_free_direct_pcell, + pages: pages_pcell, + thread_delayed_free: ThreadLLSimple::empty( + Ghost(global.instance), + Ghost(heap.heap_id@), + ), thread_id, - ThreadState { - heap_id: heap.heap_id@, - heap: HeapState { shared_access: heap_shared_access }, - segments: Map::empty(), - pages: Map::empty(), + arena_id: 0, + page_count: page_count_pcell, + page_retired_min: page_retired_min_pcell, + page_retired_max: page_retired_max_pcell, + no_reclaim: false, + page_empty_ptr, + }, + ); + tld_ptr.put( + Tracked(&mut points_to_tld), + Tld { + heap_backing: heap_ptr, + segments: SegmentsTld { + span_queue_headers, + count: 0, + peak_count: 0, + current_size: 0, + peak_size: 0, }, - &global.my_inst, - right, - heap_shared_access, - uniq_reservation_tok, - ); - let ghost page_organization = PageOrg::take_step::initialize(); - let tracked my_inst = global.my_inst.clone(); - let tracked local = Local { - thread_id, - my_inst, - is_thread, - instance: global.instance, - thread_token, - checked_token, + }, + ); + let tracked heap_shared_access = HeapSharedAccess { points_to: points_to_heap }; + assert(global.instance == right@.instance); + assert(right@.key == thread_id); + let tracked (Tracked(thread_token), Tracked(checked_token)) = + global.instance.create_thread_mk_tokens( + thread_id, + ThreadState { heap_id: heap.heap_id@, - heap: HeapLocalAccess { - pages_free_direct: pages_free_direct_pointsto, - pages: pages_pointsto, - page_count: page_count_pointsto, - page_retired_min: page_retired_min_pointsto, - page_retired_max: page_retired_max_pointsto, - }, - tld_id: tld.tld_id@, - tld: points_to_tld, - segments: Map::tracked_empty(), - pages: Map::tracked_empty(), - psa: Map::empty(), - unused_pages: Map::tracked_empty(), - page_organization, - page_empty_global: page_empty_ptr_access, - }; - proof { - let emp = local.page_empty_global@.s.points_to@.pptr; - let pfd = local.heap.pages_free_direct@.value.unwrap()@; - let pages = local.heap.pages@.value.unwrap()@; - assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( - (#[trigger] - pfd[wsize]).id(), - pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), - emp, - ) by { - bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); - //assert(0 <= smallest_bin_fitting_size(wsize * INTPTR_SIZE)); - 
//assert(smallest_bin_fitting_size(wsize * INTPTR_SIZE) < pages.len()); - } - assert(pages_free_direct_is_correct( - local.heap.pages_free_direct@.value.unwrap()@, - local.heap.pages@.value.unwrap()@, - emp, - )); - assert(local.heap.wf_basic( - local.heap_id, - local.thread_token@.value.heap, - local.tld_id, - local.instance, - )); - assert(local.heap.wf( - local.heap_id, - local.thread_token@.value.heap, - local.tld_id, - local.instance, - local.page_empty_global@.s.points_to@.pptr, - )); - assert(local.wf_main()); - assert(local.wf()); - } - (heap, Tracked(Some(local))) - } - - impl PageQueue { - #[inline] - fn empty(wsize: usize) -> (pq: PageQueue) - requires - wsize < 0x1_0000_0000_0000, - ensures - pq.first.id() == 0, - pq.last.id() == 0, - pq.block_size == wsize * INTPTR_SIZE, - { - assert(INTPTR_SIZE as usize == 8); - PageQueue { - first: PPtr::from_usize(0), - last: PPtr::from_usize(0), - block_size: wsize * INTPTR_SIZE as usize, - } - } + heap: HeapState { shared_access: heap_shared_access }, + segments: Map::empty(), + pages: Map::empty(), + }, + &global.my_inst, + right, + heap_shared_access, + uniq_reservation_tok, + ); + let ghost page_organization = PageOrg::take_step::initialize(); + let tracked my_inst = global.my_inst.clone(); + let tracked local = Local { + thread_id, + my_inst, + is_thread, + instance: global.instance, + thread_token, + checked_token, + heap_id: heap.heap_id@, + heap: HeapLocalAccess { + pages_free_direct: pages_free_direct_pointsto, + pages: pages_pointsto, + page_count: page_count_pointsto, + page_retired_min: page_retired_min_pointsto, + page_retired_max: page_retired_max_pointsto, + }, + tld_id: tld.tld_id@, + tld: points_to_tld, + segments: Map::tracked_empty(), + pages: Map::tracked_empty(), + psa: Map::empty(), + unused_pages: Map::tracked_empty(), + page_organization, + page_empty_global: page_empty_ptr_access, + }; + proof { + let emp = local.page_empty_global@.s.points_to@.pptr; + let pfd = local.heap.pages_free_direct@.value.unwrap()@; + let pages = local.heap.pages@.value.unwrap()@; + assert forall|wsize| 0 <= wsize < pfd.len() implies pages_free_direct_match( + (#[trigger] pfd[wsize]).id(), + pages[smallest_bin_fitting_size(wsize * INTPTR_SIZE)].first.id(), + emp, + ) by { + bounds_for_smallest_bin_fitting_size(wsize * INTPTR_SIZE); + //assert(0 <= smallest_bin_fitting_size(wsize * INTPTR_SIZE)); + //assert(smallest_bin_fitting_size(wsize * INTPTR_SIZE) < pages.len()); + } + assert(pages_free_direct_is_correct( + local.heap.pages_free_direct@.value.unwrap()@, + local.heap.pages@.value.unwrap()@, + emp, + )); + assert(local.heap.wf_basic( + local.heap_id, + local.thread_token@.value.heap, + local.tld_id, + local.instance, + )); + assert(local.heap.wf( + local.heap_id, + local.thread_token@.value.heap, + local.tld_id, + local.instance, + local.page_empty_global@.s.points_to@.pptr, + )); + assert(local.wf_main()); + assert(local.wf()); } + (heap, Tracked(Some(local))) +} +impl PageQueue { #[inline] - fn pages_tmp() -> (pages: [PageQueue; 75]) + fn empty(wsize: usize) -> (pq: PageQueue) + requires + wsize < 0x1_0000_0000_0000, ensures - pages@.len() == BIN_FULL + 1, - forall|p| - 0 <= p < pages@.len() ==> (#[trigger] - pages[p]).first.id() == 0 && pages[p].last.id() == 0 && (valid_bin_idx(p) - ==> pages[p].block_size == size_of_bin(p)), - pages[0].block_size == 8, - pages[BIN_FULL as int].block_size == 8 * (524288 - + 2), //8 * (MEDIUM_OBJ_WSIZE_MAX + 2), + pq.first.id() == 0, + pq.last.id() == 0, + pq.block_size == wsize * INTPTR_SIZE, { - 
proof { - const_facts(); + assert(INTPTR_SIZE as usize == 8); + PageQueue { + first: PPtr::from_usize(0), + last: PPtr::from_usize(0), + block_size: wsize * INTPTR_SIZE as usize, } - let pages = [ - PageQueue::empty(1), - PageQueue::empty(1), - PageQueue::empty(2), - PageQueue::empty(3), - PageQueue::empty(4), - PageQueue::empty(5), - PageQueue::empty(6), - PageQueue::empty(7), - PageQueue::empty(8), - PageQueue::empty(10), - PageQueue::empty(12), - PageQueue::empty(14), - PageQueue::empty(16), - PageQueue::empty(20), - PageQueue::empty(24), - PageQueue::empty(28), - PageQueue::empty(32), - PageQueue::empty(40), - PageQueue::empty(48), - PageQueue::empty(56), - PageQueue::empty(64), - PageQueue::empty(80), - PageQueue::empty(96), - PageQueue::empty(112), - PageQueue::empty(128), - PageQueue::empty(160), - PageQueue::empty(192), - PageQueue::empty(224), - PageQueue::empty(256), - PageQueue::empty(320), - PageQueue::empty(384), - PageQueue::empty(448), - PageQueue::empty(512), - PageQueue::empty(640), - PageQueue::empty(768), - PageQueue::empty(896), - PageQueue::empty(1024), - PageQueue::empty(1280), - PageQueue::empty(1536), - PageQueue::empty(1792), - PageQueue::empty(2048), - PageQueue::empty(2560), - PageQueue::empty(3072), - PageQueue::empty(3584), - PageQueue::empty(4096), - PageQueue::empty(5120), - PageQueue::empty(6144), - PageQueue::empty(7168), - PageQueue::empty(8192), - PageQueue::empty(10240), - PageQueue::empty(12288), - PageQueue::empty(14336), - PageQueue::empty(16384), - PageQueue::empty(20480), - PageQueue::empty(24576), - PageQueue::empty(28672), - PageQueue::empty(32768), - PageQueue::empty(40960), - PageQueue::empty(49152), - PageQueue::empty(57344), - PageQueue::empty(65536), - PageQueue::empty(81920), - PageQueue::empty(98304), - PageQueue::empty(114688), - PageQueue::empty(131072), - PageQueue::empty(163840), - PageQueue::empty(196608), - PageQueue::empty(229376), - PageQueue::empty(262144), - PageQueue::empty(327680), - PageQueue::empty(393216), - PageQueue::empty(458752), - PageQueue::empty(524288), - //PageQueue::empty(MEDIUM_OBJ_WSIZE_MAX as usize + 1), - //PageQueue::empty(MEDIUM_OBJ_WSIZE_MAX as usize + 2), - PageQueue::empty(524288 + 1), - PageQueue::empty(524288 + 2), - ]; - proof { - assert forall|p| - 0 <= p < pages@.len() ==> (#[trigger] - pages[p]).first.id() == 0 && pages[p].last.id() == 0 && (valid_bin_idx(p) - ==> pages[p].block_size == size_of_bin(p)) by { - if valid_bin_idx(p) { - reveal(size_of_bin); - if p <= 1 { - assert(p == 1); - assert(size_of_bin(1) == 8) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 2 { - assert(p == 2); - assert(size_of_bin(2) == 16) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 3 { - assert(p == 3); - assert(size_of_bin(3) == 24) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 4 { - assert(p == 4); - assert(size_of_bin(4) == 32) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 5 { - assert(p == 5); - assert(size_of_bin(5) == 40) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 6 { - assert(p == 6); - assert(size_of_bin(6) == 48) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 7 { - assert(p == 7); - assert(size_of_bin(7) == 56) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 8 { - assert(p == 8); - assert(size_of_bin(8) == 64) by (compute_only); - 
assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 9 { - assert(p == 9); - assert(size_of_bin(9) == 80) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 10 { - assert(p == 10); - assert(size_of_bin(10) == 96) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 11 { - assert(p == 11); - assert(size_of_bin(11) == 112) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 12 { - assert(p == 12); - assert(size_of_bin(12) == 128) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 13 { - assert(p == 13); - assert(size_of_bin(13) == 160) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 14 { - assert(p == 14); - assert(size_of_bin(14) == 192) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 15 { - assert(p == 15); - assert(size_of_bin(15) == 224) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 16 { - assert(p == 16); - assert(size_of_bin(16) == 256) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 17 { - assert(p == 17); - assert(size_of_bin(17) == 320) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 18 { - assert(p == 18); - assert(size_of_bin(18) == 384) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 19 { - assert(p == 19); - assert(size_of_bin(19) == 448) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 20 { - assert(p == 20); - assert(size_of_bin(20) == 512) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 21 { - assert(p == 21); - assert(size_of_bin(21) == 640) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 22 { - assert(p == 22); - assert(size_of_bin(22) == 768) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 23 { - assert(p == 23); - assert(size_of_bin(23) == 896) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 24 { - assert(p == 24); - assert(size_of_bin(24) == 1024) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 25 { - assert(p == 25); - assert(size_of_bin(25) == 1280) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 26 { - assert(p == 26); - assert(size_of_bin(26) == 1536) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 27 { - assert(p == 27); - assert(size_of_bin(27) == 1792) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 28 { - assert(p == 28); - assert(size_of_bin(28) == 2048) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 29 { - assert(p == 29); - assert(size_of_bin(29) == 2560) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 30 { - assert(p == 30); - assert(size_of_bin(30) == 3072) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 31 { - assert(p == 31); - assert(size_of_bin(31) == 3584) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 32 { - assert(p == 32); - assert(size_of_bin(32) == 4096) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 33 { - assert(p == 33); - assert(size_of_bin(33) == 5120) by (compute_only); - 
assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 34 { - assert(p == 34); - assert(size_of_bin(34) == 6144) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 35 { - assert(p == 35); - assert(size_of_bin(35) == 7168) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 36 { - assert(p == 36); - assert(size_of_bin(36) == 8192) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 37 { - assert(p == 37); - assert(size_of_bin(37) == 10240) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 38 { - assert(p == 38); - assert(size_of_bin(38) == 12288) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 39 { - assert(p == 39); - assert(size_of_bin(39) == 14336) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 40 { - assert(p == 40); - assert(size_of_bin(40) == 16384) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 41 { - assert(p == 41); - assert(size_of_bin(41) == 20480) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 42 { - assert(p == 42); - assert(size_of_bin(42) == 24576) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 43 { - assert(p == 43); - assert(size_of_bin(43) == 28672) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 44 { - assert(p == 44); - assert(size_of_bin(44) == 32768) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 45 { - assert(p == 45); - assert(size_of_bin(45) == 40960) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 46 { - assert(p == 46); - assert(size_of_bin(46) == 49152) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 47 { - assert(p == 47); - assert(size_of_bin(47) == 57344) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 48 { - assert(p == 48); - assert(size_of_bin(48) == 65536) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 49 { - assert(p == 49); - assert(size_of_bin(49) == 81920) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 50 { - assert(p == 50); - assert(size_of_bin(50) == 98304) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 51 { - assert(p == 51); - assert(size_of_bin(51) == 114688) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 52 { - assert(p == 52); - assert(size_of_bin(52) == 131072) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 53 { - assert(p == 53); - assert(size_of_bin(53) == 163840) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 54 { - assert(p == 54); - assert(size_of_bin(54) == 196608) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 55 { - assert(p == 55); - assert(size_of_bin(55) == 229376) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 56 { - assert(p == 56); - assert(size_of_bin(56) == 262144) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 57 { - assert(p == 57); - assert(size_of_bin(57) == 327680) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 58 { - assert(p == 58); - 
assert(size_of_bin(58) == 393216) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 59 { - assert(p == 59); - assert(size_of_bin(59) == 458752) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 60 { - assert(p == 60); - assert(size_of_bin(60) == 524288) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 61 { - assert(p == 61); - assert(size_of_bin(61) == 655360) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 62 { - assert(p == 62); - assert(size_of_bin(62) == 786432) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 63 { - assert(p == 63); - assert(size_of_bin(63) == 917504) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 64 { - assert(p == 64); - assert(size_of_bin(64) == 1048576) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 65 { - assert(p == 65); - assert(size_of_bin(65) == 1310720) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 66 { - assert(p == 66); - assert(size_of_bin(66) == 1572864) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 67 { - assert(p == 67); - assert(size_of_bin(67) == 1835008) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 68 { - assert(p == 68); - assert(size_of_bin(68) == 2097152) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 69 { - assert(p == 69); - assert(size_of_bin(69) == 2621440) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 70 { - assert(p == 70); - assert(size_of_bin(70) == 3145728) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 71 { - assert(p == 71); - assert(size_of_bin(71) == 3670016) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else if p <= 72 { - assert(p == 72); - assert(size_of_bin(72) == 4194304) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } else { - assert(p == 73); - assert(size_of_bin(73) == 8 * (524288 + 1)) by (compute_only); - assert(pages[p].block_size == size_of_bin(p)); - } + } +} + +#[inline] +fn pages_tmp() -> (pages: [PageQueue; 75]) + ensures + pages@.len() == BIN_FULL + 1, + forall|p| + 0 <= p < pages@.len() ==> (#[trigger] pages[p]).first.id() == 0 && pages[p].last.id() + == 0 && (valid_bin_idx(p) ==> pages[p].block_size == size_of_bin(p)), + pages[0].block_size == 8, + pages[BIN_FULL as int].block_size == 8 * (524288 + + 2), //8 * (MEDIUM_OBJ_WSIZE_MAX + 2), +{ + proof { + const_facts(); + } + let pages = [ + PageQueue::empty(1), + PageQueue::empty(1), + PageQueue::empty(2), + PageQueue::empty(3), + PageQueue::empty(4), + PageQueue::empty(5), + PageQueue::empty(6), + PageQueue::empty(7), + PageQueue::empty(8), + PageQueue::empty(10), + PageQueue::empty(12), + PageQueue::empty(14), + PageQueue::empty(16), + PageQueue::empty(20), + PageQueue::empty(24), + PageQueue::empty(28), + PageQueue::empty(32), + PageQueue::empty(40), + PageQueue::empty(48), + PageQueue::empty(56), + PageQueue::empty(64), + PageQueue::empty(80), + PageQueue::empty(96), + PageQueue::empty(112), + PageQueue::empty(128), + PageQueue::empty(160), + PageQueue::empty(192), + PageQueue::empty(224), + PageQueue::empty(256), + PageQueue::empty(320), + PageQueue::empty(384), + PageQueue::empty(448), + PageQueue::empty(512), + 
PageQueue::empty(640), + PageQueue::empty(768), + PageQueue::empty(896), + PageQueue::empty(1024), + PageQueue::empty(1280), + PageQueue::empty(1536), + PageQueue::empty(1792), + PageQueue::empty(2048), + PageQueue::empty(2560), + PageQueue::empty(3072), + PageQueue::empty(3584), + PageQueue::empty(4096), + PageQueue::empty(5120), + PageQueue::empty(6144), + PageQueue::empty(7168), + PageQueue::empty(8192), + PageQueue::empty(10240), + PageQueue::empty(12288), + PageQueue::empty(14336), + PageQueue::empty(16384), + PageQueue::empty(20480), + PageQueue::empty(24576), + PageQueue::empty(28672), + PageQueue::empty(32768), + PageQueue::empty(40960), + PageQueue::empty(49152), + PageQueue::empty(57344), + PageQueue::empty(65536), + PageQueue::empty(81920), + PageQueue::empty(98304), + PageQueue::empty(114688), + PageQueue::empty(131072), + PageQueue::empty(163840), + PageQueue::empty(196608), + PageQueue::empty(229376), + PageQueue::empty(262144), + PageQueue::empty(327680), + PageQueue::empty(393216), + PageQueue::empty(458752), + PageQueue::empty(524288), + //PageQueue::empty(MEDIUM_OBJ_WSIZE_MAX as usize + 1), + //PageQueue::empty(MEDIUM_OBJ_WSIZE_MAX as usize + 2), + PageQueue::empty(524288 + 1), + PageQueue::empty(524288 + 2), + ]; + proof { + assert forall|p| + 0 <= p < pages@.len() ==> (#[trigger] pages[p]).first.id() == 0 && pages[p].last.id() + == 0 && (valid_bin_idx(p) ==> pages[p].block_size == size_of_bin(p)) by { + if valid_bin_idx(p) { + reveal(size_of_bin); + if p <= 1 { + assert(p == 1); + assert(size_of_bin(1) == 8) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 2 { + assert(p == 2); + assert(size_of_bin(2) == 16) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 3 { + assert(p == 3); + assert(size_of_bin(3) == 24) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 4 { + assert(p == 4); + assert(size_of_bin(4) == 32) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 5 { + assert(p == 5); + assert(size_of_bin(5) == 40) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 6 { + assert(p == 6); + assert(size_of_bin(6) == 48) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 7 { + assert(p == 7); + assert(size_of_bin(7) == 56) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 8 { + assert(p == 8); + assert(size_of_bin(8) == 64) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 9 { + assert(p == 9); + assert(size_of_bin(9) == 80) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 10 { + assert(p == 10); + assert(size_of_bin(10) == 96) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 11 { + assert(p == 11); + assert(size_of_bin(11) == 112) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 12 { + assert(p == 12); + assert(size_of_bin(12) == 128) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 13 { + assert(p == 13); + assert(size_of_bin(13) == 160) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 14 { + assert(p == 14); + assert(size_of_bin(14) == 192) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 15 { + assert(p == 15); + assert(size_of_bin(15) == 224) by (compute_only); + 
assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 16 { + assert(p == 16); + assert(size_of_bin(16) == 256) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 17 { + assert(p == 17); + assert(size_of_bin(17) == 320) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 18 { + assert(p == 18); + assert(size_of_bin(18) == 384) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 19 { + assert(p == 19); + assert(size_of_bin(19) == 448) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 20 { + assert(p == 20); + assert(size_of_bin(20) == 512) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 21 { + assert(p == 21); + assert(size_of_bin(21) == 640) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 22 { + assert(p == 22); + assert(size_of_bin(22) == 768) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 23 { + assert(p == 23); + assert(size_of_bin(23) == 896) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 24 { + assert(p == 24); + assert(size_of_bin(24) == 1024) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 25 { + assert(p == 25); + assert(size_of_bin(25) == 1280) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 26 { + assert(p == 26); + assert(size_of_bin(26) == 1536) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 27 { + assert(p == 27); + assert(size_of_bin(27) == 1792) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 28 { + assert(p == 28); + assert(size_of_bin(28) == 2048) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 29 { + assert(p == 29); + assert(size_of_bin(29) == 2560) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 30 { + assert(p == 30); + assert(size_of_bin(30) == 3072) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 31 { + assert(p == 31); + assert(size_of_bin(31) == 3584) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 32 { + assert(p == 32); + assert(size_of_bin(32) == 4096) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 33 { + assert(p == 33); + assert(size_of_bin(33) == 5120) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 34 { + assert(p == 34); + assert(size_of_bin(34) == 6144) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 35 { + assert(p == 35); + assert(size_of_bin(35) == 7168) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 36 { + assert(p == 36); + assert(size_of_bin(36) == 8192) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 37 { + assert(p == 37); + assert(size_of_bin(37) == 10240) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 38 { + assert(p == 38); + assert(size_of_bin(38) == 12288) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 39 { + assert(p == 39); + assert(size_of_bin(39) == 14336) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 40 { + assert(p == 40); + assert(size_of_bin(40) == 16384) by 
(compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 41 { + assert(p == 41); + assert(size_of_bin(41) == 20480) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 42 { + assert(p == 42); + assert(size_of_bin(42) == 24576) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 43 { + assert(p == 43); + assert(size_of_bin(43) == 28672) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 44 { + assert(p == 44); + assert(size_of_bin(44) == 32768) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 45 { + assert(p == 45); + assert(size_of_bin(45) == 40960) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 46 { + assert(p == 46); + assert(size_of_bin(46) == 49152) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 47 { + assert(p == 47); + assert(size_of_bin(47) == 57344) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 48 { + assert(p == 48); + assert(size_of_bin(48) == 65536) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 49 { + assert(p == 49); + assert(size_of_bin(49) == 81920) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 50 { + assert(p == 50); + assert(size_of_bin(50) == 98304) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 51 { + assert(p == 51); + assert(size_of_bin(51) == 114688) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 52 { + assert(p == 52); + assert(size_of_bin(52) == 131072) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 53 { + assert(p == 53); + assert(size_of_bin(53) == 163840) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 54 { + assert(p == 54); + assert(size_of_bin(54) == 196608) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 55 { + assert(p == 55); + assert(size_of_bin(55) == 229376) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 56 { + assert(p == 56); + assert(size_of_bin(56) == 262144) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 57 { + assert(p == 57); + assert(size_of_bin(57) == 327680) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 58 { + assert(p == 58); + assert(size_of_bin(58) == 393216) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 59 { + assert(p == 59); + assert(size_of_bin(59) == 458752) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 60 { + assert(p == 60); + assert(size_of_bin(60) == 524288) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 61 { + assert(p == 61); + assert(size_of_bin(61) == 655360) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 62 { + assert(p == 62); + assert(size_of_bin(62) == 786432) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 63 { + assert(p == 63); + assert(size_of_bin(63) == 917504) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 64 { + assert(p == 64); + assert(size_of_bin(64) == 1048576) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 
65 { + assert(p == 65); + assert(size_of_bin(65) == 1310720) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 66 { + assert(p == 66); + assert(size_of_bin(66) == 1572864) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 67 { + assert(p == 67); + assert(size_of_bin(67) == 1835008) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 68 { + assert(p == 68); + assert(size_of_bin(68) == 2097152) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 69 { + assert(p == 69); + assert(size_of_bin(69) == 2621440) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 70 { + assert(p == 70); + assert(size_of_bin(70) == 3145728) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 71 { + assert(p == 71); + assert(size_of_bin(71) == 3670016) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else if p <= 72 { + assert(p == 72); + assert(size_of_bin(72) == 4194304) by (compute_only); + assert(pages[p].block_size == size_of_bin(p)); + } else { + assert(p == 73); + assert(size_of_bin(73) == 8 * (524288 + 1)) by (compute_only); assert(pages[p].block_size == size_of_bin(p)); } + assert(pages[p].block_size == size_of_bin(p)); } } - pages - } - - fn pages_free_direct_tmp() -> [PPtr; 129] { - [ - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - 
PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - PPtr::from_usize(0), - ] - } - - fn span_queue_headers_tmp() -> [SpanQueueHeader; 32] { - [ - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, - ] } + pages +} - fn thread_data_alloc() -> (res: (usize, Tracked)) - ensures - ({ - let (addr, mc) = res; - { - addr != 0 ==> (mc@.pointsto_has_range(addr as int, SIZEOF_HEAP + SIZEOF_TLD) && 
addr - + page_size() <= usize::MAX && addr % 4096 == 0) - } - }), - { - let (addr, Tracked(mc)) = crate::os_mem::mmap_prot_read_write(0, 4096); - if addr == MAP_FAILED { - todo(); - } - proof { - //assert(set_int_range(addr as int, addr as int + 4096) <= mc.range_os_rw()); - //assert(set_int_range(addr as int, addr as int + 4096) <= mc.range_points_to()); - //assert(SIZEOF_HEAP + SIZEOF_TLD < page_size()); - //assert(mc.pointsto_has_range(addr as int, SIZEOF_HEAP + SIZEOF_TLD)); - } - (addr, Tracked(mc)) +fn pages_free_direct_tmp() -> [PPtr; 129] { + [ + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + PPtr::from_usize(0), + ] +} + +fn 
span_queue_headers_tmp() -> [SpanQueueHeader; 32] { + [ + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + SpanQueueHeader { first: PPtr::from_usize(0), last: PPtr::from_usize(0) }, + ] +} + +fn thread_data_alloc() -> (res: (usize, Tracked)) + ensures + ({ + let (addr, mc) = res; + { + addr != 0 ==> (mc@.pointsto_has_range(addr as int, SIZEOF_HEAP + SIZEOF_TLD) && addr + + page_size() <= usize::MAX && addr % 4096 == 0) + } + }), +{ + let (addr, Tracked(mc)) = crate::os_mem::mmap_prot_read_write(0, 4096); + if addr == MAP_FAILED { + todo(); } + proof { + //assert(set_int_range(addr as int, addr as int + 4096) <= mc.range_os_rw()); + //assert(set_int_range(addr as int, addr as int + 4096) <= mc.range_points_to()); + //assert(SIZEOF_HEAP + SIZEOF_TLD < page_size()); + //assert(mc.pointsto_has_range(addr as int, SIZEOF_HEAP + SIZEOF_TLD)); + } + (addr, Tracked(mc)) +} - ///// The global 'empty page' - /* +///// The global 'empty page' +/* pub fn get_page_empty() -> (res: (PPtr, Tracked>)) ensures ({ let (page_ptr, pfa) = res; { @@ -25946,96 +25836,96 @@ mod init { } */ - struct EmptyPageStuff { - ptr: PPtr, - pfa: Tracked>, - } +struct EmptyPageStuff { + ptr: PPtr, + pfa: Tracked>, +} - impl 
EmptyPageStuff { - pub closed spec fn wf(&self) -> bool { - self.pfa@@.wf_empty_page_global() && self.pfa@@.s.points_to@.pptr == self.ptr.id() - && self.ptr.id() != 0 - } +impl EmptyPageStuff { + pub closed spec fn wf(&self) -> bool { + self.pfa@@.wf_empty_page_global() && self.pfa@@.s.points_to@.pptr == self.ptr.id() + && self.ptr.id() != 0 } +} - /* +/* #[verifier::external] static EMPTY_PAGE_PTR: std::sync::LazyLock = std::sync::LazyLock::new(init_empty_page_ptr); */ - fn init_empty_page_ptr() -> (e: EmptyPageStuff) - ensures - e.wf(), - { - let (pt, Tracked(mut mc)) = crate::os_mem::mmap_prot_read_write(0, 4096); - if pt == MAP_FAILED { - todo(); - } - proof { - const_facts(); - } - assert(set_int_range(pt as int, pt as int + 4096) <= mc.range_os_rw()); - assert(set_int_range(pt as int, pt as int + 4096) <= mc.range_points_to()); - assert(mc.pointsto_has_range(pt as int, 4096)); - assert(mc.pointsto_has_range(pt as int, SIZEOF_PAGE_HEADER as int)); - let tracked points_to_raw = mc.take_points_to_range(pt as int, SIZEOF_PAGE_HEADER as int); - proof { - assert(SIZEOF_PAGE_HEADER == vstd::layout::size_of::()); - mod_trans(pt as int, 4096, vstd::layout::align_of::() as int); - assert(pt as int % vstd::layout::align_of::() as int == 0); - } - vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ - let tracked mut points_to = points_to_raw.into_typed::(pt as int); - proof { - points_to.is_nonnull(); - } - let (count_pcell, Tracked(count_perm)) = PCell::empty(); - let (prev_pcell, Tracked(prev_perm)) = PCell::empty(); - let (next_pcell, Tracked(next_perm)) = PCell::empty(); - let (inner_pcell, Tracked(inner_perm)) = PCell::new( - PageInner { - flags0: 0, - flags1: 0, - flags2: 0, - capacity: 0, - reserved: 0, - free: LL::empty(), - used: 0, - xblock_size: 0, - local_free: LL::empty(), - }, - ); - let tracked fake_inst = global_init().0.instance; - let page_ptr = PPtr::::from_usize(pt); - page_ptr.put( - Tracked(&mut points_to), - Page { - count: count_pcell, - offset: 0, - inner: inner_pcell, - xthread_free: ThreadLLWithDelayBits::empty(Tracked(fake_inst)), - xheap: AtomicHeapPtr::empty(), - prev: prev_pcell, - next: next_pcell, - padding: 0, - }, - ); - let tracked pfa = Duplicable::new( - PageFullAccess { - s: PageSharedAccess { points_to }, - l: PageLocalAccess { - count: count_perm, - inner: inner_perm, - prev: prev_perm, - next: next_perm, - }, - }, - ); - EmptyPageStuff { ptr: page_ptr, pfa: Tracked(pfa) } +fn init_empty_page_ptr() -> (e: EmptyPageStuff) + ensures + e.wf(), +{ + let (pt, Tracked(mut mc)) = crate::os_mem::mmap_prot_read_write(0, 4096); + if pt == MAP_FAILED { + todo(); + } + proof { + const_facts(); } + assert(set_int_range(pt as int, pt as int + 4096) <= mc.range_os_rw()); + assert(set_int_range(pt as int, pt as int + 4096) <= mc.range_points_to()); + assert(mc.pointsto_has_range(pt as int, 4096)); + assert(mc.pointsto_has_range(pt as int, SIZEOF_PAGE_HEADER as int)); + let tracked points_to_raw = mc.take_points_to_range(pt as int, SIZEOF_PAGE_HEADER as int); + proof { + assert(SIZEOF_PAGE_HEADER == vstd::layout::size_of::()); + mod_trans(pt as int, 4096, vstd::layout::align_of::() as int); + assert(pt as int % vstd::layout::align_of::() as int == 0); + } + vstd::layout::layout_for_type_is_valid::(); // $line_count$Proof$ + let tracked mut points_to = points_to_raw.into_typed::(pt as int); + proof { + points_to.is_nonnull(); + } + let (count_pcell, Tracked(count_perm)) = PCell::empty(); + let (prev_pcell, Tracked(prev_perm)) = PCell::empty(); + let 
(next_pcell, Tracked(next_perm)) = PCell::empty(); + let (inner_pcell, Tracked(inner_perm)) = PCell::new( + PageInner { + flags0: 0, + flags1: 0, + flags2: 0, + capacity: 0, + reserved: 0, + free: LL::empty(), + used: 0, + xblock_size: 0, + local_free: LL::empty(), + }, + ); + let tracked fake_inst = global_init().0.instance; + let page_ptr = PPtr::::from_usize(pt); + page_ptr.put( + Tracked(&mut points_to), + Page { + count: count_pcell, + offset: 0, + inner: inner_pcell, + xthread_free: ThreadLLWithDelayBits::empty(Tracked(fake_inst)), + xheap: AtomicHeapPtr::empty(), + prev: prev_pcell, + next: next_pcell, + padding: 0, + }, + ); + let tracked pfa = Duplicable::new( + PageFullAccess { + s: PageSharedAccess { points_to }, + l: PageLocalAccess { + count: count_perm, + inner: inner_perm, + prev: prev_perm, + next: next_perm, + }, + }, + ); + EmptyPageStuff { ptr: page_ptr, pfa: Tracked(pfa) } +} - /* +/* #[verifier::external_body] fn get_empty_page_stuff() -> (e: &'static EmptyPageStuff) ensures e.wf() @@ -26044,8 +25934,8 @@ mod init { } */ - //// Current thread count - /* +//// Current thread count +/* struct_with_invariants!{ pub struct ThreadCountAtomic { pub atomic: AtomicUsize<_, (), _>, @@ -26074,22 +25964,22 @@ mod init { } */ - exec static THREAD_COUNT: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0); +exec static THREAD_COUNT: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0); - //exec static THREAD_COUNT: core::sync::atomic::AtomicUsize - // ensures true - // { core::sync::atomic::AtomicUsize::new(0) } - #[inline] - fn increment_thread_count() { - THREAD_COUNT.fetch_add(1, core::sync::atomic::Ordering::Relaxed); - } +//exec static THREAD_COUNT: core::sync::atomic::AtomicUsize +// ensures true +// { core::sync::atomic::AtomicUsize::new(0) } +#[inline] +fn increment_thread_count() { + THREAD_COUNT.fetch_add(1, core::sync::atomic::Ordering::Relaxed); +} - #[inline] - pub fn current_thread_count() -> usize { - THREAD_COUNT.load(core::sync::atomic::Ordering::Relaxed) - } +#[inline] +pub fn current_thread_count() -> usize { + THREAD_COUNT.load(core::sync::atomic::Ordering::Relaxed) +} - } // verus! +} // verus! } use vstd::prelude::*; diff --git a/examples/nr.rs b/examples/nr.rs index 9b9d421..4806b98 100644 --- a/examples/nr.rs +++ b/examples/nr.rs @@ -41,7 +41,7 @@ pub struct ConcreteLogEntry { } } // verus! -} + } pub mod utils { #[allow(unused_imports)] @@ -217,7 +217,7 @@ pub proof fn int_mod_less_than_same(i: int, len: int) } } // verus! -} + } // the linearization proof pub mod linearization { @@ -1196,7 +1196,7 @@ spec fn all_reads_for( } } // verus! -// end verus! + // end verus! } // the simple log model @@ -3801,7 +3801,7 @@ pub proof fn compute_nrstate_at_version_preserves( } } // verus! -// end verus! + // end verus! } pub mod unbounded_log_refines_simplelog { @@ -4321,7 +4321,7 @@ proof fn state_at_version_refines( } } // verus! -// end verus! + // end verus! } // cyclic buffer @@ -5214,7 +5214,7 @@ pub proof fn log_entry_alive_value_wrap_around(i: LogicalLogIdx, buffer_size: na } } // verus! -} + } // the flag combiner pub mod flat_combiner { @@ -5623,7 +5623,7 @@ tokenized_state_machine! { } // verus! -} + } // the RW lock pub mod rwlock { @@ -5930,7 +5930,7 @@ tokenized_state_machine!{ } } // verus! -} + } } mod exec { @@ -6453,7 +6453,7 @@ impl RwLock { } } // verus! -} + } pub mod log { #[allow(unused_imports)] @@ -8423,7 +8423,7 @@ struct_with_invariants!{ } // verus! 
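// Illustrative aside (not part of the verified snapshot): the thread_data_alloc and
// init_empty_page_ptr routines in the mimalloc example earlier in this patch mmap a fresh
// 4096-byte region and then rely on the result being non-zero, 4096-aligned, and free of
// address-space overflow (addr + page_size() <= usize::MAX). A minimal plain-Rust sketch of
// those checks; the helper name and explicit boolean form are assumptions for illustration.
const PAGE_SIZE: usize = 4096;

fn allocation_is_usable(addr: usize) -> bool {
    addr != 0                                    // a zero/failed mapping carries no guarantees
        && addr % PAGE_SIZE == 0                 // page-aligned, as the ensures clause states
        && addr.checked_add(PAGE_SIZE).is_some() // addr + page_size() stays within usize
}

fn main() {
    assert!(allocation_is_usable(0x1000_0000));
    assert!(!allocation_is_usable(0x1234)); // not page-aligned
}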
-} + } pub mod replica { #[allow(unused_imports)] @@ -9865,7 +9865,7 @@ impl ThreadOpsData
{ } } // verus! -} + } pub mod context { #[allow(unused_imports)] @@ -10433,7 +10433,7 @@ impl FCClientRequestResponseGhost
{ } } // verus! -} + } pub mod utils { #[allow(unused_imports)] @@ -10632,7 +10632,7 @@ pub proof fn rids_match_pop( } } // verus! -} + } verus! { @@ -11224,7 +11224,7 @@ impl ReturnType { } } // verus! -use std::sync::Arc; + use std::sync::Arc; struct NrCounter( Arc>, diff --git a/examples/pagetable.rs b/examples/pagetable.rs index ecfdc4b..2e602b1 100644 --- a/examples/pagetable.rs +++ b/examples/pagetable.rs @@ -428,7 +428,7 @@ pub proof fn lemma_finite_map_union() } } // verus! -} + } pub mod l1 { #![allow(unused_imports)] @@ -2560,7 +2560,7 @@ pub open spec(checked) fn result_map(res: Result, f: FnSpec(A) -> B) } } // verus! -} + } pub mod l2_impl { #![allow(unused_imports)] @@ -6631,7 +6631,7 @@ pub proof fn lemma_set_union_empty_equals_set(s: Set) } } // verus! -} + } pub mod l2_refinement { #![allow(unused_imports)] @@ -7630,7 +7630,7 @@ impl impl_spec::InterfaceSpec for impl_spec::PageTableImpl { } } // verus! -} + } pub mod spec_pt { #![allow(unused_imports)] @@ -7787,7 +7787,7 @@ pub open spec fn next(s1: PageTableVariables, s2: PageTableVariables) -> bool { } } // verus! -} + } pub mod indexing { #![allow(unused_imports)] @@ -8066,7 +8066,7 @@ pub proof fn lemma_index_from_base_and_addr(base: nat, addr: nat, entry_size: na } } // verus! -} + } pub mod os_refinement { #![allow(unused_imports)] @@ -8922,7 +8922,7 @@ proof fn next_step_refines_hl_next_step(s1: OSVariables, s2: OSVariables, step: } } // verus! -} + } } pub mod definitions_t { @@ -10027,7 +10027,7 @@ pub open spec fn next(c: AbstractConstants, s1: AbstractVariables, s2: AbstractV } } // verus! -} + } pub mod hardware { #![allow(unused_imports)] @@ -10726,7 +10726,7 @@ pub open spec fn next(s1: HWVariables, s2: HWVariables) -> bool { // } } // verus! -} + } pub mod os { #![allow(unused_imports)] @@ -10959,7 +10959,7 @@ pub open spec fn init(s: OSVariables) -> bool { } } // verus! -} + } pub mod impl_spec { #![allow(unused_imports)] @@ -11058,7 +11058,7 @@ pub proof fn theorem() } } // verus! -} + } pub mod mem { #![allow(unused_imports)] @@ -11316,7 +11316,7 @@ impl PageTableMemory { } } // verus! -} + } } pub mod extra { diff --git a/examples/storage.rs b/examples/storage.rs index 63d396b..890c13e 100644 --- a/examples/storage.rs +++ b/examples/storage.rs @@ -19,37 +19,36 @@ pub mod infinitelog_t { use vstd::set::*; verus! 
{ - #[verifier::ext_equal] - pub struct AbstractInfiniteLogState { - pub head: int, - pub log: Seq, - pub capacity: int, - } - impl AbstractInfiniteLogState { - pub open spec fn initialize(capacity: int) -> Self { - Self{ head: 0int, log: Seq::::empty(), capacity: capacity } - } +#[verifier::ext_equal] +pub struct AbstractInfiniteLogState { + pub head: int, + pub log: Seq, + pub capacity: int, +} - pub open spec fn append(self, bytes: Seq) -> Self { - Self{ head: self.head, log: self.log + bytes, capacity: self.capacity } - } +impl AbstractInfiniteLogState { + pub open spec fn initialize(capacity: int) -> Self { + Self { head: 0int, log: Seq::::empty(), capacity: capacity } + } - pub open spec fn advance_head(self, new_head: int) -> Self - { - if self.head <= new_head <= self.head + self.log.len() { - let new_log = self.log.subrange(new_head - self.head, self.log.len() as int); - Self{ head: new_head, log: new_log, capacity: self.capacity } - } - else { - self - } - } - } + pub open spec fn append(self, bytes: Seq) -> Self { + Self { head: self.head, log: self.log + bytes, capacity: self.capacity } + } + pub open spec fn advance_head(self, new_head: int) -> Self { + if self.head <= new_head <= self.head + self.log.len() { + let new_log = self.log.subrange(new_head - self.head, self.log.len() as int); + Self { head: new_head, log: new_log, capacity: self.capacity } + } else { + self + } } } +} // verus! +} + pub mod logimpl_v { use crate::infinitelog_t::*; use crate::main_t::*; @@ -69,1998 +68,2521 @@ pub mod logimpl_v { verus! { - // entire header structure: - // bytes 0-7: incorruptible boolean - // bytes 8-39: header 1 - // bytes 40-71: header 2 +// entire header structure: +// bytes 0-7: incorruptible boolean +// bytes 8-39: header 1 +// bytes 40-71: header 2 +// header version structure: +// 0-7: header CRC +// 8-15: logical head +// 16-23: logical tail +// 24-31: log size +pub const incorruptible_bool_pos: u64 = 0; + +pub const header1_pos: u64 = 8; + +pub const header2_pos: u64 = 40; + +// offsets of fields within the header structure +pub const header_crc_offset: u64 = 0; + +pub const header_head_offset: u64 = 8; + +pub const header_tail_offset: u64 = 16; + +pub const header_log_size_offset: u64 = 24; + +pub const header_size: u64 = 32; + +/// Converts the view of a PM region into its incorruptible Boolean, a view of its header, +/// and a data region. 
+pub open spec fn pm_to_views(pm: Seq) -> (u64, HeaderView, Seq) { + let incorruptible_bool = spec_u64_from_le_bytes( + pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8), + ); + // read the CRC, then read the rest of the metadata, then combine them + let crc1 = spec_u64_from_le_bytes( + pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8), + ); + let crc2 = spec_u64_from_le_bytes( + pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8), + ); + let header1_metadata = spec_bytes_to_metadata( + pm.subrange(header1_pos + header_head_offset, header1_pos + header_size), + ); + let header2_metadata = spec_bytes_to_metadata( + pm.subrange(header2_pos + header_head_offset, header2_pos + header_size), + ); + let header_view = HeaderView { + header1: PersistentHeader { crc: crc1, metadata: header1_metadata }, + header2: PersistentHeader { crc: crc2, metadata: header2_metadata }, + }; + let data_view = pm.subrange(contents_offset as int, pm.len() as int); + (incorruptible_bool, header_view, data_view) +} - // header version structure: - // 0-7: header CRC - // 8-15: logical head - // 16-23: logical tail - // 24-31: log size +pub open spec fn spec_get_live_header(pm: Seq) -> PersistentHeader { + let (ib, headers, _) = pm_to_views(pm); + if ib == cdb0_val { + headers.header1 + } else { + headers.header2 + } +} - pub const incorruptible_bool_pos: u64 = 0; - pub const header1_pos: u64 = 8; - pub const header2_pos: u64 = 40; +pub open spec fn permissions_depend_only_on_recovery_view>>( + perm: &Perm, +) -> bool { + forall|s1, s2| + recovery_view()(s1) == recovery_view()(s2) ==> perm.check_permission(s1) + == perm.check_permission(s2) +} - // offsets of fields within the header structure - pub const header_crc_offset: u64 = 0; - pub const header_head_offset: u64 = 8; - pub const header_tail_offset: u64 = 16; - pub const header_log_size_offset: u64 = 24; +pub proof fn lemma_same_permissions>>( + pm1: Seq, + pm2: Seq, + perm: &Perm, +) + requires + recovery_view()(pm1) =~= recovery_view()(pm2), + perm.check_permission(pm1), + permissions_depend_only_on_recovery_view(perm), + ensures + perm.check_permission(pm2), +{ +} - pub const header_size: u64 = 32; +/// Proves that a PM region has the given header at the given position. Useful for +/// associating a region with a header structure when the struct will be used later +/// in a proof. +pub proof fn lemma_header_match(pm: Seq, header_pos: int, header: PersistentHeader) + requires + pm.len() > contents_offset, + header_pos == header1_pos || header_pos == header2_pos, + spec_bytes_to_header(pm.subrange(header_pos as int, header_pos + header_size)) == header, + ensures + ({ + let (_, headers, _) = pm_to_views(pm); + &&& header_pos == header1_pos ==> headers.header1 == header + &&& header_pos == header2_pos ==> headers.header2 == header + }), +{ + assert(pm.subrange(header_pos as int, header_pos + header_size) =~= pm.subrange( + header_pos + header_crc_offset, + header_pos + header_crc_offset + 8, + ) + pm.subrange(header_pos + header_head_offset, header_pos + header_size)); + lemma_bytes_combine_into_header( + pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8), + pm.subrange(header_pos + header_head_offset, header_pos + header_size), + header, + ); +} - /// Converts the view of a PM region into its incorruptible Boolean, a view of its header, - /// and a data region. 
- pub open spec fn pm_to_views(pm: Seq) -> (u64, HeaderView, Seq) - { - let incorruptible_bool = spec_u64_from_le_bytes(pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8)); - // read the CRC, then read the rest of the metadata, then combine them - let crc1 = spec_u64_from_le_bytes(pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8)); - let crc2 = spec_u64_from_le_bytes(pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8)); - - let header1_metadata = spec_bytes_to_metadata(pm.subrange(header1_pos + header_head_offset, header1_pos + header_size)); - let header2_metadata = spec_bytes_to_metadata(pm.subrange(header2_pos + header_head_offset, header2_pos + header_size)); - let header_view = HeaderView { - header1: PersistentHeader { - crc: crc1, - metadata: header1_metadata, - }, - header2: PersistentHeader { - crc: crc2, - metadata: header2_metadata, - } +/// Proves that a given header structure consists of a CRC given in bytes as `crc_bytes` and a metadata structure +/// given in bytes as `metadata_bytes`. +pub proof fn lemma_bytes_combine_into_header( + crc_bytes: Seq, + metadata_bytes: Seq, + header: PersistentHeader, +) + requires + crc_bytes.len() == 8, + metadata_bytes.len() == header_size - 8, + spec_bytes_to_header((crc_bytes + metadata_bytes)) == header, + ensures + ({ + let combined_header = PersistentHeader { + crc: spec_u64_from_le_bytes(crc_bytes), + metadata: spec_bytes_to_metadata(metadata_bytes), }; - let data_view = pm.subrange(contents_offset as int, pm.len() as int); - ( - incorruptible_bool, - header_view, - data_view - ) - } - - pub open spec fn spec_get_live_header(pm: Seq) -> PersistentHeader - { - let (ib, headers, _) = pm_to_views(pm); - if ib == cdb0_val { - headers.header1 - } else { - headers.header2 - } - } - - pub open spec fn permissions_depend_only_on_recovery_view>>(perm: &Perm) -> bool - { - forall |s1, s2| recovery_view()(s1) == recovery_view()(s2) ==> perm.check_permission(s1) == perm.check_permission(s2) - } - - pub proof fn lemma_same_permissions>>(pm1: Seq, pm2: Seq, perm: &Perm) - requires - recovery_view()(pm1) =~= recovery_view()(pm2), - perm.check_permission(pm1), - permissions_depend_only_on_recovery_view(perm) - ensures - perm.check_permission(pm2) - {} - - /// Proves that a PM region has the given header at the given position. Useful for - /// associating a region with a header structure when the struct will be used later - /// in a proof. 
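// Illustrative aside (not part of the verified snapshot): lemma_seq_addition in this module
// captures, at the spec level, the plain fact that the first a.len() bytes of a + b are a and
// the following b.len() bytes are b. The same property checked concretely on Vec<u8>:
fn main() {
    let a: Vec<u8> = vec![1, 2, 3];
    let b: Vec<u8> = vec![4, 5];
    let ab: Vec<u8> = [a.as_slice(), b.as_slice()].concat();
    assert_eq!(&ab[..a.len()], a.as_slice());
    assert_eq!(&ab[a.len()..a.len() + b.len()], b.as_slice());
}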
- pub proof fn lemma_header_match(pm: Seq, header_pos: int, header: PersistentHeader) - requires - pm.len() > contents_offset, - header_pos == header1_pos || header_pos == header2_pos, - spec_bytes_to_header(pm.subrange(header_pos as int, header_pos + header_size)) == header, - ensures - ({ - let (_, headers, _) = pm_to_views(pm); - &&& header_pos == header1_pos ==> - headers.header1 == header - &&& header_pos == header2_pos ==> - headers.header2 == header - }) - { - assert(pm.subrange(header_pos as int, header_pos + header_size) =~= - pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) + - pm.subrange(header_pos + header_head_offset, header_pos + header_size) - ); - lemma_bytes_combine_into_header( - pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8), - pm.subrange(header_pos + header_head_offset, header_pos + header_size), - header - ); - } - - /// Proves that a given header structure consists of a CRC given in bytes as `crc_bytes` and a metadata structure - /// given in bytes as `metadata_bytes`. - pub proof fn lemma_bytes_combine_into_header(crc_bytes: Seq, metadata_bytes: Seq, header: PersistentHeader) - requires - crc_bytes.len() == 8, - metadata_bytes.len() == header_size - 8, - spec_bytes_to_header((crc_bytes + metadata_bytes)) == header, - ensures - ({ - let combined_header = PersistentHeader { crc: spec_u64_from_le_bytes(crc_bytes), metadata: spec_bytes_to_metadata(metadata_bytes) }; - header == combined_header - }) - { - let crc_val = spec_u64_from_le_bytes(crc_bytes); - let metadata = spec_bytes_to_metadata(metadata_bytes); - lemma_seq_addition(crc_bytes, metadata_bytes); - - let combined_header = spec_bytes_to_header((crc_bytes + metadata_bytes)); - assert(combined_header.crc == crc_val); - assert(metadata == spec_bytes_to_metadata((crc_bytes + metadata_bytes).subrange(header_head_offset as int, header_size as int))); - assert(combined_header.metadata == metadata); - } - - /// Converse of lemma_bytes_combine_into_header; proves that the byte representation of a header consists of - /// the byte representations of its CRC and metadata - pub proof fn lemma_header_split_into_bytes(crc_bytes: Seq, metadata_bytes: Seq, header_bytes: Seq) - requires - crc_bytes.len() == 8, - metadata_bytes.len() == header_size - 8, - header_bytes.len() == header_size, - ({ - let header = PersistentHeader { crc: spec_u64_from_le_bytes(crc_bytes), metadata: spec_bytes_to_metadata(metadata_bytes) }; - spec_bytes_to_header(header_bytes) == header - }), - ensures - crc_bytes + metadata_bytes =~= header_bytes - { - lemma_auto_spec_u64_to_from_le_bytes(); - let header = PersistentHeader { crc: spec_u64_from_le_bytes(crc_bytes), metadata: spec_bytes_to_metadata(metadata_bytes) }; - assert(header.crc == spec_u64_from_le_bytes(crc_bytes)); - assert(header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= spec_u64_to_le_bytes(header.crc)); - assert(crc_bytes =~= spec_u64_to_le_bytes(header.crc)); - - assert(header.metadata == spec_bytes_to_metadata(metadata_bytes)); - assert(header.metadata == spec_bytes_to_metadata(header_bytes.subrange(header_head_offset as int, header_size as int))); - lemma_metadata_bytes_eq(metadata_bytes, header_bytes.subrange(header_head_offset as int, header_size as int), header.metadata); - assert(header_bytes.subrange(header_head_offset as int, header_size as int) =~= metadata_bytes); - - } - - pub proof fn lemma_seq_addition(bytes1: Seq, bytes2: Seq) - ensures - ({ - let i = bytes1.len() as int; - let j = 
bytes2.len() as int; - &&& (bytes1 + bytes2).subrange(0, i) =~= bytes1 - &&& (bytes1 + bytes2).subrange(i, i + j) =~= bytes2 - }) - { - assert(forall |i: int| #![auto] 0 <= i < bytes1.len() ==> (bytes1 + bytes2)[i] == bytes1[i]); - assert(forall |i: int| #![auto] 0 <= i < bytes2.len() ==> (bytes1 + bytes2)[bytes1.len() + i] == bytes2[i]); - } + header == combined_header + }), +{ + let crc_val = spec_u64_from_le_bytes(crc_bytes); + let metadata = spec_bytes_to_metadata(metadata_bytes); + lemma_seq_addition(crc_bytes, metadata_bytes); + let combined_header = spec_bytes_to_header((crc_bytes + metadata_bytes)); + assert(combined_header.crc == crc_val); + assert(metadata == spec_bytes_to_metadata( + (crc_bytes + metadata_bytes).subrange(header_head_offset as int, header_size as int), + )); + assert(combined_header.metadata == metadata); +} - #[verifier::ext_equal] - pub struct PersistentHeader { - pub crc: u64, - pub metadata: PersistentHeaderMetadata, - } +/// Converse of lemma_bytes_combine_into_header; proves that the byte representation of a header consists of +/// the byte representations of its CRC and metadata +pub proof fn lemma_header_split_into_bytes( + crc_bytes: Seq, + metadata_bytes: Seq, + header_bytes: Seq, +) + requires + crc_bytes.len() == 8, + metadata_bytes.len() == header_size - 8, + header_bytes.len() == header_size, + ({ + let header = PersistentHeader { + crc: spec_u64_from_le_bytes(crc_bytes), + metadata: spec_bytes_to_metadata(metadata_bytes), + }; + spec_bytes_to_header(header_bytes) == header + }), + ensures + crc_bytes + metadata_bytes =~= header_bytes, +{ + lemma_auto_spec_u64_to_from_le_bytes(); + let header = PersistentHeader { + crc: spec_u64_from_le_bytes(crc_bytes), + metadata: spec_bytes_to_metadata(metadata_bytes), + }; + assert(header.crc == spec_u64_from_le_bytes(crc_bytes)); + assert(header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) + =~= spec_u64_to_le_bytes(header.crc)); + assert(crc_bytes =~= spec_u64_to_le_bytes(header.crc)); + assert(header.metadata == spec_bytes_to_metadata(metadata_bytes)); + assert(header.metadata == spec_bytes_to_metadata( + header_bytes.subrange(header_head_offset as int, header_size as int), + )); + lemma_metadata_bytes_eq( + metadata_bytes, + header_bytes.subrange(header_head_offset as int, header_size as int), + header.metadata, + ); + assert(header_bytes.subrange(header_head_offset as int, header_size as int) =~= metadata_bytes); +} - #[verifier::ext_equal] - pub struct PersistentHeaderMetadata { - pub head: u64, - pub tail: u64, - pub log_size: u64, - } +pub proof fn lemma_seq_addition(bytes1: Seq, bytes2: Seq) + ensures + ({ + let i = bytes1.len() as int; + let j = bytes2.len() as int; + &&& (bytes1 + bytes2).subrange(0, i) =~= bytes1 + &&& (bytes1 + bytes2).subrange(i, i + j) =~= bytes2 + }), +{ + assert(forall|i: int| #![auto] 0 <= i < bytes1.len() ==> (bytes1 + bytes2)[i] == bytes1[i]); + assert(forall|i: int| + #![auto] + 0 <= i < bytes2.len() ==> (bytes1 + bytes2)[bytes1.len() + i] == bytes2[i]); +} - #[verifier::ext_equal] - pub struct HeaderView { - pub header1: PersistentHeader, - pub header2: PersistentHeader, - } +#[verifier::ext_equal] +pub struct PersistentHeader { + pub crc: u64, + pub metadata: PersistentHeaderMetadata, +} - /// Spec code only converts byte representations to structures and does not go the other way - /// to simplify reasoning about persistent structures (although the opposite direction is - /// implemented in exec code). 
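// Illustrative aside (not part of the verified snapshot): a plain-Rust sketch of the on-media
// layout spelled out by the offset constants earlier in this module (incorruptible boolean in
// bytes 0..8, header 1 in 8..40, header 2 in 40..72; within a header: CRC, head, tail, log
// size), together with the little-endian round trip performed by this module's
// metadata_to_bytes / bytes_to_metadata helpers. The field order and the "- 8" adjustment
// (metadata bytes exclude the leading 8-byte CRC) mirror the code; everything else here is
// an assumption made only for illustration.
const INCORRUPTIBLE_BOOL_POS: u64 = 0; // bytes 0..8 of the region
const HEADER1_POS: u64 = 8;            // bytes 8..40
const HEADER2_POS: u64 = 40;           // bytes 40..72
const HEADER_CRC_OFFSET: u64 = 0;      // offsets within one 32-byte header
const HEADER_HEAD_OFFSET: u64 = 8;
const HEADER_TAIL_OFFSET: u64 = 16;
const HEADER_LOG_SIZE_OFFSET: u64 = 24;
const HEADER_SIZE: u64 = 32;

#[derive(Debug, PartialEq)]
struct Metadata {
    head: u64,
    tail: u64,
    log_size: u64,
}

// Serialize head, tail and log_size in that order as little-endian u64s; the result is the
// 24-byte tail of a header, i.e. everything except the leading CRC, which is why the field
// offsets below are shifted down by 8.
fn metadata_to_bytes(m: &Metadata) -> Vec<u8> {
    let mut out = Vec::with_capacity((HEADER_SIZE - 8) as usize);
    out.extend_from_slice(&m.head.to_le_bytes());
    out.extend_from_slice(&m.tail.to_le_bytes());
    out.extend_from_slice(&m.log_size.to_le_bytes());
    out
}

fn bytes_to_metadata(bytes: &[u8]) -> Metadata {
    let u64_at = |off: usize| u64::from_le_bytes(bytes[off..off + 8].try_into().unwrap());
    Metadata {
        head: u64_at((HEADER_HEAD_OFFSET - 8) as usize),
        tail: u64_at((HEADER_TAIL_OFFSET - 8) as usize),
        log_size: u64_at((HEADER_LOG_SIZE_OFFSET - 8) as usize),
    }
}

fn main() {
    assert_eq!(INCORRUPTIBLE_BOOL_POS, 0);
    assert_eq!(HEADER1_POS + HEADER_CRC_OFFSET, 8); // header 1's CRC sits right after the boolean
    assert_eq!(HEADER2_POS + HEADER_TAIL_OFFSET, 56); // absolute offset of header 2's tail field
    let m = Metadata { head: 3, tail: 7, log_size: 128 };
    assert_eq!(bytes_to_metadata(&metadata_to_bytes(&m)), m);
}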
+#[verifier::ext_equal] +pub struct PersistentHeaderMetadata { + pub head: u64, + pub tail: u64, + pub log_size: u64, +} - exec fn bytes_to_header(bytes: &[u8]) -> (out: PersistentHeader) - requires - bytes@.len() == header_size - ensures - out == spec_bytes_to_header(bytes@) - { - let crc_bytes = slice_subrange(bytes, header_crc_offset as usize, (header_crc_offset + 8) as usize); - let metadata_bytes = slice_subrange(bytes, header_head_offset as usize, header_size as usize); +#[verifier::ext_equal] +pub struct HeaderView { + pub header1: PersistentHeader, + pub header2: PersistentHeader, +} - PersistentHeader { - crc: u64_from_le_bytes(crc_bytes), - metadata: bytes_to_metadata(metadata_bytes), - } - } +/// Spec code only converts byte representations to structures and does not go the other way +/// to simplify reasoning about persistent structures (although the opposite direction is +/// implemented in exec code). +exec fn bytes_to_header(bytes: &[u8]) -> (out: PersistentHeader) + requires + bytes@.len() == header_size, + ensures + out == spec_bytes_to_header(bytes@), +{ + let crc_bytes = slice_subrange( + bytes, + header_crc_offset as usize, + (header_crc_offset + 8) as usize, + ); + let metadata_bytes = slice_subrange(bytes, header_head_offset as usize, header_size as usize); + PersistentHeader { + crc: u64_from_le_bytes(crc_bytes), + metadata: bytes_to_metadata(metadata_bytes), + } +} - exec fn header_to_bytes(header: &PersistentHeader) -> (out: Vec) - ensures - header == spec_bytes_to_header(out@), - spec_u64_from_le_bytes(out@.subrange(header_crc_offset as int, header_crc_offset + 8)) == header.crc, - spec_bytes_to_metadata(out@.subrange(header_head_offset as int, header_size as int)) == header.metadata, - out@.len() == header_size - { - proof { lemma_auto_spec_u64_to_from_le_bytes(); } +exec fn header_to_bytes(header: &PersistentHeader) -> (out: Vec) + ensures + header == spec_bytes_to_header(out@), + spec_u64_from_le_bytes(out@.subrange(header_crc_offset as int, header_crc_offset + 8)) + == header.crc, + spec_bytes_to_metadata(out@.subrange(header_head_offset as int, header_size as int)) + == header.metadata, + out@.len() == header_size, +{ + proof { + lemma_auto_spec_u64_to_from_le_bytes(); + } + let mut metadata_bytes = metadata_to_bytes(&header.metadata); + let mut crc_bytes = u64_to_le_bytes(header.crc); + let ghost old_metadata_bytes = metadata_bytes@; + let ghost old_crc_bytes = crc_bytes@; + crc_bytes.append(&mut metadata_bytes); + proof { + lemma_auto_spec_u64_to_from_le_bytes(); + assert(old_crc_bytes =~= crc_bytes@.subrange( + header_crc_offset as int, + header_crc_offset + 8, + )); + assert(old_metadata_bytes =~= crc_bytes@.subrange( + header_head_offset as int, + header_size as int, + )); + } + crc_bytes +} - let mut metadata_bytes = metadata_to_bytes(&header.metadata); - let mut crc_bytes = u64_to_le_bytes(header.crc); - let ghost old_metadata_bytes = metadata_bytes@; - let ghost old_crc_bytes = crc_bytes@; - crc_bytes.append(&mut metadata_bytes); - proof { - lemma_auto_spec_u64_to_from_le_bytes(); - assert(old_crc_bytes =~= crc_bytes@.subrange(header_crc_offset as int, header_crc_offset + 8)); - assert(old_metadata_bytes =~= crc_bytes@.subrange(header_head_offset as int, header_size as int)); - } - crc_bytes - } +exec fn bytes_to_metadata(bytes: &[u8]) -> (out: PersistentHeaderMetadata) + requires + bytes@.len() == header_size - 8, + ensures + out == spec_bytes_to_metadata(bytes@), +{ + let head_bytes = slice_subrange( + bytes, + (header_head_offset - 8) as usize, + 
(header_head_offset - 8 + 8) as usize, + ); + let tail_bytes = slice_subrange( + bytes, + (header_tail_offset - 8) as usize, + (header_tail_offset - 8 + 8) as usize, + ); + let log_size_bytes = slice_subrange( + bytes, + (header_log_size_offset - 8) as usize, + (header_log_size_offset - 8 + 8) as usize, + ); + PersistentHeaderMetadata { + head: u64_from_le_bytes(head_bytes), + tail: u64_from_le_bytes(tail_bytes), + log_size: u64_from_le_bytes(log_size_bytes), + } +} - exec fn bytes_to_metadata(bytes: &[u8]) -> (out: PersistentHeaderMetadata) - requires - bytes@.len() == header_size - 8 - ensures - out == spec_bytes_to_metadata(bytes@) - { - let head_bytes = slice_subrange(bytes, (header_head_offset - 8) as usize, (header_head_offset - 8 + 8) as usize); - let tail_bytes = slice_subrange(bytes, (header_tail_offset - 8) as usize, (header_tail_offset - 8+ 8) as usize); - let log_size_bytes = slice_subrange(bytes, (header_log_size_offset - 8) as usize, (header_log_size_offset - 8 + 8) as usize); - - PersistentHeaderMetadata { - head: u64_from_le_bytes(head_bytes), - tail: u64_from_le_bytes(tail_bytes), - log_size: u64_from_le_bytes(log_size_bytes), - } - } +exec fn metadata_to_bytes(metadata: &PersistentHeaderMetadata) -> (out: Vec) + ensures + metadata == spec_bytes_to_metadata(out@), + out@.len() == header_size - 8, +{ + let mut bytes: Vec = Vec::new(); + let ghost old_bytes = bytes@; + let mut head_bytes = u64_to_le_bytes(metadata.head); + let ghost old_head_bytes = head_bytes@; + let mut tail_bytes = u64_to_le_bytes(metadata.tail); + let ghost old_tail_bytes = tail_bytes@; + let mut log_size_bytes = u64_to_le_bytes(metadata.log_size); + let ghost old_log_size_bytes = log_size_bytes@; + bytes.append(&mut head_bytes); + bytes.append(&mut tail_bytes); + bytes.append(&mut log_size_bytes); + proof { + lemma_auto_spec_u64_to_from_le_bytes(); + assert(old_bytes == Seq::::empty()); + assert(old_head_bytes =~= bytes@.subrange( + header_head_offset - 8, + header_head_offset - 8 + 8, + )); + assert(old_tail_bytes =~= bytes@.subrange( + header_tail_offset - 8, + header_tail_offset - 8 + 8, + )); + assert(old_log_size_bytes =~= bytes@.subrange( + header_log_size_offset - 8, + header_log_size_offset - 8 + 8, + )); + } + bytes +} - exec fn metadata_to_bytes(metadata: &PersistentHeaderMetadata) -> (out: Vec) - ensures - metadata == spec_bytes_to_metadata(out@), - out@.len() == header_size - 8, - { - let mut bytes: Vec = Vec::new(); - let ghost old_bytes = bytes@; +exec fn crc_and_metadata_bytes_to_header(crc_bytes: &[u8], header_bytes: &[u8]) -> (out: + PersistentHeader) + requires + crc_bytes@.len() == 8, + header_bytes@.len() == header_size - 8, + ensures + out.crc == spec_u64_from_le_bytes(crc_bytes@), + out.metadata == spec_bytes_to_metadata(header_bytes@), +{ + let head_bytes = slice_subrange( + header_bytes, + (header_head_offset - 8) as usize, + (header_head_offset + 8 - 8) as usize, + ); + let tail_bytes = slice_subrange( + header_bytes, + (header_tail_offset - 8) as usize, + (header_tail_offset + 8 - 8) as usize, + ); + let log_size_bytes = slice_subrange( + header_bytes, + (header_log_size_offset - 8) as usize, + (header_log_size_offset + 8 - 8) as usize, + ); + PersistentHeader { + crc: u64_from_le_bytes(crc_bytes), + metadata: PersistentHeaderMetadata { + head: u64_from_le_bytes(head_bytes), + tail: u64_from_le_bytes(tail_bytes), + log_size: u64_from_le_bytes(log_size_bytes), + }, + } +} - let mut head_bytes = u64_to_le_bytes(metadata.head); - let ghost old_head_bytes = head_bytes@; - let mut 
tail_bytes = u64_to_le_bytes(metadata.tail); - let ghost old_tail_bytes = tail_bytes@; - let mut log_size_bytes = u64_to_le_bytes(metadata.log_size); - let ghost old_log_size_bytes = log_size_bytes@; +pub open spec(checked) fn spec_bytes_to_metadata(header_seq: Seq) -> PersistentHeaderMetadata + recommends + header_seq.len() == 3 * 8, +{ + let head = spec_u64_from_le_bytes( + header_seq.subrange(header_head_offset - 8, header_head_offset - 8 + 8), + ); + let tail = spec_u64_from_le_bytes( + header_seq.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8), + ); + let log_size = spec_u64_from_le_bytes( + header_seq.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8), + ); + PersistentHeaderMetadata { head, tail, log_size } +} - bytes.append(&mut head_bytes); - bytes.append(&mut tail_bytes); - bytes.append(&mut log_size_bytes); +/// Proves that two sequences of bytes (assumed to be the subrange of a persistent memory device containing +/// the PersistentHeaderMetadata) are equivalent if their PersistentHeaderMetadata representations are equivalent +pub proof fn lemma_metadata_bytes_eq( + bytes1: Seq, + bytes2: Seq, + metadata: PersistentHeaderMetadata, +) + requires + bytes1.len() == header_size - 8, + bytes2.len() == header_size - 8, + metadata == spec_bytes_to_metadata(bytes1), + metadata == spec_bytes_to_metadata(bytes2), + ensures + bytes1 =~= bytes2, +{ + let metadata1 = spec_bytes_to_metadata(bytes1); + let metadata2 = spec_bytes_to_metadata(bytes2); + // TODO: could write a lemma that triggers on from instead of to - might help here + lemma_auto_spec_u64_to_from_le_bytes(); + assert(spec_u64_to_le_bytes(metadata1.head) == spec_u64_to_le_bytes(metadata2.head)); + assert(metadata1.head == spec_u64_from_le_bytes( + bytes1.subrange(header_head_offset - 8, header_head_offset - 8 + 8), + )); + assert(metadata2.head == spec_u64_from_le_bytes( + bytes2.subrange(header_head_offset - 8, header_head_offset - 8 + 8), + )); + assert(bytes1.subrange(header_head_offset - 8, header_head_offset - 8 + 8) =~= bytes2.subrange( + header_head_offset - 8, + header_head_offset - 8 + 8, + )); + assert(spec_u64_to_le_bytes(metadata1.tail) == spec_u64_to_le_bytes(metadata2.tail)); + assert(metadata1.tail == spec_u64_from_le_bytes( + bytes1.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8), + )); + assert(metadata2.tail == spec_u64_from_le_bytes( + bytes2.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8), + )); + assert(bytes1.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8) =~= bytes2.subrange( + header_tail_offset - 8, + header_tail_offset - 8 + 8, + )); + assert(spec_u64_to_le_bytes(metadata1.log_size) == spec_u64_to_le_bytes(metadata2.log_size)); + assert(metadata1.log_size == spec_u64_from_le_bytes( + bytes1.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8), + )); + assert(metadata2.log_size == spec_u64_from_le_bytes( + bytes2.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8), + )); + assert(bytes1.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8) + =~= bytes2.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8)); + assert(bytes1 =~= bytes1.subrange(header_head_offset - 8, header_head_offset - 8 + 8) + + bytes1.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8) + bytes1.subrange( + header_log_size_offset - 8, + header_log_size_offset - 8 + 8, + )); +} - proof { - lemma_auto_spec_u64_to_from_le_bytes(); - assert(old_bytes == Seq::::empty()); - assert(old_head_bytes =~= 
bytes@.subrange(header_head_offset - 8, header_head_offset - 8 + 8)); - assert(old_tail_bytes =~= bytes@.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8)); - assert(old_log_size_bytes =~= bytes@.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8)); - } - bytes - } +pub open spec(checked) fn spec_bytes_to_header(header_seq: Seq) -> PersistentHeader + recommends + header_seq.len() == header_size, +{ + let crc_val = spec_u64_from_le_bytes( + header_seq.subrange(header_crc_offset as int, header_crc_offset + 8), + ); + let metadata = spec_bytes_to_metadata( + header_seq.subrange(header_head_offset as int, header_size as int), + ); + PersistentHeader { crc: crc_val, metadata } +} - exec fn crc_and_metadata_bytes_to_header(crc_bytes: &[u8], header_bytes: &[u8]) -> (out: PersistentHeader) - requires - crc_bytes@.len() == 8, - header_bytes@.len() == header_size - 8 - ensures - out.crc == spec_u64_from_le_bytes(crc_bytes@), - out.metadata == spec_bytes_to_metadata(header_bytes@) - { - let head_bytes = slice_subrange(header_bytes, (header_head_offset - 8) as usize, (header_head_offset + 8 - 8) as usize); - let tail_bytes = slice_subrange(header_bytes, (header_tail_offset - 8) as usize, (header_tail_offset + 8 - 8) as usize); - let log_size_bytes = slice_subrange(header_bytes, (header_log_size_offset - 8) as usize, (header_log_size_offset + 8 - 8) as usize); - - PersistentHeader { - crc: u64_from_le_bytes(crc_bytes), - metadata: PersistentHeaderMetadata { - head: u64_from_le_bytes(head_bytes), - tail: u64_from_le_bytes(tail_bytes), - log_size: u64_from_le_bytes(log_size_bytes) - } +/// Proves that a write to data that does not touch any metadata is crash safe. +pub proof fn lemma_data_write_is_safe( + pm: Seq, + bytes: Seq, + write_addr: int, + perm: &Perm, +) where Perm: CheckPermission> + requires + UntrustedLogImpl::recover(pm).is_Some(), + pm.len() > contents_offset, + contents_offset <= write_addr < pm.len(), + perm.check_permission(pm), + permissions_depend_only_on_recovery_view(perm), + ({ + // write must be a valid write and not overlap the live log + let live_header = spec_get_live_header(pm); + let physical_head = spec_addr_logical_to_physical( + live_header.metadata.head as int, + live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + &&& physical_head <= physical_tail ==> { + &&& write_addr + bytes.len() <= live_header.metadata.log_size + contents_offset + &&& write_addr < physical_head ==> write_addr + bytes.len() <= physical_head + &&& (physical_tail <= write_addr || write_addr < physical_head) } - } - - pub open spec(checked) fn spec_bytes_to_metadata(header_seq: Seq) -> PersistentHeaderMetadata - recommends - header_seq.len() == 3*8 - { - let head = spec_u64_from_le_bytes(header_seq.subrange(header_head_offset - 8, header_head_offset - 8 + 8)); - let tail = spec_u64_from_le_bytes(header_seq.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8)); - let log_size = spec_u64_from_le_bytes(header_seq.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8)); - PersistentHeaderMetadata { - head, - tail, - log_size + &&& physical_tail < physical_head ==> { + &&& physical_tail <= write_addr <= write_addr + bytes.len() < physical_head } - } - - /// Proves that two sequences of bytes (assumed to be the subrange of a persistent memory device containing - /// the PersistentHeaderMetadata) are equivalent if their 
PersistentHeaderMetadata representations are equivalent - pub proof fn lemma_metadata_bytes_eq(bytes1: Seq, bytes2: Seq, metadata: PersistentHeaderMetadata) - requires - bytes1.len() == header_size - 8, - bytes2.len() == header_size - 8, - metadata == spec_bytes_to_metadata(bytes1), - metadata == spec_bytes_to_metadata(bytes2), - ensures - bytes1 =~= bytes2 + }), + ensures + UntrustedLogImpl::recover(pm).is_Some(), + forall|chunks_flushed| + { + let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( + pm, + write_addr, + bytes, + chunks_flushed, + ); + perm.check_permission(new_pm) + }, + ({ + let new_pm = update_contents_to_reflect_write(pm, write_addr, bytes); + perm.check_permission(new_pm) + }), + update_data_view_postcond(pm, bytes, write_addr), +{ + let new_pm = update_contents_to_reflect_write(pm, write_addr, bytes); + lemma_append_data_update_view(pm, bytes, write_addr); + lemma_same_log_state(pm, new_pm); + assert forall|chunks_flushed| { - let metadata1 = spec_bytes_to_metadata(bytes1); - let metadata2 = spec_bytes_to_metadata(bytes2); + let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( + pm, + write_addr, + bytes, + chunks_flushed, + ); + perm.check_permission(new_pm) + } by { + let new_pm = update_contents_to_reflect_partially_flushed_write( + pm, + write_addr, + bytes, + chunks_flushed, + ); + lemma_append_data_update_view_crash(pm, bytes, write_addr, chunks_flushed); + lemma_same_log_state(pm, new_pm); + lemma_same_permissions(pm, new_pm, perm); + } +} - // TODO: could write a lemma that triggers on from instead of to - might help here - lemma_auto_spec_u64_to_from_le_bytes(); - assert(spec_u64_to_le_bytes(metadata1.head) == spec_u64_to_le_bytes(metadata2.head)); - assert(metadata1.head == spec_u64_from_le_bytes(bytes1.subrange(header_head_offset - 8, header_head_offset - 8 + 8))); - assert(metadata2.head == spec_u64_from_le_bytes(bytes2.subrange(header_head_offset - 8, header_head_offset - 8 + 8))); - assert(bytes1.subrange(header_head_offset - 8, header_head_offset - 8 + 8) =~= bytes2.subrange(header_head_offset - 8, header_head_offset - 8 + 8)); - - assert(spec_u64_to_le_bytes(metadata1.tail) == spec_u64_to_le_bytes(metadata2.tail)); - assert(metadata1.tail == spec_u64_from_le_bytes(bytes1.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8))); - assert(metadata2.tail == spec_u64_from_le_bytes(bytes2.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8))); - assert(bytes1.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8) =~= bytes2.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8)); - - assert(spec_u64_to_le_bytes(metadata1.log_size) == spec_u64_to_le_bytes(metadata2.log_size)); - assert(metadata1.log_size == spec_u64_from_le_bytes(bytes1.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8))); - assert(metadata2.log_size == spec_u64_from_le_bytes(bytes2.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8))); - assert(bytes1.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8) =~= bytes2.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8)); - - assert(bytes1 =~= bytes1.subrange(header_head_offset - 8, header_head_offset - 8 + 8) + - bytes1.subrange(header_tail_offset - 8, header_tail_offset - 8 + 8) + - bytes1.subrange(header_log_size_offset - 8, header_log_size_offset - 8 + 8)); - } +pub open spec fn update_data_view_postcond( + pm: Seq, + new_bytes: Seq, + write_addr: int, +) -> bool { + let new_pm = 
update_contents_to_reflect_write(pm, write_addr, new_bytes); + let (old_ib, old_headers, old_data) = pm_to_views(pm); + let (new_ib, new_headers, new_data) = pm_to_views(new_pm); + let live_header = spec_get_live_header(pm); + let physical_head = spec_addr_logical_to_physical( + live_header.metadata.head as int, + live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + &&& old_ib == new_ib + &&& old_headers == new_headers + &&& new_data.len() == old_data.len() + &&& new_data.subrange( + write_addr - contents_offset, + write_addr - contents_offset + new_bytes.len(), + ) =~= new_bytes + &&& new_data.subrange(0, write_addr - contents_offset) =~= old_data.subrange( + 0, + write_addr - contents_offset, + ) + &&& new_data.subrange(write_addr - contents_offset + new_bytes.len(), new_data.len() as int) + =~= old_data.subrange(write_addr - contents_offset + new_bytes.len(), old_data.len() as int) + &&& UntrustedLogImpl::recover(new_pm).is_Some() + &&& physical_head < physical_tail ==> new_data.subrange( + physical_head - contents_offset, + physical_tail - contents_offset, + ) =~= old_data.subrange(physical_head - contents_offset, physical_tail - contents_offset) + &&& physical_tail < physical_head ==> { + &&& old_data.subrange(physical_head - contents_offset, live_header.metadata.log_size as int) + =~= new_data.subrange( + physical_head - contents_offset, + live_header.metadata.log_size as int, + ) + &&& old_data.subrange(0, physical_tail - contents_offset) =~= new_data.subrange( + 0, + physical_tail - contents_offset, + ) + } +} - pub open spec(checked) fn spec_bytes_to_header(header_seq: Seq) -> PersistentHeader - recommends - header_seq.len() == header_size - { - let crc_val = spec_u64_from_le_bytes(header_seq.subrange(header_crc_offset as int, header_crc_offset +8)); - let metadata = spec_bytes_to_metadata(header_seq.subrange(header_head_offset as int, header_size as int)); - PersistentHeader { - crc: crc_val, - metadata +/// Proves that a non-crashing data write updates data bytes but no log metadata. +pub proof fn lemma_append_data_update_view(pm: Seq, new_bytes: Seq, write_addr: int) + requires + UntrustedLogImpl::recover(pm).is_Some(), + pm.len() > contents_offset, + contents_offset <= write_addr < pm.len(), + ({ + // write must be a valid write and not overlap the live log + let live_header = spec_get_live_header(pm); + let physical_head = spec_addr_logical_to_physical( + live_header.metadata.head as int, + live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + &&& physical_head <= physical_tail ==> { + &&& write_addr + new_bytes.len() <= live_header.metadata.log_size + contents_offset + &&& write_addr < physical_head ==> write_addr + new_bytes.len() <= physical_head + &&& (physical_tail <= write_addr || write_addr < physical_head) } - } - - /// Proves that a write to data that does not touch any metadata is crash safe. 
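// Illustrative aside (not part of the verified snapshot): a plain-Rust model of the
// circular-log addressing that lemma_data_write_is_safe and update_data_view_postcond above
// reason about. The real spec_addr_logical_to_physical is defined elsewhere in this file;
// the wrap-around mapping assumed below (contents_offset + logical % log_size) and the value
// 72 for contents_offset are assumptions made only for illustration.
const CONTENTS_OFFSET: u64 = 72; // assumed: log data starts right after the two headers

fn addr_logical_to_physical(logical: u64, log_size: u64) -> u64 {
    CONTENTS_OFFSET + logical % log_size
}

// A data write may only touch bytes outside the live log. The live region runs from the
// physical head to the physical tail and may wrap around the end of the buffer, which is
// exactly the case split made in the preconditions above.
fn write_overlaps_live_log(addr: u64, len: u64, head: u64, tail: u64, log_size: u64) -> bool {
    let phys_head = addr_logical_to_physical(head, log_size);
    let phys_tail = addr_logical_to_physical(tail, log_size);
    if phys_head <= phys_tail {
        // live bytes form one contiguous range [phys_head, phys_tail)
        addr < phys_tail && addr + len > phys_head
    } else {
        // live bytes wrap: [phys_head, end of data region) ++ [CONTENTS_OFFSET, phys_tail)
        addr + len > phys_head || addr < phys_tail
    }
}

fn main() {
    // head = 8, tail = 28, log_size = 1000: live bytes occupy physical [80, 100)
    assert!(!write_overlaps_live_log(100, 8, 8, 28, 1000));
    assert!(write_overlaps_live_log(90, 8, 8, 28, 1000));
}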
- pub proof fn lemma_data_write_is_safe(pm: Seq, bytes: Seq, write_addr: int, perm: &Perm) - where - Perm: CheckPermission>, - requires - UntrustedLogImpl::recover(pm).is_Some(), - pm.len() > contents_offset, - contents_offset <= write_addr < pm.len(), - perm.check_permission(pm), - permissions_depend_only_on_recovery_view(perm), - ({ - // write must be a valid write and not overlap the live log - let live_header = spec_get_live_header(pm); - let physical_head = spec_addr_logical_to_physical(live_header.metadata.head as int, live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - &&& physical_head <= physical_tail ==> { - &&& write_addr + bytes.len() <= live_header.metadata.log_size + contents_offset - &&& write_addr < physical_head ==> write_addr + bytes.len() <= physical_head - &&& (physical_tail <= write_addr || write_addr < physical_head) - } - &&& physical_tail < physical_head ==> { - &&& physical_tail <= write_addr <= write_addr + bytes.len() < physical_head - } - }), - ensures - UntrustedLogImpl::recover(pm).is_Some(), - forall |chunks_flushed| { - let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( - pm, write_addr, bytes, chunks_flushed); - perm.check_permission(new_pm) - }, - ({ - let new_pm = update_contents_to_reflect_write(pm, write_addr, bytes); - perm.check_permission(new_pm) - }), - update_data_view_postcond(pm, bytes, write_addr), - { - let new_pm = update_contents_to_reflect_write(pm, write_addr, bytes); - lemma_append_data_update_view(pm, bytes, write_addr); - lemma_same_log_state(pm, new_pm); - - assert forall |chunks_flushed| { - let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( - pm, write_addr, bytes, chunks_flushed); - perm.check_permission(new_pm) - } by { - let new_pm = update_contents_to_reflect_partially_flushed_write( - pm, write_addr, bytes, chunks_flushed); - lemma_append_data_update_view_crash(pm, bytes, write_addr, chunks_flushed); - lemma_same_log_state(pm, new_pm); - lemma_same_permissions(pm, new_pm, perm); + &&& physical_tail < physical_head ==> { + &&& physical_tail <= write_addr <= write_addr + new_bytes.len() < physical_head } - } + }), + ensures + UntrustedLogImpl::recover(pm).is_Some(), + update_data_view_postcond(pm, new_bytes, write_addr), +{ + let live_header = spec_get_live_header(pm); + let physical_head = spec_addr_logical_to_physical( + live_header.metadata.head as int, + live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + let new_pm = update_contents_to_reflect_write(pm, write_addr, new_bytes); + lemma_headers_unchanged(pm, new_pm); + lemma_incorruptible_bool_unchanged(pm, new_pm); + assert(live_header == spec_get_live_header(new_pm)); + assert(new_pm.subrange(0, write_addr) =~= pm.subrange(0, write_addr)); + assert(new_pm.subrange(write_addr + new_bytes.len(), new_pm.len() as int) =~= pm.subrange( + write_addr + new_bytes.len(), + pm.len() as int, + )); + lemma_subrange_equality_implies_subsubrange_equality(pm, new_pm, 0, write_addr); + lemma_subrange_equality_implies_subsubrange_equality( + pm, + new_pm, + write_addr + new_bytes.len(), + new_pm.len() as int, + ); + if physical_head < physical_tail { + assert(new_pm.subrange(physical_head as int, physical_tail as int) =~= pm.subrange( + physical_head as int, + physical_tail as int, + )); + } +} - pub open 
spec fn update_data_view_postcond(pm: Seq, new_bytes: Seq, write_addr: int) -> bool - { - let new_pm = update_contents_to_reflect_write(pm, write_addr, new_bytes); +/// Proves that a crashing data write updates data bytes but no log metadata. +pub proof fn lemma_append_data_update_view_crash( + pm: Seq, + new_bytes: Seq, + write_addr: int, + chunks_flushed: Set, +) + requires + UntrustedLogImpl::recover(pm).is_Some(), + pm.len() > contents_offset, + contents_offset <= write_addr < pm.len(), + ({ + // write must be a valid write and not overlap the live log + let live_header = spec_get_live_header(pm); + let physical_head = spec_addr_logical_to_physical( + live_header.metadata.head as int, + live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + &&& physical_head <= physical_tail ==> write_addr + new_bytes.len() + <= live_header.metadata.log_size + contents_offset + &&& physical_tail < physical_head ==> write_addr + new_bytes.len() < physical_head + }), + ensures + UntrustedLogImpl::recover(pm).is_Some(), + ({ + let new_pm = update_contents_to_reflect_partially_flushed_write( + pm, + write_addr, + new_bytes, + chunks_flushed, + ); let (old_ib, old_headers, old_data) = pm_to_views(pm); let (new_ib, new_headers, new_data) = pm_to_views(new_pm); - let live_header = spec_get_live_header(pm); - let physical_head = spec_addr_logical_to_physical(live_header.metadata.head as int, live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); &&& old_ib == new_ib &&& old_headers == new_headers &&& new_data.len() == old_data.len() - &&& new_data.subrange(write_addr - contents_offset, write_addr - contents_offset + new_bytes.len()) =~= new_bytes - &&& new_data.subrange(0, write_addr - contents_offset) =~= old_data.subrange(0, write_addr - contents_offset) - &&& new_data.subrange(write_addr - contents_offset + new_bytes.len(), new_data.len() as int) =~= - old_data.subrange(write_addr - contents_offset + new_bytes.len(), old_data.len() as int) + &&& new_data.subrange(0, write_addr - contents_offset) =~= old_data.subrange( + 0, + write_addr - contents_offset, + ) + &&& new_data.subrange( + write_addr - contents_offset + new_bytes.len(), + new_data.len() as int, + ) =~= old_data.subrange( + write_addr - contents_offset + new_bytes.len(), + old_data.len() as int, + ) &&& UntrustedLogImpl::recover(new_pm).is_Some() + }), +{ + let live_header = spec_get_live_header(pm); + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + let new_pm = update_contents_to_reflect_partially_flushed_write( + pm, + write_addr, + new_bytes, + chunks_flushed, + ); + lemma_headers_unchanged(pm, new_pm); + lemma_incorruptible_bool_unchanged(pm, new_pm); + assert(new_pm.subrange(0, write_addr) =~= pm.subrange(0, write_addr)); + assert(new_pm.subrange(write_addr + new_bytes.len(), new_pm.len() as int) =~= pm.subrange( + write_addr + new_bytes.len(), + pm.len() as int, + )); + lemma_subrange_equality_implies_subsubrange_equality(pm, new_pm, 0, write_addr); +} - &&& physical_head < physical_tail ==> - new_data.subrange(physical_head - contents_offset, physical_tail - contents_offset) =~= old_data.subrange(physical_head - contents_offset, physical_tail - contents_offset) - &&& physical_tail < physical_head ==> { - &&& 
old_data.subrange(physical_head - contents_offset, live_header.metadata.log_size as int) =~= new_data.subrange(physical_head - contents_offset, live_header.metadata.log_size as int) - &&& old_data.subrange(0, physical_tail - contents_offset) =~= new_data.subrange(0, physical_tail - contents_offset) - } - } - - /// Proves that a non-crashing data write updates data bytes but no log metadata. - pub proof fn lemma_append_data_update_view(pm: Seq, new_bytes: Seq, write_addr: int) - requires - UntrustedLogImpl::recover(pm).is_Some(), - pm.len() > contents_offset, - contents_offset <= write_addr < pm.len(), - ({ - // write must be a valid write and not overlap the live log - let live_header = spec_get_live_header(pm); - let physical_head = spec_addr_logical_to_physical(live_header.metadata.head as int, live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - &&& physical_head <= physical_tail ==> { - &&& write_addr + new_bytes.len() <= live_header.metadata.log_size + contents_offset - &&& write_addr < physical_head ==> write_addr + new_bytes.len() <= physical_head - &&& (physical_tail <= write_addr || write_addr < physical_head) - } - &&& physical_tail < physical_head ==> { - &&& physical_tail <= write_addr <= write_addr + new_bytes.len() < physical_head - } - }), - ensures - UntrustedLogImpl::recover(pm).is_Some(), - update_data_view_postcond(pm, new_bytes, write_addr), - { - let live_header = spec_get_live_header(pm); - let physical_head = spec_addr_logical_to_physical(live_header.metadata.head as int, live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - let new_pm = update_contents_to_reflect_write(pm, write_addr, new_bytes); - lemma_headers_unchanged(pm, new_pm); - lemma_incorruptible_bool_unchanged(pm, new_pm); - assert(live_header == spec_get_live_header(new_pm)); - assert(new_pm.subrange(0, write_addr) =~= pm.subrange(0, write_addr)); - assert(new_pm.subrange(write_addr + new_bytes.len(), new_pm.len() as int) =~= pm.subrange(write_addr + new_bytes.len(), pm.len() as int)); - lemma_subrange_equality_implies_subsubrange_equality(pm, new_pm, 0, write_addr); - lemma_subrange_equality_implies_subsubrange_equality(pm, new_pm, write_addr + new_bytes.len(), new_pm.len() as int); - if physical_head < physical_tail { - assert(new_pm.subrange(physical_head as int, physical_tail as int) =~= pm.subrange(physical_head as int, physical_tail as int)); - } - } - - /// Proves that a crashing data write updates data bytes but no log metadata. 
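The write-bounds preconditions shared by the data-write lemmas in this hunk can be pictured outside of Verus. The following plain-Rust sketch is an illustration only; the function names and the use of u64 arguments are assumptions, not part of the verified code. It mirrors the "write must not overlap the live log" condition: in the non-wrapped case the write must stay inside the contents region and fall entirely before the head or at/after the tail, and in the wrapped case it must fit in the gap between physical_tail and physical_head.

// Plain-Rust sketch (not Verus) of the non-overlap side condition; names are illustrative.
fn physical_addr(logical: u64, log_size: u64, contents_offset: u64) -> u64 {
    // mirrors spec_addr_logical_to_physical: (addr % log_size) + contents_offset
    (logical % log_size) + contents_offset
}

fn write_avoids_live_log(
    head: u64,
    tail: u64,
    log_size: u64,
    contents_offset: u64,
    write_addr: u64,
    write_len: u64,
) -> bool {
    let contents_end = contents_offset + log_size;
    let physical_head = physical_addr(head, log_size, contents_offset);
    let physical_tail = physical_addr(tail, log_size, contents_offset);
    if physical_head <= physical_tail {
        // live bytes occupy [physical_head, physical_tail); the write must stay in the
        // contents region and either start at/after the tail or end at/before the head
        write_addr + write_len <= contents_end
            && (physical_tail <= write_addr || write_addr + write_len <= physical_head)
    } else {
        // the log wraps: live bytes are [physical_head, contents_end) plus
        // [contents_offset, physical_tail), so only [physical_tail, physical_head) is free
        physical_tail <= write_addr && write_addr + write_len < physical_head
    }
}

The wrapped branch corresponds directly to the lemma precondition physical_tail <= write_addr <= write_addr + new_bytes.len() < physical_head.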
- pub proof fn lemma_append_data_update_view_crash(pm: Seq, new_bytes: Seq, write_addr: int, chunks_flushed: Set) - requires - UntrustedLogImpl::recover(pm).is_Some(), - pm.len() > contents_offset, - contents_offset <= write_addr < pm.len(), - ({ - // write must be a valid write and not overlap the live log - let live_header = spec_get_live_header(pm); - let physical_head = spec_addr_logical_to_physical(live_header.metadata.head as int, live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - &&& physical_head <= physical_tail ==> write_addr + new_bytes.len() <= live_header.metadata.log_size + contents_offset - &&& physical_tail < physical_head ==> write_addr + new_bytes.len() < physical_head - }) - ensures - UntrustedLogImpl::recover(pm).is_Some(), - ({ - let new_pm = update_contents_to_reflect_partially_flushed_write(pm, write_addr, new_bytes, chunks_flushed); - let (old_ib, old_headers, old_data) = pm_to_views(pm); - let (new_ib, new_headers, new_data) = pm_to_views(new_pm); - &&& old_ib == new_ib - &&& old_headers == new_headers - &&& new_data.len() == old_data.len() - &&& new_data.subrange(0, write_addr - contents_offset) =~= old_data.subrange(0, write_addr - contents_offset) - &&& new_data.subrange(write_addr - contents_offset + new_bytes.len(), new_data.len() as int) =~= - old_data.subrange(write_addr - contents_offset + new_bytes.len(), old_data.len() as int) - &&& UntrustedLogImpl::recover(new_pm).is_Some() - }) - { - let live_header = spec_get_live_header(pm); - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - let new_pm = update_contents_to_reflect_partially_flushed_write(pm, write_addr, new_bytes, chunks_flushed); - lemma_headers_unchanged(pm, new_pm); - lemma_incorruptible_bool_unchanged(pm, new_pm); - assert(new_pm.subrange(0, write_addr) =~= pm.subrange(0, write_addr)); - assert(new_pm.subrange(write_addr + new_bytes.len(), new_pm.len() as int) =~= pm.subrange(write_addr + new_bytes.len(), pm.len() as int)); - lemma_subrange_equality_implies_subsubrange_equality(pm, new_pm, 0, write_addr); - } - - /// Proves that a non-crashing update to the inactive header does not change any visible PM state. - pub proof fn lemma_inactive_header_update_view(pm: Seq, new_header_bytes: Seq, header_pos: int) - requires - UntrustedLogImpl::recover(pm).is_Some(), - header_pos == header1_pos || header_pos == header2_pos, - ({ - // the new bytes must be written to the inactive header - let (old_ib, old_headers, old_data) = pm_to_views(pm); - &&& old_ib == cdb0_val ==> header_pos == header2_pos - &&& old_ib == cdb1_val ==> header_pos == header1_pos - }), - new_header_bytes.len() == header_size, - pm.len() > contents_offset, - ensures - ({ - let new_pm = update_contents_to_reflect_write(pm, header_pos, new_header_bytes); - let (old_ib, old_headers, old_data) = pm_to_views(pm); - let (new_ib, new_headers, new_data) = pm_to_views(new_pm); - &&& old_ib == new_ib - &&& old_data =~= old_data - &&& header_pos == header1_pos ==> - old_headers.header2 == new_headers.header2 - &&& header_pos == header2_pos ==> - old_headers.header1 == new_headers.header1 - &&& UntrustedLogImpl::recover(new_pm).is_Some() - }) - { +/// Proves that a non-crashing update to the inactive header does not change any visible PM state. 
+pub proof fn lemma_inactive_header_update_view( + pm: Seq, + new_header_bytes: Seq, + header_pos: int, +) + requires + UntrustedLogImpl::recover(pm).is_Some(), + header_pos == header1_pos || header_pos == header2_pos, + ({ + // the new bytes must be written to the inactive header + let (old_ib, old_headers, old_data) = pm_to_views(pm); + &&& old_ib == cdb0_val ==> header_pos == header2_pos + &&& old_ib == cdb1_val ==> header_pos == header1_pos + }), + new_header_bytes.len() == header_size, + pm.len() > contents_offset, + ensures + ({ let new_pm = update_contents_to_reflect_write(pm, header_pos, new_header_bytes); + let (old_ib, old_headers, old_data) = pm_to_views(pm); let (new_ib, new_headers, new_data) = pm_to_views(new_pm); - assert(pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) =~= new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8)); - if header_pos == header1_pos { - // we wrote to header1, so header2 should have stayed the same - assert(pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8) =~= - new_pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8)); - - assert(pm.subrange(header2_pos + header_head_offset, header2_pos + header_size) =~= - new_pm.subrange(header2_pos + header_head_offset, header2_pos + header_size)); - } else { - // we wrote to header2, so header1 should have stayed the same - assert(pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8) =~= - new_pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8)); - - assert(pm.subrange(header1_pos + header_head_offset, header1_pos + header_size) =~= - new_pm.subrange(header1_pos + header_head_offset, header1_pos + header_size)); - } - } + &&& old_ib == new_ib + &&& old_data =~= old_data + &&& header_pos == header1_pos ==> old_headers.header2 == new_headers.header2 + &&& header_pos == header2_pos ==> old_headers.header1 == new_headers.header1 + &&& UntrustedLogImpl::recover(new_pm).is_Some() + }), +{ + let new_pm = update_contents_to_reflect_write(pm, header_pos, new_header_bytes); + let (new_ib, new_headers, new_data) = pm_to_views(new_pm); + assert(pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) + =~= new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8)); + if header_pos == header1_pos { + // we wrote to header1, so header2 should have stayed the same + assert(pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8) + =~= new_pm.subrange( + header2_pos + header_crc_offset, + header2_pos + header_crc_offset + 8, + )); + assert(pm.subrange(header2_pos + header_head_offset, header2_pos + header_size) + =~= new_pm.subrange(header2_pos + header_head_offset, header2_pos + header_size)); + } else { + // we wrote to header2, so header1 should have stayed the same + assert(pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8) + =~= new_pm.subrange( + header1_pos + header_crc_offset, + header1_pos + header_crc_offset + 8, + )); + assert(pm.subrange(header1_pos + header_head_offset, header1_pos + header_size) + =~= new_pm.subrange(header1_pos + header_head_offset, header1_pos + header_size)); + } +} - /// Proves that a crashing update to the inactive header does not change any visible PM state. 
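Both inactive-header lemmas rely on the same side condition: the new bytes must land in whichever header slot the incorruptible boolean does not currently select. A minimal sketch of that selection in plain Rust, with the constants passed in as parameters (their concrete values are not assumed here):

// Returns the offset of the header slot that is NOT currently live.
fn inactive_header_pos(ib: u64, cdb0_val: u64, header1_pos: u64, header2_pos: u64) -> u64 {
    if ib == cdb0_val {
        // header1 is live, so an update must go to header2
        header2_pos
    } else {
        // otherwise (ib == cdb1_val) header2 is live, so an update must go to header1
        header1_pos
    }
}

This mirrors the header_pos computation in update_header further down in this hunk.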
- pub proof fn lemma_inactive_header_update_view_crash(pm: Seq, new_header_bytes: Seq, header_pos: int, chunks_flushed: Set) - requires - UntrustedLogImpl::recover(pm).is_Some(), - header_pos == header1_pos || header_pos == header2_pos, - ({ - // the new bytes must be written to the inactive header - let (old_ib, old_headers, old_data) = pm_to_views(pm); - &&& old_ib == cdb0_val ==> header_pos == header2_pos - &&& old_ib == cdb1_val ==> header_pos == header1_pos - }), - new_header_bytes.len() == header_size, - pm.len() > contents_offset, - ensures - ({ - let new_pm = update_contents_to_reflect_partially_flushed_write( - pm, header_pos, new_header_bytes, chunks_flushed); - let (old_ib, old_headers, old_data) = pm_to_views(pm); - let (new_ib, new_headers, new_data) = pm_to_views(new_pm); - &&& old_ib == new_ib - &&& old_data =~= old_data - &&& header_pos == header1_pos ==> - old_headers.header2 == new_headers.header2 - &&& header_pos == header2_pos ==> - old_headers.header1 == new_headers.header1 - &&& UntrustedLogImpl::recover(new_pm).is_Some() - }) - { +/// Proves that a crashing update to the inactive header does not change any visible PM state. +pub proof fn lemma_inactive_header_update_view_crash( + pm: Seq, + new_header_bytes: Seq, + header_pos: int, + chunks_flushed: Set, +) + requires + UntrustedLogImpl::recover(pm).is_Some(), + header_pos == header1_pos || header_pos == header2_pos, + ({ + // the new bytes must be written to the inactive header + let (old_ib, old_headers, old_data) = pm_to_views(pm); + &&& old_ib == cdb0_val ==> header_pos == header2_pos + &&& old_ib == cdb1_val ==> header_pos == header1_pos + }), + new_header_bytes.len() == header_size, + pm.len() > contents_offset, + ensures + ({ let new_pm = update_contents_to_reflect_partially_flushed_write( - pm, header_pos, new_header_bytes, chunks_flushed); - assert(pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) =~= new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8)); - if header_pos == header1_pos { - // we wrote to header1, so header2 should have stayed the same - assert(pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8) =~= - new_pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8)); - - assert(pm.subrange(header2_pos + header_head_offset, header2_pos + header_size) =~= - new_pm.subrange(header2_pos + header_head_offset, header2_pos + header_size)); - } else { - // we wrote to header2, so header1 should have stayed the same - assert(pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8) =~= - new_pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8)); + pm, + header_pos, + new_header_bytes, + chunks_flushed, + ); + let (old_ib, old_headers, old_data) = pm_to_views(pm); + let (new_ib, new_headers, new_data) = pm_to_views(new_pm); + &&& old_ib == new_ib + &&& old_data =~= old_data + &&& header_pos == header1_pos ==> old_headers.header2 == new_headers.header2 + &&& header_pos == header2_pos ==> old_headers.header1 == new_headers.header1 + &&& UntrustedLogImpl::recover(new_pm).is_Some() + }), +{ + let new_pm = update_contents_to_reflect_partially_flushed_write( + pm, + header_pos, + new_header_bytes, + chunks_flushed, + ); + assert(pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) + =~= new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8)); + if header_pos == header1_pos { + // we wrote to header1, so header2 should have 
stayed the same + assert(pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8) + =~= new_pm.subrange( + header2_pos + header_crc_offset, + header2_pos + header_crc_offset + 8, + )); + assert(pm.subrange(header2_pos + header_head_offset, header2_pos + header_size) + =~= new_pm.subrange(header2_pos + header_head_offset, header2_pos + header_size)); + } else { + // we wrote to header2, so header1 should have stayed the same + assert(pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8) + =~= new_pm.subrange( + header1_pos + header_crc_offset, + header1_pos + header_crc_offset + 8, + )); + assert(pm.subrange(header1_pos + header_head_offset, header1_pos + header_size) + =~= new_pm.subrange(header1_pos + header_head_offset, header1_pos + header_size)); + } +} - assert(pm.subrange(header1_pos + header_head_offset, header1_pos + header_size) =~= - new_pm.subrange(header1_pos + header_head_offset, header1_pos + header_size)); +/// Proves that an update to the incorruptible boolean is crash-safe and switches the log's +/// active header. This lemma does most of the work to prove that untrusted_append is +/// implemented correctly. +pub proof fn lemma_append_ib_update>>( + pm: Seq, + new_ib: u64, + bytes_to_append: Seq, + new_header_bytes: Seq, + perm: &Perm, +) + requires + pm.len() > contents_offset, + UntrustedLogImpl::recover(pm).is_Some(), + new_ib == cdb0_val || new_ib == cdb1_val, + new_ib == cdb0_val ==> pm.subrange(header1_pos as int, header1_pos + header_size) + == new_header_bytes, + new_ib == cdb1_val ==> pm.subrange(header2_pos as int, header2_pos + header_size) + == new_header_bytes, + new_header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) + == spec_crc_bytes( + new_header_bytes.subrange(header_head_offset as int, header_size as int), + ), + ({ + let new_header = spec_bytes_to_header(new_header_bytes); + let live_header = spec_get_live_header(pm); + &&& new_header.metadata.tail == live_header.metadata.tail + bytes_to_append.len() + &&& new_header.metadata.head == live_header.metadata.head + &&& new_header.metadata.log_size == live_header.metadata.log_size + &&& new_header.metadata.tail - new_header.metadata.head < new_header.metadata.log_size + }), + perm.check_permission(pm), + permissions_depend_only_on_recovery_view(perm), + ({ + let live_header = spec_get_live_header(pm); + let physical_head = spec_addr_logical_to_physical( + live_header.metadata.head as int, + live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + let contents_end = (live_header.metadata.log_size + contents_offset) as int; + let append_size = bytes_to_append.len(); + let len1 = (contents_end - physical_tail); + let len2 = bytes_to_append.len() - len1; + &&& physical_tail + append_size >= contents_end ==> { + &&& pm.subrange(physical_tail, contents_end) =~= bytes_to_append.subrange(0, len1) + &&& pm.subrange(contents_offset as int, contents_offset + len2) + =~= bytes_to_append.subrange(len1 as int, append_size as int) + &&& bytes_to_append =~= pm.subrange(physical_tail, contents_end) + pm.subrange( + contents_offset as int, + contents_offset + len2, + ) } - } - - /// Proves that an update to the incorruptible boolean is crash-safe and switches the log's - /// active header. This lemma does most of the work to prove that untrusted_append is - /// implemented correctly. 
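The preconditions of lemma_append_ib_update below describe how an append that runs past the end of the contents region is split: len1 = contents_end - physical_tail bytes go at the tail, and the remaining len2 bytes wrap back to contents_offset. A rough plain-Rust illustration of that split follows; the helper name and slice-based signature are assumptions made for the example.

// Sketch of how an append is split when it reaches the end of the contents region.
fn split_append<'a>(
    bytes_to_append: &'a [u8],
    physical_tail: usize,
    contents_offset: usize,
    contents_end: usize,
) -> Vec<(usize, &'a [u8])> {
    let append_size = bytes_to_append.len();
    if physical_tail + append_size >= contents_end {
        // the append wraps: len1 bytes land at the tail, the remaining bytes go
        // back at the start of the contents region
        let len1 = contents_end - physical_tail;
        vec![
            (physical_tail, &bytes_to_append[..len1]),
            (contents_offset, &bytes_to_append[len1..]),
        ]
    } else {
        // no wrap: everything lands contiguously at the tail
        vec![(physical_tail, bytes_to_append)]
    }
}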
- pub proof fn lemma_append_ib_update>>( - pm: Seq, - new_ib: u64, - bytes_to_append: Seq, - new_header_bytes: Seq, - perm: &Perm - ) - requires - pm.len() > contents_offset, - UntrustedLogImpl::recover(pm).is_Some(), - new_ib == cdb0_val || new_ib == cdb1_val, - new_ib == cdb0_val ==> - pm.subrange(header1_pos as int, header1_pos + header_size) == new_header_bytes, - new_ib == cdb1_val ==> - pm.subrange(header2_pos as int, header2_pos + header_size) == new_header_bytes, - new_header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) == - spec_crc_bytes(new_header_bytes.subrange(header_head_offset as int, header_size as int)), - ({ - let new_header = spec_bytes_to_header(new_header_bytes); - let live_header = spec_get_live_header(pm); - &&& new_header.metadata.tail == live_header.metadata.tail + bytes_to_append.len() - &&& new_header.metadata.head == live_header.metadata.head - &&& new_header.metadata.log_size == live_header.metadata.log_size - &&& new_header.metadata.tail - new_header.metadata.head < new_header.metadata.log_size - }), - perm.check_permission(pm), - permissions_depend_only_on_recovery_view(perm), - ({ - let live_header = spec_get_live_header(pm); - let physical_head = spec_addr_logical_to_physical(live_header.metadata.head as int, live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - let contents_end = (live_header.metadata.log_size + contents_offset) as int; - let append_size = bytes_to_append.len(); - let len1 = (contents_end - physical_tail); - let len2 = bytes_to_append.len() - len1; - - &&& physical_tail + append_size >= contents_end ==> { - &&& pm.subrange(physical_tail, contents_end) =~= bytes_to_append.subrange(0, len1) - &&& pm.subrange(contents_offset as int, contents_offset + len2) =~= bytes_to_append.subrange(len1 as int, append_size as int) - &&& bytes_to_append =~= pm.subrange(physical_tail, contents_end) + pm.subrange(contents_offset as int, contents_offset + len2) - } - &&& physical_head <= physical_tail && physical_tail + append_size < contents_end ==> { - pm.subrange(physical_tail, physical_tail + append_size) =~= bytes_to_append - } - &&& physical_tail < physical_head ==> { - &&& physical_tail + append_size < physical_head - &&& pm.subrange(physical_tail, physical_tail + append_size) =~= bytes_to_append - } - }), - ({ - let old_log_state = UntrustedLogImpl::recover(pm); - forall |pm_state| #[trigger] perm.check_permission(pm_state) <==> { - let log_state = UntrustedLogImpl::recover(pm_state); - log_state == old_log_state || log_state == Some(old_log_state.unwrap().append(bytes_to_append)) - } - }), - ensures - ({ - let ib_bytes = spec_u64_to_le_bytes(new_ib); - let new_pm = update_contents_to_reflect_write(pm, incorruptible_bool_pos as int, ib_bytes); - let old_log_state = UntrustedLogImpl::recover(pm); - let new_log_state = UntrustedLogImpl::recover(new_pm); - let new_live_header = spec_get_live_header(new_pm); - let (new_pm_ib, _, _) = pm_to_views(new_pm); - &&& match (old_log_state, new_log_state) { - (Some(old_log_state), Some(new_log_state)) => { - &&& new_log_state =~= old_log_state.append(bytes_to_append) - &&& perm.check_permission(new_pm) - } - _ => false, - } - &&& new_live_header == spec_bytes_to_header(new_header_bytes) - &&& new_ib == new_pm_ib - }), - forall |chunks_flushed| { - let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( - pm, incorruptible_bool_pos as int, 
spec_u64_to_le_bytes(new_ib), chunks_flushed); - &&& perm.check_permission(new_pm) + &&& physical_head <= physical_tail && physical_tail + append_size < contents_end ==> { + pm.subrange(physical_tail, physical_tail + append_size) =~= bytes_to_append + } + &&& physical_tail < physical_head ==> { + &&& physical_tail + append_size < physical_head + &&& pm.subrange(physical_tail, physical_tail + append_size) =~= bytes_to_append + } + }), + ({ + let old_log_state = UntrustedLogImpl::recover(pm); + forall|pm_state| #[trigger] + perm.check_permission(pm_state) <==> { + let log_state = UntrustedLogImpl::recover(pm_state); + log_state == old_log_state || log_state == Some( + old_log_state.unwrap().append(bytes_to_append), + ) } - { + }), + ensures + ({ let ib_bytes = spec_u64_to_le_bytes(new_ib); - let live_header = spec_get_live_header(pm); - let append_size = bytes_to_append.len(); - let contents_end = live_header.metadata.log_size + contents_offset; - let physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - - lemma_auto_spec_u64_to_from_le_bytes(); - lemma_single_write_crash(pm, incorruptible_bool_pos as int, ib_bytes); - assert(perm.check_permission(pm)); - - let new_pm = update_contents_to_reflect_write(pm, incorruptible_bool_pos as int, ib_bytes); - lemma_headers_unchanged(pm, new_pm); - assert(new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) =~= ib_bytes); - - let new_header = spec_bytes_to_header(new_header_bytes); - let (ib, headers, data) = pm_to_views(new_pm); - let header_pos = if new_ib == cdb0_val { - header1_pos - } else { - header2_pos - }; - assert(new_pm.subrange(header_pos as int, header_pos + header_size) =~= new_header_bytes); - lemma_header_match(new_pm, header_pos as int, new_header); - lemma_header_correct(new_pm, new_header_bytes, header_pos as int); - - // prove that new pm has the append update - let new_log_state = UntrustedLogImpl::recover(new_pm); + let new_pm = update_contents_to_reflect_write( + pm, + incorruptible_bool_pos as int, + ib_bytes, + ); let old_log_state = UntrustedLogImpl::recover(pm); - - match (new_log_state, old_log_state) { - (Some(new_log_state), Some(old_log_state)) => { - lemma_pm_state_header(new_pm); - lemma_pm_state_header(pm); - - let old_header = spec_get_live_header(pm); - let live_header = spec_get_live_header(new_pm); - assert(live_header == new_header); - - assert(live_header.metadata.head == old_header.metadata.head); - assert(live_header.metadata.tail == old_header.metadata.tail + bytes_to_append.len()); - - let physical_head = spec_addr_logical_to_physical(live_header.metadata.head as int, live_header.metadata.log_size as int); - let new_physical_tail = spec_addr_logical_to_physical(live_header.metadata.tail as int, live_header.metadata.log_size as int); - let old_physical_tail = spec_addr_logical_to_physical(old_header.metadata.tail as int, old_header.metadata.log_size as int); - assert(old_physical_tail == physical_tail); - - let (_, _, old_data) = pm_to_views(pm); - let (_, _, new_data) = pm_to_views(pm); - - if physical_head <= old_physical_tail { - if old_physical_tail + append_size >= contents_end { - assert(new_log_state.log =~= new_data.subrange(physical_head - contents_offset, old_physical_tail - contents_offset) + - new_data.subrange(old_physical_tail - contents_offset, contents_end - contents_offset) + - new_data.subrange(0, new_physical_tail - contents_offset)); - assert(new_log_state.log =~= old_data.subrange(physical_head - 
contents_offset, old_physical_tail - contents_offset) + - new_data.subrange(old_physical_tail - contents_offset, contents_end - contents_offset) + - new_data.subrange(0, new_physical_tail - contents_offset)); - let len1 = (contents_end - old_physical_tail); - let len2 = bytes_to_append.len() - len1; - assert(bytes_to_append =~= new_data.subrange(old_physical_tail - contents_offset, contents_end - contents_offset) + - new_data.subrange(0, new_physical_tail - contents_offset)); - assert(new_log_state.log =~= old_data.subrange(physical_head - contents_offset, old_physical_tail - contents_offset) + bytes_to_append); - } else { - assert(old_data.subrange(0, old_physical_tail - contents_offset) =~= new_data.subrange(0, old_physical_tail - contents_offset)); - assert(new_data.subrange(old_physical_tail - contents_offset, old_physical_tail - contents_offset + append_size) =~= bytes_to_append); - } - } else { // physical_tail < physical_head - assert(old_physical_tail + append_size < physical_head); - } - assert(new_log_state =~= old_log_state.append(bytes_to_append)); - assert(perm.check_permission(new_pm)); + let new_log_state = UntrustedLogImpl::recover(new_pm); + let new_live_header = spec_get_live_header(new_pm); + let (new_pm_ib, _, _) = pm_to_views(new_pm); + &&& match (old_log_state, new_log_state) { + (Some(old_log_state), Some(new_log_state)) => { + &&& new_log_state =~= old_log_state.append(bytes_to_append) + &&& perm.check_permission(new_pm) + }, + _ => false, + } + &&& new_live_header == spec_bytes_to_header(new_header_bytes) + &&& new_ib == new_pm_ib + }), + forall|chunks_flushed| + { + let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( + pm, + incorruptible_bool_pos as int, + spec_u64_to_le_bytes(new_ib), + chunks_flushed, + ); + &&& perm.check_permission(new_pm) + }, +{ + let ib_bytes = spec_u64_to_le_bytes(new_ib); + let live_header = spec_get_live_header(pm); + let append_size = bytes_to_append.len(); + let contents_end = live_header.metadata.log_size + contents_offset; + let physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + lemma_auto_spec_u64_to_from_le_bytes(); + lemma_single_write_crash(pm, incorruptible_bool_pos as int, ib_bytes); + assert(perm.check_permission(pm)); + let new_pm = update_contents_to_reflect_write(pm, incorruptible_bool_pos as int, ib_bytes); + lemma_headers_unchanged(pm, new_pm); + assert(new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) =~= ib_bytes); + let new_header = spec_bytes_to_header(new_header_bytes); + let (ib, headers, data) = pm_to_views(new_pm); + let header_pos = if new_ib == cdb0_val { + header1_pos + } else { + header2_pos + }; + assert(new_pm.subrange(header_pos as int, header_pos + header_size) =~= new_header_bytes); + lemma_header_match(new_pm, header_pos as int, new_header); + lemma_header_correct(new_pm, new_header_bytes, header_pos as int); + // prove that new pm has the append update + let new_log_state = UntrustedLogImpl::recover(new_pm); + let old_log_state = UntrustedLogImpl::recover(pm); + match (new_log_state, old_log_state) { + (Some(new_log_state), Some(old_log_state)) => { + lemma_pm_state_header(new_pm); + lemma_pm_state_header(pm); + let old_header = spec_get_live_header(pm); + let live_header = spec_get_live_header(new_pm); + assert(live_header == new_header); + assert(live_header.metadata.head == old_header.metadata.head); + assert(live_header.metadata.tail == old_header.metadata.tail + 
bytes_to_append.len()); + let physical_head = spec_addr_logical_to_physical( + live_header.metadata.head as int, + live_header.metadata.log_size as int, + ); + let new_physical_tail = spec_addr_logical_to_physical( + live_header.metadata.tail as int, + live_header.metadata.log_size as int, + ); + let old_physical_tail = spec_addr_logical_to_physical( + old_header.metadata.tail as int, + old_header.metadata.log_size as int, + ); + assert(old_physical_tail == physical_tail); + let (_, _, old_data) = pm_to_views(pm); + let (_, _, new_data) = pm_to_views(pm); + if physical_head <= old_physical_tail { + if old_physical_tail + append_size >= contents_end { + assert(new_log_state.log =~= new_data.subrange( + physical_head - contents_offset, + old_physical_tail - contents_offset, + ) + new_data.subrange( + old_physical_tail - contents_offset, + contents_end - contents_offset, + ) + new_data.subrange(0, new_physical_tail - contents_offset)); + assert(new_log_state.log =~= old_data.subrange( + physical_head - contents_offset, + old_physical_tail - contents_offset, + ) + new_data.subrange( + old_physical_tail - contents_offset, + contents_end - contents_offset, + ) + new_data.subrange(0, new_physical_tail - contents_offset)); + let len1 = (contents_end - old_physical_tail); + let len2 = bytes_to_append.len() - len1; + assert(bytes_to_append =~= new_data.subrange( + old_physical_tail - contents_offset, + contents_end - contents_offset, + ) + new_data.subrange(0, new_physical_tail - contents_offset)); + assert(new_log_state.log =~= old_data.subrange( + physical_head - contents_offset, + old_physical_tail - contents_offset, + ) + bytes_to_append); + } else { + assert(old_data.subrange(0, old_physical_tail - contents_offset) + =~= new_data.subrange(0, old_physical_tail - contents_offset)); + assert(new_data.subrange( + old_physical_tail - contents_offset, + old_physical_tail - contents_offset + append_size, + ) =~= bytes_to_append); } - _ => assert(false), + } else { // physical_tail < physical_head + assert(old_physical_tail + append_size < physical_head); } - } + assert(new_log_state =~= old_log_state.append(bytes_to_append)); + assert(perm.check_permission(new_pm)); + }, + _ => assert(false), + } +} - pub open spec fn live_data_view_eq(old_pm: Seq, new_pm: Seq) -> bool - { +pub open spec fn live_data_view_eq(old_pm: Seq, new_pm: Seq) -> bool { + let (old_ib, old_headers, old_data) = pm_to_views(old_pm); + let (new_ib, new_headers, new_data) = pm_to_views(new_pm); + let old_live_header = spec_get_live_header(old_pm); + let new_live_header = spec_get_live_header(new_pm); + let physical_head = spec_addr_logical_to_physical( + old_live_header.metadata.head as int, + old_live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + old_live_header.metadata.tail as int, + old_live_header.metadata.log_size as int, + ); + let log_size = old_live_header.metadata.log_size; + let physical_data_head = physical_head - contents_offset; + let physical_data_tail = physical_tail - contents_offset; + &&& new_live_header == old_live_header + &&& physical_head < physical_tail ==> old_data.subrange(physical_data_head, physical_data_tail) + =~= new_data.subrange(physical_data_head, physical_data_tail) + &&& physical_tail < physical_head ==> { + &&& old_data.subrange(physical_data_head as int, log_size as int) =~= new_data.subrange( + physical_data_head as int, + log_size as int, + ) + &&& old_data.subrange(0, physical_data_tail as int) =~= new_data.subrange( + 0, + physical_data_tail as 
int, + ) + } + &&& physical_tail == physical_head ==> physical_data_head == physical_data_tail +} + +pub proof fn lemma_same_log_state(old_pm: Seq, new_pm: Seq) + requires + UntrustedLogImpl::recover(old_pm).is_Some(), + UntrustedLogImpl::recover(new_pm).is_Some(), + live_data_view_eq(old_pm, new_pm), + ({ let (old_ib, old_headers, old_data) = pm_to_views(old_pm); let (new_ib, new_headers, new_data) = pm_to_views(new_pm); - let old_live_header = spec_get_live_header(old_pm); - let new_live_header = spec_get_live_header(new_pm); - let physical_head = spec_addr_logical_to_physical(old_live_header.metadata.head as int, old_live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(old_live_header.metadata.tail as int, old_live_header.metadata.log_size as int); - let log_size = old_live_header.metadata.log_size; - let physical_data_head = physical_head - contents_offset; - let physical_data_tail = physical_tail - contents_offset; - - &&& new_live_header == old_live_header - &&& physical_head < physical_tail ==> - old_data.subrange(physical_data_head, physical_data_tail) =~= new_data.subrange(physical_data_head, physical_data_tail) - &&& physical_tail < physical_head ==> { - &&& old_data.subrange(physical_data_head as int, log_size as int) =~= new_data.subrange(physical_data_head as int, log_size as int) - &&& old_data.subrange(0, physical_data_tail as int) =~= new_data.subrange(0, physical_data_tail as int) + &&& old_ib == cdb0_val || old_ib == cdb1_val + &&& old_ib == new_ib + &&& old_ib == cdb0_val ==> { + &&& old_headers.header1 == new_headers.header1 } - &&& physical_tail == physical_head ==> - physical_data_head == physical_data_tail - } + &&& old_ib == cdb1_val ==> { + &&& old_headers.header2 == new_headers.header2 + } + }), + ensures + UntrustedLogImpl::recover(old_pm) =~= UntrustedLogImpl::recover(new_pm), +{ + let old_state = UntrustedLogImpl::recover(old_pm); + let new_state = UntrustedLogImpl::recover(new_pm); + let (old_ib, old_headers, old_data) = pm_to_views(old_pm); + let (new_ib, new_headers, new_data) = pm_to_views(new_pm); + assert(old_state.is_Some()); + assert(new_state.is_Some()); + match (old_state, new_state) { + (Some(old_state), Some(new_state)) => { + let (old_live_header, new_live_header) = if old_ib == cdb0_val { + (old_headers.header1, new_headers.header1) + } else { + (old_headers.header2, new_headers.header2) + }; + assert(old_state.head == old_live_header.metadata.head); + assert(new_state.head == new_live_header.metadata.head); + assert(old_live_header.metadata.tail == new_live_header.metadata.tail); + let physical_head = spec_addr_logical_to_physical( + old_live_header.metadata.head as int, + old_live_header.metadata.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + old_live_header.metadata.tail as int, + old_live_header.metadata.log_size as int, + ); + let contents_end = old_live_header.metadata.log_size + contents_offset; + if physical_head < physical_tail { + assert(old_pm.subrange(physical_head, physical_tail) =~= old_data.subrange( + physical_head - contents_offset, + physical_tail - contents_offset, + )); + assert(old_pm.subrange(physical_head, physical_tail) =~= new_pm.subrange( + physical_head, + physical_tail, + )); + } else if physical_tail < physical_head { + assert(old_pm.subrange(physical_head, contents_end) =~= old_data.subrange( + physical_head - contents_offset, + contents_end - contents_offset, + )); + assert(old_pm.subrange(contents_offset as int, physical_tail) =~= old_data.subrange( 
+ contents_offset - contents_offset, + physical_tail - contents_offset, + )); + assert(old_pm.subrange(physical_head, contents_end) + old_pm.subrange( + contents_offset as int, + physical_tail, + ) =~= new_pm.subrange(physical_head, contents_end) + new_pm.subrange( + contents_offset as int, + physical_tail, + )); + } else { + assert(physical_head == physical_tail); + assert(old_state.log.len() == 0); + assert(new_state.log.len() == 0); + } + }, + _ => assert(false), + } +} - pub proof fn lemma_same_log_state(old_pm: Seq, new_pm: Seq) - requires - UntrustedLogImpl::recover(old_pm).is_Some(), - UntrustedLogImpl::recover(new_pm).is_Some(), - live_data_view_eq(old_pm, new_pm), - ({ - let (old_ib, old_headers, old_data) = pm_to_views(old_pm); - let (new_ib, new_headers, new_data) = pm_to_views(new_pm); - &&& old_ib == cdb0_val || old_ib == cdb1_val - &&& old_ib == new_ib - &&& old_ib == cdb0_val ==> { - &&& old_headers.header1 == new_headers.header1 - } - &&& old_ib == cdb1_val ==> { - &&& old_headers.header2 == new_headers.header2 - } - }) - ensures - UntrustedLogImpl::recover(old_pm) =~= - UntrustedLogImpl::recover(new_pm) - { - let old_state = UntrustedLogImpl::recover(old_pm); - let new_state = UntrustedLogImpl::recover(new_pm); - let (old_ib, old_headers, old_data) = pm_to_views(old_pm); - let (new_ib, new_headers, new_data) = pm_to_views(new_pm); +pub proof fn lemma_subrange_equality_implies_index_equality( + s1: Seq, + s2: Seq, + i: int, + j: int, +) + requires + 0 <= i <= j <= s1.len(), + j <= s2.len(), + s1.subrange(i, j) == s2.subrange(i, j), + ensures + forall|k| i <= k < j ==> s1[k] == s2[k], +{ + assert forall|k| i <= k < j implies s1[k] == s2[k] by { + // Trigger axiom_seq_subrange_index + assert(s1[k] == s1.subrange(i, j)[k - i]); + assert(s2[k] == s2.subrange(i, j)[k - i]); + } +} - assert(old_state.is_Some()); - assert(new_state.is_Some()); - match (old_state, new_state) { - (Some(old_state), Some(new_state)) => { - let (old_live_header, new_live_header) = if old_ib == cdb0_val { - (old_headers.header1, new_headers.header1) - } else { - (old_headers.header2, new_headers.header2) - }; - - assert(old_state.head == old_live_header.metadata.head); - assert(new_state.head == new_live_header.metadata.head); - assert(old_live_header.metadata.tail == new_live_header.metadata.tail); - let physical_head = spec_addr_logical_to_physical(old_live_header.metadata.head as int, old_live_header.metadata.log_size as int); - let physical_tail = spec_addr_logical_to_physical(old_live_header.metadata.tail as int, old_live_header.metadata.log_size as int); - let contents_end = old_live_header.metadata.log_size + contents_offset; - - if physical_head < physical_tail { - assert(old_pm.subrange(physical_head, physical_tail) =~= old_data.subrange(physical_head - contents_offset, physical_tail - contents_offset)); - assert(old_pm.subrange(physical_head, physical_tail) =~= new_pm.subrange(physical_head, physical_tail)); - } else if physical_tail < physical_head { - assert(old_pm.subrange(physical_head, contents_end) =~= old_data.subrange(physical_head - contents_offset, contents_end - contents_offset)); - assert(old_pm.subrange(contents_offset as int, physical_tail) =~= old_data.subrange(contents_offset - contents_offset, physical_tail - contents_offset)); - assert(old_pm.subrange(physical_head, contents_end) + old_pm.subrange(contents_offset as int, physical_tail) =~= - new_pm.subrange(physical_head, contents_end) + new_pm.subrange(contents_offset as int, physical_tail)); - } else { - 
assert(physical_head == physical_tail); - assert(old_state.log.len() == 0); - assert(new_state.log.len() == 0); - } - } - _ => assert(false), - } - } +pub proof fn lemma_subrange_equality_implies_subsubrange_equality( + s1: Seq, + s2: Seq, + i: int, + j: int, +) + requires + 0 <= i <= j <= s1.len(), + j <= s2.len(), + s1.subrange(i, j) == s2.subrange(i, j), + ensures + forall|k, m| i <= k <= m <= j ==> s1.subrange(k, m) == s2.subrange(k, m), +{ + lemma_subrange_equality_implies_index_equality(s1, s2, i, j); + assert forall|k, m| i <= k <= m <= j implies s1.subrange(k, m) == s2.subrange(k, m) by { + assert(s1.subrange(k, m) =~= s2.subrange(k, m)); + } +} - pub proof fn lemma_subrange_equality_implies_index_equality(s1: Seq, s2: Seq, i: int, j: int) - requires - 0 <= i <= j <= s1.len(), - j <= s2.len(), - s1.subrange(i, j) == s2.subrange(i, j) - ensures - forall |k| i <= k < j ==> s1[k] == s2[k] +pub proof fn lemma_subrange_equality_implies_subsubrange_equality_forall() + ensures + forall|s1: Seq, s2: Seq, i: int, j: int, k: int, m: int| + { + &&& 0 <= i <= j <= s1.len() + &&& j <= s2.len() + &&& s1.subrange(i, j) == s2.subrange(i, j) + &&& i <= k <= m <= j + } ==> s1.subrange(k, m) == s2.subrange(k, m), +{ + assert forall|s1: Seq, s2: Seq, i: int, j: int, k: int, m: int| { - assert forall |k| i <= k < j implies s1[k] == s2[k] by { - // Trigger axiom_seq_subrange_index - assert (s1[k] == s1.subrange(i, j)[k - i]); - assert (s2[k] == s2.subrange(i, j)[k - i]); - } - } + &&& 0 <= i <= j <= s1.len() + &&& j <= s2.len() + &&& s1.subrange(i, j) == s2.subrange(i, j) + &&& i <= k <= m <= j + } implies s1.subrange(k, m) == s2.subrange(k, m) by { + lemma_subrange_equality_implies_subsubrange_equality(s1, s2, i, j); + } +} - pub proof fn lemma_subrange_equality_implies_subsubrange_equality(s1: Seq, s2: Seq, i: int, j: int) - requires - 0 <= i <= j <= s1.len(), - j <= s2.len(), - s1.subrange(i, j) == s2.subrange(i, j) - ensures - forall |k, m| i <= k <= m <= j ==> s1.subrange(k, m) == s2.subrange(k, m) - { - lemma_subrange_equality_implies_index_equality(s1, s2, i, j); - assert forall |k, m| i <= k <= m <= j implies s1.subrange(k, m) == s2.subrange(k, m) by { - assert (s1.subrange(k, m) =~= s2.subrange(k, m)); - } - } +pub proof fn lemma_headers_unchanged(old_pm: Seq, new_pm: Seq) + requires + old_pm.len() == new_pm.len(), + old_pm.len() >= contents_offset, + old_pm.subrange(header1_pos as int, header1_pos + header_size) =~= new_pm.subrange( + header1_pos as int, + header1_pos + header_size, + ), + old_pm.subrange(header2_pos as int, header2_pos + header_size) =~= new_pm.subrange( + header2_pos as int, + header2_pos + header_size, + ), + ensures + ({ + let (_, old_headers, _) = pm_to_views(old_pm); + let (_, new_headers, _) = pm_to_views(new_pm); + old_headers == new_headers + }), +{ + lemma_subrange_equality_implies_subsubrange_equality_forall::(); +} - pub proof fn lemma_subrange_equality_implies_subsubrange_equality_forall() - ensures - forall |s1: Seq, s2: Seq, i: int, j: int, k: int, m: int| - { - &&& 0 <= i <= j <= s1.len() - &&& j <= s2.len() - &&& s1.subrange(i, j) == s2.subrange(i, j) - &&& i <= k <= m <= j - } - ==> s1.subrange(k, m) == s2.subrange(k, m) - { - assert forall |s1: Seq, s2: Seq, i: int, j: int, k: int, m: int| - { - &&& 0 <= i <= j <= s1.len() - &&& j <= s2.len() - &&& s1.subrange(i, j) == s2.subrange(i, j) - &&& i <= k <= m <= j - } - implies s1.subrange(k, m) == s2.subrange(k, m) by { - lemma_subrange_equality_implies_subsubrange_equality(s1, s2, i, j); - } - } +pub proof fn 
lemma_incorruptible_bool_unchanged(old_pm: Seq, new_pm: Seq) + requires + old_pm.len() == new_pm.len(), + old_pm.len() >= contents_offset, + old_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) + =~= new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8), + ensures + ({ + let (old_ib, _, _) = pm_to_views(old_pm); + let (new_ib, _, _) = pm_to_views(new_pm); + old_ib == new_ib + }), +{ +} - pub proof fn lemma_headers_unchanged(old_pm: Seq, new_pm: Seq) - requires - old_pm.len() == new_pm.len(), - old_pm.len() >= contents_offset, - old_pm.subrange(header1_pos as int, header1_pos + header_size) =~= new_pm.subrange(header1_pos as int, header1_pos + header_size), - old_pm.subrange(header2_pos as int, header2_pos + header_size) =~= new_pm.subrange(header2_pos as int, header2_pos + header_size), - ensures - ({ - let (_, old_headers, _) = pm_to_views(old_pm); - let (_, new_headers, _) = pm_to_views(new_pm); - old_headers == new_headers - }) - { - lemma_subrange_equality_implies_subsubrange_equality_forall::(); - } +pub proof fn lemma_header_crc_correct( + header_bytes: Seq, + crc_bytes: Seq, + metadata_bytes: Seq, +) + requires + header_bytes.len() == header_size, + crc_bytes.len() == 8, + metadata_bytes.len() == header_size - 8, + crc_bytes =~= spec_crc_bytes(metadata_bytes), + header_bytes =~= crc_bytes + metadata_bytes, + ensures + header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= crc_bytes, + header_bytes.subrange(header_head_offset as int, header_size as int) =~= metadata_bytes, + header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= spec_crc_bytes( + header_bytes.subrange(header_head_offset as int, header_size as int), + ), +{ + assert(header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= crc_bytes); + assert(header_bytes.subrange(header_head_offset as int, header_size as int) =~= metadata_bytes); +} - pub proof fn lemma_incorruptible_bool_unchanged(old_pm: Seq, new_pm: Seq) - requires - old_pm.len() == new_pm.len(), - old_pm.len() >= contents_offset, - old_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) =~= new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) - ensures - ({ - let (old_ib, _, _) = pm_to_views(old_pm); - let (new_ib, _, _) = pm_to_views(new_pm); - old_ib == new_ib - }) - {} - - pub proof fn lemma_header_crc_correct(header_bytes: Seq, crc_bytes: Seq, metadata_bytes: Seq) - requires - header_bytes.len() == header_size, - crc_bytes.len() == 8, - metadata_bytes.len() == header_size - 8, - crc_bytes =~= spec_crc_bytes(metadata_bytes), - header_bytes =~= crc_bytes + metadata_bytes - ensures - header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= crc_bytes, - header_bytes.subrange(header_head_offset as int, header_size as int) =~= metadata_bytes, - header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= - spec_crc_bytes(header_bytes.subrange(header_head_offset as int, header_size as int)) - { - assert(header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= crc_bytes); - assert(header_bytes.subrange(header_head_offset as int, header_size as int) =~= metadata_bytes); - } +pub proof fn lemma_header_correct(pm: Seq, header_bytes: Seq, header_pos: int) + requires + pm.len() > contents_offset, + header_bytes.len() == header_size, + header_pos == header1_pos || header_pos == header2_pos, + header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= spec_crc_bytes( + 
header_bytes.subrange(header_head_offset as int, header_size as int), + ), + pm.subrange(header_pos, header_pos + header_size) =~= header_bytes, + ensures + pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) + =~= header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8), + pm.subrange(header_pos + header_head_offset, header_pos + header_size) + =~= header_bytes.subrange(header_head_offset as int, header_size as int), + pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) + =~= spec_crc_bytes( + pm.subrange(header_pos + header_head_offset, header_pos + header_size), + ), +{ + assert(pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) + =~= header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8)); + assert(pm.subrange(header_pos + header_head_offset, header_pos + header_size) + =~= header_bytes.subrange(header_head_offset as int, header_size as int)); +} - pub proof fn lemma_header_correct(pm: Seq, header_bytes: Seq, header_pos: int) - requires - pm.len() > contents_offset, - header_bytes.len() == header_size, - header_pos == header1_pos || header_pos == header2_pos, - header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8) =~= - spec_crc_bytes(header_bytes.subrange(header_head_offset as int, header_size as int)), - pm.subrange(header_pos, header_pos + header_size) =~= header_bytes - ensures - pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) =~= - header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8), - pm.subrange(header_pos + header_head_offset, header_pos + header_size) =~= - header_bytes.subrange(header_head_offset as int, header_size as int), - pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) =~= - spec_crc_bytes(pm.subrange(header_pos + header_head_offset, header_pos + header_size)) - { - assert(pm.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) =~= - header_bytes.subrange(header_crc_offset as int, header_crc_offset + 8)); - assert(pm.subrange(header_pos + header_head_offset, header_pos + header_size) =~= - header_bytes.subrange(header_head_offset as int, header_size as int)); - } +pub proof fn lemma_u64_bytes_eq(val1: u64, val2: u64) + requires + val1 == val2, + ensures + spec_u64_to_le_bytes(val1) =~= spec_u64_to_le_bytes(val2), +{ +} - pub proof fn lemma_u64_bytes_eq(val1: u64, val2: u64) - requires - val1 == val2 - ensures - spec_u64_to_le_bytes(val1) =~= spec_u64_to_le_bytes(val2) - {} - - pub proof fn lemma_subrange_eq(bytes1: Seq, bytes2: Seq) - requires - bytes1 =~= bytes2 - ensures - forall |i: int, j: int| 0 <= i < j < bytes1.len() ==> bytes1.subrange(i, j) =~= bytes2.subrange(i, j) - {} - - /// If our write is persistence_chunk_size-sized and -aligned, then there are only 2 possible - /// resulting crash states, one with the write and one without. 
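The crash model behind lemma_single_write_crash can be pictured concretely: an aligned, chunk-sized write either reaches persistence in full or not at all, so the only crash states are the old contents and the fully updated contents. The toy function below enumerates those two outcomes; it is an informal model for intuition, not the update_contents_to_reflect_* definitions used in the proof.

// Toy model: the two possible crash states of a single chunk-aligned write.
fn crash_states_of_single_chunk_write(
    pm: &[u8],
    write_addr: usize,
    bytes_to_write: &[u8],
    persistence_chunk_size: usize,
) -> Vec<Vec<u8>> {
    assert!(bytes_to_write.len() == persistence_chunk_size);
    assert!(write_addr % persistence_chunk_size == 0);
    let mut with_write = pm.to_vec();
    with_write[write_addr..write_addr + bytes_to_write.len()].copy_from_slice(bytes_to_write);
    // either the chunk made it to persistence before the crash, or it did not
    vec![pm.to_vec(), with_write]
}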
- pub proof fn lemma_single_write_crash(pm: Seq, write_addr: int, bytes_to_write: Seq) - requires - bytes_to_write.len() == persistence_chunk_size, - write_addr % persistence_chunk_size == 0, // currently seems to succeed without nonlinear arith - 0 <= write_addr < pm.len(), - write_addr + bytes_to_write.len() <= pm.len() - ensures - ({ - forall |chunks_flushed: Set| { - let new_crash_contents = #[trigger] update_contents_to_reflect_partially_flushed_write( - pm, write_addr, bytes_to_write, chunks_flushed); - let new_contents = update_contents_to_reflect_write(pm, write_addr, bytes_to_write); - new_crash_contents =~= pm || new_crash_contents =~= new_contents - } - }) - {} - - pub proof fn lemma_pm_state_header(pm: Seq) - requires - UntrustedLogImpl::recover(pm).is_Some(), - ({ - let header = spec_get_live_header(pm); - header.metadata.tail - header.metadata.head < header.metadata.log_size - }) - ensures - ({ - let pm_state = UntrustedLogImpl::recover(pm); - let header = spec_get_live_header(pm); - match pm_state { - Some(pm_state) => { - &&& header.metadata.head == pm_state.head - &&& pm_state.log.len() == header.metadata.tail - header.metadata.head - } - None => false - } - }) - { +pub proof fn lemma_subrange_eq(bytes1: Seq, bytes2: Seq) + requires + bytes1 =~= bytes2, + ensures + forall|i: int, j: int| + 0 <= i < j < bytes1.len() ==> bytes1.subrange(i, j) =~= bytes2.subrange(i, j), +{ +} + +/// If our write is persistence_chunk_size-sized and -aligned, then there are only 2 possible +/// resulting crash states, one with the write and one without. +pub proof fn lemma_single_write_crash(pm: Seq, write_addr: int, bytes_to_write: Seq) + requires + bytes_to_write.len() == persistence_chunk_size, + write_addr % persistence_chunk_size == 0, // currently seems to succeed without nonlinear arith + 0 <= write_addr < pm.len(), + write_addr + bytes_to_write.len() <= pm.len(), + ensures + ({ + forall|chunks_flushed: Set| + { + let new_crash_contents = + #[trigger] update_contents_to_reflect_partially_flushed_write( + pm, + write_addr, + bytes_to_write, + chunks_flushed, + ); + let new_contents = update_contents_to_reflect_write( + pm, + write_addr, + bytes_to_write, + ); + new_crash_contents =~= pm || new_crash_contents =~= new_contents + } + }), +{ +} + +pub proof fn lemma_pm_state_header(pm: Seq) + requires + UntrustedLogImpl::recover(pm).is_Some(), + ({ + let header = spec_get_live_header(pm); + header.metadata.tail - header.metadata.head < header.metadata.log_size + }), + ensures + ({ let pm_state = UntrustedLogImpl::recover(pm); let header = spec_get_live_header(pm); - lemma_mod_range(header.metadata.head as int, header.metadata.log_size as int); - lemma_mod_range(header.metadata.tail as int, header.metadata.log_size as int); - let head = header.metadata.head as int; - let tail = header.metadata.tail as int; - let log_size = header.metadata.log_size as int; - let physical_head = spec_addr_logical_to_physical(head, log_size); - let physical_tail = spec_addr_logical_to_physical(tail, log_size); match pm_state { Some(pm_state) => { - if physical_head < physical_tail { - // log does not wrap - lemma_mod_difference_equal(head, tail, log_size); - } else if physical_tail < physical_head { - // log wraps - lemma_mod_wrapped_len(head, tail, log_size); - } else { - // size is 0 - lemma_mod_equal(head, tail, log_size); - } - } - None => assert(false), + &&& header.metadata.head == pm_state.head + &&& pm_state.log.len() == header.metadata.tail - header.metadata.head + }, + None => false, } - } + }), +{ + 
let pm_state = UntrustedLogImpl::recover(pm); + let header = spec_get_live_header(pm); + lemma_mod_range(header.metadata.head as int, header.metadata.log_size as int); + lemma_mod_range(header.metadata.tail as int, header.metadata.log_size as int); + let head = header.metadata.head as int; + let tail = header.metadata.tail as int; + let log_size = header.metadata.log_size as int; + let physical_head = spec_addr_logical_to_physical(head, log_size); + let physical_tail = spec_addr_logical_to_physical(tail, log_size); + match pm_state { + Some(pm_state) => { + if physical_head < physical_tail { + // log does not wrap + lemma_mod_difference_equal(head, tail, log_size); + } else if physical_tail < physical_head { + // log wraps + lemma_mod_wrapped_len(head, tail, log_size); + } else { + // size is 0 + lemma_mod_equal(head, tail, log_size); + } + }, + None => assert(false), + } +} - pub open spec fn spec_addr_logical_to_physical(addr: int, log_size: int) -> int { - (addr % log_size) + contents_offset - } +pub open spec fn spec_addr_logical_to_physical(addr: int, log_size: int) -> int { + (addr % log_size) + contents_offset +} - pub struct UntrustedLogImpl { - pub incorruptible_bool: u64, - // header fields are stored separately because of limitations - // on deriving Copy/Clone for the header structures - pub header_crc: u64, - pub head: u64, - pub tail: u64, - pub log_size: u64, - } +pub struct UntrustedLogImpl { + pub incorruptible_bool: u64, + // header fields are stored separately because of limitations + // on deriving Copy/Clone for the header structures + pub header_crc: u64, + pub head: u64, + pub tail: u64, + pub log_size: u64, +} - // offset of actual log contents from the beginning of the device - pub const contents_offset: u64 = header2_pos + header_log_size_offset + 8; +// offset of actual log contents from the beginning of the device +pub const contents_offset: u64 = header2_pos + header_log_size_offset + 8; + +impl UntrustedLogImpl { + pub exec fn addr_logical_to_physical(addr: u64, log_size: u64) -> (out: u64) + requires + addr <= u64::MAX, + log_size > 0, + log_size + contents_offset <= u64::MAX, + ensures + out == spec_addr_logical_to_physical(addr as int, log_size as int), + { + (addr % log_size) + contents_offset + } - impl UntrustedLogImpl { + pub open spec fn log_state_is_valid(pm: Seq) -> bool { + let (ib, headers, data) = pm_to_views(pm); + let live_header = if ib == cdb0_val { + headers.header1 + } else { + headers.header2 + }; + let head = live_header.metadata.head as int; + let tail = live_header.metadata.tail as int; + let log_size = live_header.metadata.log_size as int; + &&& ib == cdb0_val || ib == cdb1_val + &&& log_size + contents_offset <= u64::MAX + &&& log_size > 0 + &&& log_size + contents_offset == pm.len() + &&& tail - head < log_size + &&& ib == cdb0_val ==> { + &&& live_header.crc == spec_u64_from_le_bytes( + spec_crc_bytes( + pm.subrange(header1_pos + header_head_offset, header1_pos + header_size), + ), + ) + &&& pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8) + =~= spec_crc_bytes( + pm.subrange(header1_pos + header_head_offset, header1_pos + header_size), + ) + } + &&& ib == cdb1_val ==> { + &&& live_header.crc == spec_u64_from_le_bytes( + spec_crc_bytes( + pm.subrange(header2_pos + header_head_offset, header2_pos + header_size), + ), + ) + &&& pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8) + =~= spec_crc_bytes( + pm.subrange(header2_pos + header_head_offset, header2_pos + header_size), 
+ ) + } + &&& head <= tail + } - pub exec fn addr_logical_to_physical(addr: u64, log_size: u64) -> (out: u64) - requires - addr <= u64::MAX, - log_size > 0, - log_size + contents_offset <= u64::MAX, - ensures - out == spec_addr_logical_to_physical(addr as int, log_size as int) - { - (addr % log_size) + contents_offset - } + pub open spec fn recover(pm: Seq) -> Option { + let (ib, headers, data) = pm_to_views(pm); + if !Self::log_state_is_valid(pm) { + None + } else { + let live_header = if ib == cdb0_val { + headers.header1 + } else { + headers.header2 + }; + let head = live_header.metadata.head as int; + let tail = live_header.metadata.tail as int; + let log_size = live_header.metadata.log_size as int; + let contents_end = log_size + contents_offset; + let physical_head = spec_addr_logical_to_physical(head, log_size); + let physical_tail = spec_addr_logical_to_physical(tail, log_size); + let abstract_log = if physical_head < physical_tail { + pm.subrange(physical_head, physical_tail) + } else if physical_tail < physical_head { + let range1 = pm.subrange(physical_head, contents_end); + let range2 = pm.subrange(contents_offset as int, physical_tail); + range1 + range2 + } else { + Seq::empty() + }; + Some(AbstractInfiniteLogState { head: head, log: abstract_log, capacity: log_size - 1 }) + } + } - pub open spec fn log_state_is_valid(pm: Seq) -> bool { - let (ib, headers, data) = pm_to_views(pm); - let live_header = if ib == cdb0_val { - headers.header1 - } else { - headers.header2 - }; - - let head = live_header.metadata.head as int; - let tail = live_header.metadata.tail as int; - let log_size = live_header.metadata.log_size as int; - - &&& ib == cdb0_val || ib == cdb1_val - &&& log_size + contents_offset <= u64::MAX - &&& log_size > 0 - &&& log_size + contents_offset == pm.len() - &&& tail - head < log_size - &&& ib == cdb0_val ==> { - &&& live_header.crc == spec_u64_from_le_bytes(spec_crc_bytes(pm.subrange(header1_pos + header_head_offset, header1_pos + header_size))) - &&& pm.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8) =~= spec_crc_bytes(pm.subrange(header1_pos + header_head_offset, header1_pos + header_size)) - } - &&& ib == cdb1_val ==> { - &&& live_header.crc == spec_u64_from_le_bytes(spec_crc_bytes(pm.subrange(header2_pos + header_head_offset, header2_pos + header_size))) - &&& pm.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8) =~= spec_crc_bytes(pm.subrange(header2_pos + header_head_offset, header2_pos + header_size)) - } - &&& head <= tail - } + // This is the invariant that the untrusted log implementation + // maintains between its local state and the contents of + // persistent memory. 
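Before the invariant definitions that follow, it may help to see the shape of UntrustedLogImpl::recover above in executable terms. The plain-Rust sketch below mirrors its three cases for reconstructing the log contents; the function name and the use of usize offsets instead of spec-level ints are illustrative assumptions.

// Sketch of the recovery view of the log contents, following recover's case split.
fn recovered_log_bytes(
    pm: &[u8],
    physical_head: usize,
    physical_tail: usize,
    contents_offset: usize,
    contents_end: usize,
) -> Vec<u8> {
    if physical_head < physical_tail {
        // contiguous: the log is exactly [physical_head, physical_tail)
        pm[physical_head..physical_tail].to_vec()
    } else if physical_tail < physical_head {
        // wrapped: the segment at the end of the contents region comes first,
        // followed by the prefix that wrapped around to contents_offset
        let mut log = pm[physical_head..contents_end].to_vec();
        log.extend_from_slice(&pm[contents_offset..physical_tail]);
        log
    } else {
        // head == tail: the log is empty
        Vec::new()
    }
}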
+ pub open spec fn inv_pm_contents(self, contents: Seq) -> bool { + let (ib, headers, data) = pm_to_views(contents); + let header_pos = if ib == cdb0_val { + header1_pos + } else { + header2_pos + }; + let header = spec_get_live_header(contents); + let head = header.metadata.head; + let tail = header.metadata.tail; + let log_size = header.metadata.log_size; + &&& ib == cdb0_val || ib == cdb1_val + &&& spec_crc_bytes( + contents.subrange(header_pos + header_head_offset, header_pos + header_size), + ) == contents.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) + &&& log_size + contents_offset <= u64::MAX + &&& tail - head < log_size + &&& log_size + contents_offset == contents.len() + &&& self.header_crc == header.crc + &&& self.head == head + &&& self.tail == tail + &&& self.log_size == log_size + &&& self.incorruptible_bool == ib + &&& match Self::recover(contents) { + Some(inf_log) => tail == head + inf_log.log.len(), + None => false, + } + } - pub open spec fn recover(pm: Seq) -> Option - { - let (ib, headers, data) = pm_to_views(pm); - if !Self::log_state_is_valid(pm) { - None - } else { - let live_header = if ib == cdb0_val { - headers.header1 - } else { - headers.header2 - }; - - let head = live_header.metadata.head as int; - let tail = live_header.metadata.tail as int; - let log_size = live_header.metadata.log_size as int; - let contents_end = log_size + contents_offset; - let physical_head = spec_addr_logical_to_physical(head, log_size); - let physical_tail = spec_addr_logical_to_physical(tail, log_size); - - let abstract_log = if physical_head < physical_tail { - pm.subrange(physical_head, physical_tail) - } else if physical_tail < physical_head { - let range1 = pm.subrange(physical_head, contents_end); - let range2 = pm.subrange(contents_offset as int, physical_tail); - range1 + range2 - } else { - Seq::empty() - }; + // This is the invariant that the untrusted log implementation + // maintains between its local state and the write-restricted + // persistent memory. + pub open spec fn inv( + self, + wrpm: &WriteRestrictedPersistentMemory, + ) -> bool where Perm: CheckPermission>, PM: PersistentMemory { + &&& wrpm.inv() + &&& self.inv_pm_contents(wrpm@) + } - Some(AbstractInfiniteLogState { head: head, log: abstract_log, capacity: log_size - 1 }) + pub exec fn read_incorruptible_boolean(pm: &PM) -> (result: Result< + u64, + InfiniteLogErr, + >) + requires + Self::recover(pm@).is_Some(), + pm.inv(), + pm@.len() > contents_offset, + ensures + match result { + Ok(ib) => { + let (spec_ib, _, _) = pm_to_views(pm@); + ib == spec_ib + }, + Err(InfiniteLogErr::CRCMismatch) => !pm.constants().impervious_to_corruption, + _ => false, + }, + { + let bytes = pm.read(incorruptible_bool_pos, 8); + let ib = u64_from_le_bytes(bytes.as_slice()); + let ghost addrs = Seq::::new(8, |i: int| i + incorruptible_bool_pos); + if ib == cdb0_val || ib == cdb1_val { + proof { + let (spec_ib, _, _) = pm_to_views(pm@); + lemma_auto_spec_u64_to_from_le_bytes(); + if !pm.constants().impervious_to_corruption { + axiom_corruption_detecting_boolean(ib, spec_ib, addrs); } } + Ok(ib) + } else { + Err(InfiniteLogErr::CRCMismatch) + } + } - // This is the invariant that the untrusted log implementation - // maintains between its local state and the contents of - // persistent memory. 
- pub open spec fn inv_pm_contents(self, contents: Seq) -> bool - { - let (ib, headers, data) = pm_to_views(contents); - let header_pos = if ib == cdb0_val { header1_pos } else { header2_pos }; - let header = spec_get_live_header(contents); - let head = header.metadata.head; - let tail = header.metadata.tail; - let log_size = header.metadata.log_size; - &&& ib == cdb0_val || ib == cdb1_val - &&& spec_crc_bytes(contents.subrange(header_pos + header_head_offset, header_pos + header_size)) == - contents.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8) - &&& log_size + contents_offset <= u64::MAX - &&& tail - head < log_size - &&& log_size + contents_offset == contents.len() - &&& self.header_crc == header.crc - &&& self.head == head - &&& self.tail == tail - &&& self.log_size == log_size - &&& self.incorruptible_bool == ib - &&& match Self::recover(contents) { - Some(inf_log) => tail == head + inf_log.log.len(), - None => false, - } + exec fn update_header( + &mut self, + wrpm: &mut WriteRestrictedPersistentMemory, + Tracked(perm): Tracked<&Perm>, + new_header_bytes: &Vec, + ) where Perm: CheckPermission>, PM: PersistentMemory + requires + permissions_depend_only_on_recovery_view(perm), + contents_offset < old(wrpm)@.len(), + old(self).inv(&*old(wrpm)), + Self::recover(old(wrpm)@).is_Some(), + new_header_bytes@.subrange(header_crc_offset as int, header_crc_offset + 8) + =~= spec_crc_bytes( + new_header_bytes@.subrange(header_head_offset as int, header_size as int), + ), + new_header_bytes.len() == header_size, + match Self::recover(old(wrpm)@) { + Some(log_state) => perm.check_permission(old(wrpm)@), + None => false, + }, + ensures + self.inv(wrpm), + Self::recover(wrpm@).is_Some(), + wrpm.constants() == old(wrpm).constants(), + match (Self::recover(old(wrpm)@), Self::recover(wrpm@)) { + (Some(old_log_state), Some(new_log_state)) => old_log_state =~= new_log_state, + _ => false, + }, + ({ + let (old_pm_ib, old_metadata, old_data) = pm_to_views(old(wrpm)@); + let (new_pm_ib, new_metadata, new_data) = pm_to_views(wrpm@); + let new_header = spec_bytes_to_header(new_header_bytes@); + &&& old_pm_ib == new_pm_ib + &&& old_pm_ib == cdb0_val ==> { + &&& new_metadata.header1 == old_metadata.header1 + &&& new_metadata.header2 == new_header + &&& wrpm@.subrange( + header2_pos + header_crc_offset, + header2_pos + header_crc_offset + 8, + ) =~= spec_crc_bytes( + wrpm@.subrange(header2_pos + header_head_offset, header2_pos + header_size), + ) + &&& wrpm@.subrange(header2_pos as int, header2_pos + header_size) + =~= new_header_bytes@ + } + &&& old_pm_ib == cdb1_val ==> { + &&& new_metadata.header1 == new_header + &&& new_metadata.header2 == old_metadata.header2 + &&& wrpm@.subrange( + header1_pos + header_crc_offset, + header1_pos + header_crc_offset + 8, + ) =~= spec_crc_bytes( + wrpm@.subrange(header1_pos + header_head_offset, header1_pos + header_size), + ) + &&& wrpm@.subrange(header1_pos as int, header1_pos + header_size) + =~= new_header_bytes@ + } + &&& old_data =~= new_data + }), + { + let ghost original_wrpm = wrpm@; + // write to the header that is NOT pointed to by the IB + let header_pos = if self.incorruptible_bool == cdb0_val { + header2_pos + } else { + header1_pos + }; + // TODO: we could probably roll all of this into a single lemma that contains all of the proofs + proof { + let new_pm = update_contents_to_reflect_write( + wrpm@, + header_pos as int, + new_header_bytes@, + ); + lemma_inactive_header_update_view(wrpm@, new_header_bytes@, header_pos as int); + 
lemma_same_log_state(wrpm@, new_pm); + assert(Self::recover(wrpm@) =~= Self::recover(new_pm)); + // prove crash consistency + assert forall|chunks_flushed| + { + let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( + wrpm@, + header_pos as int, + new_header_bytes@, + chunks_flushed, + ); + perm.check_permission(new_pm) + } by { + let new_pm = update_contents_to_reflect_partially_flushed_write( + wrpm@, + header_pos as int, + new_header_bytes@, + chunks_flushed, + ); + lemma_inactive_header_update_view_crash( + wrpm@, + new_header_bytes@, + header_pos as int, + chunks_flushed, + ); + lemma_same_log_state(wrpm@, new_pm); + assert(permissions_depend_only_on_recovery_view(perm)); + lemma_same_permissions(wrpm@, new_pm, perm); } + } + wrpm.write(header_pos, new_header_bytes.as_slice(), Tracked(perm)); + proof { + // TODO: clean up once ib update is done. put this all in a lemma + assert(Self::recover(wrpm@).is_Some()); + let (_, headers, _) = pm_to_views(wrpm@); + assert(wrpm@.subrange(header_pos as int, header_pos + header_size) + =~= new_header_bytes@); + lemma_header_correct(wrpm@, new_header_bytes@, header_pos as int); + // live header is unchanged + let live_header_pos = if header_pos == header1_pos { + header2_pos + } else { + assert(header_pos == header2_pos); + header1_pos + }; + // TODO: refactor into a lemma (ideally lemma_header_correct) + assert(old(wrpm)@.subrange(live_header_pos as int, live_header_pos + header_size) + =~= wrpm@.subrange(live_header_pos as int, live_header_pos + header_size)); + assert(old(wrpm)@.subrange( + live_header_pos + header_crc_offset, + live_header_pos + header_crc_offset + 8, + ) =~= spec_crc_bytes( + old(wrpm)@.subrange( + live_header_pos + header_head_offset, + live_header_pos + header_size, + ), + )); + assert(old(wrpm)@.subrange( + live_header_pos + header_crc_offset, + live_header_pos + header_crc_offset + 8, + ) =~= wrpm@.subrange( + live_header_pos + header_crc_offset, + live_header_pos + header_crc_offset + 8, + )); + assert(old(wrpm)@.subrange( + live_header_pos + header_head_offset, + live_header_pos + header_size, + ) =~= wrpm@.subrange( + live_header_pos + header_head_offset, + live_header_pos + header_size, + )); + assert(wrpm@.subrange( + live_header_pos + header_crc_offset, + live_header_pos + header_crc_offset + 8, + ) =~= spec_crc_bytes( + wrpm@.subrange(live_header_pos + header_head_offset, live_header_pos + header_size), + )); + } + } - // This is the invariant that the untrusted log implementation - // maintains between its local state and the write-restricted - // persistent memory. - pub open spec fn inv(self, wrpm: &WriteRestrictedPersistentMemory) -> bool - where - Perm: CheckPermission>, - PM: PersistentMemory - { - &&& wrpm.inv() - &&& self.inv_pm_contents(wrpm@) + // Since untrusted_setup doesn't take a WriteRestrictedPersistentMemory, it is not guaranteed + // to perform crash-safe updates. 
+ pub exec fn untrusted_setup(pm: &mut PM, device_size: u64) -> (result: Result< + u64, + InfiniteLogErr, + >) where PM: PersistentMemory + requires + old(pm).inv(), + old(pm)@.len() == device_size, + ensures + pm.inv(), + pm.constants() == old(pm).constants(), + pm@.len() == device_size, + match result { + Ok(capacity) => Self::recover(pm@) == Some( + AbstractInfiniteLogState::initialize(capacity as int), + ), + Err(InfiniteLogErr::InsufficientSpaceForSetup { required_space }) => device_size + < required_space, + _ => false, + }, + { + if device_size <= contents_offset { + return Err( + InfiniteLogErr::InsufficientSpaceForSetup { required_space: contents_offset + 1 }, + ); + } + let log_size = device_size - contents_offset; + let log_header_metadata = PersistentHeaderMetadata { head: 0, tail: 0, log_size }; + let metadata_bytes = metadata_to_bytes(&log_header_metadata); + let crc_bytes = bytes_crc(&metadata_bytes); + let log_header = PersistentHeader { + crc: u64_from_le_bytes(crc_bytes.as_slice()), + metadata: log_header_metadata, + }; + let header_bytes = header_to_bytes(&log_header); + let initial_ib_bytes = u64_to_le_bytes(cdb0_val); + pm.write(header1_pos, header_bytes.as_slice()); + pm.write(incorruptible_bool_pos, initial_ib_bytes.as_slice()); + proof { + lemma_auto_spec_u64_to_from_le_bytes(); + assert(pm@.subrange(header1_pos as int, header1_pos + header_size) =~= header_bytes@); + assert(pm@.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) + =~= initial_ib_bytes@); + lemma_header_split_into_bytes(crc_bytes@, metadata_bytes@, header_bytes@); + assert(pm@.subrange(header1_pos + header_head_offset, header1_pos + header_size) + =~= metadata_bytes@); + lemma_header_match(pm@, header1_pos as int, log_header); + let log_state = Self::recover(pm@); + match log_state { + Some(log_state) => { + assert(log_state.head == 0); + assert(log_state.log == Seq::::empty()); + assert(log_state.capacity == log_size - 1); + }, + None => assert(false), } + } + Ok(log_size - 1) + } - pub exec fn read_incorruptible_boolean(pm: &PM) -> (result: Result) - requires - Self::recover(pm@).is_Some(), - pm.inv(), - pm@.len() > contents_offset - ensures - match result { - Ok(ib) => { - let (spec_ib, _, _) = pm_to_views(pm@); - ib == spec_ib - } - Err(InfiniteLogErr::CRCMismatch) => !pm.constants().impervious_to_corruption, - _ => false, - } - { - let bytes = pm.read(incorruptible_bool_pos, 8); - let ib = u64_from_le_bytes(bytes.as_slice()); - let ghost addrs = Seq::::new(8, |i: int| i + incorruptible_bool_pos); - if ib == cdb0_val || ib == cdb1_val { - proof { - let (spec_ib, _, _) = pm_to_views(pm@); - lemma_auto_spec_u64_to_from_le_bytes(); - if !pm.constants().impervious_to_corruption { - axiom_corruption_detecting_boolean(ib, spec_ib, addrs); - } - } - Ok(ib) - } else { - Err(InfiniteLogErr::CRCMismatch) + pub exec fn untrusted_start( + wrpm: &mut WriteRestrictedPersistentMemory, + device_size: u64, + Tracked(perm): Tracked<&Perm>, + ) -> (result: Result) where + Perm: CheckPermission>, + PM: PersistentMemory, + + requires + Self::recover(old(wrpm)@).is_Some(), + old(wrpm).inv(), + old(wrpm)@.len() == device_size, + header_crc_offset < header_crc_offset + crc_size <= header_head_offset + < header_tail_offset < header_log_size_offset, + // The restriction on writing persistent memory during initialization is + // that it can't change the interpretation of that memory's contents. 
+ ({ + forall|pm_state| #[trigger] + perm.check_permission(pm_state) <==> Self::recover(pm_state) == Self::recover( + old(wrpm)@, + ) + }), + ensures + Self::recover(old(wrpm)@) == Self::recover(wrpm@), + wrpm.constants() == old(wrpm).constants(), + match result { + Ok(log_impl) => log_impl.inv(wrpm), + Err(InfiniteLogErr::CRCMismatch) => !wrpm.constants().impervious_to_corruption, + _ => false, + }, + { + let pm = wrpm.get_pm_ref(); + assert(device_size > contents_offset); + let ib = match Self::read_incorruptible_boolean(pm) { + Ok(ib) => ib, + Err(e) => return Err(e), + }; + let header_pos = if ib == cdb0_val { + header1_pos + } else { + assert(ib == cdb1_val); + header2_pos + }; + let crc_bytes = pm.read(header_pos + header_crc_offset, 8); + let ghost crc_addrs = Seq::::new(8, |i: int| i + header_pos + header_crc_offset); + let header_bytes = pm.read( + header_pos + header_head_offset, + header_size - header_head_offset, + ); + let ghost header_addrs = Seq::::new( + (header_size - header_head_offset) as nat, + |i: int| i + header_pos + header_head_offset, + ); + let header = if u64_from_le_bytes(bytes_crc(&header_bytes).as_slice()) == u64_from_le_bytes( + crc_bytes.as_slice(), + ) { + proof { + lemma_auto_spec_u64_to_from_le_bytes(); + lemma_u64_bytes_eq( + spec_u64_from_le_bytes(spec_crc_bytes(header_bytes@)), + spec_u64_from_le_bytes(crc_bytes@), + ); + if !wrpm.constants().impervious_to_corruption { + axiom_bytes_uncorrupted( + header_bytes@, + pm@.subrange(header_pos + header_head_offset, header_pos + header_size), + header_addrs, + crc_bytes@, + pm@.subrange( + header_pos + header_crc_offset, + header_pos + header_crc_offset + 8, + ), + crc_addrs, + ); } } + crc_and_metadata_bytes_to_header(crc_bytes.as_slice(), header_bytes.as_slice()) + } else { + return Err(InfiniteLogErr::CRCMismatch); + }; + let head = header.metadata.head; + let tail = header.metadata.tail; + let log_size = header.metadata.log_size; + // check log validity now that we have its uncorrupted metadata + assert(device_size == log_size + contents_offset); + assert(head <= tail); + assert(tail - head < log_size); + let untrusted_log = UntrustedLogImpl { + incorruptible_bool: ib, + header_crc: u64_from_le_bytes(crc_bytes.as_slice()), + head, + tail, + log_size, + }; + proof { + lemma_pm_state_header(pm@); + } + Ok(untrusted_log) + } - exec fn update_header - ( - &mut self, - wrpm: &mut WriteRestrictedPersistentMemory, - Tracked(perm): Tracked<&Perm>, - new_header_bytes: &Vec - ) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - permissions_depend_only_on_recovery_view(perm), - contents_offset < old(wrpm)@.len(), - old(self).inv(&*old(wrpm)), - Self::recover(old(wrpm)@).is_Some(), - new_header_bytes@.subrange(header_crc_offset as int, header_crc_offset + 8) =~= - spec_crc_bytes(new_header_bytes@.subrange(header_head_offset as int, header_size as int)), - new_header_bytes.len() == header_size, - match Self::recover(old(wrpm)@) { - Some(log_state) => perm.check_permission(old(wrpm)@), - None => false + pub exec fn untrusted_append( + &mut self, + wrpm: &mut WriteRestrictedPersistentMemory, + bytes_to_append: &Vec, + Tracked(perm): Tracked<&Perm>, + ) -> (result: Result) where + Perm: CheckPermission>, + PM: PersistentMemory, + + requires + old(self).inv(&*old(wrpm)), + Self::recover(old(wrpm)@).is_Some(), + ({ + let old_log_state = Self::recover(old(wrpm)@); + forall|pm_state| #[trigger] + perm.check_permission(pm_state) <==> { + let log_state = Self::recover(pm_state); + log_state == old_log_state 
|| log_state == Some( + old_log_state.unwrap().append(bytes_to_append@), + ) } - ensures - self.inv(wrpm), - Self::recover(wrpm@).is_Some(), - wrpm.constants() == old(wrpm).constants(), - match (Self::recover(old(wrpm)@), Self::recover(wrpm@)) { - (Some(old_log_state), Some(new_log_state)) => old_log_state =~= new_log_state, - _ => false + }), + ensures + self.inv(wrpm), + wrpm.constants() == old(wrpm).constants(), + ({ + let old_log_state = Self::recover(old(wrpm)@); + let new_log_state = Self::recover(wrpm@); + match (result, old_log_state, new_log_state) { + (Ok(offset), Some(old_log_state), Some(new_log_state)) => { + &&& offset as nat == old_log_state.log.len() + old_log_state.head + &&& new_log_state == old_log_state.append(bytes_to_append@) }, - ({ - let (old_pm_ib, old_metadata, old_data) = pm_to_views(old(wrpm)@); - let (new_pm_ib, new_metadata, new_data) = pm_to_views(wrpm@); - let new_header = spec_bytes_to_header(new_header_bytes@); - &&& old_pm_ib == new_pm_ib - &&& old_pm_ib == cdb0_val ==> { - &&& new_metadata.header1 == old_metadata.header1 - &&& new_metadata.header2 == new_header - &&& wrpm@.subrange(header2_pos + header_crc_offset, header2_pos + header_crc_offset + 8) =~= - spec_crc_bytes(wrpm@.subrange(header2_pos + header_head_offset, header2_pos + header_size)) - &&& wrpm@.subrange(header2_pos as int, header2_pos + header_size) =~= new_header_bytes@ - } - &&& old_pm_ib == cdb1_val ==> { - &&& new_metadata.header1 == new_header - &&& new_metadata.header2 == old_metadata.header2 - &&& wrpm@.subrange(header1_pos + header_crc_offset, header1_pos + header_crc_offset + 8) =~= - spec_crc_bytes(wrpm@.subrange(header1_pos + header_head_offset, header1_pos + header_size)) - &&& wrpm@.subrange(header1_pos as int, header1_pos + header_size) =~= new_header_bytes@ + (Err(InfiniteLogErr::InsufficientSpaceForAppend { available_space }), _, _) => { + &&& new_log_state == old_log_state + &&& available_space < bytes_to_append@.len() + &&& { + let log = old_log_state.unwrap(); + ||| available_space == log.capacity - log.log.len() + ||| available_space == u64::MAX - log.head - log.log.len() } - &&& old_data =~= new_data - }), - - { - let ghost original_wrpm = wrpm@; - - // write to the header that is NOT pointed to by the IB - let header_pos = if self.incorruptible_bool == cdb0_val { - header2_pos - } else { - header1_pos - }; - - // TODO: we could probably roll all of this into a single lemma that contains all of the proofs - proof { - let new_pm = update_contents_to_reflect_write(wrpm@, header_pos as int, new_header_bytes@); - lemma_inactive_header_update_view(wrpm@, new_header_bytes@, header_pos as int); - lemma_same_log_state(wrpm@, new_pm); - assert(Self::recover(wrpm@) =~= Self::recover(new_pm)); - - // prove crash consistency - assert forall |chunks_flushed| { - let new_pm = #[trigger] update_contents_to_reflect_partially_flushed_write( - wrpm@, header_pos as int, new_header_bytes@, chunks_flushed); - perm.check_permission(new_pm) - } by { - let new_pm = update_contents_to_reflect_partially_flushed_write( - wrpm@, header_pos as int, new_header_bytes@, chunks_flushed); - lemma_inactive_header_update_view_crash(wrpm@, new_header_bytes@, header_pos as int, chunks_flushed); - lemma_same_log_state(wrpm@, new_pm); - assert(permissions_depend_only_on_recovery_view(perm)); - lemma_same_permissions(wrpm@, new_pm, perm); - } - } - wrpm.write(header_pos, new_header_bytes.as_slice(), Tracked(perm)); - proof { - // TODO: clean up once ib update is done. 
put this all in a lemma - assert(Self::recover(wrpm@).is_Some()); - let (_, headers, _) = pm_to_views(wrpm@); - assert(wrpm@.subrange(header_pos as int, header_pos + header_size) =~= new_header_bytes@); - lemma_header_correct(wrpm@, new_header_bytes@, header_pos as int); - - // live header is unchanged - let live_header_pos = if header_pos == header1_pos { - header2_pos - } else { - assert(header_pos == header2_pos); - header1_pos - }; - - // TODO: refactor into a lemma (ideally lemma_header_correct) - assert(old(wrpm)@.subrange(live_header_pos as int, live_header_pos + header_size) =~= - wrpm@.subrange(live_header_pos as int, live_header_pos + header_size)); - assert(old(wrpm)@.subrange(live_header_pos + header_crc_offset, live_header_pos + header_crc_offset + 8) =~= - spec_crc_bytes(old(wrpm)@.subrange(live_header_pos + header_head_offset, live_header_pos + header_size))); - assert(old(wrpm)@.subrange(live_header_pos + header_crc_offset, live_header_pos + header_crc_offset + 8) =~= - wrpm@.subrange(live_header_pos + header_crc_offset, live_header_pos + header_crc_offset + 8)); - assert(old(wrpm)@.subrange(live_header_pos + header_head_offset, live_header_pos + header_size) =~= - wrpm@.subrange(live_header_pos + header_head_offset, live_header_pos + header_size)); - - assert(wrpm@.subrange(live_header_pos + header_crc_offset, live_header_pos + header_crc_offset + 8) =~= - spec_crc_bytes(wrpm@.subrange(live_header_pos + header_head_offset, live_header_pos + header_size))); - } - } - - // Since untrusted_setup doesn't take a WriteRestrictedPersistentMemory, it is not guaranteed - // to perform crash-safe updates. - pub exec fn untrusted_setup(pm: &mut PM, device_size: u64) -> (result: Result) - where - PM: PersistentMemory - requires - old(pm).inv(), - old(pm)@.len() == device_size - ensures - pm.inv(), - pm.constants() == old(pm).constants(), - pm@.len() == device_size, - match result { - Ok(capacity) => Self::recover(pm@) == - Some(AbstractInfiniteLogState::initialize(capacity as int)), - Err(InfiniteLogErr::InsufficientSpaceForSetup{ required_space }) => device_size < required_space, - _ => false - } - { - if device_size <= contents_offset { - return Err(InfiniteLogErr::InsufficientSpaceForSetup { required_space: contents_offset + 1 }); - } - - let log_size = device_size - contents_offset; - - let log_header_metadata = PersistentHeaderMetadata { - head: 0, - tail: 0, - log_size - }; - let metadata_bytes = metadata_to_bytes(&log_header_metadata); - let crc_bytes = bytes_crc(&metadata_bytes); - let log_header = PersistentHeader { - crc: u64_from_le_bytes(crc_bytes.as_slice()), - metadata: log_header_metadata, - }; - let header_bytes = header_to_bytes(&log_header); - - let initial_ib_bytes = u64_to_le_bytes(cdb0_val); - pm.write(header1_pos, header_bytes.as_slice()); - pm.write(incorruptible_bool_pos, initial_ib_bytes.as_slice()); - - proof { - lemma_auto_spec_u64_to_from_le_bytes(); - assert(pm@.subrange(header1_pos as int, header1_pos + header_size) =~= header_bytes@); - assert(pm@.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) =~= initial_ib_bytes@); - lemma_header_split_into_bytes(crc_bytes@, metadata_bytes@, header_bytes@); - assert(pm@.subrange(header1_pos + header_head_offset, header1_pos + header_size) =~= metadata_bytes@); - lemma_header_match(pm@, header1_pos as int, log_header); - let log_state = Self::recover(pm@); - match log_state { - Some(log_state) => { - assert(log_state.head == 0); - assert(log_state.log == Seq::::empty()); - assert(log_state.capacity 
== log_size - 1); - } - None => assert(false), - } + }, + (_, _, _) => false, } - - Ok(log_size - 1) - } - - pub exec fn untrusted_start(wrpm: &mut WriteRestrictedPersistentMemory, - device_size: u64, - Tracked(perm): Tracked<&Perm>) - -> (result: Result) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - Self::recover(old(wrpm)@).is_Some(), - old(wrpm).inv(), - old(wrpm)@.len() == device_size, - header_crc_offset < header_crc_offset + crc_size <= header_head_offset < header_tail_offset < header_log_size_offset, - // The restriction on writing persistent memory during initialization is - // that it can't change the interpretation of that memory's contents. - ({ - forall |pm_state| #[trigger] perm.check_permission(pm_state) <==> - Self::recover(pm_state) == - Self::recover(old(wrpm)@) - }), - ensures - Self::recover(old(wrpm)@) == Self::recover(wrpm@), - wrpm.constants() == old(wrpm).constants(), - match result { - Ok(log_impl) => log_impl.inv(wrpm), - Err(InfiniteLogErr::CRCMismatch) => !wrpm.constants().impervious_to_corruption, - _ => false - } - { - let pm = wrpm.get_pm_ref(); - assert (device_size > contents_offset); - - let ib = match Self::read_incorruptible_boolean(pm) { - Ok(ib) => ib, - Err(e) => return Err(e) - }; - - let header_pos = if ib == cdb0_val { - header1_pos - } else { - assert(ib == cdb1_val); - header2_pos - }; - let crc_bytes = pm.read(header_pos + header_crc_offset, 8); - let ghost crc_addrs = Seq::::new(8, |i: int| i + header_pos + header_crc_offset); - let header_bytes = pm.read(header_pos + header_head_offset, header_size - header_head_offset); - let ghost header_addrs = Seq::::new((header_size - header_head_offset) as nat, |i: int| i + header_pos + header_head_offset); - - let header = if u64_from_le_bytes(bytes_crc(&header_bytes).as_slice()) == u64_from_le_bytes(crc_bytes.as_slice()) { - proof { - lemma_auto_spec_u64_to_from_le_bytes(); - lemma_u64_bytes_eq(spec_u64_from_le_bytes(spec_crc_bytes(header_bytes@)), spec_u64_from_le_bytes(crc_bytes@)); - if !wrpm.constants().impervious_to_corruption { - axiom_bytes_uncorrupted( - header_bytes@, - pm@.subrange(header_pos + header_head_offset, header_pos + header_size), - header_addrs, - crc_bytes@, - pm@.subrange(header_pos + header_crc_offset, header_pos + header_crc_offset + 8), - crc_addrs, - ); - } - } - crc_and_metadata_bytes_to_header(crc_bytes.as_slice(), header_bytes.as_slice()) + }), + { + assert(permissions_depend_only_on_recovery_view(perm)); + let pm = wrpm.get_pm_ref(); + let ghost original_pm = wrpm@; + let physical_head = Self::addr_logical_to_physical(self.head, self.log_size); + let physical_tail = Self::addr_logical_to_physical(self.tail, self.log_size); + let contents_end = self.log_size + contents_offset; + let append_size: u64 = bytes_to_append.len() as u64; + let old_logical_tail = self.tail; + if self.tail > u64::MAX - append_size { + Err( + InfiniteLogErr::InsufficientSpaceForAppend { + available_space: u64::MAX - self.tail, + }, + ) + } else if append_size >= self.log_size - (self.tail - self.head) { + Err( + InfiniteLogErr::InsufficientSpaceForAppend { + available_space: self.log_size - 1 - (self.tail - self.head), + }, + ) + } else { + let mut header_metadata = PersistentHeaderMetadata { + head: self.head, + tail: self.tail, + log_size: self.log_size, + }; + assert(header_metadata == spec_get_live_header(wrpm@).metadata); + if physical_head <= physical_tail { + if physical_tail >= contents_end - append_size { + // wrap case + self.append_wrap(wrpm, bytes_to_append, 
&header_metadata, Tracked(perm)); } else { - return Err(InfiniteLogErr::CRCMismatch); - }; - - let head = header.metadata.head; - let tail = header.metadata.tail; - let log_size = header.metadata.log_size; - // check log validity now that we have its uncorrupted metadata - assert(device_size == log_size + contents_offset); - assert(head <= tail); - assert(tail - head < log_size); - - let untrusted_log = UntrustedLogImpl { - incorruptible_bool: ib, - header_crc: u64_from_le_bytes(crc_bytes.as_slice()), - head, - tail, - log_size - }; - - proof { lemma_pm_state_header(pm@); } - Ok(untrusted_log) - } - - pub exec fn untrusted_append( - &mut self, - wrpm: &mut WriteRestrictedPersistentMemory, - bytes_to_append: &Vec, - Tracked(perm): Tracked<&Perm> - ) -> (result: Result) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - old(self).inv(&*old(wrpm)), - Self::recover(old(wrpm)@).is_Some(), - ({ - let old_log_state = Self::recover(old(wrpm)@); - forall |pm_state| #[trigger] perm.check_permission(pm_state) <==> { - let log_state = Self::recover(pm_state); - log_state == old_log_state || log_state == Some(old_log_state.unwrap().append(bytes_to_append@)) - } - }), - ensures - self.inv(wrpm), - wrpm.constants() == old(wrpm).constants(), - ({ - let old_log_state = Self::recover(old(wrpm)@); - let new_log_state = Self::recover(wrpm@); - match (result, old_log_state, new_log_state) { - (Ok(offset), Some(old_log_state), Some(new_log_state)) => { - &&& offset as nat == old_log_state.log.len() + old_log_state.head - &&& new_log_state == old_log_state.append(bytes_to_append@) - }, - (Err(InfiniteLogErr::InsufficientSpaceForAppend{ available_space }), _, _) => { - &&& new_log_state == old_log_state - &&& available_space < bytes_to_append@.len() - &&& { - let log = old_log_state.unwrap(); - ||| available_space == log.capacity - log.log.len() - ||| available_space == u64::MAX - log.head - log.log.len() - } - }, - (_, _, _) => false - } - }), - { - assert(permissions_depend_only_on_recovery_view(perm)); - - let pm = wrpm.get_pm_ref(); - let ghost original_pm = wrpm@; - - let physical_head = Self::addr_logical_to_physical(self.head, self.log_size); - let physical_tail = Self::addr_logical_to_physical(self.tail, self.log_size); - let contents_end = self.log_size + contents_offset; - let append_size: u64 = bytes_to_append.len() as u64; - let old_logical_tail = self.tail; - - if self.tail > u64::MAX - append_size { - Err(InfiniteLogErr::InsufficientSpaceForAppend{ available_space: u64::MAX - self.tail }) + // no wrap + self.append_no_wrap(wrpm, bytes_to_append, &header_metadata, Tracked(perm)); } - else if append_size >= self.log_size - (self.tail - self.head) { - Err(InfiniteLogErr::InsufficientSpaceForAppend{ available_space: self.log_size - 1 - (self.tail - self.head) }) - } else { - let mut header_metadata = - PersistentHeaderMetadata { head: self.head, tail: self.tail, log_size: self.log_size }; - assert(header_metadata == spec_get_live_header(wrpm@).metadata); - - if physical_head <= physical_tail { - if physical_tail >= contents_end - append_size { - // wrap case - self.append_wrap(wrpm, bytes_to_append, &header_metadata, Tracked(perm)); - } else { - // no wrap - self.append_no_wrap(wrpm, bytes_to_append, &header_metadata, Tracked(perm)); - } - } else { // physical_tail < physical_head - if physical_tail + append_size >= physical_head { - return Err(InfiniteLogErr::InsufficientSpaceForAppend { available_space: physical_head - physical_tail }); - } - // no wrap - self.append_no_wrap(wrpm, 
bytes_to_append, &header_metadata, Tracked(perm)); - } - - let new_tail = self.tail + append_size; - header_metadata.tail = new_tail; - - let mut metadata_bytes = metadata_to_bytes(&header_metadata); - let new_crc_bytes = bytes_crc(&metadata_bytes); - let new_crc_val = u64_from_le_bytes(new_crc_bytes.as_slice()); - let ghost old_metadata_bytes = metadata_bytes@; - let mut new_header_bytes = new_crc_bytes; - new_header_bytes.append(&mut metadata_bytes); - - proof { lemma_header_crc_correct(new_header_bytes@, new_crc_bytes@, old_metadata_bytes); } - - self.update_header(wrpm, Tracked(perm), &new_header_bytes); - - // update incorruptible boolean - let old_ib = self.incorruptible_bool; - let new_ib = if old_ib == cdb0_val { - cdb1_val - } else { - assert(old_ib == cdb1_val); - cdb0_val - }; - let new_ib_bytes = u64_to_le_bytes(new_ib); - - proof { - lemma_append_ib_update(wrpm@, new_ib, bytes_to_append@, new_header_bytes@, perm); - } - - wrpm.write(incorruptible_bool_pos, new_ib_bytes.as_slice(), Tracked(perm)); - self.incorruptible_bool = new_ib; - self.tail = new_tail; - self.header_crc = new_crc_val; - - Ok(old_logical_tail) + } else { // physical_tail < physical_head + if physical_tail + append_size >= physical_head { + return Err( + InfiniteLogErr::InsufficientSpaceForAppend { + available_space: physical_head - physical_tail, + }, + ); } - } + // no wrap - exec fn append_no_wrap( - &mut self, - wrpm: &mut WriteRestrictedPersistentMemory, - bytes_to_append: &Vec, - old_header: &PersistentHeaderMetadata, - Tracked(perm): Tracked<&Perm> - ) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - permissions_depend_only_on_recovery_view(perm), - perm.check_permission(old(wrpm)@), - old(self).inv(&*old(wrpm)), - Self::recover(old(wrpm)@).is_Some(), - old_header == spec_get_live_header(old(wrpm)@).metadata, - // TODO: clean up - ({ - let physical_tail = spec_addr_logical_to_physical(old_header.tail as int, old_header.log_size as int); - physical_tail + bytes_to_append@.len() < old_header.log_size + contents_offset - }), - ({ - let physical_head = spec_addr_logical_to_physical(old_header.head as int, old_header.log_size as int); - let physical_tail = spec_addr_logical_to_physical(old_header.tail as int, old_header.log_size as int); - let contents_end = old_header.log_size + contents_offset; - &&& physical_head <= physical_tail ==> physical_tail + bytes_to_append@.len() < contents_end - &&& physical_tail < physical_head ==> physical_tail <= physical_tail + bytes_to_append@.len() < physical_head - }) - ensures - self.inv(wrpm), - wrpm.constants() == old(wrpm).constants(), - Self::recover(wrpm@).is_Some(), - match (Self::recover(old(wrpm)@), Self::recover(wrpm@)) { - (Some(old_log_state), Some(new_log_state)) => old_log_state =~= new_log_state, - _ => false - }, - ({ - let (old_ib, old_headers, old_data) = pm_to_views(old(wrpm)@); - let (new_ib, new_headers, new_data) = pm_to_views(wrpm@); - let physical_tail = spec_addr_logical_to_physical(old_header.tail as int, old_header.log_size as int); - &&& old_ib == new_ib - &&& old_headers == new_headers - &&& new_data.subrange(physical_tail - contents_offset, physical_tail - contents_offset + bytes_to_append@.len() as int) =~= bytes_to_append@ - &&& new_data.subrange(0, physical_tail - contents_offset) =~= old_data.subrange(0, physical_tail - contents_offset) - &&& new_data.subrange(physical_tail - contents_offset + bytes_to_append@.len(), new_data.len() as int) =~= - old_data.subrange(physical_tail - contents_offset + 
bytes_to_append@.len(), old_data.len() as int) - }) - { - let physical_tail = Self::addr_logical_to_physical(old_header.tail, old_header.log_size); - proof { lemma_data_write_is_safe(wrpm@, bytes_to_append@, physical_tail as int, perm); } - wrpm.write(physical_tail, bytes_to_append.as_slice(), Tracked(perm)); - proof { - assert(wrpm@.subrange(0, physical_tail as int) =~= old(wrpm)@.subrange(0, physical_tail as int)); - lemma_subrange_equality_implies_subsubrange_equality(wrpm@, old(wrpm)@, 0, physical_tail as int); - } + self.append_no_wrap(wrpm, bytes_to_append, &header_metadata, Tracked(perm)); + } + let new_tail = self.tail + append_size; + header_metadata.tail = new_tail; + let mut metadata_bytes = metadata_to_bytes(&header_metadata); + let new_crc_bytes = bytes_crc(&metadata_bytes); + let new_crc_val = u64_from_le_bytes(new_crc_bytes.as_slice()); + let ghost old_metadata_bytes = metadata_bytes@; + let mut new_header_bytes = new_crc_bytes; + new_header_bytes.append(&mut metadata_bytes); + proof { + lemma_header_crc_correct(new_header_bytes@, new_crc_bytes@, old_metadata_bytes); } + self.update_header(wrpm, Tracked(perm), &new_header_bytes); + // update incorruptible boolean + let old_ib = self.incorruptible_bool; + let new_ib = if old_ib == cdb0_val { + cdb1_val + } else { + assert(old_ib == cdb1_val); + cdb0_val + }; + let new_ib_bytes = u64_to_le_bytes(new_ib); + proof { + lemma_append_ib_update(wrpm@, new_ib, bytes_to_append@, new_header_bytes@, perm); + } + wrpm.write(incorruptible_bool_pos, new_ib_bytes.as_slice(), Tracked(perm)); + self.incorruptible_bool = new_ib; + self.tail = new_tail; + self.header_crc = new_crc_val; + Ok(old_logical_tail) + } + } - pub exec fn append_wrap( - &mut self, - wrpm: &mut WriteRestrictedPersistentMemory, - bytes_to_append: &Vec, - old_header: &PersistentHeaderMetadata, - Tracked(perm): Tracked<&Perm> - ) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - permissions_depend_only_on_recovery_view(perm), - perm.check_permission(old(wrpm)@), - old(self).inv(&*old(wrpm)), - Self::recover(old(wrpm)@).is_Some(), - old_header == spec_get_live_header(old(wrpm)@).metadata, - ({ - let physical_head = spec_addr_logical_to_physical(old_header.head as int, old_header.log_size as int); - let physical_tail = spec_addr_logical_to_physical(old_header.tail as int, old_header.log_size as int); - let contents_end = old_header.log_size + contents_offset; - &&& contents_offset < physical_head - &&& physical_tail + bytes_to_append@.len() >= contents_end - &&& physical_head <= physical_tail - &&& bytes_to_append@.len() <= old_header.log_size - (old_header.tail - old_header.head) - }), - ensures - self.inv(wrpm), - Self::recover(wrpm@).is_Some(), - wrpm.constants() == old(wrpm).constants(), - match (Self::recover(old(wrpm)@), Self::recover(wrpm@)) { - (Some(old_log_state), Some(new_log_state)) => old_log_state =~= new_log_state, - _ => false - }, - ({ - let (old_ib, old_headers, old_data) = pm_to_views(old(wrpm)@); - let (new_ib, new_headers, new_data) = pm_to_views(wrpm@); - let contents_end = old_header.log_size + contents_offset; - let physical_tail = spec_addr_logical_to_physical(old_header.tail as int, old_header.log_size as int); - let len1 = (contents_end - physical_tail); - let len2 = bytes_to_append@.len() - len1; - &&& old_ib == new_ib - &&& old_headers == new_headers - &&& new_data.subrange(physical_tail - contents_offset, contents_end - contents_offset) =~= bytes_to_append@.subrange(0, len1) - &&& new_data.subrange(0, len2 as int) =~= 
bytes_to_append@.subrange(len1 as int, bytes_to_append@.len() as int) - &&& new_data.subrange(len2 as int, physical_tail - contents_offset) =~= old_data.subrange(len2 as int, physical_tail - contents_offset) - &&& bytes_to_append@ =~= new_data.subrange(physical_tail - contents_offset, contents_end - contents_offset) + new_data.subrange(0, len2 as int) - }) - { - let physical_head = Self::addr_logical_to_physical(old_header.head, old_header.log_size); - let physical_tail = Self::addr_logical_to_physical(old_header.tail, old_header.log_size); + exec fn append_no_wrap( + &mut self, + wrpm: &mut WriteRestrictedPersistentMemory, + bytes_to_append: &Vec, + old_header: &PersistentHeaderMetadata, + Tracked(perm): Tracked<&Perm>, + ) where Perm: CheckPermission>, PM: PersistentMemory + requires + permissions_depend_only_on_recovery_view(perm), + perm.check_permission(old(wrpm)@), + old(self).inv(&*old(wrpm)), + Self::recover(old(wrpm)@).is_Some(), + old_header == spec_get_live_header(old(wrpm)@).metadata, + // TODO: clean up + ({ + let physical_tail = spec_addr_logical_to_physical( + old_header.tail as int, + old_header.log_size as int, + ); + physical_tail + bytes_to_append@.len() < old_header.log_size + contents_offset + }), + ({ + let physical_head = spec_addr_logical_to_physical( + old_header.head as int, + old_header.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + old_header.tail as int, + old_header.log_size as int, + ); let contents_end = old_header.log_size + contents_offset; - let append_size = bytes_to_append.len(); - - let len1 = (contents_end - physical_tail) as usize; - let len2 = bytes_to_append.len() - len1; - let append_bytes_slice = bytes_to_append.as_slice(); - let bytes1 = slice_subrange(append_bytes_slice, 0, len1); - let bytes2 = slice_subrange(append_bytes_slice, len1, append_size); - - proof { lemma_data_write_is_safe(wrpm@, bytes1@, physical_tail as int, perm); } - wrpm.write(physical_tail, bytes1, Tracked(perm)); + &&& physical_head <= physical_tail ==> physical_tail + bytes_to_append@.len() + < contents_end + &&& physical_tail < physical_head ==> physical_tail <= physical_tail + + bytes_to_append@.len() < physical_head + }), + ensures + self.inv(wrpm), + wrpm.constants() == old(wrpm).constants(), + Self::recover(wrpm@).is_Some(), + match (Self::recover(old(wrpm)@), Self::recover(wrpm@)) { + (Some(old_log_state), Some(new_log_state)) => old_log_state =~= new_log_state, + _ => false, + }, + ({ + let (old_ib, old_headers, old_data) = pm_to_views(old(wrpm)@); + let (new_ib, new_headers, new_data) = pm_to_views(wrpm@); + let physical_tail = spec_addr_logical_to_physical( + old_header.tail as int, + old_header.log_size as int, + ); + &&& old_ib == new_ib + &&& old_headers == new_headers + &&& new_data.subrange( + physical_tail - contents_offset, + physical_tail - contents_offset + bytes_to_append@.len() as int, + ) =~= bytes_to_append@ + &&& new_data.subrange(0, physical_tail - contents_offset) =~= old_data.subrange( + 0, + physical_tail - contents_offset, + ) + &&& new_data.subrange( + physical_tail - contents_offset + bytes_to_append@.len(), + new_data.len() as int, + ) =~= old_data.subrange( + physical_tail - contents_offset + bytes_to_append@.len(), + old_data.len() as int, + ) + }), + { + let physical_tail = Self::addr_logical_to_physical(old_header.tail, old_header.log_size); + proof { + lemma_data_write_is_safe(wrpm@, bytes_to_append@, physical_tail as int, perm); + } + wrpm.write(physical_tail, bytes_to_append.as_slice(), Tracked(perm)); + 
proof { + assert(wrpm@.subrange(0, physical_tail as int) =~= old(wrpm)@.subrange( + 0, + physical_tail as int, + )); + lemma_subrange_equality_implies_subsubrange_equality( + wrpm@, + old(wrpm)@, + 0, + physical_tail as int, + ); + } + } - proof { lemma_data_write_is_safe(wrpm@, bytes2@, contents_offset as int, perm); } - wrpm.write(contents_offset, bytes2, Tracked(perm)); + pub exec fn append_wrap( + &mut self, + wrpm: &mut WriteRestrictedPersistentMemory, + bytes_to_append: &Vec, + old_header: &PersistentHeaderMetadata, + Tracked(perm): Tracked<&Perm>, + ) where Perm: CheckPermission>, PM: PersistentMemory + requires + permissions_depend_only_on_recovery_view(perm), + perm.check_permission(old(wrpm)@), + old(self).inv(&*old(wrpm)), + Self::recover(old(wrpm)@).is_Some(), + old_header == spec_get_live_header(old(wrpm)@).metadata, + ({ + let physical_head = spec_addr_logical_to_physical( + old_header.head as int, + old_header.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + old_header.tail as int, + old_header.log_size as int, + ); + let contents_end = old_header.log_size + contents_offset; + &&& contents_offset < physical_head + &&& physical_tail + bytes_to_append@.len() >= contents_end + &&& physical_head <= physical_tail + &&& bytes_to_append@.len() <= old_header.log_size - (old_header.tail + - old_header.head) + }), + ensures + self.inv(wrpm), + Self::recover(wrpm@).is_Some(), + wrpm.constants() == old(wrpm).constants(), + match (Self::recover(old(wrpm)@), Self::recover(wrpm@)) { + (Some(old_log_state), Some(new_log_state)) => old_log_state =~= new_log_state, + _ => false, + }, + ({ + let (old_ib, old_headers, old_data) = pm_to_views(old(wrpm)@); + let (new_ib, new_headers, new_data) = pm_to_views(wrpm@); + let contents_end = old_header.log_size + contents_offset; + let physical_tail = spec_addr_logical_to_physical( + old_header.tail as int, + old_header.log_size as int, + ); + let len1 = (contents_end - physical_tail); + let len2 = bytes_to_append@.len() - len1; + &&& old_ib == new_ib + &&& old_headers == new_headers + &&& new_data.subrange( + physical_tail - contents_offset, + contents_end - contents_offset, + ) =~= bytes_to_append@.subrange(0, len1) + &&& new_data.subrange(0, len2 as int) =~= bytes_to_append@.subrange( + len1 as int, + bytes_to_append@.len() as int, + ) + &&& new_data.subrange(len2 as int, physical_tail - contents_offset) + =~= old_data.subrange(len2 as int, physical_tail - contents_offset) + &&& bytes_to_append@ =~= new_data.subrange( + physical_tail - contents_offset, + contents_end - contents_offset, + ) + new_data.subrange(0, len2 as int) + }), + { + let physical_head = Self::addr_logical_to_physical(old_header.head, old_header.log_size); + let physical_tail = Self::addr_logical_to_physical(old_header.tail, old_header.log_size); + let contents_end = old_header.log_size + contents_offset; + let append_size = bytes_to_append.len(); + let len1 = (contents_end - physical_tail) as usize; + let len2 = bytes_to_append.len() - len1; + let append_bytes_slice = bytes_to_append.as_slice(); + let bytes1 = slice_subrange(append_bytes_slice, 0, len1); + let bytes2 = slice_subrange(append_bytes_slice, len1, append_size); + proof { + lemma_data_write_is_safe(wrpm@, bytes1@, physical_tail as int, perm); + } + wrpm.write(physical_tail, bytes1, Tracked(perm)); + proof { + lemma_data_write_is_safe(wrpm@, bytes2@, contents_offset as int, perm); + } + wrpm.write(contents_offset, bytes2, Tracked(perm)); + proof { + assert(wrpm@.subrange(0, contents_offset as 
int) =~= old(wrpm)@.subrange( + 0, + contents_offset as int, + )); + lemma_subrange_equality_implies_subsubrange_equality( + wrpm@, + old(wrpm)@, + 0, + contents_offset as int, + ); + } + } - proof { - assert(wrpm@.subrange(0, contents_offset as int) =~= old(wrpm)@.subrange(0, contents_offset as int)); - lemma_subrange_equality_implies_subsubrange_equality(wrpm@, old(wrpm)@, 0, contents_offset as int); + pub exec fn untrusted_advance_head( + &mut self, + wrpm: &mut WriteRestrictedPersistentMemory, + new_head: u64, + Tracked(perm): Tracked<&Perm>, + ) -> (result: Result<(), InfiniteLogErr>) where + Perm: CheckPermission>, + PM: PersistentMemory, + + requires + old(self).inv(&*old(wrpm)), + Self::recover(old(wrpm)@).is_Some(), + ({ + let old_log_state = Self::recover(old(wrpm)@); + forall|pm_state| #[trigger] + perm.check_permission(pm_state) <==> { + let log_state = Self::recover(pm_state); + ||| log_state == old_log_state + ||| log_state == Some(old_log_state.unwrap().advance_head(new_head as int)) + } + }), + ensures + self.inv(wrpm), + wrpm.constants() == old(wrpm).constants(), + ({ + let old_log_state = Self::recover(old(wrpm)@); + let new_log_state = Self::recover(wrpm@); + match (result, old_log_state, new_log_state) { + (Ok(_), Some(old_log_state), Some(new_log_state)) => { + &&& old_log_state.head <= new_head <= old_log_state.head + + old_log_state.log.len() + &&& new_log_state == old_log_state.advance_head(new_head as int) + }, + ( + Err(InfiniteLogErr::CantAdvanceHeadPositionBeforeHead { head }), + Some(old_log_state), + Some(new_log_state), + ) => { + &&& new_log_state == old_log_state + &&& head == old_log_state.head + &&& new_head < head + }, + ( + Err(InfiniteLogErr::CantAdvanceHeadPositionBeyondTail { tail }), + Some(old_log_state), + Some(new_log_state), + ) => { + &&& new_log_state == old_log_state + &&& tail == old_log_state.head + old_log_state.log.len() + &&& new_head > tail + }, + (_, _, _) => false, } + }), + { + let pm = wrpm.get_pm_ref(); + let ghost original_pm = wrpm@; + let live_header = PersistentHeader { + crc: self.header_crc, + metadata: PersistentHeaderMetadata { + head: self.head, + tail: self.tail, + log_size: self.log_size, + }, + }; + if new_head < live_header.metadata.head { + assert(self.header_crc == old(self).header_crc); + return Err( + InfiniteLogErr::CantAdvanceHeadPositionBeforeHead { + head: live_header.metadata.head, + }, + ); + } + if new_head > live_header.metadata.tail { + assert(self.header_crc == old(self).header_crc); + return Err( + InfiniteLogErr::CantAdvanceHeadPositionBeyondTail { + tail: live_header.metadata.tail, + }, + ); + } + // copy the header and update it + + let mut new_header = live_header; + new_header.metadata.head = new_head; + let mut metadata_bytes = metadata_to_bytes(&new_header.metadata); + let new_crc_bytes = bytes_crc(&metadata_bytes); + let new_crc_val = u64_from_le_bytes(new_crc_bytes.as_slice()); + let ghost old_metadata_bytes = metadata_bytes@; + let mut new_header_bytes = new_crc_bytes; + new_header_bytes.append(&mut metadata_bytes); + proof { + lemma_header_crc_correct(new_header_bytes@, new_crc_bytes@, old_metadata_bytes); + } + self.update_header(wrpm, Tracked(perm), &new_header_bytes); + // TODO: put ib update in a lemma + let old_ib = self.incorruptible_bool; + let new_ib = if old_ib == cdb0_val { + cdb1_val + } else { + assert(old_ib == cdb1_val); + cdb0_val + }; + let new_ib_bytes = u64_to_le_bytes(new_ib); + proof { + lemma_auto_spec_u64_to_from_le_bytes(); + lemma_single_write_crash(wrpm@, 
incorruptible_bool_pos as int, new_ib_bytes@); + assert(perm.check_permission(old(wrpm)@)); + let new_pm = update_contents_to_reflect_write( + wrpm@, + incorruptible_bool_pos as int, + new_ib_bytes@, + ); + lemma_headers_unchanged(wrpm@, new_pm); + assert(new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) + =~= new_ib_bytes@); + let new_header = spec_bytes_to_header(new_header_bytes@); + let (ib, headers, data) = pm_to_views(new_pm); + let header_pos = if new_ib == cdb0_val { + header1_pos + } else { + header2_pos + }; + assert(new_pm.subrange(header_pos as int, header_pos + header_size) + =~= new_header_bytes@); + lemma_header_match(new_pm, header_pos as int, new_header); + lemma_header_correct(new_pm, new_header_bytes@, header_pos as int); + // prove that new pm has the advance head update + let new_log_state = Self::recover(new_pm); + let old_log_state = Self::recover(old(wrpm)@); + match (new_log_state, old_log_state) { + (Some(new_log_state), Some(old_log_state)) => { + lemma_pm_state_header(new_pm); + lemma_pm_state_header(old(wrpm)@); + assert(new_log_state =~= old_log_state.advance_head(new_head as int)); + assert(perm.check_permission(new_pm)); + }, + _ => assert(false), } + } + wrpm.write(incorruptible_bool_pos, new_ib_bytes.as_slice(), Tracked(perm)); + self.incorruptible_bool = new_ib; + self.head = new_head; + self.header_crc = new_crc_val; + Ok(()) + } - pub exec fn untrusted_advance_head( - &mut self, - wrpm: &mut WriteRestrictedPersistentMemory, - new_head: u64, - Tracked(perm): Tracked<&Perm> - ) -> (result: Result<(), InfiniteLogErr>) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - old(self).inv(&*old(wrpm)), - Self::recover(old(wrpm)@).is_Some(), - ({ - let old_log_state = Self::recover(old(wrpm)@); - forall |pm_state| #[trigger] perm.check_permission(pm_state) <==> { - let log_state = Self::recover(pm_state); - ||| log_state == old_log_state - ||| log_state == Some(old_log_state.unwrap().advance_head(new_head as int)) - } - }) - ensures - self.inv(wrpm), - wrpm.constants() == old(wrpm).constants(), - ({ - let old_log_state = Self::recover(old(wrpm)@); - let new_log_state = Self::recover(wrpm@); - match (result, old_log_state, new_log_state) { - (Ok(_), Some(old_log_state), Some(new_log_state)) => { - &&& old_log_state.head <= new_head <= old_log_state.head + old_log_state.log.len() - &&& new_log_state == old_log_state.advance_head(new_head as int) - }, - (Err(InfiniteLogErr::CantAdvanceHeadPositionBeforeHead{ head }), Some(old_log_state), Some(new_log_state)) => { - &&& new_log_state == old_log_state - &&& head == old_log_state.head - &&& new_head < head - }, - (Err(InfiniteLogErr::CantAdvanceHeadPositionBeyondTail{ tail }), Some(old_log_state), Some(new_log_state)) => { - &&& new_log_state == old_log_state - &&& tail == old_log_state.head + old_log_state.log.len() - &&& new_head > tail - }, - (_, _, _) => false - } - }) - { - let pm = wrpm.get_pm_ref(); - let ghost original_pm = wrpm@; - - let live_header = PersistentHeader { - crc: self.header_crc, - metadata: PersistentHeaderMetadata { head: self.head, tail: self.tail, log_size: self.log_size } - }; - - if new_head < live_header.metadata.head { - assert(self.header_crc == old(self).header_crc); - return Err(InfiniteLogErr::CantAdvanceHeadPositionBeforeHead{ head: live_header.metadata.head }); - } - - if new_head > live_header.metadata.tail { - assert(self.header_crc == old(self).header_crc); - return Err(InfiniteLogErr::CantAdvanceHeadPositionBeyondTail{ tail: 
live_header.metadata.tail }); + pub exec fn untrusted_read( + &self, + wrpm: &WriteRestrictedPersistentMemory, + pos: u64, + len: u64, + ) -> (result: Result, InfiniteLogErr>) where + Perm: CheckPermission>, + PM: PersistentMemory, + + requires + self.inv(wrpm), + Self::recover(wrpm@).is_Some(), + ensures + ({ + let log = Self::recover(wrpm@).unwrap(); + match result { + Ok(bytes) => { + let true_bytes = log.log.subrange(pos - log.head, pos + len - log.head); + &&& pos >= log.head + &&& pos + len <= log.head + log.log.len() + &&& read_correct_modulo_corruption( + bytes@, + true_bytes, + wrpm.constants().impervious_to_corruption, + ) + }, + Err(InfiniteLogErr::CantReadBeforeHead { head: head_pos }) => { + &&& pos < log.head + &&& head_pos == log.head + }, + Err(InfiniteLogErr::CantReadPastTail { tail }) => { + &&& pos + len > log.head + log.log.len() + &&& tail == log.head + log.log.len() + }, + _ => false, } - - // copy the header and update it - let mut new_header = live_header; - new_header.metadata.head = new_head; - let mut metadata_bytes = metadata_to_bytes(&new_header.metadata); - let new_crc_bytes = bytes_crc(&metadata_bytes); - let new_crc_val = u64_from_le_bytes(new_crc_bytes.as_slice()); - let ghost old_metadata_bytes = metadata_bytes@; - let mut new_header_bytes = new_crc_bytes; - new_header_bytes.append(&mut metadata_bytes); - - proof { lemma_header_crc_correct(new_header_bytes@, new_crc_bytes@, old_metadata_bytes); } - - self.update_header(wrpm, Tracked(perm), &new_header_bytes); - - // TODO: put ib update in a lemma - let old_ib = self.incorruptible_bool; - let new_ib = if old_ib == cdb0_val { - cdb1_val + }), + { + let pm = wrpm.get_pm_ref(); + let physical_pos = Self::addr_logical_to_physical(pos, self.log_size); + let contents_end = self.log_size + contents_offset; + if pos < self.head { + Err(InfiniteLogErr::CantReadBeforeHead { head: self.head }) + } else if pos > u64::MAX - len { + Err(InfiniteLogErr::CantReadPastTail { tail: self.tail }) + } else if pos + len > self.tail { + Err(InfiniteLogErr::CantReadPastTail { tail: self.tail }) + } else { + proof { + // we get a type error if we calculate physical head and tail in non-ghost code and use them here, + // so we need to calculate them here for the proof and again later for execution + let physical_head = spec_addr_logical_to_physical( + self.head as int, + self.log_size as int, + ); + let physical_tail = spec_addr_logical_to_physical( + self.tail as int, + self.log_size as int, + ); + if physical_head == physical_tail { + lemma_mod_equal(self.head as int, self.tail as int, self.log_size as int); + assert(len == 0); + } else if physical_head < physical_tail { + // read cannot wrap around + lemma_mod_between( + self.log_size as int, + self.head as int, + self.tail as int, + pos as int, + ); + lemma_mod_difference_equal(self.head as int, pos as int, self.log_size as int); } else { - assert(old_ib == cdb1_val); - cdb0_val - }; - let new_ib_bytes = u64_to_le_bytes(new_ib); - - proof { - lemma_auto_spec_u64_to_from_le_bytes(); - lemma_single_write_crash(wrpm@, incorruptible_bool_pos as int, new_ib_bytes@); - assert(perm.check_permission(old(wrpm)@)); - let new_pm = update_contents_to_reflect_write(wrpm@, incorruptible_bool_pos as int, new_ib_bytes@); - lemma_headers_unchanged(wrpm@, new_pm); - assert(new_pm.subrange(incorruptible_bool_pos as int, incorruptible_bool_pos + 8) =~= new_ib_bytes@); - - let new_header = spec_bytes_to_header(new_header_bytes@); - let (ib, headers, data) = pm_to_views(new_pm); - let header_pos = if 
new_ib == cdb0_val { - header1_pos + // read may wrap around + lemma_mod_not_between( + self.log_size as int, + self.head as int, + self.tail as int, + pos as int, + ); + if physical_pos <= physical_tail { + lemma_mod_wrapped_len(self.head as int, pos as int, self.log_size as int); } else { - header2_pos - }; - assert(new_pm.subrange(header_pos as int, header_pos + header_size) =~= new_header_bytes@); - lemma_header_match(new_pm, header_pos as int, new_header); - lemma_header_correct(new_pm, new_header_bytes@, header_pos as int); - - // prove that new pm has the advance head update - let new_log_state = Self::recover(new_pm); - let old_log_state = Self::recover(old(wrpm)@); - match (new_log_state, old_log_state) { - (Some(new_log_state), Some(old_log_state)) => { - lemma_pm_state_header(new_pm); - lemma_pm_state_header(old(wrpm)@); - assert(new_log_state =~= old_log_state.advance_head(new_head as int)); - assert(perm.check_permission(new_pm)); - } - _ => assert(false), + lemma_mod_difference_equal( + self.head as int, + pos as int, + self.log_size as int, + ); } } - - wrpm.write(incorruptible_bool_pos, new_ib_bytes.as_slice(), Tracked(perm)); - self.incorruptible_bool = new_ib; - self.head = new_head; - self.header_crc = new_crc_val; - - Ok(()) } - - pub exec fn untrusted_read( - &self, - wrpm: &WriteRestrictedPersistentMemory, - pos: u64, - len: u64 - ) -> (result: Result, InfiniteLogErr>) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - self.inv(wrpm), - Self::recover(wrpm@).is_Some(), - ensures - ({ - let log = Self::recover(wrpm@).unwrap(); - match result { - Ok(bytes) => { - let true_bytes = log.log.subrange(pos - log.head, pos + len - log.head); - &&& pos >= log.head - &&& pos + len <= log.head + log.log.len() - &&& read_correct_modulo_corruption(bytes@, true_bytes, - wrpm.constants().impervious_to_corruption) - }, - Err(InfiniteLogErr::CantReadBeforeHead{ head: head_pos }) => { - &&& pos < log.head - &&& head_pos == log.head - }, - Err(InfiniteLogErr::CantReadPastTail{ tail }) => { - &&& pos + len > log.head + log.log.len() - &&& tail == log.head + log.log.len() - }, - _ => false - } - }) - { - let pm = wrpm.get_pm_ref(); - let physical_pos = Self::addr_logical_to_physical(pos, self.log_size); - let contents_end = self.log_size + contents_offset; - if pos < self.head { - Err(InfiniteLogErr::CantReadBeforeHead{ head: self.head }) - } else if pos > u64::MAX - len { - Err(InfiniteLogErr::CantReadPastTail{ tail: self.tail }) - } else if pos + len > self.tail { - Err(InfiniteLogErr::CantReadPastTail{ tail: self.tail }) + let physical_head = Self::addr_logical_to_physical(self.head, self.log_size); + let physical_tail = Self::addr_logical_to_physical(self.tail, self.log_size); + let ghost log = Self::recover(pm@).unwrap(); + let ghost true_bytes = log.log.subrange(pos - log.head, pos + len - log.head); + if physical_head == physical_tail { + assert(Seq::::empty() =~= log.log.subrange( + pos - log.head, + pos + len - log.head, + )); + let buf = Vec::new(); + let ghost addrs = Seq::::empty(); + assert(if wrpm.constants().impervious_to_corruption { + buf@ == true_bytes } else { - proof { - // we get a type error if we calculate physical head and tail in non-ghost code and use them here, - // so we need to calculate them here for the proof and again later for execution - let physical_head = spec_addr_logical_to_physical(self.head as int, self.log_size as int); - let physical_tail = spec_addr_logical_to_physical(self.tail as int, self.log_size as int); - if physical_head 
== physical_tail { - lemma_mod_equal(self.head as int, self.tail as int, self.log_size as int); - assert(len == 0); - } else if physical_head < physical_tail { - // read cannot wrap around - lemma_mod_between(self.log_size as int, self.head as int, self.tail as int, pos as int); - lemma_mod_difference_equal(self.head as int, pos as int, self.log_size as int); - } else { - // read may wrap around - lemma_mod_not_between(self.log_size as int, self.head as int, self.tail as int, pos as int); - if physical_pos <= physical_tail { - lemma_mod_wrapped_len(self.head as int, pos as int, self.log_size as int); - } else { - lemma_mod_difference_equal(self.head as int, pos as int, self.log_size as int); - } - } - } - - let physical_head = Self::addr_logical_to_physical(self.head, self.log_size); - let physical_tail = Self::addr_logical_to_physical(self.tail, self.log_size); - - let ghost log = Self::recover(pm@).unwrap(); - let ghost true_bytes = log.log.subrange(pos - log.head, pos + len - log.head); - if physical_head == physical_tail { - assert (Seq::::empty() =~= log.log.subrange(pos - log.head, pos + len - log.head)); - let buf = Vec::new(); - let ghost addrs = Seq::::empty(); - assert (if wrpm.constants().impervious_to_corruption { buf@ == true_bytes } - else { maybe_corrupted(buf@, true_bytes, addrs) }); - Ok(buf) - } else if physical_pos >= physical_head && physical_pos >= contents_end - len { - let r1_len: u64 = contents_end - physical_pos; - let r2_len: u64 = len - r1_len; - - let mut r1 = pm.read(physical_pos, r1_len); - let mut r2 = pm.read(contents_offset, r2_len); - let ghost r1_addrs = Seq::::new(r1_len as nat, |i: int| i + physical_pos as int); - let ghost r2_addrs = Seq::::new(r2_len as nat, |i: int| i + contents_offset as int); - let ghost addrs: Seq = r1_addrs.add(r2_addrs); - - r1.append(&mut r2); - assert (pm@.subrange(physical_pos as int, physical_pos + r1_len) - + pm@.subrange(contents_offset as int, contents_offset + r2_len) - =~= log.log.subrange(pos - log.head, pos + len - log.head)); - assert (if wrpm.constants().impervious_to_corruption { r1@ == true_bytes } - else { maybe_corrupted(r1@, true_bytes, addrs) }); - Ok(r1) - } else { - assert (pm@.subrange(physical_pos as int, physical_pos + len) =~= - log.log.subrange(pos - log.head, pos + len - log.head)); - let ghost addrs = Seq::::new(len as nat, |i: int| i + physical_pos); - let buf = pm.read(physical_pos, len); - assert (if wrpm.constants().impervious_to_corruption { buf@ == true_bytes } - else { maybe_corrupted(buf@, true_bytes, addrs) }); - Ok(buf) - } - } + maybe_corrupted(buf@, true_bytes, addrs) + }); + Ok(buf) + } else if physical_pos >= physical_head && physical_pos >= contents_end - len { + let r1_len: u64 = contents_end - physical_pos; + let r2_len: u64 = len - r1_len; + let mut r1 = pm.read(physical_pos, r1_len); + let mut r2 = pm.read(contents_offset, r2_len); + let ghost r1_addrs = Seq::::new( + r1_len as nat, + |i: int| i + physical_pos as int, + ); + let ghost r2_addrs = Seq::::new( + r2_len as nat, + |i: int| i + contents_offset as int, + ); + let ghost addrs: Seq = r1_addrs.add(r2_addrs); + r1.append(&mut r2); + assert(pm@.subrange(physical_pos as int, physical_pos + r1_len) + pm@.subrange( + contents_offset as int, + contents_offset + r2_len, + ) =~= log.log.subrange(pos - log.head, pos + len - log.head)); + assert(if wrpm.constants().impervious_to_corruption { + r1@ == true_bytes + } else { + maybe_corrupted(r1@, true_bytes, addrs) + }); + Ok(r1) + } else { + assert(pm@.subrange(physical_pos as int, 
physical_pos + len) =~= log.log.subrange( + pos - log.head, + pos + len - log.head, + )); + let ghost addrs = Seq::::new(len as nat, |i: int| i + physical_pos); + let buf = pm.read(physical_pos, len); + assert(if wrpm.constants().impervious_to_corruption { + buf@ == true_bytes + } else { + maybe_corrupted(buf@, true_bytes, addrs) + }); + Ok(buf) } + } + } - pub exec fn untrusted_get_head_and_tail( - &self, - wrpm: &WriteRestrictedPersistentMemory - ) -> (result: Result<(u64, u64, u64), InfiniteLogErr>) - where - Perm: CheckPermission>, - PM: PersistentMemory - requires - self.inv(wrpm), - Self::recover(wrpm@).is_Some() - ensures - match result { - Ok((result_head, result_tail, result_capacity)) => - match Self::recover(wrpm@).unwrap() { - AbstractInfiniteLogState{ head: head, log: log, capacity: capacity } => { - &&& result_head == head - &&& result_tail == head + log.len() - &&& result_capacity == capacity - } - }, - Err(_) => false, - } - { - let pm = wrpm.get_pm_ref(); - proof { lemma_pm_state_header(pm@); } - Ok((self.head, self.tail, self.log_size - 1)) - } + pub exec fn untrusted_get_head_and_tail( + &self, + wrpm: &WriteRestrictedPersistentMemory, + ) -> (result: Result<(u64, u64, u64), InfiniteLogErr>) where + Perm: CheckPermission>, + PM: PersistentMemory, + + requires + self.inv(wrpm), + Self::recover(wrpm@).is_Some(), + ensures + match result { + Ok((result_head, result_tail, result_capacity)) => match Self::recover( + wrpm@, + ).unwrap() { + AbstractInfiniteLogState { head: head, log: log, capacity: capacity } => { + &&& result_head == head + &&& result_tail == head + log.len() + &&& result_capacity == capacity + }, + }, + Err(_) => false, + }, + { + let pm = wrpm.get_pm_ref(); + proof { + lemma_pm_state_header(pm@); } + Ok((self.head, self.tail, self.log_size - 1)) } } +} // verus! +} + pub mod main_t { use std::fmt::Write; @@ -2076,290 +2598,310 @@ pub mod main_t { verus! { - pub open spec fn recovery_view() -> (result: FnSpec(Seq) -> Option) - { - |c| UntrustedLogImpl::recover(c) - } +pub open spec fn recovery_view() -> (result: FnSpec(Seq) -> Option) { + |c| UntrustedLogImpl::recover(c) +} - pub open spec fn read_correct_modulo_corruption(bytes: Seq, true_bytes: Seq, - impervious_to_corruption: bool) -> bool - { - if impervious_to_corruption { - bytes == true_bytes - } - else { - exists |addrs: Seq| { - &&& all_elements_unique(addrs) - &&& #[trigger] maybe_corrupted(bytes, true_bytes, addrs) - } +pub open spec fn read_correct_modulo_corruption( + bytes: Seq, + true_bytes: Seq, + impervious_to_corruption: bool, +) -> bool { + if impervious_to_corruption { + bytes == true_bytes + } else { + exists|addrs: Seq| + { + &&& all_elements_unique(addrs) + &&& #[trigger] maybe_corrupted(bytes, true_bytes, addrs) } - } - - /// A `TrustedPermission` indicates what states of persistent - /// memory are permitted. The struct isn't public, so it can't be - /// created outside of this file. As a further defense against one - /// being created outside this file, its fields aren't public, and - /// the constructor `TrustedPermission::new` isn't public. + } +} - struct TrustedPermission { - ghost is_state_allowable: FnSpec(Seq) -> bool - } +/// A `TrustedPermission` indicates what states of persistent +/// memory are permitted. The struct isn't public, so it can't be +/// created outside of this file. As a further defense against one +/// being created outside this file, its fields aren't public, and +/// the constructor `TrustedPermission::new` isn't public. 
+struct TrustedPermission { + ghost is_state_allowable: FnSpec(Seq) -> bool, +} - impl CheckPermission> for TrustedPermission { - closed spec fn check_permission(&self, state: Seq) -> bool { - (self.is_state_allowable)(state) - } - } +impl CheckPermission> for TrustedPermission { + closed spec fn check_permission(&self, state: Seq) -> bool { + (self.is_state_allowable)(state) + } +} - impl TrustedPermission { - proof fn new(cur: Seq, next: FnSpec(AbstractInfiniteLogState, AbstractInfiniteLogState) -> bool) - -> (tracked perm: Self) - ensures - forall |s| #[trigger] perm.check_permission(s) <==> - crate::sccf::is_state_allowable(cur, s, recovery_view(), next) - { - Self { is_state_allowable: |s| crate::sccf::is_state_allowable(cur, s, recovery_view(), next) } - } +impl TrustedPermission { + proof fn new( + cur: Seq, + next: FnSpec(AbstractInfiniteLogState, AbstractInfiniteLogState) -> bool, + ) -> (tracked perm: Self) + ensures + forall|s| #[trigger] + perm.check_permission(s) <==> crate::sccf::is_state_allowable( + cur, + s, + recovery_view(), + next, + ), + { + Self { + is_state_allowable: |s| crate::sccf::is_state_allowable(cur, s, recovery_view(), next), } + } +} - /// A `InfiniteLogImpl` wraps an `UntrustedLogImpl` to provide the - /// executable interface that turns a persistent memory region - /// into an effectively infinite log. It provides a simple - /// interface to higher-level code. - pub struct InfiniteLogImpl { - untrusted_log_impl: UntrustedLogImpl, - wrpm: WriteRestrictedPersistentMemory, - } +/// A `InfiniteLogImpl` wraps an `UntrustedLogImpl` to provide the +/// executable interface that turns a persistent memory region +/// into an effectively infinite log. It provides a simple +/// interface to higher-level code. +pub struct InfiniteLogImpl { + untrusted_log_impl: UntrustedLogImpl, + wrpm: WriteRestrictedPersistentMemory, +} - pub enum InfiniteLogErr { - InsufficientSpaceForSetup { required_space: u64 }, - CantReadBeforeHead { head: u64 }, - CantReadPastTail { tail: u64 }, - InsufficientSpaceForAppend { available_space: u64 }, - CRCMismatch, - CantAdvanceHeadPositionBeforeHead { head: u64 }, - CantAdvanceHeadPositionBeyondTail { tail: u64 }, - } +pub enum InfiniteLogErr { + InsufficientSpaceForSetup { required_space: u64 }, + CantReadBeforeHead { head: u64 }, + CantReadPastTail { tail: u64 }, + InsufficientSpaceForAppend { available_space: u64 }, + CRCMismatch, + CantAdvanceHeadPositionBeforeHead { head: u64 }, + CantAdvanceHeadPositionBeyondTail { tail: u64 }, +} - impl InfiniteLogImpl { - pub closed spec fn view(self) -> Option - { - recovery_view()(self.wrpm@) - } +impl InfiniteLogImpl { + pub closed spec fn view(self) -> Option { + recovery_view()(self.wrpm@) + } - pub closed spec fn constants(self) -> PersistentMemoryConstants - { - self.wrpm.constants() - } + pub closed spec fn constants(self) -> PersistentMemoryConstants { + self.wrpm.constants() + } - pub closed spec fn valid(self) -> bool { - &&& self.untrusted_log_impl.inv(&self.wrpm) - &&& recovery_view()(self.wrpm@).is_Some() - } + pub closed spec fn valid(self) -> bool { + &&& self.untrusted_log_impl.inv(&self.wrpm) + &&& recovery_view()(self.wrpm@).is_Some() + } - /// This static function takes a `PersistentMemory` and writes - /// to it such that its state represents an empty log starting - /// at head position 0. This function is meant to be called - /// exactly once per log, to create and initialize it. 
- pub exec fn setup(pm: &mut PM, device_size: u64) -> (result: Result) - requires - old(pm).inv(), - old(pm)@.len() == device_size - ensures - pm.inv(), - pm.constants() == old(pm).constants(), - pm@.len() == device_size, - match result { - Ok(log_capacity) => - recovery_view()(pm@) == Some(AbstractInfiniteLogState::initialize(log_capacity as int)), - Err(InfiniteLogErr::InsufficientSpaceForSetup{ required_space }) => device_size < required_space, - _ => false - } - { - UntrustedLogImpl::untrusted_setup(pm, device_size) - } + /// This static function takes a `PersistentMemory` and writes + /// to it such that its state represents an empty log starting + /// at head position 0. This function is meant to be called + /// exactly once per log, to create and initialize it. + pub exec fn setup(pm: &mut PM, device_size: u64) -> (result: Result) + requires + old(pm).inv(), + old(pm)@.len() == device_size, + ensures + pm.inv(), + pm.constants() == old(pm).constants(), + pm@.len() == device_size, + match result { + Ok(log_capacity) => recovery_view()(pm@) == Some( + AbstractInfiniteLogState::initialize(log_capacity as int), + ), + Err(InfiniteLogErr::InsufficientSpaceForSetup { required_space }) => device_size + < required_space, + _ => false, + }, + { + UntrustedLogImpl::untrusted_setup(pm, device_size) + } - /// This static function takes a `PersistentMemory` and wraps - /// it into an `InfiniteLogImpl`. It's meant to be called after - /// setting up the persistent memory or after crashing and - /// restarting. - pub exec fn start(pm: PM, device_size: u64) -> (result: Result, InfiniteLogErr>) - requires - pm.inv(), - pm@.len() == device_size, - recovery_view()(pm@).is_Some() - ensures - match result { - Ok(trusted_log_impl) => { - &&& trusted_log_impl.valid() - &&& trusted_log_impl@ == recovery_view()(pm@) - &&& trusted_log_impl.constants() == pm.constants() - }, - Err(InfiniteLogErr::CRCMismatch) => !pm.constants().impervious_to_corruption, - _ => false - } - { - // The untrusted `start` routine may write to persistent memory, as long - // as it keeps its abstraction as a log unchanged. - let mut wrpm = WriteRestrictedPersistentMemory::new(pm); - let tracked perm = TrustedPermission::new(pm@, |s1, s2| false); - match UntrustedLogImpl::untrusted_start(&mut wrpm, device_size, Tracked(&perm)) { - Ok(untrusted_log_impl) => Ok(InfiniteLogImpl { untrusted_log_impl, wrpm }), - Err(e) => Err(e) - } - } + /// This static function takes a `PersistentMemory` and wraps + /// it into an `InfiniteLogImpl`. It's meant to be called after + /// setting up the persistent memory or after crashing and + /// restarting. + pub exec fn start(pm: PM, device_size: u64) -> (result: Result< + InfiniteLogImpl, + InfiniteLogErr, + >) + requires + pm.inv(), + pm@.len() == device_size, + recovery_view()(pm@).is_Some(), + ensures + match result { + Ok(trusted_log_impl) => { + &&& trusted_log_impl.valid() + &&& trusted_log_impl@ == recovery_view()(pm@) + &&& trusted_log_impl.constants() == pm.constants() + }, + Err(InfiniteLogErr::CRCMismatch) => !pm.constants().impervious_to_corruption, + _ => false, + }, + { + // The untrusted `start` routine may write to persistent memory, as long + // as it keeps its abstraction as a log unchanged. 
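+        // Since the closure passed to `TrustedPermission::new` below is
+        // `|s1, s2| false`, no transition between distinct abstract log states is
+        // allowed: every crash state the untrusted code can produce must still
+        // recover to the abstract log that `pm` currently recovers to.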
+ let mut wrpm = WriteRestrictedPersistentMemory::new(pm); + let tracked perm = TrustedPermission::new(pm@, |s1, s2| false); + match UntrustedLogImpl::untrusted_start(&mut wrpm, device_size, Tracked(&perm)) { + Ok(untrusted_log_impl) => Ok(InfiniteLogImpl { untrusted_log_impl, wrpm }), + Err(e) => Err(e), + } + } - /// This function appends to the log and returns the offset at - /// which the append happened. - pub exec fn append(&mut self, bytes_to_append: &Vec) -> (result: Result) - requires - old(self).valid() - ensures - self.valid(), - self.constants() == old(self).constants(), - match result { - Ok(offset) => - match (old(self)@, self@) { - (Some(old_log), Some(new_log)) => { - &&& offset as nat == old_log.log.len() + old_log.head - &&& new_log == old_log.append(bytes_to_append@) - }, - _ => false - }, - Err(InfiniteLogErr::InsufficientSpaceForAppend{ available_space }) => { - &&& self@ == old(self)@ - &&& available_space < bytes_to_append.len() - &&& { - let log = old(self)@.unwrap(); - ||| available_space == log.capacity - log.log.len() - ||| available_space == u64::MAX - log.head - log.log.len() - } - }, - _ => false + /// This function appends to the log and returns the offset at + /// which the append happened. + pub exec fn append(&mut self, bytes_to_append: &Vec) -> (result: Result< + u64, + InfiniteLogErr, + >) + requires + old(self).valid(), + ensures + self.valid(), + self.constants() == old(self).constants(), + match result { + Ok(offset) => match (old(self)@, self@) { + (Some(old_log), Some(new_log)) => { + &&& offset as nat == old_log.log.len() + old_log.head + &&& new_log == old_log.append(bytes_to_append@) + }, + _ => false, + }, + Err(InfiniteLogErr::InsufficientSpaceForAppend { available_space }) => { + &&& self@ == old(self)@ + &&& available_space < bytes_to_append.len() + &&& { + let log = old(self)@.unwrap(); + ||| available_space == log.capacity - log.log.len() + ||| available_space == u64::MAX - log.head - log.log.len() } - { - // For crash safety, we must restrict the untrusted code's - // writes to persistent memory. We must only let it write - // such that, if a crash happens in the middle of a write, - // the view of the persistent state is either the current - // state or the current state with `bytes_to_append` - // appended. - - let tracked perm = TrustedPermission::new(self.wrpm@, - |s1: AbstractInfiniteLogState, s2| s2 == s1.append(bytes_to_append@)); - self.untrusted_log_impl.untrusted_append(&mut self.wrpm, bytes_to_append, Tracked(&perm)) - } + }, + _ => false, + }, + { + // For crash safety, we must restrict the untrusted code's + // writes to persistent memory. We must only let it write + // such that, if a crash happens in the middle of a write, + // the view of the persistent state is either the current + // state or the current state with `bytes_to_append` + // appended. + let tracked perm = TrustedPermission::new( + self.wrpm@, + |s1: AbstractInfiniteLogState, s2| s2 == s1.append(bytes_to_append@), + ); + self.untrusted_log_impl.untrusted_append(&mut self.wrpm, bytes_to_append, Tracked(&perm)) + } - /// This function advances the head index of the log. 
- pub exec fn advance_head(&mut self, new_head: u64) -> (result: Result<(), InfiniteLogErr>) - requires - old(self).valid() - ensures - self.valid(), - self.constants() == old(self).constants(), - match result { - Ok(offset) => { - match (old(self)@, self@) { - (Some(old_log), Some(new_log)) => { - &&& old_log.head <= new_head <= old_log.head + old_log.log.len() - &&& new_log == old_log.advance_head(new_head as int) - }, - _ => false - } - } - Err(InfiniteLogErr::CantAdvanceHeadPositionBeforeHead{ head }) => { - &&& self@ == old(self)@ - &&& head == self@.unwrap().head - &&& new_head < head - }, - Err(InfiniteLogErr::CantAdvanceHeadPositionBeyondTail{ tail }) => { - &&& self@ == old(self)@ - &&& tail == self@.unwrap().head + self@.unwrap().log.len() - &&& new_head > tail + /// This function advances the head index of the log. + pub exec fn advance_head(&mut self, new_head: u64) -> (result: Result<(), InfiniteLogErr>) + requires + old(self).valid(), + ensures + self.valid(), + self.constants() == old(self).constants(), + match result { + Ok(offset) => { + match (old(self)@, self@) { + (Some(old_log), Some(new_log)) => { + &&& old_log.head <= new_head <= old_log.head + old_log.log.len() + &&& new_log == old_log.advance_head(new_head as int) }, - _ => false + _ => false, } - { - // For crash safety, we must restrict the untrusted code's - // writes to persistent memory. We must only let it write - // such that, if a crash happens in the middle of a write, - // the view of the persistent state is either the current - // state or the current state with the head advanced to - // `new_head`. - - let tracked perm = TrustedPermission::new(self.wrpm@, - |s1: AbstractInfiniteLogState, s2| s2 == s1.advance_head(new_head as int)); - self.untrusted_log_impl.untrusted_advance_head(&mut self.wrpm, new_head, Tracked(&perm)) - } + }, + Err(InfiniteLogErr::CantAdvanceHeadPositionBeforeHead { head }) => { + &&& self@ == old(self)@ + &&& head == self@.unwrap().head + &&& new_head < head + }, + Err(InfiniteLogErr::CantAdvanceHeadPositionBeyondTail { tail }) => { + &&& self@ == old(self)@ + &&& tail == self@.unwrap().head + self@.unwrap().log.len() + &&& new_head > tail + }, + _ => false, + }, + { + // For crash safety, we must restrict the untrusted code's + // writes to persistent memory. We must only let it write + // such that, if a crash happens in the middle of a write, + // the view of the persistent state is either the current + // state or the current state with the head advanced to + // `new_head`. + let tracked perm = TrustedPermission::new( + self.wrpm@, + |s1: AbstractInfiniteLogState, s2| s2 == s1.advance_head(new_head as int), + ); + self.untrusted_log_impl.untrusted_advance_head(&mut self.wrpm, new_head, Tracked(&perm)) + } - /// This function reads `len` bytes from byte position `pos` - /// in the log. It returns a vector of those bytes. 
- pub exec fn read(&self, pos: u64, len: u64) -> (result: Result, InfiniteLogErr>) - requires - self.valid(), - pos + len <= u64::MAX - ensures - ({ - let state = self@.unwrap(); - let head = state.head; - let log = state.log; - match result { - Ok(bytes) => { - let true_bytes = log.subrange(pos - head, pos + len - head); - &&& pos >= head - &&& pos + len <= head + log.len() - &&& read_correct_modulo_corruption(bytes@, true_bytes, - self.constants().impervious_to_corruption) - }, - Err(InfiniteLogErr::CantReadBeforeHead{ head: head_pos }) => { - &&& pos < head - &&& head_pos == head - }, - Err(InfiniteLogErr::CantReadPastTail{ tail }) => { - &&& pos + len > head + log.len() - &&& tail == head + log.len() - }, - _ => false - } - }) - { - // We don't need to provide permission to write to the - // persistent memory because the untrusted code is only - // getting a non-mutable reference to it and thus can't - // write it. Note that the `UntrustedLogImpl` itself *is* - // mutable, so it can freely update its in-memory state - // (e.g., its cache) if it chooses. - self.untrusted_log_impl.untrusted_read(&self.wrpm, pos, len) - } + /// This function reads `len` bytes from byte position `pos` + /// in the log. It returns a vector of those bytes. + pub exec fn read(&self, pos: u64, len: u64) -> (result: Result, InfiniteLogErr>) + requires + self.valid(), + pos + len <= u64::MAX, + ensures + ({ + let state = self@.unwrap(); + let head = state.head; + let log = state.log; + match result { + Ok(bytes) => { + let true_bytes = log.subrange(pos - head, pos + len - head); + &&& pos >= head + &&& pos + len <= head + log.len() + &&& read_correct_modulo_corruption( + bytes@, + true_bytes, + self.constants().impervious_to_corruption, + ) + }, + Err(InfiniteLogErr::CantReadBeforeHead { head: head_pos }) => { + &&& pos < head + &&& head_pos == head + }, + Err(InfiniteLogErr::CantReadPastTail { tail }) => { + &&& pos + len > head + log.len() + &&& tail == head + log.len() + }, + _ => false, + } + }), + { + // We don't need to provide permission to write to the + // persistent memory because the untrusted code is only + // getting a non-mutable reference to it and thus can't + // write it. Note that the `UntrustedLogImpl` itself *is* + // mutable, so it can freely update its in-memory state + // (e.g., its cache) if it chooses. + self.untrusted_log_impl.untrusted_read(&self.wrpm, pos, len) + } - /// This function returns a tuple consisting of the head and - /// tail positions of the log. - pub exec fn get_head_and_tail(&self) -> (result: Result<(u64, u64, u64), InfiniteLogErr>) - requires - self.valid() - ensures - match result { - Ok((result_head, result_tail, result_capacity)) => { - let inf_log = self@.unwrap(); - &&& result_head == inf_log.head - &&& result_tail == inf_log.head + inf_log.log.len() - &&& result_capacity == inf_log.capacity - }, - Err(_) => false - } - { - // We don't need to provide permission to write to the - // persistent memory because the untrusted code is only - // getting a non-mutable reference to it and thus can't - // write it. Note that the `UntrustedLogImpl` itself *is* - // mutable, so it can freely update its in-memory state - // (e.g., its local copy of head and tail) if it chooses. - self.untrusted_log_impl.untrusted_get_head_and_tail(&self.wrpm) - } - } + /// This function returns a tuple consisting of the head and + /// tail positions of the log. 
+ pub exec fn get_head_and_tail(&self) -> (result: Result<(u64, u64, u64), InfiniteLogErr>) + requires + self.valid(), + ensures + match result { + Ok((result_head, result_tail, result_capacity)) => { + let inf_log = self@.unwrap(); + &&& result_head == inf_log.head + &&& result_tail == inf_log.head + inf_log.log.len() + &&& result_capacity == inf_log.capacity + }, + Err(_) => false, + }, + { + // We don't need to provide permission to write to the + // persistent memory because the untrusted code is only + // getting a non-mutable reference to it and thus can't + // write it. Note that the `UntrustedLogImpl` itself *is* + // mutable, so it can freely update its in-memory state + // (e.g., its local copy of head and tail) if it chooses. + self.untrusted_log_impl.untrusted_get_head_and_tail(&self.wrpm) } } +} // verus! +} + pub mod math { #![allow(unused_imports)] use builtin::*; @@ -2368,877 +2910,863 @@ pub mod math { verus! { - /* +/* From Ironfleet's math library's mul_nonlinear.i.dfy */ +#[verifier(nonlinear)] +pub proof fn lemma_mul_strictly_positive(x: int, y: int) + ensures + (0 < x && 0 < y) ==> (0 < x * y), +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_strictly_positive(x: int, y: int) - ensures - (0 < x && 0 < y) ==> (0 < x*y) - { - } - - #[verifier(nonlinear)] - pub proof fn lemma_mul_nonzero(x: int, y: int) - ensures - x*y != 0 <==> x != 0 && y != 0 - { - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_nonzero(x: int, y: int) + ensures + x * y != 0 <==> x != 0 && y != 0, +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_is_associative(x: int, y: int, z: int) - ensures - x * (y * z) == (x * y) * z - { - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_is_associative(x: int, y: int, z: int) + ensures + x * (y * z) == (x * y) * z, +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_is_distributive_add(x: int, y: int, z: int) - ensures - x*(y + z) == x*y + x*z - { - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_is_distributive_add(x: int, y: int, z: int) + ensures + x * (y + z) == x * y + x * z, +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_ordering(x: int, y: int) - requires - 0 < x, - 0 < y, - 0 <= x*y - ensures - x <= x*y && y <= x*y - { - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_ordering(x: int, y: int) + requires + 0 < x, + 0 < y, + 0 <= x * y, + ensures + x <= x * y && y <= x * y, +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_strict_inequality(x: int, y: int, z: int) - requires - x < y, - z > 0 - ensures - x*z < y*z - { - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_strict_inequality(x: int, y: int, z: int) + requires + x < y, + z > 0, + ensures + x * z < y * z, +{ +} - pub proof fn lemma_mul_by_zero_is_zero(x: int) - ensures - 0*x == 0, - x*0 == 0 - { - } +pub proof fn lemma_mul_by_zero_is_zero(x: int) + ensures + 0 * x == 0, + x * 0 == 0, +{ +} - /* +/* From Ironfleet's math library's mul.i.dfy */ - #[verifier(opaque)] - pub open spec fn mul_pos(x: int, y: int) -> int - recommends - x >= 0 - decreases - x - { - if x <= 0 { - 0 - } - else { - y + mul_pos(x - 1, y) - } - } +#[verifier(opaque)] +pub open spec fn mul_pos(x: int, y: int) -> int + recommends + x >= 0, + decreases x, +{ + if x <= 0 { + 0 + } else { + y + mul_pos(x - 1, y) + } +} - pub open spec fn mul_recursive(x: int, y: int) -> int - { - if x >= 0 { - mul_pos(x, y) - } - else { - -1 * mul_pos(-1 * x, y) - } - } +pub open spec fn mul_recursive(x: int, y: int) -> int { + if x >= 0 { + mul_pos(x, y) + } else { + -1 * mul_pos(-1 * x, y) + } +} - pub proof 
fn lemma_mul_is_mul_pos(x: int, y: int) - requires - x >= 0 - ensures - x * y == mul_pos(x, y) - decreases - x - { - reveal(mul_pos); - if x > 0 { - lemma_mul_is_mul_pos(x - 1, y); - lemma_mul_is_distributive_add_other_way(y, x - 1, 1); - assert (x * y == (x - 1) * y + y); - } - } +pub proof fn lemma_mul_is_mul_pos(x: int, y: int) + requires + x >= 0, + ensures + x * y == mul_pos(x, y), + decreases x, +{ + reveal(mul_pos); + if x > 0 { + lemma_mul_is_mul_pos(x - 1, y); + lemma_mul_is_distributive_add_other_way(y, x - 1, 1); + assert(x * y == (x - 1) * y + y); + } +} - pub proof fn lemma_mul_is_mul_recursive(x: int, y: int) - ensures - x * y == mul_recursive(x, y) - { - if (x >= 0) { - lemma_mul_is_mul_pos(x, y); - } - else if (x <= 0) { - lemma_mul_is_mul_pos(-x, y); - lemma_mul_is_associative(-1, -x, y); - } - } +pub proof fn lemma_mul_is_mul_recursive(x: int, y: int) + ensures + x * y == mul_recursive(x, y), +{ + if (x >= 0) { + lemma_mul_is_mul_pos(x, y); + } else if (x <= 0) { + lemma_mul_is_mul_pos(-x, y); + lemma_mul_is_associative(-1, -x, y); + } +} - pub proof fn lemma_mul_basics(x: int) - ensures - 0 * x == 0, - x * 0 == 0, - 1 * x == x, - x * 1 == x - { - } +pub proof fn lemma_mul_basics(x: int) + ensures + 0 * x == 0, + x * 0 == 0, + 1 * x == x, + x * 1 == x, +{ +} - pub proof fn lemma_mul_is_commutative(x: int, y: int) - ensures - x * y == y * x - { - } +pub proof fn lemma_mul_is_commutative(x: int, y: int) + ensures + x * y == y * x, +{ +} - pub proof fn lemma_mul_inequality(x: int, y: int, z: int) - requires - x <= y, - z >= 0, - ensures - x * z <= y * z - decreases - z - { - if z > 0 { - lemma_mul_inequality(x, y, z - 1); - lemma_mul_is_distributive_add(x, z - 1, 1); - lemma_mul_basics(x); - assert (x * z == x * (z - 1) + x); - lemma_mul_is_distributive_add(y, z - 1, 1); - lemma_mul_basics(y); - assert (y * z == y * (z - 1) + y); - } - } +pub proof fn lemma_mul_inequality(x: int, y: int, z: int) + requires + x <= y, + z >= 0, + ensures + x * z <= y * z, + decreases z, +{ + if z > 0 { + lemma_mul_inequality(x, y, z - 1); + lemma_mul_is_distributive_add(x, z - 1, 1); + lemma_mul_basics(x); + assert(x * z == x * (z - 1) + x); + lemma_mul_is_distributive_add(y, z - 1, 1); + lemma_mul_basics(y); + assert(y * z == y * (z - 1) + y); + } +} - pub proof fn lemma_mul_upper_bound(x: int, x_bound: int, y: int, y_bound: int) - requires - x <= x_bound, - y <= y_bound, - 0 <= x, - 0 <= y - ensures - x * y <= x_bound * y_bound - { - lemma_mul_inequality(x, x_bound, y); - lemma_mul_inequality(y, y_bound, x_bound); - } +pub proof fn lemma_mul_upper_bound(x: int, x_bound: int, y: int, y_bound: int) + requires + x <= x_bound, + y <= y_bound, + 0 <= x, + 0 <= y, + ensures + x * y <= x_bound * y_bound, +{ + lemma_mul_inequality(x, x_bound, y); + lemma_mul_inequality(y, y_bound, x_bound); +} - /// This lemma is less precise than the non-strict version, since - /// it uses two < facts to achieve only one < result. Thus, use it with - /// caution -- it may be throwing away precision you'll require later. - #[verifier(nonlinear)] - pub proof fn lemma_mul_strict_upper_bound(x: int, x_bound: int, y: int, y_bound: int) - requires - x < x_bound, - y < y_bound, - 0 <= x, - 0 <= y - ensures - x * y < x_bound * y_bound - decreases - y - { - lemma_mul_upper_bound(x, x_bound - 1, y, y_bound - 1); - assert ((x_bound - 1) * (y_bound - 1) == x_bound * y_bound - y_bound - x_bound + 1); - } +/// This lemma is less precise than the non-strict version, since +/// it uses two < facts to achieve only one < result. 
Thus, use it with +/// caution -- it may be throwing away precision you'll require later. +#[verifier(nonlinear)] +pub proof fn lemma_mul_strict_upper_bound(x: int, x_bound: int, y: int, y_bound: int) + requires + x < x_bound, + y < y_bound, + 0 <= x, + 0 <= y, + ensures + x * y < x_bound * y_bound, + decreases y, +{ + lemma_mul_upper_bound(x, x_bound - 1, y, y_bound - 1); + assert((x_bound - 1) * (y_bound - 1) == x_bound * y_bound - y_bound - x_bound + 1); +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_left_inequality(x: int, y: int, z: int) - requires - x > 0 - ensures - y <= z ==> x * y <= x * z, - y < z ==> x * y < x * z - decreases - x - { - if x > 1 { - lemma_mul_left_inequality(x - 1, y, z); - assert (x * y == (x - 1) * y + y); - assert (x * z == (x - 1) * z + z); - } - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_left_inequality(x: int, y: int, z: int) + requires + x > 0, + ensures + y <= z ==> x * y <= x * z, + y < z ==> x * y < x * z, + decreases x, +{ + if x > 1 { + lemma_mul_left_inequality(x - 1, y, z); + assert(x * y == (x - 1) * y + y); + assert(x * z == (x - 1) * z + z); + } +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_inequality_converse(x: int, y: int, z: int) - requires - x*z <= y*z, - z > 0 - ensures - x <= y - decreases - z - { - if z > 1 { - if (x * (z - 1) <= y * (z - 1)) { - lemma_mul_inequality_converse(x, y, z - 1); - } - else { - lemma_mul_inequality_converse(y, x, z - 1); - assert (false); - } - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_inequality_converse(x: int, y: int, z: int) + requires + x * z <= y * z, + z > 0, + ensures + x <= y, + decreases z, +{ + if z > 1 { + if (x * (z - 1) <= y * (z - 1)) { + lemma_mul_inequality_converse(x, y, z - 1); + } else { + lemma_mul_inequality_converse(y, x, z - 1); + assert(false); } + } +} + +pub proof fn lemma_mul_equality_converse(x: int, y: int, z: int) + requires + x * z == y * z, + 0 < z, + ensures + x == y, +{ + lemma_mul_inequality_converse(x, y, z); + lemma_mul_inequality_converse(y, x, z); +} + +#[verifier(nonlinear)] +pub proof fn lemma_mul_is_distributive_add_other_way(x: int, y: int, z: int) + ensures + (y + z) * x == y * x + z * x, +{ +} + +#[verifier(nonlinear)] +pub proof fn lemma_mul_is_distributive_sub(x: int, y: int, z: int) + ensures + x * (y - z) == x * y - x * z, +{ +} - pub proof fn lemma_mul_equality_converse(x: int, y: int, z: int) - requires - x*z == y*z, - 0 1 { + lemma_mul_increases(x - 1, y); + assert(x * y == (x - 1) * y + y); + } +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_is_distributive(x: int, y: int, z: int) - ensures - x*(y + z) == x*y + x*z, - x*(y - z) == x*y - x*z, - (y + z)*x == y*x + z*x, - (y - z)*x == y*x - z*x, - x*(y + z) == (y + z)*x, - x*(y - z) == (y - z)*x, - x*y == y*x, - x*z == z*x - { - } +pub proof fn lemma_mul_nonnegative(x: int, y: int) + requires + 0 <= x, + 0 <= y, + ensures + 0 <= x * y, +{ + if x != 0 && y != 0 { + lemma_mul_strictly_positive(x, y); + } +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_strictly_increases(x: int, y: int) - requires - 1 < x, - 0 < y - ensures - y < x*y - { - assert (x * y == (x - 1) * y + y); - lemma_mul_strictly_positive(x - 1, y); - } +#[verifier(nonlinear)] +pub proof fn lemma_mul_unary_negation(x: int, y: int) + ensures + (-x) * y == -(x * y), + -(x * y) == x * (-y), +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_increases(x: int, y: int) - requires - 0 1 { - lemma_mul_increases(x - 1, y); - assert (x * y == (x - 1) * y + y); - } +#[verifier(nonlinear)] +pub proof fn 
lemma_mul_one_to_one(m: int, x: int, y: int) + requires + m != 0, + m * x == m * y, + ensures + x == y, +{ + if m > 0 { + if x < y { + lemma_mul_strict_inequality(x, y, m); } - - pub proof fn lemma_mul_nonnegative(x: int, y: int) - requires - 0 <= x, - 0 <= y - ensures - 0 <= x*y - { - if x != 0 && y != 0 { - lemma_mul_strictly_positive(x, y); - } + if x > y { + lemma_mul_strict_inequality(y, x, m); } - - #[verifier(nonlinear)] - pub proof fn lemma_mul_unary_negation(x: int, y: int) - ensures - (-x)*y == -(x*y), - -(x*y) == x*(-y) - { + } else { + assert(x * m == -(x * -m)); + assert(y * m == -(y * -m)); + if x < y { + lemma_mul_strict_inequality(x, y, -m); } - - #[verifier(nonlinear)] - pub proof fn lemma_mul_one_to_one(m: int, x: int, y: int) - requires - m!=0, - m*x == m*y - ensures - x == y - { - if m > 0 { - if x < y { - lemma_mul_strict_inequality(x, y, m); - } - if x > y { - lemma_mul_strict_inequality(y, x, m); - } - } - else { - assert (x * m == -(x * -m)); - assert (y * m == -(y * -m)); - if x < y { - lemma_mul_strict_inequality(x, y, -m); - } - if x > y { - lemma_mul_strict_inequality(y, x, -m); - } - } + if x > y { + lemma_mul_strict_inequality(y, x, -m); } + } +} - /* +/* From Ironfleet's math library's div_nonlinear.i.dfy */ - pub proof fn lemma_div_of_0(d: int) - requires - d != 0 - ensures - 0int / d == 0 - { - } +pub proof fn lemma_div_of_0(d: int) + requires + d != 0, + ensures + 0int / d == 0, +{ +} - pub proof fn lemma_div_by_self(d: int) - requires - d != 0 - ensures - d / d == 1 - { - } +pub proof fn lemma_div_by_self(d: int) + requires + d != 0, + ensures + d / d == 1, +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_small_div(x: int, d: int) - requires - 0 <= x < d, - d > 0 - ensures - x / d == 0 - { - } +#[verifier(nonlinear)] +pub proof fn lemma_small_div(x: int, d: int) + requires + 0 <= x < d, + d > 0, + ensures + x / d == 0, +{ +} - pub proof fn lemma_mod_of_zero_is_zero(m: int) - requires - 0 < m - ensures - 0int % m == 0 - { - } +pub proof fn lemma_mod_of_zero_is_zero(m: int) + requires + 0 < m, + ensures + 0int % m == 0, +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_fundamental_div_mod(x: int, d: int) - requires - d != 0 - ensures - x == d * (x/d) + (x%d) - { - } +#[verifier(nonlinear)] +pub proof fn lemma_fundamental_div_mod(x: int, d: int) + requires + d != 0, + ensures + x == d * (x / d) + (x % d), +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_small_mod(x: int, m: int) - requires - 0 <= x < m, - 0 < m - ensures - x % m == x - { - } +#[verifier(nonlinear)] +pub proof fn lemma_small_mod(x: int, m: int) + requires + 0 <= x < m, + 0 < m, + ensures + x % m == x, +{ +} - pub proof fn lemma_mod_range(x: int, m: int) - requires - m > 0 - ensures - 0 <= x % m < m - { - } +pub proof fn lemma_mod_range(x: int, m: int) + requires + m > 0, + ensures + 0 <= x % m < m, +{ +} - /* +/* From Ironfleet's math library's mod_auto_proofs.i.dfy */ - pub proof fn lemma_mod_auto_basics(n: int, x: int) - requires - n > 0 - ensures - (x + n) % n == x % n, - (x - n) % n == x % n, - (x + n) / n == x / n + 1, - (x - n) / n == x / n - 1, - 0 <= x < n <==> x % n == x, - { - lemma_mod_range(x, n); - lemma_fundamental_div_mod(x, n); - lemma_fundamental_div_mod(x + n, n); - lemma_fundamental_div_mod(x - n, n); - lemma_mod_range(x, n); - lemma_mod_range(x + n, n); - lemma_mod_range(x - n, n); - let zp = (x + n) / n - x / n - 1; - let zm = (x - n) / n - x / n + 1; - lemma_mul_is_distributive_sub(n, (x + n) / n, x / n + 1); - lemma_mul_is_distributive_add(n, x / n, 1); - assert 
(n * zp == n * ((x + n) / n) - n * (x / n) - n * 1); - assert (0 == n * zp + ((x + n) % n) - (x % n)); - lemma_mul_is_distributive_sub(n, (x - n) / n, x / n - 1); - lemma_mul_is_distributive_sub(n, x / n, 1); - assert (n * zm == n * ((x - n) / n) - n * (x / n) + n * 1); - assert (0 == n * zm + ((x - n) % n) - (x % n)); - if (zp > 0) { lemma_mul_inequality(1, zp, n); } - if (zp < 0) { lemma_mul_inequality(zp, -1, n); } - if (zp == 0) { lemma_mul_by_zero_is_zero(n); } - if (zm > 0) { lemma_mul_inequality(1, zm, n); } - if (zm < 0) { lemma_mul_inequality(zm, -1, n); } - if 0 <= x < n { - lemma_small_div(x, n); - } - } +pub proof fn lemma_mod_auto_basics(n: int, x: int) + requires + n > 0, + ensures + (x + n) % n == x % n, + (x - n) % n == x % n, + (x + n) / n == x / n + 1, + (x - n) / n == x / n - 1, + 0 <= x < n <==> x % n == x, +{ + lemma_mod_range(x, n); + lemma_fundamental_div_mod(x, n); + lemma_fundamental_div_mod(x + n, n); + lemma_fundamental_div_mod(x - n, n); + lemma_mod_range(x, n); + lemma_mod_range(x + n, n); + lemma_mod_range(x - n, n); + let zp = (x + n) / n - x / n - 1; + let zm = (x - n) / n - x / n + 1; + lemma_mul_is_distributive_sub(n, (x + n) / n, x / n + 1); + lemma_mul_is_distributive_add(n, x / n, 1); + assert(n * zp == n * ((x + n) / n) - n * (x / n) - n * 1); + assert(0 == n * zp + ((x + n) % n) - (x % n)); + lemma_mul_is_distributive_sub(n, (x - n) / n, x / n - 1); + lemma_mul_is_distributive_sub(n, x / n, 1); + assert(n * zm == n * ((x - n) / n) - n * (x / n) + n * 1); + assert(0 == n * zm + ((x - n) % n) - (x % n)); + if (zp > 0) { + lemma_mul_inequality(1, zp, n); + } + if (zp < 0) { + lemma_mul_inequality(zp, -1, n); + } + if (zp == 0) { + lemma_mul_by_zero_is_zero(n); + } + if (zm > 0) { + lemma_mul_inequality(1, zm, n); + } + if (zm < 0) { + lemma_mul_inequality(zm, -1, n); + } + if 0 <= x < n { + lemma_small_div(x, n); + } +} - /* +/* From Ironfleet's div.i.dfy */ - proof fn lemma_fundamental_div_mod_converse_helper_1(u: int, d: int, r: int) - requires - d != 0, - 0 <= r < d - ensures - u == (u * d + r) / d - decreases - if u >= 0 { u } else { -u } - { - if u < 0 { - lemma_fundamental_div_mod_converse_helper_1(u + 1, d, r); - lemma_mod_auto_basics(d, u * d + r); - lemma_mul_is_distributive_add_other_way(d, u + 1, -1); - assert (u * d + r == (u + 1) * d + r - d); - assert (u == (u * d + r) / d); - } - else if u == 0 { - lemma_small_div(r, d); - assert (u == 0 ==> u * d == 0) by (nonlinear_arith); - assert (u == (u * d + r) / d); - } - else { - lemma_fundamental_div_mod_converse_helper_1(u - 1, d, r); - lemma_mod_auto_basics(d, (u - 1) * d + r); - lemma_mul_is_distributive_add_other_way(d, u - 1, 1); - assert (u * d + r == (u - 1) * d + r + d); - assert (u == (u * d + r) / d); - } - } +proof fn lemma_fundamental_div_mod_converse_helper_1(u: int, d: int, r: int) + requires + d != 0, + 0 <= r < d, + ensures + u == (u * d + r) / d, + decreases + if u >= 0 { + u + } else { + -u + }, +{ + if u < 0 { + lemma_fundamental_div_mod_converse_helper_1(u + 1, d, r); + lemma_mod_auto_basics(d, u * d + r); + lemma_mul_is_distributive_add_other_way(d, u + 1, -1); + assert(u * d + r == (u + 1) * d + r - d); + assert(u == (u * d + r) / d); + } else if u == 0 { + lemma_small_div(r, d); + assert(u == 0 ==> u * d == 0) by (nonlinear_arith); + assert(u == (u * d + r) / d); + } else { + lemma_fundamental_div_mod_converse_helper_1(u - 1, d, r); + lemma_mod_auto_basics(d, (u - 1) * d + r); + lemma_mul_is_distributive_add_other_way(d, u - 1, 1); + assert(u * d + r == (u - 1) * d + r + 
d); + assert(u == (u * d + r) / d); + } +} - proof fn lemma_fundamental_div_mod_converse_helper_2(u: int, d: int, r: int) - requires - d != 0, - 0 <= r < d - ensures - r == (u * d + r) % d - decreases - if u >= 0 { u } else { -u } - { - if u < 0 { - lemma_fundamental_div_mod_converse_helper_2(u + 1, d, r); - lemma_mod_auto_basics(d, u * d + r); - lemma_mul_is_distributive_add_other_way(d, u + 1, -1); - assert (u * d == (u + 1) * d + (-1) * d); - assert (u * d + r == (u + 1) * d + r - d); - assert (r == (u * d + r) % d); - } - else if u == 0 { - assert (u == 0 ==> u * d == 0) by (nonlinear_arith); - if d > 0 { - lemma_small_mod(r, d); - } - else { - lemma_small_mod(r, -d); - } - assert (r == (u * d + r) % d); - } - else { - lemma_fundamental_div_mod_converse_helper_2(u - 1, d, r); - lemma_mod_auto_basics(d, (u - 1) * d + r); - lemma_mul_is_distributive_add_other_way(d, u - 1, 1); - assert (u * d + r == (u - 1) * d + r + d); - assert (r == (u * d + r) % d); - } +proof fn lemma_fundamental_div_mod_converse_helper_2(u: int, d: int, r: int) + requires + d != 0, + 0 <= r < d, + ensures + r == (u * d + r) % d, + decreases + if u >= 0 { + u + } else { + -u + }, +{ + if u < 0 { + lemma_fundamental_div_mod_converse_helper_2(u + 1, d, r); + lemma_mod_auto_basics(d, u * d + r); + lemma_mul_is_distributive_add_other_way(d, u + 1, -1); + assert(u * d == (u + 1) * d + (-1) * d); + assert(u * d + r == (u + 1) * d + r - d); + assert(r == (u * d + r) % d); + } else if u == 0 { + assert(u == 0 ==> u * d == 0) by (nonlinear_arith); + if d > 0 { + lemma_small_mod(r, d); + } else { + lemma_small_mod(r, -d); } + assert(r == (u * d + r) % d); + } else { + lemma_fundamental_div_mod_converse_helper_2(u - 1, d, r); + lemma_mod_auto_basics(d, (u - 1) * d + r); + lemma_mul_is_distributive_add_other_way(d, u - 1, 1); + assert(u * d + r == (u - 1) * d + r + d); + assert(r == (u * d + r) % d); + } +} - pub proof fn lemma_fundamental_div_mod_converse(x: int, d: int, q: int, r: int) - requires - d != 0, - 0 <= r < d, - x == q * d + r - ensures - q == x / d, - r == x % d - { - lemma_fundamental_div_mod_converse_helper_1(q, d, r); - assert (q == (q * d + r) / d); - lemma_fundamental_div_mod_converse_helper_2(q, d, r); - } +pub proof fn lemma_fundamental_div_mod_converse(x: int, d: int, q: int, r: int) + requires + d != 0, + 0 <= r < d, + x == q * d + r, + ensures + q == x / d, + r == x % d, +{ + lemma_fundamental_div_mod_converse_helper_1(q, d, r); + assert(q == (q * d + r) / d); + lemma_fundamental_div_mod_converse_helper_2(q, d, r); +} - /* +/* Lemmas we need for this project */ - pub proof fn lemma_div_relation_when_mods_have_same_order(d: int, x: int, y: int) - requires - d > 0, - x < y, - y - x <= d, - x % d < y % d - ensures - y / d == x / d - { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - - lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); - lemma_mul_is_commutative(y / d, d); - lemma_mul_is_commutative(x / d, d); - - if (y / d) > (x / d) { - lemma_mul_inequality(1, (y / d) - (x / d), d); - assert (((y / d) - (x / d)) * d >= 1 * d); - assert ((y / d) * d - (x / d) * d >= d); - assert (false); - } - if (y / d) < (x / d) { - lemma_mul_inequality((y / d) - (x / d), -1, d); - assert (((y / d) - (x / d)) * d <= (-1) * d); - lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); - assert (false); - } - } - - pub proof fn lemma_div_relation_when_mods_have_same_order_alt(d: int, x: int, y: int) - requires - d > 0, - x <= y, - y - x < d, - 
x % d <= y % d - ensures - y / d == x / d - { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - - lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); - lemma_mul_is_commutative(y / d, d); - lemma_mul_is_commutative(x / d, d); - - if (y / d) > (x / d) { - lemma_mul_inequality(1, (y / d) - (x / d), d); - assert (((y / d) - (x / d)) * d >= 1 * d); - assert (false); - } - if (y / d) < (x / d) { - lemma_mul_inequality((y / d) - (x / d), -1, d); - assert (((y / d) - (x / d)) * d <= (-1) * d); - assert (false); - } - } +pub proof fn lemma_div_relation_when_mods_have_same_order(d: int, x: int, y: int) + requires + d > 0, + x < y, + y - x <= d, + x % d < y % d, + ensures + y / d == x / d, +{ + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); + lemma_mul_is_commutative(y / d, d); + lemma_mul_is_commutative(x / d, d); + if (y / d) > (x / d) { + lemma_mul_inequality(1, (y / d) - (x / d), d); + assert(((y / d) - (x / d)) * d >= 1 * d); + assert((y / d) * d - (x / d) * d >= d); + assert(false); + } + if (y / d) < (x / d) { + lemma_mul_inequality((y / d) - (x / d), -1, d); + assert(((y / d) - (x / d)) * d <= (-1) * d); + lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); + assert(false); + } +} - pub proof fn lemma_div_relation_when_mods_have_different_order(d: int, x: int, y: int) - requires - d > 0, - x < y, - y - x <= d, - y % d <= x % d - ensures - y / d == x / d + 1 - { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - - lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); - lemma_mul_is_commutative(y / d, d); - lemma_mul_is_commutative(x / d, d); - - if (y / d) > (x / d) + 1 { - lemma_mul_inequality(2, (y / d) - (x / d), d); - assert (((y / d) - (x / d)) * d >= 2 * d); - assert (false); - } - if (y / d) <= (x / d) { - lemma_mul_inequality(0, (x / d) - (y / d), d); - assert (0 * d <= ((x / d) - (y / d)) * d); - lemma_mul_is_commutative((x / d) - (y / d), d); - lemma_mul_is_distributive_sub(d, x / d, y / d); - assert (d * ((x / d) - (y / d)) == d * (x / d) - d * (y / d)); - assert (0 * d <= x - y - x % d + y % d); - assert (false); - } - } +pub proof fn lemma_div_relation_when_mods_have_same_order_alt(d: int, x: int, y: int) + requires + d > 0, + x <= y, + y - x < d, + x % d <= y % d, + ensures + y / d == x / d, +{ + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); + lemma_mul_is_commutative(y / d, d); + lemma_mul_is_commutative(x / d, d); + if (y / d) > (x / d) { + lemma_mul_inequality(1, (y / d) - (x / d), d); + assert(((y / d) - (x / d)) * d >= 1 * d); + assert(false); + } + if (y / d) < (x / d) { + lemma_mul_inequality((y / d) - (x / d), -1, d); + assert(((y / d) - (x / d)) * d <= (-1) * d); + assert(false); + } +} - pub proof fn lemma_div_relation_when_mods_have_different_order_alt(d: int, x: int, y: int) - requires - d > 0, - x <= y, - y - x < d, - y % d < x % d - ensures - y / d == x / d + 1 - { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - - lemma_mul_is_commutative(y / d, d); - lemma_mul_is_commutative(x / d, d); - - if (y / d) > (x / d) + 1 { - lemma_mul_inequality(2, (y / d) - (x / d), d); - 
lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); - assert (((y / d) - (x / d)) * d >= 2 * d); - assert (false); - } - if (y / d) <= (x / d) { - lemma_mul_inequality(0, (x / d) - (y / d), d); - assert (0 * d <= ((x / d) - (y / d)) * d); - lemma_mul_is_commutative((x / d) - (y / d), d); - lemma_mul_is_distributive_sub(d, x / d, y / d); - assert (d * ((x / d) - (y / d)) == d * (x / d) - d * (y / d)); - assert (0 * d <= x - y - x % d + y % d); - assert (false); - } - } +pub proof fn lemma_div_relation_when_mods_have_different_order(d: int, x: int, y: int) + requires + d > 0, + x < y, + y - x <= d, + y % d <= x % d, + ensures + y / d == x / d + 1, +{ + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); + lemma_mul_is_commutative(y / d, d); + lemma_mul_is_commutative(x / d, d); + if (y / d) > (x / d) + 1 { + lemma_mul_inequality(2, (y / d) - (x / d), d); + assert(((y / d) - (x / d)) * d >= 2 * d); + assert(false); + } + if (y / d) <= (x / d) { + lemma_mul_inequality(0, (x / d) - (y / d), d); + assert(0 * d <= ((x / d) - (y / d)) * d); + lemma_mul_is_commutative((x / d) - (y / d), d); + lemma_mul_is_distributive_sub(d, x / d, y / d); + assert(d * ((x / d) - (y / d)) == d * (x / d) - d * (y / d)); + assert(0 * d <= x - y - x % d + y % d); + assert(false); + } +} - pub proof fn lemma_mod_between(d: int, x: int, y: int, z: int) - requires - d > 0, - x % d < y % d, - y - x <= d, - x <= z <= y - ensures - x % d <= z % d <= y % d - { - if y - x == d { - lemma_mod_auto_basics(d, x); - assert (y % d == x % d); - assert (false); - } - else { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_fundamental_div_mod(z, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - lemma_mod_range(z, d); - assert (d * (y / d) - d * (x / d) + y % d - x % d < d); - assert (d * (y / d) - d * (x / d) < d); - lemma_mul_is_distributive_sub(d, (y / d), (x / d)); - assert (d * ((y / d) - (x / d)) < d); - - lemma_div_relation_when_mods_have_same_order(d, x, y); - - let z_mod_d = x % d + (z - x); - assert (z == (x / d) * d + z_mod_d) by { - assert (z == d * (x / d) + z_mod_d); - lemma_mul_is_commutative(d, (x / d)); - } - lemma_fundamental_div_mod_converse(z, d, (x / d), z_mod_d); - } - } +pub proof fn lemma_div_relation_when_mods_have_different_order_alt(d: int, x: int, y: int) + requires + d > 0, + x <= y, + y - x < d, + y % d < x % d, + ensures + y / d == x / d + 1, +{ + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + lemma_mul_is_commutative(y / d, d); + lemma_mul_is_commutative(x / d, d); + if (y / d) > (x / d) + 1 { + lemma_mul_inequality(2, (y / d) - (x / d), d); + lemma_mul_is_distributive_sub_other_way(d, y / d, x / d); + assert(((y / d) - (x / d)) * d >= 2 * d); + assert(false); + } + if (y / d) <= (x / d) { + lemma_mul_inequality(0, (x / d) - (y / d), d); + assert(0 * d <= ((x / d) - (y / d)) * d); + lemma_mul_is_commutative((x / d) - (y / d), d); + lemma_mul_is_distributive_sub(d, x / d, y / d); + assert(d * ((x / d) - (y / d)) == d * (x / d) - d * (y / d)); + assert(0 * d <= x - y - x % d + y % d); + assert(false); + } +} - pub proof fn lemma_mod_not_between(d: int, x: int, y: int, z: int) - requires - d > 0, - y % d < x % d, - y - x <= d, - x <= z <= y - ensures - z % d <= y % d || z % d >= x % d - { - if y - x == d { - lemma_mod_auto_basics(d, x); - assert (y % d == x % d); - assert 
(false); - } - else { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_fundamental_div_mod(z, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - lemma_mod_range(z, d); - assert (d * (y / d) - d * (x / d) + y % d - x % d >= 0); - assert (d * (y / d) - d * (x / d) >= 0); - lemma_mul_is_distributive_sub(d, (y / d), (x / d)); - assert (d * ((y / d) - (x / d)) >= 0); - - lemma_div_relation_when_mods_have_different_order(d, x, y); - - if y % d < z % d < x % d { - lemma_div_relation_when_mods_have_different_order(d, z, y); - lemma_div_relation_when_mods_have_same_order(d, z, x); - assert (false); - } - } +pub proof fn lemma_mod_between(d: int, x: int, y: int, z: int) + requires + d > 0, + x % d < y % d, + y - x <= d, + x <= z <= y, + ensures + x % d <= z % d <= y % d, +{ + if y - x == d { + lemma_mod_auto_basics(d, x); + assert(y % d == x % d); + assert(false); + } else { + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_fundamental_div_mod(z, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + lemma_mod_range(z, d); + assert(d * (y / d) - d * (x / d) + y % d - x % d < d); + assert(d * (y / d) - d * (x / d) < d); + lemma_mul_is_distributive_sub(d, (y / d), (x / d)); + assert(d * ((y / d) - (x / d)) < d); + lemma_div_relation_when_mods_have_same_order(d, x, y); + let z_mod_d = x % d + (z - x); + assert(z == (x / d) * d + z_mod_d) by { + assert(z == d * (x / d) + z_mod_d); + lemma_mul_is_commutative(d, (x / d)); } + lemma_fundamental_div_mod_converse(z, d, (x / d), z_mod_d); + } +} - pub proof fn lemma_mod_addition_when_bounded(x: int, y: int, d: int) - requires - d > 0, - y >= 0, - (x % d) + y < d, - ensures - (x + y) % d == (x % d) + y - { - lemma_fundamental_div_mod(x, d); - lemma_mul_is_commutative(x / d, d); - lemma_fundamental_div_mod_converse(x + y, d, x / d, x % d + y); +pub proof fn lemma_mod_not_between(d: int, x: int, y: int, z: int) + requires + d > 0, + y % d < x % d, + y - x <= d, + x <= z <= y, + ensures + z % d <= y % d || z % d >= x % d, +{ + if y - x == d { + lemma_mod_auto_basics(d, x); + assert(y % d == x % d); + assert(false); + } else { + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_fundamental_div_mod(z, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + lemma_mod_range(z, d); + assert(d * (y / d) - d * (x / d) + y % d - x % d >= 0); + assert(d * (y / d) - d * (x / d) >= 0); + lemma_mul_is_distributive_sub(d, (y / d), (x / d)); + assert(d * ((y / d) - (x / d)) >= 0); + lemma_div_relation_when_mods_have_different_order(d, x, y); + if y % d < z % d < x % d { + lemma_div_relation_when_mods_have_different_order(d, z, y); + lemma_div_relation_when_mods_have_same_order(d, z, x); + assert(false); } + } +} - pub proof fn lemma_mod_difference_equal(x: int, y: int, d: int) - requires - d > 0, - x <= y, - x % d <= y % d, - y - x < d - ensures - y % d - x % d == y - x - { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - - assert (d * (y / d) - d * (x / d) + y % d - x % d == y - x); - lemma_mul_is_distributive_sub(d, y / d, x / d); - assert (d * (y / d - x / d) + y % d - x % d == y - x); - assert (0 <= d * (y / d - x / d) + y % d - x % d < d); - lemma_div_relation_when_mods_have_same_order_alt(d, x, y); - assert (y / d == x / d); - } +pub proof fn lemma_mod_addition_when_bounded(x: int, y: int, d: int) + requires + d > 0, + y >= 0, + (x % d) + y < d, + ensures + (x + y) % d == (x % d) + y, +{ + 
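+    // Proof idea: lemma_fundamental_div_mod gives x == d * (x / d) + (x % d), so
+    // x + y == (x / d) * d + ((x % d) + y) with 0 <= (x % d) + y < d, and
+    // lemma_fundamental_div_mod_converse then fixes both the quotient and the
+    // remainder of x + y. For instance, with d = 10, x = 17, y = 2 we have
+    // x % d == 7, 7 + 2 == 9 < 10, and (17 + 2) % 10 == 9 == (x % d) + y.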
lemma_fundamental_div_mod(x, d); + lemma_mul_is_commutative(x / d, d); + lemma_fundamental_div_mod_converse(x + y, d, x / d, x % d + y); +} - pub proof fn lemma_mod_wrapped_len(x: int, y: int, d: int) - requires - d > 0, - x <= y, - x % d > y % d, - y - x < d - ensures - d - (x % d) + (y % d) == y - x - { - lemma_fundamental_div_mod(x, d); - lemma_fundamental_div_mod(y, d); - lemma_mod_range(x, d); - lemma_mod_range(y, d); - assert (d * (y / d) - d * (x / d) + y % d - x % d == y - x); - lemma_mul_is_distributive_sub(d, y / d, x / d); - assert (d * (y / d - x / d) + y % d - x % d == y - x); - assert (0 <= d * (y / d - x / d) + y % d - x % d < d); - lemma_div_relation_when_mods_have_different_order_alt(d, x, y); - assert (y / d == x / d + 1); - assert (y / d - x / d == 1 ==> d * (y / d - x / d) == d) by (nonlinear_arith); - } +pub proof fn lemma_mod_difference_equal(x: int, y: int, d: int) + requires + d > 0, + x <= y, + x % d <= y % d, + y - x < d, + ensures + y % d - x % d == y - x, +{ + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + assert(d * (y / d) - d * (x / d) + y % d - x % d == y - x); + lemma_mul_is_distributive_sub(d, y / d, x / d); + assert(d * (y / d - x / d) + y % d - x % d == y - x); + assert(0 <= d * (y / d - x / d) + y % d - x % d < d); + lemma_div_relation_when_mods_have_same_order_alt(d, x, y); + assert(y / d == x / d); +} - pub proof fn lemma_mod_equal(x: int, y: int, d: int) - requires - d > 0, - x <= y, - x % d == y % d, - y - x < d - ensures - x == y - { - lemma_mod_difference_equal(x, y, d); - } +pub proof fn lemma_mod_wrapped_len(x: int, y: int, d: int) + requires + d > 0, + x <= y, + x % d > y % d, + y - x < d, + ensures + d - (x % d) + (y % d) == y - x, +{ + lemma_fundamental_div_mod(x, d); + lemma_fundamental_div_mod(y, d); + lemma_mod_range(x, d); + lemma_mod_range(y, d); + assert(d * (y / d) - d * (x / d) + y % d - x % d == y - x); + lemma_mul_is_distributive_sub(d, y / d, x / d); + assert(d * (y / d - x / d) + y % d - x % d == y - x); + assert(0 <= d * (y / d - x / d) + y % d - x % d < d); + lemma_div_relation_when_mods_have_different_order_alt(d, x, y); + assert(y / d == x / d + 1); + assert(y / d - x / d == 1 ==> d * (y / d - x / d) == d) by (nonlinear_arith); +} - pub proof fn lemma_mod_equal_converse(x: int, y: int, d: int) - requires - d > 0, - x == y, - ensures - x % d == y % d - {} - - pub proof fn lemma_mod_not_equal(x: int, y: int, d: int) - requires - d > 0, - y - x < d, - y - x >= 0, - x != y, - ensures - x % d != y % d - { - if x % d == y % d { - if x < y { - lemma_mod_equal(x, y, d); - assert(false); - } else { - assert(y - x < 0); - assert(false); - } - } +pub proof fn lemma_mod_equal(x: int, y: int, d: int) + requires + d > 0, + x <= y, + x % d == y % d, + y - x < d, + ensures + x == y, +{ + lemma_mod_difference_equal(x, y, d); +} - } +pub proof fn lemma_mod_equal_converse(x: int, y: int, d: int) + requires + d > 0, + x == y, + ensures + x % d == y % d, +{ +} - #[verifier(nonlinear)] - pub proof fn lemma_mul_div_equal(x: int, q: int, d: int) - requires - q * d <= x < (q + 1) * d - ensures - (x / d) == q - {} - - pub proof fn lemma_mod_subtract(x: int, y: int, d: int) - requires - d > 0, - (x % d) + y >= d, - 0 <= y < d - ensures - (x % d) + y - d == (x + y) % d - { - assert(d <= (x % d) + y < 2 * d); - assert((x / d) * d + d <= (x / d) * d + (x % d) + y < (x / d) * d + 2 * d); - lemma_fundamental_div_mod(x, d); - lemma_mul_is_commutative(x / d, d); - 
lemma_mul_is_distributive_add_other_way(d, x / d, 1); - lemma_mul_is_distributive_add_other_way(d, x / d, 2); - assert((x / d + 1) * d <= x + y < (x / d + 2) * d); - lemma_mul_div_equal(x + y, (x / d + 1), d); - assert(x / d + 1 == (x + y) / d); - lemma_fundamental_div_mod(x + y, d); - assert(x + y == d * ((x + y) / d) + (x + y) % d); +pub proof fn lemma_mod_not_equal(x: int, y: int, d: int) + requires + d > 0, + y - x < d, + y - x >= 0, + x != y, + ensures + x % d != y % d, +{ + if x % d == y % d { + if x < y { + lemma_mod_equal(x, y, d); + assert(false); + } else { + assert(y - x < 0); + assert(false); } } } +#[verifier(nonlinear)] +pub proof fn lemma_mul_div_equal(x: int, q: int, d: int) + requires + q * d <= x < (q + 1) * d, + ensures + (x / d) == q, +{ +} + +pub proof fn lemma_mod_subtract(x: int, y: int, d: int) + requires + d > 0, + (x % d) + y >= d, + 0 <= y < d, + ensures + (x % d) + y - d == (x + y) % d, +{ + assert(d <= (x % d) + y < 2 * d); + assert((x / d) * d + d <= (x / d) * d + (x % d) + y < (x / d) * d + 2 * d); + lemma_fundamental_div_mod(x, d); + lemma_mul_is_commutative(x / d, d); + lemma_mul_is_distributive_add_other_way(d, x / d, 1); + lemma_mul_is_distributive_add_other_way(d, x / d, 2); + assert((x / d + 1) * d <= x + y < (x / d + 2) * d); + lemma_mul_div_equal(x + y, (x / d + 1), d); + assert(x / d + 1 == (x + y) / d); + lemma_fundamental_div_mod(x + y, d); + assert(x + y == d * ((x + y) / d) + (x + y) % d); +} + +} // verus! +} + pub mod pmemmock_t { use crate::pmemspec_t::*; use builtin::*; @@ -3248,59 +3776,53 @@ pub mod pmemmock_t { verus! { - pub struct VolatileMemoryMockingPersistentMemory - { - contents: Vec - } - - impl VolatileMemoryMockingPersistentMemory { - #[verifier::external_body] - pub fn new(device_size: u64) -> (result: Result) - ensures - match result { - Ok(pm) => pm@.len() == device_size && pm.inv(), - Err(_) => true - } - { - Ok(Self {contents: vec![0; device_size as usize]}) - } - } +pub struct VolatileMemoryMockingPersistentMemory { + contents: Vec, +} - impl PersistentMemory for VolatileMemoryMockingPersistentMemory { - closed spec fn view(self) -> Seq - { - self.contents@ - } +impl VolatileMemoryMockingPersistentMemory { + #[verifier::external_body] + pub fn new(device_size: u64) -> (result: Result) + ensures + match result { + Ok(pm) => pm@.len() == device_size && pm.inv(), + Err(_) => true, + }, + { + Ok(Self { contents: vec![0; device_size as usize] }) + } +} - closed spec fn inv(self) -> bool - { - self.contents.len() <= u64::MAX - } +impl PersistentMemory for VolatileMemoryMockingPersistentMemory { + closed spec fn view(self) -> Seq { + self.contents@ + } - closed spec fn constants(self) -> PersistentMemoryConstants - { - PersistentMemoryConstants { impervious_to_corruption: true } - } + closed spec fn inv(self) -> bool { + self.contents.len() <= u64::MAX + } - #[verifier::external_body] - fn read(&self, addr: u64, num_bytes: u64) -> Vec - { - let addr_usize: usize = addr.try_into().unwrap(); - let num_bytes_usize: usize = num_bytes.try_into().unwrap(); - self.contents[addr_usize..addr_usize+num_bytes_usize].to_vec() - } + closed spec fn constants(self) -> PersistentMemoryConstants { + PersistentMemoryConstants { impervious_to_corruption: true } + } - #[verifier::external_body] - fn write(&mut self, addr: u64, bytes: &[u8]) - { - let addr_usize: usize = addr.try_into().unwrap(); - self.contents.splice(addr_usize..addr_usize+bytes.len(), bytes.iter().cloned()); - } - } + #[verifier::external_body] + fn read(&self, addr: u64, num_bytes: 
u64) -> Vec { + let addr_usize: usize = addr.try_into().unwrap(); + let num_bytes_usize: usize = num_bytes.try_into().unwrap(); + self.contents[addr_usize..addr_usize + num_bytes_usize].to_vec() + } + #[verifier::external_body] + fn write(&mut self, addr: u64, bytes: &[u8]) { + let addr_usize: usize = addr.try_into().unwrap(); + self.contents.splice(addr_usize..addr_usize + bytes.len(), bytes.iter().cloned()); } } +} // verus! +} + pub mod pmemspec_t { /* @@ -3324,288 +3846,318 @@ pub mod pmemspec_t { verus! { - pub open spec fn all_elements_unique(seq: Seq) -> bool { - forall |i: int, j: int| 0 <= i < j < seq.len() ==> seq[i] != seq[j] - } +pub open spec fn all_elements_unique(seq: Seq) -> bool { + forall|i: int, j: int| 0 <= i < j < seq.len() ==> seq[i] != seq[j] +} - pub closed spec fn maybe_corrupted_byte(byte: u8, true_byte: u8, addr: int) -> bool; +pub closed spec fn maybe_corrupted_byte(byte: u8, true_byte: u8, addr: int) -> bool; - pub open spec fn maybe_corrupted(bytes: Seq, true_bytes: Seq, addrs: Seq) -> bool { - &&& bytes.len() == true_bytes.len() == addrs.len() - &&& forall |i: int| #![auto] 0 <= i < bytes.len() ==> maybe_corrupted_byte(bytes[i], true_bytes[i], addrs[i]) - } +pub open spec fn maybe_corrupted(bytes: Seq, true_bytes: Seq, addrs: Seq) -> bool { + &&& bytes.len() == true_bytes.len() == addrs.len() + &&& forall|i: int| + #![auto] + 0 <= i < bytes.len() ==> maybe_corrupted_byte(bytes[i], true_bytes[i], addrs[i]) +} - pub const crc_size: u64 = 8; +pub const crc_size: u64 = 8; - pub closed spec fn spec_crc_bytes(header_bytes: Seq) -> Seq; +pub closed spec fn spec_crc_bytes(header_bytes: Seq) -> Seq; - #[verifier::external_body] - pub exec fn bytes_crc(header_bytes: &Vec) -> (out: Vec) - ensures - spec_crc_bytes(header_bytes@) == out@, - out@.len() == crc_size - { - #[cfg(not(verus_keep_ghost))] - { - let mut c = Digest::new(); - c.write(header_bytes.as_slice()); - u64_to_le_bytes(c.sum64()) - } - #[cfg(verus_keep_ghost)] +#[verifier::external_body] +pub exec fn bytes_crc(header_bytes: &Vec) -> (out: Vec) + ensures + spec_crc_bytes(header_bytes@) == out@, + out@.len() == crc_size, +{ + #[cfg(not(verus_keep_ghost))] + { + let mut c = Digest::new(); + c.write(header_bytes.as_slice()); + u64_to_le_bytes(c.sum64()) + } + #[cfg(verus_keep_ghost)] unimplemented!() - } +} - // We make two assumptions about how CRCs can be used to detect - // corruption. - - // The first assumption, encapsulated in - // `axiom_bytes_uncorrupted`, is that if we store byte sequences - // `x` and `y` to persistent memory where `y` is the CRC of `x`, - // then we can detect an absence of corruption by reading both of - // them. Specifically, if we read from those locations and get - // `x_c` and `y_c` (corruptions of `x` and `y` respectively), and - // `y_c` is the CRC of `x_c`, then we can conclude that `x` wasn't - // corrupted, i.e., that `x_c == x`. - - #[verifier(external_body)] - pub proof fn axiom_bytes_uncorrupted(x_c: Seq, x: Seq, x_addrs: Seq, - y_c: Seq, y: Seq, y_addrs: Seq) - requires - maybe_corrupted(x_c, x, x_addrs), - maybe_corrupted(y_c, y, y_addrs), - y == spec_crc_bytes(x), - y_c == spec_crc_bytes(x_c), - all_elements_unique(x_addrs), - all_elements_unique(y_addrs) - ensures - x == x_c - {} - - // The second assumption, encapsulated in - // `axiom_corruption_detecting_boolean`, is that the values - // `cdb0_val` and `cdb1_val` are so randomly different from each - // other that corruption can't make one appear to be the other. 
- // That is, if we know we wrote either `cdb0_val` or `cdb1_val` to - // a certain part of persistent memory, and when we read that same - // part we get `cdb0_val` or `cdb1_val`, we can assume it matches - // what we last wrote to it. To justify the assumption that - // `cdb0_val` and `cdb1_val` are different from each other, we set - // them to CRC(b"0") and CRC(b"1"), respectively. - - pub const cdb0_val: u64 = 0xa32842d19001605e; // CRC(b"0") - pub const cdb1_val: u64 = 0xab21aa73069531b7; // CRC(b"1") - - #[verifier(external_body)] - pub proof fn axiom_corruption_detecting_boolean(cdb_c: u64, cdb: u64, addrs: Seq) - requires - maybe_corrupted(spec_u64_to_le_bytes(cdb_c), spec_u64_to_le_bytes(cdb), addrs), - all_elements_unique(addrs), - cdb == cdb0_val || cdb == cdb1_val, - cdb_c == cdb0_val || cdb_c == cdb1_val, - ensures - cdb_c == cdb - {} - - pub struct PersistentMemoryConstants { - pub impervious_to_corruption: bool - } +// We make two assumptions about how CRCs can be used to detect +// corruption. +// The first assumption, encapsulated in +// `axiom_bytes_uncorrupted`, is that if we store byte sequences +// `x` and `y` to persistent memory where `y` is the CRC of `x`, +// then we can detect an absence of corruption by reading both of +// them. Specifically, if we read from those locations and get +// `x_c` and `y_c` (corruptions of `x` and `y` respectively), and +// `y_c` is the CRC of `x_c`, then we can conclude that `x` wasn't +// corrupted, i.e., that `x_c == x`. +#[verifier(external_body)] +pub proof fn axiom_bytes_uncorrupted( + x_c: Seq, + x: Seq, + x_addrs: Seq, + y_c: Seq, + y: Seq, + y_addrs: Seq, +) + requires + maybe_corrupted(x_c, x, x_addrs), + maybe_corrupted(y_c, y, y_addrs), + y == spec_crc_bytes(x), + y_c == spec_crc_bytes(x_c), + all_elements_unique(x_addrs), + all_elements_unique(y_addrs), + ensures + x == x_c, +{ +} - // We mark this as `external_body` so that the verifier can't see - // that there's nothing important in it and thereby shortcut some - // checks. - - pub trait PersistentMemory : Sized { - spec fn view(self) -> Seq; - - spec fn inv(self) -> bool; - - spec fn constants(self) -> PersistentMemoryConstants; - - /// This is the model of some routine that reads the - /// `num_bytes` bytes at address `addr`. - fn read(&self, addr: u64, num_bytes: u64) -> (bytes: Vec) - requires - self.inv(), - addr + num_bytes <= self@.len() - ensures - ({ - let true_bytes = self@.subrange(addr as int, addr + num_bytes); - let addrs = Seq::::new(num_bytes as nat, |i: int| i + addr); - if self.constants().impervious_to_corruption { - bytes@ == true_bytes - } - else { - maybe_corrupted(bytes@, true_bytes, addrs) - } - }); - - /// This is the model of some routine that writes `bytes` - /// starting at address `addr`. - fn write(&mut self, addr: u64, bytes: &[u8]) - requires - old(self).inv(), - addr + bytes@.len() <= (old(self))@.len(), - addr + bytes@.len() <= u64::MAX - ensures - self.inv(), - self.constants() == old(self).constants(), - self@ == update_contents_to_reflect_write(old(self)@, addr as int, bytes@); - } +// The second assumption, encapsulated in +// `axiom_corruption_detecting_boolean`, is that the values +// `cdb0_val` and `cdb1_val` are so randomly different from each +// other that corruption can't make one appear to be the other. 
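// A minimal plain-Rust sketch of the check that `axiom_bytes_uncorrupted` is meant to
// justify. `checksum` is a toy stand-in for the CRC-64 computed by `bytes_crc`, and
// ordinary `Vec<u8>`/`u64` values stand in for the spec-level `Seq` views; only the
// shape of the check is intended to match the verified code.
fn checksum(bytes: &[u8]) -> u64 {
    // Toy stand-in for a real CRC; the sketch only needs it to be deterministic.
    bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(131).wrapping_add(b as u64))
}

/// Accept data read back from storage only if a checksum recomputed over it matches
/// the checksum that was read back alongside it. Under the first assumption above,
/// a match implies the data read is the true, uncorrupted data.
fn validate_read(data_read: Vec<u8>, crc_read: u64) -> Option<Vec<u8>> {
    if checksum(&data_read) == crc_read {
        Some(data_read)  // corruption ruled out by the assumption
    } else {
        None  // mismatch: treat the read as corrupted
    }
}
// e.g. validate_read(vec![1, 2, 3], checksum(&[1, 2, 3])) returns Some(vec![1, 2, 3]).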
+// That is, if we know we wrote either `cdb0_val` or `cdb1_val` to +// a certain part of persistent memory, and when we read that same +// part we get `cdb0_val` or `cdb1_val`, we can assume it matches +// what we last wrote to it. To justify the assumption that +// `cdb0_val` and `cdb1_val` are different from each other, we set +// them to CRC(b"0") and CRC(b"1"), respectively. +pub const cdb0_val: u64 = 0xa32842d19001605e; + // CRC(b"0") +pub const cdb1_val: u64 = 0xab21aa73069531b7; + // CRC(b"1") +#[verifier(external_body)] +pub proof fn axiom_corruption_detecting_boolean(cdb_c: u64, cdb: u64, addrs: Seq) + requires + maybe_corrupted(spec_u64_to_le_bytes(cdb_c), spec_u64_to_le_bytes(cdb), addrs), + all_elements_unique(addrs), + cdb == cdb0_val || cdb == cdb1_val, + cdb_c == cdb0_val || cdb_c == cdb1_val, + ensures + cdb_c == cdb, +{ +} - /// We model the persistent memory as getting flushed in chunks, - /// where each chunk has `persistence_chunk_size` bytes. We refer - /// to chunk number `id` as the set of addresses `addr` such that - /// `addr / persistence_chunk_size == id`. - pub spec const persistence_chunk_size: int = 8; - - /// Return the byte at address `addr` after writing - /// `write_bytes` to address `write_addr`, if the byte at - /// `addr` before the write was `prewrite_byte`. - pub open spec fn update_byte_to_reflect_write(addr: int, prewrite_byte: u8, write_addr: int, - write_bytes: Seq) -> u8 - { - if write_addr <= addr && addr < write_addr + write_bytes.len() { - write_bytes[addr - write_addr] - } - else { - prewrite_byte - } - } +pub struct PersistentMemoryConstants { + pub impervious_to_corruption: bool, +} - /// Return the contents of persistent memory after writing - /// `write_bytes` to address `write_addr`, if the contents - /// before the write was `prewrite_contents`. - pub open spec(checked) fn update_contents_to_reflect_write(prewrite_contents: Seq, write_addr: int, - write_bytes: Seq) -> Seq - recommends - 0 <= write_addr, - write_addr + write_bytes.len() <= prewrite_contents.len(), - { - Seq::::new(prewrite_contents.len(), - |addr| update_byte_to_reflect_write(addr, prewrite_contents[addr], - write_addr, write_bytes)) - } +// We mark this as `external_body` so that the verifier can't see +// that there's nothing important in it and thereby shortcut some +// checks. +pub trait PersistentMemory: Sized { + spec fn view(self) -> Seq; + + spec fn inv(self) -> bool; + + spec fn constants(self) -> PersistentMemoryConstants; + + /// This is the model of some routine that reads the + /// `num_bytes` bytes at address `addr`. + fn read(&self, addr: u64, num_bytes: u64) -> (bytes: Vec) + requires + self.inv(), + addr + num_bytes <= self@.len(), + ensures + ({ + let true_bytes = self@.subrange(addr as int, addr + num_bytes); + let addrs = Seq::::new(num_bytes as nat, |i: int| i + addr); + if self.constants().impervious_to_corruption { + bytes@ == true_bytes + } else { + maybe_corrupted(bytes@, true_bytes, addrs) + } + }), + ; + + /// This is the model of some routine that writes `bytes` + /// starting at address `addr`. 
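// A minimal plain-Rust sketch of how the corruption-detecting boolean described above
// is consumed. The two constants restate `cdb0_val`/`cdb1_val` from this module in
// upper case; `decode_cdb` is a hypothetical helper, and a real caller additionally
// needs to know that one of these two values was actually written there (the
// precondition of `axiom_corruption_detecting_boolean`).
const CDB0_VAL: u64 = 0xa32842d19001605e; // CRC(b"0")
const CDB1_VAL: u64 = 0xab21aa73069531b7; // CRC(b"1")

/// Decode 8 bytes read back from storage into false, true, or `None` for
/// "detected corruption" (any other bit pattern).
fn decode_cdb(bytes_read: [u8; 8]) -> Option<bool> {
    match u64::from_le_bytes(bytes_read) {
        CDB0_VAL => Some(false),
        CDB1_VAL => Some(true),
        _ => None,  // neither constant: the read must have been corrupted
    }
}
// e.g. decode_cdb(CDB1_VAL.to_le_bytes()) returns Some(true).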
+ fn write(&mut self, addr: u64, bytes: &[u8]) + requires + old(self).inv(), + addr + bytes@.len() <= (old(self))@.len(), + addr + bytes@.len() <= u64::MAX, + ensures + self.inv(), + self.constants() == old(self).constants(), + self@ == update_contents_to_reflect_write(old(self)@, addr as int, bytes@), + ; +} - /// Return the byte at address `addr` after initiating (but - /// not necessarily completing) a write of `write_bytes` to - /// address `write_addr`, given that the byte at `addr` before - /// the write was `prewrite_byte` and given that the set of - /// chunk IDs that have been flushed since the initiation of - /// the write is `chunks_flushed`. - pub open spec fn update_byte_to_reflect_partially_flushed_write(addr: int, prewrite_byte: u8, write_addr: int, - write_bytes: Seq, - chunks_flushed: Set) -> u8 - { - if chunks_flushed.contains(addr / persistence_chunk_size) { - update_byte_to_reflect_write(addr, prewrite_byte, write_addr, write_bytes) - } - else { - prewrite_byte - } - } +/// We model the persistent memory as getting flushed in chunks, +/// where each chunk has `persistence_chunk_size` bytes. We refer +/// to chunk number `id` as the set of addresses `addr` such that +/// `addr / persistence_chunk_size == id`. +pub spec const persistence_chunk_size: int = 8; + +/// Return the byte at address `addr` after writing +/// `write_bytes` to address `write_addr`, if the byte at +/// `addr` before the write was `prewrite_byte`. +pub open spec fn update_byte_to_reflect_write( + addr: int, + prewrite_byte: u8, + write_addr: int, + write_bytes: Seq, +) -> u8 { + if write_addr <= addr && addr < write_addr + write_bytes.len() { + write_bytes[addr - write_addr] + } else { + prewrite_byte + } +} - /// Return the contents of persistent memory after initiating - /// (but not necessarily completing) a write of `write_bytes` - /// to address `write_addr`, given that the contents before - /// the write were `prewrite_contents` and given that the set of - /// chunk IDs that have been flushed since the initiation of - /// the write is `chunks_flushed`. - pub open spec(checked) fn update_contents_to_reflect_partially_flushed_write(contents: Seq, write_addr: int, - write_bytes: Seq, - chunks_flushed: Set) -> Seq - recommends - 0 <= write_addr, - write_addr + write_bytes.len() <= contents.len(), - { - Seq::::new(contents.len(), - |addr| update_byte_to_reflect_partially_flushed_write(addr, contents[addr], write_addr, - write_bytes, chunks_flushed)) - } +/// Return the contents of persistent memory after writing +/// `write_bytes` to address `write_addr`, if the contents +/// before the write was `prewrite_contents`. +pub open spec(checked) fn update_contents_to_reflect_write( + prewrite_contents: Seq, + write_addr: int, + write_bytes: Seq, +) -> Seq + recommends + 0 <= write_addr, + write_addr + write_bytes.len() <= prewrite_contents.len(), +{ + Seq::::new( + prewrite_contents.len(), + |addr| update_byte_to_reflect_write(addr, prewrite_contents[addr], write_addr, write_bytes), + ) +} - /// A `WriteRestrictedPersistentMemory
<Perm, PM>
` object wraps a - /// `PersistentMemory` object to restrict how it's written. - /// Untrusted code passed one of these can only write to the - /// encapsulated persistent memory by providing a permission of - /// type `P`. That permission must allow all possible states `s` - /// such that crashing in the middle of the write might leave the - /// persistent memory in state `s`. - pub struct WriteRestrictedPersistentMemory - where - Perm: CheckPermission>, - PM: PersistentMemory - { - pm: PM, - ghost perm: Option // unused, but Rust demands some reference to Perm - } +/// Return the byte at address `addr` after initiating (but +/// not necessarily completing) a write of `write_bytes` to +/// address `write_addr`, given that the byte at `addr` before +/// the write was `prewrite_byte` and given that the set of +/// chunk IDs that have been flushed since the initiation of +/// the write is `chunks_flushed`. +pub open spec fn update_byte_to_reflect_partially_flushed_write( + addr: int, + prewrite_byte: u8, + write_addr: int, + write_bytes: Seq, + chunks_flushed: Set, +) -> u8 { + if chunks_flushed.contains(addr / persistence_chunk_size) { + update_byte_to_reflect_write(addr, prewrite_byte, write_addr, write_bytes) + } else { + prewrite_byte + } +} - impl WriteRestrictedPersistentMemory - where - Perm: CheckPermission>, - PM: PersistentMemory - { - pub closed spec fn view(self) -> Seq { - self.pm@ - } +/// Return the contents of persistent memory after initiating +/// (but not necessarily completing) a write of `write_bytes` +/// to address `write_addr`, given that the contents before +/// the write were `prewrite_contents` and given that the set of +/// chunk IDs that have been flushed since the initiation of +/// the write is `chunks_flushed`. +pub open spec(checked) fn update_contents_to_reflect_partially_flushed_write( + contents: Seq, + write_addr: int, + write_bytes: Seq, + chunks_flushed: Set, +) -> Seq + recommends + 0 <= write_addr, + write_addr + write_bytes.len() <= contents.len(), +{ + Seq::::new( + contents.len(), + |addr| + update_byte_to_reflect_partially_flushed_write( + addr, + contents[addr], + write_addr, + write_bytes, + chunks_flushed, + ), + ) +} - pub closed spec fn inv(self) -> bool { - self.pm.inv() - } +/// A `WriteRestrictedPersistentMemory
<Perm, PM>
` object wraps a +/// `PersistentMemory` object to restrict how it's written. +/// Untrusted code passed one of these can only write to the +/// encapsulated persistent memory by providing a permission of +/// type `P`. That permission must allow all possible states `s` +/// such that crashing in the middle of the write might leave the +/// persistent memory in state `s`. +pub struct WriteRestrictedPersistentMemory where + Perm: CheckPermission>, + PM: PersistentMemory, + { + pm: PM, + ghost perm: Option // unused, but Rust demands some reference to Perm + , +} - pub closed spec fn constants(self) -> PersistentMemoryConstants { - self.pm.constants() - } +impl WriteRestrictedPersistentMemory where + Perm: CheckPermission>, + PM: PersistentMemory, + { + pub closed spec fn view(self) -> Seq { + self.pm@ + } - pub exec fn new(pm: PM) -> (wrpm: Self) - requires - pm.inv() - ensures - wrpm@ == pm@, - wrpm.inv(), - wrpm.constants() == pm.constants() - { - Self { pm: pm, perm: None } - } + pub closed spec fn inv(self) -> bool { + self.pm.inv() + } - pub exec fn get_pm_ref(&self) -> (pm: &PM) - requires - self.inv() - ensures - pm.inv(), - pm@ == self@, - pm.constants() == self.constants() - { - &self.pm - } + pub closed spec fn constants(self) -> PersistentMemoryConstants { + self.pm.constants() + } - /// This `write` function can only be called if a crash in the - /// middle of the requested write will leave the persistent - /// memory in a state allowed by `perm`. The state must be - /// allowed no matter what subset of the persistence chunks - /// have been flushed. - pub exec fn write(&mut self, addr: u64, bytes: &[u8], perm: Tracked<&Perm>) - requires - old(self).inv(), - addr + bytes@.len() <= old(self)@.len(), - addr + bytes@.len() <= u64::MAX, - forall |chunks_flushed| { - let new_contents: Seq = - #[trigger] update_contents_to_reflect_partially_flushed_write( - old(self)@, addr as int, bytes@, chunks_flushed - ); - perm@.check_permission(new_contents) - }, - ensures - self.inv(), - self.constants() == old(self).constants(), - self@ == update_contents_to_reflect_write(old(self)@, addr as int, bytes@), - { - self.pm.write(addr, bytes) - } - } + pub exec fn new(pm: PM) -> (wrpm: Self) + requires + pm.inv(), + ensures + wrpm@ == pm@, + wrpm.inv(), + wrpm.constants() == pm.constants(), + { + Self { pm: pm, perm: None } + } + + pub exec fn get_pm_ref(&self) -> (pm: &PM) + requires + self.inv(), + ensures + pm.inv(), + pm@ == self@, + pm.constants() == self.constants(), + { + &self.pm + } + /// This `write` function can only be called if a crash in the + /// middle of the requested write will leave the persistent + /// memory in a state allowed by `perm`. The state must be + /// allowed no matter what subset of the persistence chunks + /// have been flushed. + pub exec fn write(&mut self, addr: u64, bytes: &[u8], perm: Tracked<&Perm>) + requires + old(self).inv(), + addr + bytes@.len() <= old(self)@.len(), + addr + bytes@.len() <= u64::MAX, + forall|chunks_flushed| + { + let new_contents: Seq = + #[trigger] update_contents_to_reflect_partially_flushed_write( + old(self)@, + addr as int, + bytes@, + chunks_flushed, + ); + perm@.check_permission(new_contents) + }, + ensures + self.inv(), + self.constants() == old(self).constants(), + self@ == update_contents_to_reflect_write(old(self)@, addr as int, bytes@), + { + self.pm.write(addr, bytes) } } +} // verus! +} + pub mod sccf { /* Simple crash-consistency framework (open source) @@ -3617,29 +4169,27 @@ pub mod sccf { verus! 
{ - pub open spec fn is_state_allowable( - pre_operation_state: AbstractStorage, - crash_state: AbstractStorage, - recovery_view: FnSpec(AbstractStorage) -> Option, - abstract_next: FnSpec(AbstractService, AbstractService) -> bool - ) -> bool - { - let pre_operation_abstract_state = recovery_view(pre_operation_state); - let crash_abstract_state = recovery_view(crash_state); - ||| crash_abstract_state == pre_operation_abstract_state - ||| { - &&& pre_operation_abstract_state.is_Some() - &&& crash_abstract_state.is_Some() - &&& abstract_next(pre_operation_abstract_state.unwrap(), crash_abstract_state.unwrap()) - } - } +pub open spec fn is_state_allowable( + pre_operation_state: AbstractStorage, + crash_state: AbstractStorage, + recovery_view: FnSpec(AbstractStorage) -> Option, + abstract_next: FnSpec(AbstractService, AbstractService) -> bool, +) -> bool { + let pre_operation_abstract_state = recovery_view(pre_operation_state); + let crash_abstract_state = recovery_view(crash_state); + ||| crash_abstract_state == pre_operation_abstract_state + ||| { + &&& pre_operation_abstract_state.is_Some() + &&& crash_abstract_state.is_Some() + &&& abstract_next(pre_operation_abstract_state.unwrap(), crash_abstract_state.unwrap()) + } +} - pub trait CheckPermission - { - spec fn check_permission(&self, state: AbstractStorage) -> bool; - } +pub trait CheckPermission { + spec fn check_permission(&self, state: AbstractStorage) -> bool; +} - } +} // verus! } use crate::main_t::*; @@ -3648,26 +4198,28 @@ use crate::pmemspec_t::*; verus! { - fn main() { - let device_size: u64 = 4096; - if let Ok(mut pm) = VolatileMemoryMockingPersistentMemory::new(device_size) { - if let Ok(_) = InfiniteLogImpl::setup(&mut pm, device_size) { - if let Ok(mut log) = InfiniteLogImpl::start(pm, device_size) { - let mut v: Vec = Vec::::new(); - v.push(30); v.push(42); v.push(100); - if let Ok(pos) = log.append(&v) { - if let Ok((head, tail, capacity)) = log.get_head_and_tail() { - assert (head == 0); - assert (tail == 3); - // TODO: add an assertion using maybe_corrupted here - // if let Ok(bytes) = log.read(1, 2) { - // assert (bytes@[0] == 42); - // } - } +fn main() { + let device_size: u64 = 4096; + if let Ok(mut pm) = VolatileMemoryMockingPersistentMemory::new(device_size) { + if let Ok(_) = InfiniteLogImpl::setup(&mut pm, device_size) { + if let Ok(mut log) = InfiniteLogImpl::start(pm, device_size) { + let mut v: Vec = Vec::::new(); + v.push(30); + v.push(42); + v.push(100); + if let Ok(pos) = log.append(&v) { + if let Ok((head, tail, capacity)) = log.get_head_and_tail() { + assert(head == 0); + assert(tail == 3); + // TODO: add an assertion using maybe_corrupted here + // if let Ok(bytes) = log.read(1, 2) { + // assert (bytes@[0] == 42); + // } } } } } } - } + +} // verus! diff --git a/examples/vstd.rs b/examples/vstd.rs index 1420eb6..a05b7ce 100644 --- a/examples/vstd.rs +++ b/examples/vstd.rs @@ -66,7 +66,7 @@ pub proof fn lemma_small_div() } } // verus! -} + } pub mod general_internals { //! This file contains general internal functions used within the math @@ -187,7 +187,7 @@ pub proof fn lemma_induction_helper(n: int, f: spec_fn(int) -> bool, x: int) } } // verus! -} + } pub mod mod_internals_nonlinear { //! This file contains proofs related to modulo that require @@ -270,7 +270,7 @@ pub proof fn lemma_mod_range(x: int, m: int) } } // verus! -} + } pub mod mul_internals_nonlinear { //! 
This file contains proofs related to multiplication that require @@ -361,7 +361,7 @@ pub proof fn lemma_mul_strict_inequality(x: int, y: int, z: int) } } // verus! -} + } pub mod div_internals { //! This file contains proofs related to division. These are internal @@ -712,7 +712,7 @@ pub proof fn lemma_div_induction_auto_forall(n: int, f: spec_fn(int) -> bool) } } // verus! -} + } pub mod mod_internals { //! This file contains proofs related to modulo. These are internal @@ -1237,7 +1237,7 @@ pub proof fn lemma_mod_induction_auto_forall(n: int, f: spec_fn(int) -> bool) } } // verus! -} + } pub mod mul_internals { //! This file contains proofs related to multiplication. These are @@ -1460,7 +1460,7 @@ pub proof fn lemma_mul_induction_auto_forall(f: spec_fn(int) -> bool) } } // verus! -} + } } pub mod div_mod { @@ -3712,7 +3712,7 @@ pub proof fn lemma_mod_breakdown_auto() } } // verus! -} + } pub mod logarithm { //! This file contains proofs related to integer logarithms. These are @@ -3896,7 +3896,7 @@ pub proof fn lemma_log_pow(base: int, n: nat) } } // verus! -} + } pub mod mul { //! This file contains proofs related to integer multiplication (`*`). @@ -4595,7 +4595,7 @@ pub proof fn lemma_mul_properties() } } // verus! -} + } pub mod power { //! This file contains proofs related to exponentiation. These are @@ -5357,7 +5357,7 @@ pub proof fn lemma_pow_mod_noop_auto() } } // verus! -} + } pub mod power2 { //! This file contains proofs related to powers of 2. These are part @@ -5553,7 +5553,7 @@ pub proof fn lemma2_to64() } } // verus! -} + } } pub mod array { @@ -5664,7 +5664,7 @@ pub mod atomic { } } // verus! -atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); #[cfg_attr(verus_keep_ghost, verus::internal(verus_macro))] impl $at_ident { atomic_common_methods!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); @@ -5700,7 +5700,7 @@ atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); } } // verus! -atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); + atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); #[cfg_attr(verus_keep_ghost, verus::internal(verus_macro))] impl $at_ident { atomic_common_methods!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); @@ -5755,7 +5755,7 @@ atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); } } -}; + }; } macro_rules! atomic_common_methods { @@ -5876,7 +5876,7 @@ atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); } } -}; + }; } macro_rules! atomic_integer_methods { @@ -6034,7 +6034,7 @@ atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); } } -}; + }; } macro_rules! atomic_bool_methods { @@ -6102,7 +6102,7 @@ atomic_types!($at_ident, $p_ident, $p_data_ident, $rust_ty, $value_ty); } } -}; + }; } make_bool_atomic!( @@ -6226,7 +6226,7 @@ pub trait AtomicInvariantPredicate { } } // verus! -macro_rules! declare_atomic_type { + macro_rules! declare_atomic_type { ($at_ident:ident, $patomic_ty:ident, $perm_ty:ty, $value_ty: ty, $atomic_pred_ty: ident) => { verus!{ @@ -6318,7 +6318,7 @@ macro_rules! declare_atomic_type { } } -}; + }; } declare_atomic_type!(AtomicU64, PAtomicU64, PermissionU64, u64, AtomicPredU64); @@ -7858,7 +7858,7 @@ pub proof fn fun_ext(f1: spec_fn(A) -> B, f2: spec_fn(A) -> B) } } // verus! 
-/// A macro to conveniently generate similar functional extensionality axioms for functions that + /// A macro to conveniently generate similar functional extensionality axioms for functions that /// take `n` arguments. #[doc(hidden)] macro_rules! gen_fun_ext_n { @@ -7873,7 +7873,7 @@ pub proof fn fun_ext(f1: spec_fn(A) -> B, f2: spec_fn(A) -> B) ensures f1 == f2 {} } -}; + }; } // Note: We start at 1 just for consistency; it is exactly equivalent to `fun_ext` @@ -7944,7 +7944,7 @@ pub trait InvariantPredicate { } } // verus! -// LocalInvariant is NEVER `Sync`. + // LocalInvariant is NEVER `Sync`. // // Furthermore, for either type: // @@ -8109,7 +8109,7 @@ pub trait InvariantPredicate { } } -}; + }; } declare_invariant_impl!(AtomicInvariant); @@ -9412,7 +9412,7 @@ pub proof fn tracked_static_ref(tracked v: V) -> (tracked res: &'static V) } } // verus! -// verus + // verus } pub mod multiset { @@ -10194,7 +10194,7 @@ impl Option { } } // verus! -/// A poor-person's `?` operator, until Verus switches to the "real" Rust `Option`. + /// A poor-person's `?` operator, until Verus switches to the "real" Rust `Option`. #[macro_export] #[allow(unused_macros)] macro_rules! try_option { @@ -10415,7 +10415,7 @@ pub fn runtime_assert(b: bool) } } // verus! -#[inline(always)] + #[inline(always)] #[cfg_attr(verus_keep_ghost, verifier::external)] fn runtime_assert_internal(b: bool) { assert!(b); @@ -17748,7 +17748,7 @@ pub proof fn axiom_u64_leading_ones(i: u64) } } // verus! -} + } pub mod control_flow { use crate::prelude::*; @@ -17830,7 +17830,7 @@ pub fn ex_result_from_residual>(result: Result) } } // verus! -} + } pub mod core { use crate::prelude::*; @@ -17904,7 +17904,7 @@ pub fn ex_intrinsics_unlikely(b: bool) -> (c: bool) pub struct ExManuallyDrop(core::mem::ManuallyDrop); } // verus! -} + } pub mod num { #![allow(unused_imports)] @@ -18053,7 +18053,7 @@ pub struct ExManuallyDrop(core::mem::ManuallyDrop); } } -}; + }; } num_specs!(u8, i8, u8_specs, i8_specs, 0x100); @@ -18273,7 +18273,7 @@ pub fn ex_i32_checked_rem_euclid(lhs: i32, rhs: i32) -> (result: Option) } } // verus! -} + } pub mod option { #![allow(unused_imports)] @@ -18447,7 +18447,7 @@ pub fn take(option: &mut Option) -> (t: Option) } } // verus! -} + } pub mod range { use crate::prelude::*; @@ -18559,7 +18559,7 @@ impl crate::view::View for RangeGhostIterator } } // verus! -macro_rules! step_specs { + macro_rules! step_specs { ($t: ty, $axiom: ident) => { verus! { impl StepSpec for $t { @@ -18616,7 +18616,7 @@ macro_rules! step_specs { { } } // verus! -}; + }; } step_specs!(u8, axiom_spec_range_next_u8); @@ -18819,7 +18819,7 @@ pub fn ex_result_err(result: Result) -> (opt: Option) } } // verus! -} + } #[cfg(feature = "alloc")] pub mod vec { @@ -19042,7 +19042,7 @@ pub fn ex_vec_truncate(vec: &mut Vec, len: usize) } } // verus! -} + } } // Re-exports all vstd types, traits, and functions that are commonly used or replace diff --git a/tests/snapshot-examples.rs b/tests/snapshot-examples.rs index 891e779..5a200da 100644 --- a/tests/snapshot-examples.rs +++ b/tests/snapshot-examples.rs @@ -24,6 +24,11 @@ fn syntax_rs_unchanged() { check_snapshot(include_str!("../examples/syntax.rs")); } +#[test] +fn atomic_rs_unchanged() { + check_snapshot(include_str!("../examples/atomic.rs")); +} + #[test] fn ironfleet_rs_unchanged() { check_snapshot(include_str!("../examples/ironfleet.rs"));