diff --git a/Cargo.toml b/Cargo.toml index 369f44b..0a9a8a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ edition = "2018" readme = "README.md" [dependencies] +scopeguard = "1.1" # Manually included tracing support for third party libraries # Providing support for these important libraries, # gives zerogc 'batteries included' support. @@ -19,9 +20,6 @@ zerogc-derive = { path = "libs/derive", version = "0.2.0-alpha.3" } [workspace] members = ["libs/simple", "libs/derive", "libs/context"] -[profile.dev] -opt-level = 1 - [features] default = ["std"] # Depend on the standard library (optional) diff --git a/libs/context/src/collector.rs b/libs/context/src/collector.rs index ad4e301..c417c51 100644 --- a/libs/context/src/collector.rs +++ b/libs/context/src/collector.rs @@ -12,6 +12,8 @@ use zerogc::{Gc, GcSafe, GcSystem, Trace, GcSimpleAlloc, NullTrace, TraceImmutab use crate::{CollectorContext}; use crate::state::{CollectionManager, RawContext}; +use zerogc::vec::GcVec; +use zerogc::vec::repr::GcVecRepr; /// A specific implementation of a collector pub unsafe trait RawCollectorImpl: 'static + Sized { @@ -20,6 +22,8 @@ pub unsafe trait RawCollectorImpl: 'static + Sized { /// The simple collector implements this as /// a trait object pointer. type DynTracePtr: Copy + Debug + 'static; + /// The configuration + type Config: Sized + Default; /// A pointer to this collector /// @@ -31,6 +35,8 @@ pub unsafe trait RawCollectorImpl: 'static + Sized { /// The context type RawContext: RawContext; + /// The raw representation of a vec + type RawVecRepr: GcVecRepr; /// True if this collector is a singleton /// @@ -50,18 +56,18 @@ pub unsafe trait RawCollectorImpl: 'static + Sized { /// Initialize an instance of the collector /// /// Must panic if the collector is not a singleton - fn init(logger: Logger) -> NonNull; + fn init(config: Self::Config, logger: Logger) -> NonNull; /// The id of this collector #[inline] fn id(&self) -> CollectorId { CollectorId { ptr: unsafe { Self::Ptr::from_raw(self as *const _ as *mut _) } } } - unsafe fn gc_write_barrier<'gc, T, V>( - owner: &Gc<'gc, T, CollectorId>, + unsafe fn gc_write_barrier<'gc, O, V>( + owner: &Gc<'gc, O, CollectorId>, value: &Gc<'gc, V, CollectorId>, field_offset: usize - ) where T: GcSafe + ?Sized + 'gc, V: GcSafe + ?Sized + 'gc; + ) where O: GcSafe + ?Sized + 'gc, V: GcSafe + ?Sized + 'gc; /// The logger associated with this collector fn logger(&self) -> &Logger; @@ -90,7 +96,7 @@ pub unsafe trait SingletonCollector: RawCollectorImpl PartialEq for CollectorId { @@ -278,6 +284,7 @@ impl CollectorId { } unsafe impl ::zerogc::CollectorId for CollectorId { type System = CollectorRef; + type RawVecRepr = C::RawVecRepr; #[inline] fn from_gc_ptr<'a, 'gc, T>(gc: &'a Gc<'gc, T, Self>) -> &'a Self where T: GcSafe + ?Sized + 'gc, 'gc: 'a { @@ -286,11 +293,11 @@ unsafe impl ::zerogc::CollectorId for CollectorId { #[inline(always)] - unsafe fn gc_write_barrier<'gc, T, V>( - owner: &Gc<'gc, T, Self>, + unsafe fn gc_write_barrier<'gc, O, V>( + owner: &Gc<'gc, O, Self>, value: &Gc<'gc, V, Self>, field_offset: usize - ) where T: GcSafe + ?Sized + 'gc, V: GcSafe + ?Sized + 'gc { + ) where O: GcSafe + ?Sized + 'gc, V: GcSafe + ?Sized + 'gc { C::gc_write_barrier(owner, value, field_offset) } @@ -341,13 +348,36 @@ impl WeakCollectorRef { pub unsafe trait RawSimpleAlloc: RawCollectorImpl { fn alloc<'gc, T: GcSafe + 'gc>(context: &'gc CollectorContext, value: T) -> Gc<'gc, T, CollectorId>; + unsafe fn alloc_uninit_slice<'gc, T>(context: &'gc CollectorContext, 
len: usize) -> (CollectorId, *mut T) + where T: GcSafe + 'gc; + fn alloc_vec<'gc, T>(context: &'gc CollectorContext) -> GcVec<'gc, T, CollectorContext> + where T: GcSafe + 'gc; + fn alloc_vec_with_capacity<'gc, T>(context: &'gc CollectorContext, capacity: usize) -> GcVec<'gc, T, CollectorContext> + where T: GcSafe + 'gc; } -unsafe impl<'gc, T, C> GcSimpleAlloc<'gc, T> for CollectorContext - where T: GcSafe + 'gc, C: RawSimpleAlloc { +unsafe impl GcSimpleAlloc for CollectorContext + where C: RawSimpleAlloc { #[inline] - fn alloc(&'gc self, value: T) -> Gc<'gc, T, Self::Id> { + fn alloc<'gc, T>(&'gc self, value: T) -> Gc<'gc, T, Self::Id> + where T: GcSafe + 'gc { C::alloc(self, value) } + + #[inline] + unsafe fn alloc_uninit_slice<'gc, T>(&'gc self, len: usize) -> (Self::Id, *mut T) + where T: GcSafe + 'gc { + C::alloc_uninit_slice(self, len) + } + + #[inline] + fn alloc_vec<'gc, T>(&'gc self) -> GcVec<'gc, T, Self> where T: GcSafe + 'gc { + C::alloc_vec(self) + } + + #[inline] + fn alloc_vec_with_capacity<'gc, T>(&'gc self, capacity: usize) -> GcVec<'gc, T, Self> where T: GcSafe + 'gc { + C::alloc_vec_with_capacity(self, capacity) + } } /// A reference to the collector. @@ -371,26 +401,26 @@ unsafe impl Sync for CollectorRef {} #[doc(hidden)] pub trait CollectorInit>: CollectorPtr { fn create() -> CollectorRef { - Self::with_logger(Logger::root( + Self::with_logger(C::Config::default(), Logger::root( slog::Discard, o!() )) } - fn with_logger(logger: Logger) -> CollectorRef; + fn with_logger(config: C::Config, logger: Logger) -> CollectorRef; } impl>> CollectorInit for NonNull { - fn with_logger(logger: Logger) -> CollectorRef { + fn with_logger(config: C::Config, logger: Logger) -> CollectorRef { assert!(!C::SINGLETON); - let raw_ptr = C::init(logger); + let raw_ptr = C::init(config, logger); CollectorRef { ptr: raw_ptr } } } impl CollectorInit for PhantomData<&'static C> where C: SingletonCollector { - fn with_logger(logger: Logger) -> CollectorRef { + fn with_logger(config: C::Config, logger: Logger) -> CollectorRef { assert!(C::SINGLETON); - C::init_global(logger); // TODO: Is this safe? + C::init_global(config, logger); // TODO: Is this safe? 
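
Reviewer note: the hunks above thread a new `Config` associated type through collector initialization (`RawCollectorImpl::init`, `CollectorInit::with_logger`, `SingletonCollector::init_global`), with the old no-argument entry point forwarding `Config::default()`. The following is a minimal standalone sketch of that pattern only; `MiniCollector`, `MiniConfig`, and `create` are illustrative names, not part of the zerogc API.

```rust
// Sketch of the config-threading pattern: an associated `Config: Default`
// passed into `init`, with a default-config constructor layered on top.
trait RawCollector: Sized {
    type Config: Default;
    fn init(config: Self::Config) -> Self;
}

struct MiniConfig {
    initial_threshold: usize,
}

impl Default for MiniConfig {
    fn default() -> Self {
        MiniConfig { initial_threshold: 2048 }
    }
}

struct MiniCollector {
    threshold: usize,
}

impl RawCollector for MiniCollector {
    type Config = MiniConfig;
    fn init(config: Self::Config) -> Self {
        MiniCollector { threshold: config.initial_threshold }
    }
}

// Plays the role of `CollectorInit::create`: existing callers that never
// mention a config keep working because the default is filled in here.
fn create<C: RawCollector>() -> C {
    C::init(C::Config::default())
}

fn main() {
    let collector: MiniCollector = create();
    assert_eq!(collector.threshold, 2048);
}
```
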
// NOTE: The raw pointer is implicit (now that we're leaked) CollectorRef { ptr: PhantomData } } @@ -405,7 +435,11 @@ impl CollectorRef { #[inline] pub fn with_logger(logger: Logger) -> Self where C::Ptr: CollectorInit { - >::with_logger(logger) + Self::with_config(C::Config::default(), logger) + } + + pub fn with_config(config: C::Config, logger: Logger) -> Self where C::Ptr: CollectorInit { + >::with_logger(config, logger) } #[inline] diff --git a/libs/context/src/state/sync.rs b/libs/context/src/state/sync.rs index e48b3c5..f727939 100644 --- a/libs/context/src/state/sync.rs +++ b/libs/context/src/state/sync.rs @@ -429,13 +429,13 @@ pub(crate) trait SyncCollectorImpl: RawCollectorImpl { - unsafe { (core::ptr::addr_of!((*(std::ptr::null_mut::<$target>()))$(.$field)*) as usize) } - }; + ($target:ty, $($field:ident).+) => {{ + const OFFSET: usize = { + let uninit = core::mem::MaybeUninit::<$target>::uninit(); + unsafe { ((core::ptr::addr_of!((*uninit.as_ptr())$(.$field)*)) as *const u8) + .offset_from(uninit.as_ptr() as *const u8) as usize } + }; + OFFSET + }}; } #[cfg(feature = "sync")] diff --git a/libs/derive/src/lib.rs b/libs/derive/src/lib.rs index 42ddadf..2426cd3 100644 --- a/libs/derive/src/lib.rs +++ b/libs/derive/src/lib.rs @@ -732,6 +732,12 @@ fn impl_erase(target: &DeriveInput, info: &GcTypeInfo) -> Result Error { + Error::new( + lt.span(), + "Unless Self: NullTrace, derive(GcErase) is currently unable to handle lifetimes" + ) + } match param { GenericParam::Type(ref mut type_param) => { let param_name = &type_param.ident; @@ -740,7 +746,20 @@ fn impl_erase(target: &DeriveInput, info: &GcTypeInfo) -> Result>(); + fn rewrite_bound(bound: &TypeParamBound, info: &GcTypeInfo) -> Result { + match bound { + TypeParamBound::Trait(ref t) => Ok(TypeParamBound::Trait(t.clone())), + TypeParamBound::Lifetime(ref lt) if *lt == info.config.gc_lifetime() => { + Ok(parse_quote!('new_gc)) + }, + TypeParamBound::Lifetime(ref lt) => { + return Err(unsupported_lifetime_param(lt)) + } + } + } + let original_bounds = type_param.bounds.iter() + .map(|bound| rewrite_bound(bound, info)) + .collect::, _>>()?; type_param.bounds.push(parse_quote!(#zerogc_crate::GcErase<'min, #collector_id>)); type_param.bounds.push(parse_quote!('min)); let rewritten_type: Type = parse_quote!(<#param_name as #zerogc_crate::GcErase<'min, #collector_id>>::Erased); @@ -754,13 +773,10 @@ fn impl_erase(target: &DeriveInput, info: &GcTypeInfo) -> Result { if l.lifetime == info.config.gc_lifetime() { - rewritten_param = parse_quote!('static); + rewritten_param = parse_quote!('min); assert!(!info.config.ignored_lifetimes.contains(&l.lifetime)); } else { - return Err(Error::new( - l.span(), - "Unless Self: NullTrace, derive(GcErase) is currently unable to handle lifetimes" - )) + return Err(unsupported_lifetime_param(&l.lifetime)) } }, GenericParam::Const(ref param) => { @@ -881,6 +897,12 @@ fn impl_rebrand(target: &DeriveInput, info: &GcTypeInfo) -> Result Error { + Error::new( + lt.span(), + "Unless Self: NullTrace, derive(GcRebrand) is currently unable to handle lifetimes" + ) + } match param { GenericParam::Type(ref mut type_param) => { let param_name = &type_param.ident; @@ -888,7 +910,20 @@ fn impl_rebrand(target: &DeriveInput, info: &GcTypeInfo) -> Result>(); + let original_bounds = type_param.bounds.iter() + .map(|bound| rewrite_bound(bound, info)) + .collect::, Error>>()?; + fn rewrite_bound(bound: &TypeParamBound, info: &GcTypeInfo) -> Result { + match bound { + TypeParamBound::Trait(ref t) => 
Ok(TypeParamBound::Trait(t.clone())), + TypeParamBound::Lifetime(ref lt) if *lt == info.config.gc_lifetime() => { + Ok(parse_quote!('new_gc)) + }, + TypeParamBound::Lifetime(ref lt) => { + return Err(unsupported_lifetime_param(lt)) + } + } + } type_param.bounds.push(parse_quote!(#zerogc_crate::GcRebrand<'new_gc, #collector_id>)); let rewritten_type: Type = parse_quote!(<#param_name as #zerogc_crate::GcRebrand<'new_gc, #collector_id>>::Branded); rewritten_restrictions.push(WherePredicate::Type(PredicateType { @@ -904,10 +939,7 @@ fn impl_rebrand(target: &DeriveInput, info: &GcTypeInfo) -> Result { @@ -962,7 +994,7 @@ fn impl_trace(target: &DeriveInput, info: &GcTypeInfo) -> Result rebrand, false => erase */) -> Result, Error> { let zerogc_crate = zerogc_crate(); let requirements = if rebrand { self.bounds.rebrand.clone() } else { self.bounds.erase.clone() }; - if let Some(TraitRequirements::Never) = requirements { - // They are requesting that we dont implement - return Ok(None); - } let target_type = &self.target_type; let mut generics = self.basic_generics(); - let default_bounds: Vec = match requirements { + let id_type: Type = match self.options.collector_id { + Some(ref tp) => tp.clone(), + None => { + generics.params.push(parse_quote!(Id: #zerogc_crate::CollectorId)); + parse_quote!(Id) + } + }; + + let (generate_implicit, default_bounds): (bool, Vec) = match requirements { Some(TraitRequirements::Where(ref explicit_requirements)) => { generics.make_where_clause().predicates .extend(explicit_requirements.predicates.iter().cloned()); // they have explicit requirements -> no default bounds - vec![] + (false, vec![]) } Some(TraitRequirements::Always) => { - vec![] // always should implement + (false, vec![]) // always should implement (even without implicit bounds) + }, + Some(TraitRequirements::Never) => { + return Ok(None); // They are requesting we dont implement it at all }, - Some(TraitRequirements::Never) => unreachable!(), None => { - if rebrand { - vec![parse_quote!(#zerogc_crate::GcRebrand<'new_gc, Id>)] + (true, if rebrand { + vec![parse_quote!(#zerogc_crate::GcRebrand<'new_gc, #id_type>)] } else { - vec![parse_quote!(#zerogc_crate::GcErase<'min, Id>)] - } + vec![parse_quote!(#zerogc_crate::GcErase<'min, #id_type>)] + }) } }; // generate default bounds @@ -197,6 +203,7 @@ impl MacroInput { // no defaults to generate break } + if !generate_implicit { break } // skip generating implicit bounds match param { GenericParam::Type(ref tp) => { let type_name = &tp.ident; @@ -228,15 +235,16 @@ impl MacroInput { _ => {} } } - /* - * If we don't have explicit specification, - * extend the with the trace clauses - * - * TODO: Do we need to apply to the `Branded`/`Erased` types - */ - generics.make_where_clause().predicates - .extend(self.bounds.trace_where_clause(&self.params).predicates); - generics.params.push(parse_quote!(Id: #zerogc_crate::CollectorId)); + if generate_implicit { + /* + * If we don't have explicit specification, + * extend the with the trace clauses + * + * TODO: Do we need to apply to the `Branded`/`Erased` types + */ + generics.make_where_clause().predicates + .extend(self.bounds.trace_where_clause(&self.params).predicates); + } if rebrand { generics.params.push(parse_quote!('new_gc)); } else { @@ -244,9 +252,9 @@ impl MacroInput { } let (impl_generics, _, where_clause) = generics.split_for_impl(); let target_trait = if rebrand { - quote!(#zerogc_crate::GcRebrand<'new_gc, Id>) + quote!(#zerogc_crate::GcRebrand<'new_gc, #id_type>) } else { - 
quote!(#zerogc_crate::GcErase<'min, Id>) + quote!(#zerogc_crate::GcErase<'min, #id_type>) }; fn rewrite_brand_trait( target: &Type, trait_name: &str, target_params: &HashSet, @@ -278,7 +286,7 @@ impl MacroInput { rewrite_brand_trait( &self.target_type, "GcRebrand", &target_params, - parse_quote!(#zerogc_crate::GcRebrand<'new_gc, Id>), + parse_quote!(#zerogc_crate::GcRebrand<'new_gc, #id_type>), parse_quote!(Branded) ) }, Ok)?; @@ -288,7 +296,7 @@ impl MacroInput { rewrite_brand_trait( &self.target_type, "GcErase", &target_params, - parse_quote!(#zerogc_crate::GcErase<'min, Id>), + parse_quote!(#zerogc_crate::GcErase<'min, #id_type>), parse_quote!(Erased) ) })?; @@ -378,6 +386,7 @@ impl Parse for MacroInput { "visit" [VisitClosure] opt => visit_closure; "trace_mut" [VisitClosure] opt => trace_mut_closure; "trace_immutable" [VisitClosure] opt => trace_immutable_closure; + "collector_id" [Type] opt => collector_id; }); let bounds = res.bounds.unwrap_or_default(); if let Some(TraitRequirements::Never) = bounds.trace { @@ -439,7 +448,8 @@ impl Parse for MacroInput { branded_type: res.branded_type, erased_type: res.erased_type, needs_trace: res.needs_trace, - needs_drop: res.needs_drop + needs_drop: res.needs_drop, + collector_id: res.collector_id }, visit: visit_impl }) @@ -502,18 +512,33 @@ pub struct CustomBounds { } impl CustomBounds { fn trace_where_clause(&self, generic_params: &[GenericParam]) -> WhereClause { - let zerogc_crate = zerogc_crate(); - create_clause_with_default( - &self.trace, generic_params, - vec![parse_quote!(#zerogc_crate::Trace)] - ).unwrap_or_else(|| unreachable!("Trace must always be implemented")) + match self.trace { + Some(TraitRequirements::Never) => unreachable!("Trace must always be implemented"), + Some(TraitRequirements::Always) => empty_clause(), // No requirements + Some(TraitRequirements::Where(ref explicit)) => explicit.clone(), + None => { + // generate the implicit requiremnents + let zerogc_crate = zerogc_crate(); + create_clause_with_default( + &self.trace, generic_params, + vec![parse_quote!(#zerogc_crate::Trace)] + ).unwrap_or_else(|| unreachable!("Trace must always be implemented")) + } + } } fn trace_immutable_clause(&self, generic_params: &[GenericParam]) -> Option { - let zerogc_crate = zerogc_crate(); - create_clause_with_default( - &self.trace_immutable, generic_params, - vec![parse_quote!(#zerogc_crate::TraceImmutable)] - ) + match self.trace_immutable { + Some(TraitRequirements::Never) => None, // skip this impl + Some(TraitRequirements::Always) => Some(empty_clause()), // No requirements + Some(TraitRequirements::Where(ref explicit)) => Some(explicit.clone()), + None => { + let zerogc_crate = zerogc_crate(); + create_clause_with_default( + &self.trace_immutable, generic_params, + vec![parse_quote!(#zerogc_crate::TraceImmutable)] + ) + } + } } fn gcsafe_clause(&self, generic_params: &[GenericParam]) -> Option { let zerogc_crate = zerogc_crate(); @@ -598,6 +623,8 @@ pub struct StandardOptions { needs_trace: Expr, /// A (constant) expression determining whether the type should be dropped needs_drop: Expr, + /// The fixed id of the collector, or `None` if the type can work with any collector + collector_id: Option } /// The visit implementation. @@ -728,7 +755,12 @@ impl VisitImpl { Ok(if mutable { mutable_impl.clone() } else { - immutable.clone().unwrap() + immutable.clone().ok_or_else(|| { + Error::new( + Span::call_site(), + "Expected a trace_immutable closure" + ) + })? 
}) } } diff --git a/libs/simple/src/alloc.rs b/libs/simple/src/alloc.rs index 2fa0d3a..ddc2dd8 100644 --- a/libs/simple/src/alloc.rs +++ b/libs/simple/src/alloc.rs @@ -15,6 +15,88 @@ use std::cell::RefCell; use zerogc_context::utils::AtomicCell; +const DEBUG_INTERNAL_ALLOCATOR: bool = cfg!(zerogc_simple_debug_alloc); +mod debug { + pub const PADDING: u32 = 0xDEADBEAF; + pub const UNINIT: u32 = 0xCAFEBABE; + pub const PADDING_TIMES: usize = 16; + pub const PADDING_BYTES: usize = PADDING_TIMES * 4; + pub unsafe fn pad_memory_block(ptr: *mut u8, size: usize) { + assert!(super::DEBUG_INTERNAL_ALLOCATOR); + let start = ptr.sub(PADDING_BYTES); + for i in 0..PADDING_TIMES { + (start as *mut u32).add(i).write(PADDING); + } + let end = ptr.add(size); + for i in 0..PADDING_TIMES { + (end as *mut u32).add(i).write(PADDING); + } + } + pub unsafe fn mark_memory_uninit(ptr: *mut u8, size: usize) { + assert!(super::DEBUG_INTERNAL_ALLOCATOR); + let (blocks, leftover) = (size / 4, size % 4); + for i in 0..blocks { + (ptr as *mut u32).add(i).write(UNINIT); + } + let leftover_ptr = ptr.add(blocks * 4); + debug_assert_eq!(leftover_ptr.wrapping_add(leftover), ptr.add(size)); + for i in 0..leftover { + leftover_ptr.add(i).write(0xF0); + } + } + pub unsafe fn assert_padded(ptr: *mut u8, size: usize) { + assert!(super::DEBUG_INTERNAL_ALLOCATOR); + let start = ptr.sub(PADDING_BYTES); + let end = ptr.add(size); + let start_padding = std::slice::from_raw_parts( + start as *const u8 as *const u32, + PADDING_TIMES + ); + let region = std::slice::from_raw_parts( + ptr as *const u8, + size + ); + let end_padding = std::slice::from_raw_parts( + end as *const u8 as *const u32, + PADDING_TIMES + ); + let print_memory_region = || { + use std::fmt::Write; + let mut res = String::new(); + for &val in start_padding { + write!(&mut res, "{:X}", val).unwrap(); + } + res.push_str("||"); + for &b in region { + write!(&mut res, "{:X}", b).unwrap(); + } + res.push_str("||"); + for &val in end_padding { + write!(&mut res, "{:X}", val).unwrap(); + } + res + }; + // Closest to farthest + for (idx, &block) in start_padding.iter().rev().enumerate() { + if block == PADDING { continue } + assert_eq!( + block, PADDING, + "Unexpected start padding (offset -{}) w/ {}", + idx * 4, + print_memory_region() + ); + } + for (idx, &block) in end_padding.iter().enumerate() { + if block == PADDING { continue } + assert_eq!( + block, PADDING, + "Unexpected end padding (offset {}) w/ {}", + idx * 4, + print_memory_region() + ) + } + } +} /// The minimum size of supported memory (in words) /// /// Since the header takes at least one word, @@ -25,30 +107,15 @@ pub const MINIMUM_WORDS: usize = 2; /// Past this we have to fallback to the global allocator pub const MAXIMUM_SMALL_WORDS: usize = 32; /// The alignment of elements in the arena -pub const ARENA_ELEMENT_ALIGN: usize = ARENA_HEADER_LAYOUT.align(); -/// The size of headers in the arena -/// -/// This is the same regardless of the underlying object format -const ARENA_HEADER_LAYOUT: Layout = Layout::new::(); +pub const ARENA_ELEMENT_ALIGN: usize = std::mem::align_of::(); -use crate::layout::{GcHeader}; +use crate::layout::{GcHeader, UnknownHeader}; -#[inline] -pub const fn small_object_size(layout: Layout) -> usize { - let header_layout = ARENA_HEADER_LAYOUT; - header_layout.size() + header_layout - .padding_needed_for(layout.align()) - + layout.size() -} #[inline] pub const fn fits_small_object(layout: Layout) -> bool { - small_object_size(layout) <= MAXIMUM_SMALL_WORDS * std::mem::size_of::() + 
layout.size() <= MAXIMUM_SMALL_WORDS * std::mem::size_of::() && layout.align() <= ARENA_ELEMENT_ALIGN } -#[inline] -pub const fn is_small_object() -> bool { - fits_small_object(Layout::new::()) -} pub(crate) struct Chunk { pub start: *mut u8, @@ -88,10 +155,6 @@ impl Chunk { } } #[inline] - fn current(&self) -> *mut u8 { - self.current.load() - } - #[inline] fn capacity(&self) -> usize { self.end as usize - self.start as usize } @@ -107,38 +170,11 @@ impl Drop for Chunk { } } -/// We use zero as our marker value. -/// -/// `GcHeader::type_info` is the first field of the header -/// and it will never be null (its a reference). -/// Therefore this marker will never conflict with a valid header. -pub const FREE_SLOT_MARKER: usize = 0; +/// A slot in the free list #[repr(C)] pub struct FreeSlot { - /// Marker for the slot, initialized to `FREE_SLOT_MARKER` - pub marker: usize, /// Pointer to the previous free slot - pub(crate) prev_free: Option>, -} -#[repr(C)] -pub(crate) union MaybeFreeSlot { - pub free: FreeSlot, - pub header: GcHeader, -} - -impl MaybeFreeSlot { - #[inline] - pub unsafe fn is_free(&self) -> bool { - self.free.marker == FREE_SLOT_MARKER - } - #[inline] - pub unsafe fn mark_free(&mut self, prev: Option>) { - debug_assert!(!self.is_free()); - self.free = FreeSlot { - marker: FREE_SLOT_MARKER, - prev_free: prev - }; - } + pub(crate) prev_free: Option>, } pub const NUM_SMALL_ARENAS: usize = 15; const INITIAL_SIZE: usize = 512; @@ -201,7 +237,7 @@ impl ArenaState { self.current_chunk.store(ptr); } #[inline] - fn alloc(&self, element_size: usize) -> NonNull { + fn alloc(&self, element_size: usize) -> NonNull { unsafe { let chunk = &*self.current_chunk().as_ptr(); match chunk.try_alloc(element_size) { @@ -213,7 +249,7 @@ impl ArenaState { #[cold] #[inline(never)] - fn alloc_fallback(&self, element_size: usize) -> NonNull { + fn alloc_fallback(&self, element_size: usize) -> NonNull { let mut chunks = self.lock_chunks(); // Now that we hold the lock, check the current chunk again unsafe { @@ -229,7 +265,7 @@ impl ArenaState { self.force_current_chunk(NonNull::from(&**chunks.last().unwrap())); self.current_chunk().as_ref() .try_alloc(element_size).unwrap() - .cast::() + .cast::() } } } @@ -239,19 +275,28 @@ impl ArenaState { /// This is a lock-free linked list #[derive(Default)] pub(crate) struct FreeList { - next: AtomicCell>> + next: AtomicCell>> } impl FreeList { - #[inline] - pub(crate) fn next_free(&self) -> Option> { - self.next.load() - } - #[inline] - pub(crate) unsafe fn set_next_free(&self, next: Option>) { - self.next.store(next) + unsafe fn add_free(&self, free: *mut UnknownHeader, size: usize) { + if DEBUG_INTERNAL_ALLOCATOR { + debug::assert_padded(free as *mut u8, size); + debug::mark_memory_uninit(free as *mut u8, size); + } + let new_slot = free as *mut FreeSlot; + let mut next = self.next.load(); + loop { + (*new_slot).prev_free = next; + match self.next.compare_exchange(next, Some(NonNull::new_unchecked(new_slot))) { + Ok(_) => break, + Err(actual_next) => { + next = actual_next; + } + } + } } #[inline] - fn take_free(&self) -> Option> { + fn take_free(&self) -> Option> { loop { let next_free = match self.next.load() { Some(free) => free, @@ -261,13 +306,9 @@ impl FreeList { unsafe { if self.next.compare_exchange( Some(next_free), - next_free.as_ref().free.prev_free + next_free.as_ref().prev_free ).is_err() { continue /* retry */ } - debug_assert_eq!( - next_free.as_ref().free.marker, - FREE_SLOT_MARKER - ); - return Some(NonNull::from(&next_free.as_ref().header)) 
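
Reviewer note: `add_free`/`take_free` above implement a lock-free free list as a CAS loop over the list head (the real code uses zerogc_context's `AtomicCell` over an `Option<NonNull<FreeSlot>>` and reuses the freed slot's own memory for the link). The sketch below shows the same Treiber-stack push/pop shape with a plain `AtomicPtr` and a heap node standing in for the freed slot; it is illustrative only and ignores ABA concerns and the debug poisoning done by the real allocator.

```rust
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering};

// A freed slot links to the previously freed slot, forming an intrusive stack.
struct FreeSlot {
    prev_free: *mut FreeSlot,
}

struct FreeList {
    head: AtomicPtr<FreeSlot>,
}

impl FreeList {
    fn new() -> Self {
        FreeList { head: AtomicPtr::new(ptr::null_mut()) }
    }
    // Mirrors `add_free`: publish the slot as the new head via CAS, retrying
    // with the observed head if another thread won the race.
    fn push(&self, slot: *mut FreeSlot) {
        let mut next = self.head.load(Ordering::Acquire);
        loop {
            unsafe { (*slot).prev_free = next; }
            match self.head.compare_exchange(next, slot, Ordering::AcqRel, Ordering::Acquire) {
                Ok(_) => break,
                Err(actual) => next = actual, // lost the race: retry with new head
            }
        }
    }
    // Mirrors `take_free`: pop the head by swinging it to `prev_free`.
    fn pop(&self) -> *mut FreeSlot {
        loop {
            let head = self.head.load(Ordering::Acquire);
            if head.is_null() {
                return ptr::null_mut();
            }
            let prev = unsafe { (*head).prev_free };
            if self.head.compare_exchange(head, prev, Ordering::AcqRel, Ordering::Acquire).is_ok() {
                return head;
            }
        }
    }
}

fn main() {
    let list = FreeList::new();
    let slot = Box::into_raw(Box::new(FreeSlot { prev_free: ptr::null_mut() }));
    list.push(slot);
    let popped = list.pop();
    assert_eq!(popped, slot);
    unsafe { drop(Box::from_raw(popped)); }
}
```
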
+ return Some(next_free.cast()) } } } @@ -276,9 +317,13 @@ impl FreeList { pub struct SmallArena { pub(crate) element_size: usize, state: ArenaState, - pub(crate) free: FreeList + free: FreeList, } + impl SmallArena { + pub(crate) unsafe fn add_free(&self, obj: *mut UnknownHeader) { + self.free.add_free(obj, self.element_size) + } #[cold] // Initialization is the slow path fn with_words(num_words: usize) -> SmallArena { assert!(num_words >= MINIMUM_WORDS); @@ -291,25 +336,23 @@ impl SmallArena { } } #[inline] - pub(crate) fn alloc(&self) -> NonNull { + pub(crate) fn alloc(&self) -> NonNull { // Check the free list if let Some(free) = self.free.take_free() { - free + free.cast() + } else if DEBUG_INTERNAL_ALLOCATOR { + let mem = self.state.alloc(self.element_size + debug::PADDING_BYTES * 2) + .as_ptr() as *mut u8; + unsafe { + let mem = mem.add(debug::PADDING_BYTES); + debug::pad_memory_block(mem, self.element_size); + debug::mark_memory_uninit(mem, self.element_size); + NonNull::new_unchecked(mem).cast() + } } else { self.state.alloc(self.element_size) } } - pub(crate) unsafe fn for_each(&self, mut func: F) { - let chunks = self.state.lock_chunks(); - for chunk in &*chunks { - let mut ptr = chunk.start; - let end = chunk.current(); - while ptr < end { - func(ptr as *mut MaybeFreeSlot); - ptr = ptr.add(self.element_size); - } - } - } } macro_rules! arena_match { ($arenas:expr, $target:ident, max = $max:expr; $($size:pat => $num_words:literal @ $idx:expr),*) => { @@ -354,20 +397,14 @@ impl SmallArenaList { } } } - pub fn iter(&self) -> impl Iterator + '_ { - self.arenas.iter().filter_map(OnceCell::get) - } - #[inline] // This should be constant folded away (size/align is const) - pub fn find(&self) -> Option<&SmallArena> { - if std::mem::align_of::() > ARENA_ELEMENT_ALIGN { - return None - } - if !is_small_object::() { + #[inline] // This should hopefully be constant folded away (layout is const) + pub fn find(&self, layout: Layout) -> Option<&SmallArena> { + if !fits_small_object(layout) { return None } // Divide round up let word_size = mem::size_of::(); - let num_words = (small_object_size(Layout::new::()) + (word_size - 1)) + let num_words = (layout.size() + (word_size - 1)) / word_size; self.find_raw(num_words) } diff --git a/libs/simple/src/layout.rs b/libs/simple/src/layout.rs index ddd1e8b..9fca8ca 100644 --- a/libs/simple/src/layout.rs +++ b/libs/simple/src/layout.rs @@ -8,18 +8,21 @@ //! //! However, `zerogc-simple` is committed to a stable ABI, so this should (hopefully) //! be relatively well documented. -use std::mem::{ManuallyDrop}; +use std::mem::{self, }; use std::marker::PhantomData; use std::cell::Cell; - -use zerogc_context::field_offset; -use zerogc_derive::NullTrace; -use crate::{RawMarkState, CollectorId, DynTrace, MarkVisitor}; use std::ffi::c_void; use std::alloc::Layout; + use zerogc::{GcSafe, Trace}; -use std::mem; +use zerogc::vec::repr::{GcVecRepr,}; +use zerogc_context::field_offset; +use zerogc_derive::{NullTrace, unsafe_gc_impl}; + +use crate::{RawMarkState, CollectorId, DynTrace, MarkVisitor}; +use crate::alloc::fits_small_object; +use std::ptr::NonNull; /// Everything but the lower 2 bits of mark data are unused /// for CollectorId. 
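
Reviewer note: the next hunk introduces `HeaderLayout`, which centralizes the header/value offset arithmetic that used to live in `GcHeader::value_offset_for` and `GcArrayHeader::array_value_offset`. As a rough standalone illustration of that arithmetic (the value sits right after the header, padded up to the value's alignment, and the header is recovered from a value pointer by subtracting the same offset): `MiniHeaderLayout` below is a made-up stand-in, not the real zerogc-simple type, and the modulo rounding is the stable-Rust equivalent of the nightly `Layout::padding_needed_for` used in the actual code.

```rust
#[derive(Copy, Clone)]
struct MiniHeaderLayout {
    header_size: usize,
}

impl MiniHeaderLayout {
    // Offset of the value from the start of the header, given the value's alignment.
    fn value_offset(&self, value_align: usize) -> usize {
        // Round `header_size` up to the next multiple of `value_align`
        let padding = (value_align - (self.header_size % value_align)) % value_align;
        self.header_size + padding
    }
    // Inverse of placement: recover the header pointer from a value pointer.
    unsafe fn header_from_value(&self, value: *mut u8, value_align: usize) -> *mut u8 {
        value.sub(self.value_offset(value_align))
    }
}

fn main() {
    let layout = MiniHeaderLayout { header_size: 24 };
    assert_eq!(layout.value_offset(8), 24);  // already aligned: no padding
    assert_eq!(layout.value_offset(16), 32); // 8 bytes of padding inserted
    // Round-trip: header + offset -> value, then back to the header
    let mut storage = [0u8; 64];
    let header_ptr = storage.as_mut_ptr();
    let value_ptr = unsafe { header_ptr.add(layout.value_offset(16)) };
    assert_eq!(unsafe { layout.header_from_value(value_ptr, 16) }, header_ptr);
}
```
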
@@ -40,6 +43,73 @@ pub struct SimpleMarkData { fmt: PhantomData } +/// Marker type for an unknown header +pub(crate) struct UnknownHeader(()); + +/// The layout of an object's header +#[derive(Debug)] +pub struct HeaderLayout { + /// The overall size of the header + pub header_size: usize, + /// The offset of the 'common' header, + /// starting from the start of the real header + pub common_header_offset: usize, + marker: PhantomData<*mut H> +} +impl Copy for HeaderLayout {} +impl Clone for HeaderLayout { + #[inline] + fn clone(&self) -> Self { + *self + } +} + +impl HeaderLayout { + #[inline] + pub(crate) const fn value_offset_from_common_header(&self, align: usize) -> usize { + self.value_offset(align) - self.common_header_offset + } + #[inline] + pub(crate) const fn into_unknown(self) -> HeaderLayout { + HeaderLayout { + header_size: self.header_size, + common_header_offset: self.common_header_offset, + marker: PhantomData + } + } + /// The alignment of the header + /// + /// NOTE: All headers have the same alignment + pub const ALIGN: usize = std::mem::align_of::(); + #[inline] + pub(crate) const unsafe fn common_header(self, ptr: *mut H) -> *mut GcHeader { + (ptr as *mut u8).add(self.common_header_offset).cast() + } + #[inline] + pub(crate) const unsafe fn from_common_header(self, ptr: *mut GcHeader) -> *mut H { + (ptr as *mut u8).sub(self.common_header_offset).cast() + } + /// Get the header from the specified value pointer + #[inline] + pub const unsafe fn from_value_ptr(self, ptr: *mut T) -> *mut H { + let align = std::mem::align_of_val(&*ptr); + (ptr as *mut u8).sub(self.value_offset(align)).cast() + } + /// Get the in-memory layout of the header (doesn't include the value) + #[inline] + pub const fn layout(&self) -> Layout { + unsafe { + Layout::from_size_align_unchecked(self.header_size, Self::ALIGN) + } + } + /// Get the offset of the value from the start of the header, + /// given the alignment of its value + #[inline] + pub const fn value_offset(&self, align: usize) -> usize { + let padding = self.layout().padding_needed_for(align); + self.header_size + padding + } +} impl SimpleMarkData { /// Create mark data from a specified snapshot @@ -109,29 +179,47 @@ impl SimpleMarkDataSnapshot { } /// Marker for an unknown GC object -pub(crate) struct DynamicObj; +#[repr(C)] +pub struct DynamicObj; +unsafe_gc_impl!( + target => DynamicObj, + params => [], + NEEDS_TRACE => true, + NEEDS_DROP => true, + null_trace => never, + visit => |self, visitor| { + unreachable!() + } +); #[repr(C)] -pub(crate) struct BigGcObject { - pub(crate) header: GcHeader, - /// This is dropped using dynamic type info - pub(crate) static_value: ManuallyDrop +pub(crate) struct BigGcObject { + pub(crate) header: NonNull } -impl BigGcObject { +impl BigGcObject { #[inline] - pub(crate) unsafe fn into_dynamic_box(val: Box) -> Box> { - std::mem::transmute::>, Box>>(val) + pub unsafe fn header(&self) -> &GcHeader { + self.header.as_ref() + } + #[inline] + pub unsafe fn from_ptr(header: *mut GcHeader) -> BigGcObject { + debug_assert!(!header.is_null()); + BigGcObject { header: NonNull::new_unchecked(header) } } } -impl Drop for BigGcObject { +impl Drop for BigGcObject { fn drop(&mut self) { unsafe { - let type_info = self.header.type_info; + let type_info = self.header().type_info; if let Some(func) = type_info.drop_func { - func((&self.header as *const GcHeader as *const u8 as *mut u8) - .add(type_info.value_offset) + func((self.header.as_ptr() as *const u8 as *mut u8) + 
.add(type_info.value_offset_from_common_header) .cast()); } + let layout = type_info.determine_total_layout(self.header.as_ptr()); + let actual_header = type_info.header_layout() + .from_common_header(self.header.as_ptr()); + std::alloc::dealloc(actual_header.cast(), layout); } } } @@ -142,11 +230,101 @@ pub struct GcArrayHeader { pub(crate) common_header: GcHeader } impl GcArrayHeader { + pub(crate) const LAYOUT: HeaderLayout = HeaderLayout { + header_size: std::mem::size_of::(), + common_header_offset: field_offset!(GcArrayHeader, common_header), + marker: PhantomData + }; +} +/// A header for a Gc vector +#[repr(C)] +pub struct GcVecHeader { + pub(crate) capacity: usize, + pub(crate) len: Cell, + pub(crate) common_header: GcHeader +} +impl GcVecHeader { + pub(crate) const LAYOUT: HeaderLayout = HeaderLayout { + common_header_offset: field_offset!(GcVecHeader, common_header), + header_size: std::mem::size_of::(), + marker: PhantomData + }; +} + +/// The raw representation of a vector in the simple collector +/// +/// NOTE: Length and capacity are stored implicitly in the [GcVecHeader] +#[repr(C)] +pub struct SimpleVecRepr { + marker: PhantomData, +} +impl SimpleVecRepr { + #[inline] + fn header(&self) -> *mut GcVecHeader { + unsafe { + (self as *const Self as *mut Self as *mut u8) + .sub(GcVecHeader::LAYOUT.value_offset(std::mem::align_of::())) + .cast() + } + } +} +unsafe impl GcVecRepr for SimpleVecRepr { + /// Right now, there is no stable API for in-place re-allocation + const SUPPORTS_REALLOC: bool = false; + + #[inline] + fn element_layout(&self) -> Layout { + Layout::new::() + } + + #[inline] + fn len(&self) -> usize { + unsafe { (*self.header()).len.get() } + } + + #[inline] + unsafe fn set_len(&self, len: usize) { + debug_assert!(len <= self.capacity()); + (*self.header()).len.set(len); + } + + #[inline] + fn capacity(&self) -> usize { + unsafe { (*self.header()).capacity } + } + #[inline] - pub(crate) const fn array_value_offset(align: usize) -> usize { - GcHeader::value_offset_for(align) + field_offset!(GcArrayHeader, common_header) + unsafe fn ptr(&self) -> *const c_void { + self as *const Self as *const c_void // We are actually just a GC pointer to the value ptr } } +unsafe_gc_impl!( + target => SimpleVecRepr, + params => [T: GcSafe], + NEEDS_TRACE => T::NEEDS_TRACE, + NEEDS_DROP => T::NEEDS_DROP, + null_trace => { where T: ::zerogc::NullTrace }, + trace_mut => |self, visitor| { + // Trace our innards + unsafe { + let start: *mut T = self.ptr() as *const T as *mut T; + for i in 0..self.len() { + visitor.visit(&mut *start.add(i))?; + } + } + Ok(()) + }, + trace_immutable => |self, visitor| { + // Trace our innards + unsafe { + let start: *mut T = self.ptr() as *const T as *mut T; + for i in 0..self.len() { + visitor.visit_immutable(&*start.add(i))?; + } + } + Ok(()) + } +); /// A header for a GC object /// @@ -160,6 +338,12 @@ pub struct GcHeader { pub mark_data: SimpleMarkData } impl GcHeader { + /// The layout of the header + pub const LAYOUT: HeaderLayout = HeaderLayout { + header_size: std::mem::size_of::(), + common_header_offset: 0, + marker: PhantomData + }; /// Create a new header #[inline] pub fn new(type_info: &'static GcType, mark_data: SimpleMarkData) -> Self { @@ -171,25 +355,17 @@ impl GcHeader { unsafe { (self as *const GcHeader as *mut GcHeader as *mut u8) // NOTE: This takes into account the possibility of `BigGcObject` - .add(self.type_info.value_offset) + .add(self.type_info.value_offset_from_common_header) .cast::() } } - pub(crate) const fn 
value_offset_for(align: usize) -> usize { - // NOTE: Header - let layout = Layout::new::(); - let regular_padding = layout.padding_needed_for(align); - let array_padding = Layout::new::().padding_needed_for(align); - debug_assert!(regular_padding == array_padding); - layout.size() + regular_padding - } /// Get the [GcHeader] for the specified value, assuming that its been allocated by this collector. /// /// ## Safety /// Assumes the value was allocated in the simple collector. #[inline] pub unsafe fn from_value_ptr(ptr: *mut T) -> *mut GcHeader { - (ptr as *mut u8).sub(GcHeader::value_offset_for(std::mem::align_of_val(&*ptr))).cast() + GcHeader::LAYOUT.from_value_ptr(ptr) } #[inline] pub(crate) fn raw_mark_state(&self) -> RawMarkState { @@ -205,13 +381,18 @@ impl GcHeader { pub enum GcTypeLayout { /// A type with a fixed, statically-known layout Fixed(Layout), - /// An array, whose type can vary at runtime + /// An array, whose size can vary at runtime Array { /// The fixed layout of elements in the array /// /// The overall alignment of the array is equal to the alignment of each element, /// however the size may vary at runtime. element_layout: Layout + }, + /// A vector, whose capacity can vary from instance to instance + Vec { + /// The fixed layout of elements in the vector. + element_layout: Layout } } @@ -223,54 +404,114 @@ pub struct GcType { /// The offset of the value from the start of the header /// /// This varies depending on the type's alignment - pub value_offset: usize, + pub value_offset_from_common_header: usize, /// The function to trace the type, or `None` if it doesn't need to be traced pub trace_func: Option, /// The function to drop the type, or `None` if it doesn't need to be dropped pub drop_func: Option, } impl GcType { + #[inline] + fn align(&self) -> usize { + match self.layout { + GcTypeLayout::Fixed(fixed) => fixed.align(), + GcTypeLayout::Array { element_layout } | + GcTypeLayout::Vec { element_layout } => element_layout.align() + } + } + pub(crate) fn header_layout(&self) -> HeaderLayout { + match self.layout { + GcTypeLayout::Fixed(_) => GcHeader::LAYOUT.into_unknown(), + GcTypeLayout::Array { .. } => GcArrayHeader::LAYOUT.into_unknown(), + GcTypeLayout::Vec { .. } => GcVecHeader::LAYOUT.into_unknown() + } + } #[inline] unsafe fn determine_size(&self, header: *mut GcHeader) -> usize { match self.layout { GcTypeLayout::Fixed(layout) => layout.size(), GcTypeLayout::Array { element_layout } => { - let header = (header as *mut u8) - .sub(field_offset!(GcArrayHeader, common_header)) - .cast::(); - element_layout.repeat((*header).len).unwrap().0 - .pad_to_align().size() + let header = GcArrayHeader::LAYOUT.from_common_header(header); + element_layout.repeat((*header).len).unwrap().0.size() + }, + GcTypeLayout::Vec { element_layout } => { + let header = GcVecHeader::LAYOUT.from_common_header(header); + element_layout.repeat((*header).capacity).unwrap().0.size() } } } #[inline] pub(crate) unsafe fn determine_total_size(&self, header: *mut GcHeader) -> usize { - let mut res = self.value_offset + self.determine_size(header); - match self.layout { - GcTypeLayout::Fixed(_) => {} - GcTypeLayout::Array { .. 
} => { - res += field_offset!(GcArrayHeader, common_header); - } - } - res + self.determine_total_layout(header).size() + } + #[inline] + pub(crate) unsafe fn determine_total_layout(&self, header: *mut GcHeader) -> Layout { + self.header_layout().layout() + .extend(Layout::from_size_align_unchecked( + self.determine_size(header), + self.align() + )).unwrap().0.pad_to_align() } } +pub(crate) trait StaticVecType { + const STATIC_VEC_TYPE: &'static GcType; +} +impl StaticVecType for T { + const STATIC_VEC_TYPE: &'static GcType = &GcType { + layout: GcTypeLayout::Vec { + element_layout: Layout::new::(), + }, + value_offset_from_common_header: { + // We have same alignment as our members + let align = std::mem::align_of::(); + GcArrayHeader::LAYOUT.value_offset_from_common_header(align) + }, + trace_func: if T::NEEDS_TRACE { + Some({ + unsafe fn visit(val: *mut c_void, visitor: &mut MarkVisitor) { + let len = (*GcVecHeader::LAYOUT.from_value_ptr(val as *mut T)).len.get(); + let slice = std::slice::from_raw_parts_mut( + val as *mut T, + len + ); + let Ok(()) = <[T] as Trace>::visit(slice, visitor); + } + visit:: as unsafe fn(*mut c_void, &mut MarkVisitor) + }) + } else { + None + }, + drop_func: if T::NEEDS_DROP { + Some({ + unsafe fn drop_gc_vec(val: *mut c_void) { + let len = (*GcVecHeader::LAYOUT.from_value_ptr(val as *mut T)).len.get(); + std::ptr::drop_in_place::<[T]>(std::ptr::slice_from_raw_parts_mut( + val as *mut T, + len + )); + } + drop_gc_vec:: as unsafe fn(*mut c_void) + }) + } else { + None + } + }; +} pub(crate) trait StaticGcType { - const VALUE_OFFSET: usize; const STATIC_TYPE: &'static GcType; } impl StaticGcType for [T] { - const VALUE_OFFSET: usize = GcHeader::value_offset_for(std::mem::align_of::()); const STATIC_TYPE: &'static GcType = &GcType { layout: GcTypeLayout::Array { element_layout: Layout::new::() }, - value_offset: Self::VALUE_OFFSET, + value_offset_from_common_header: { + GcArrayHeader::LAYOUT.value_offset_from_common_header(std::mem::align_of::()) + }, trace_func: if ::NEEDS_TRACE { Some({ unsafe fn visit(val: *mut c_void, visitor: &mut MarkVisitor) { - let len = (*(val as *mut u8) - .sub(GcArrayHeader::array_value_offset(std::mem::align_of::())) - .cast::()).len; + let header = GcArrayHeader::LAYOUT.from_value_ptr(val as *mut T); + let len = (*header).len; let slice = std::slice::from_raw_parts_mut( val as *mut T, len @@ -283,9 +524,7 @@ impl StaticGcType for [T] { drop_func: if ::NEEDS_DROP { Some({ unsafe fn drop_gc_slice(val: *mut c_void) { - let len = (*(val as *mut u8) - .sub(GcArrayHeader::array_value_offset(std::mem::align_of::())) - .cast::()).len; + let len = (*GcArrayHeader::LAYOUT.from_value_ptr(val as *mut T)).len; std::ptr::drop_in_place::<[T]>(std::ptr::slice_from_raw_parts_mut( val as *mut T, len @@ -297,10 +536,11 @@ impl StaticGcType for [T] { }; } impl StaticGcType for T { - const VALUE_OFFSET: usize = GcHeader::value_offset_for(std::mem::align_of::()); const STATIC_TYPE: &'static GcType = &GcType { layout: GcTypeLayout::Fixed(Layout::new::()), - value_offset: Self::VALUE_OFFSET, + value_offset_from_common_header: { + GcHeader::LAYOUT.value_offset_from_common_header(std::mem::align_of::()) + }, trace_func: if ::NEEDS_TRACE { Some(unsafe { mem::transmute::<_, unsafe fn(*mut c_void, &mut MarkVisitor)>( ::trace as fn(&mut T, &mut MarkVisitor), diff --git a/libs/simple/src/lib.rs b/libs/simple/src/lib.rs index e306c5d..b376504 100644 --- a/libs/simple/src/lib.rs +++ b/libs/simple/src/lib.rs @@ -31,8 +31,12 @@ ptr_metadata, // Needed to abstract 
over Sized/unsized types // Used for const layout computation: const_raw_ptr_deref, - const_raw_ptr_to_usize_cast, const_ptr_offset, + const_align_of_val, + // Needed for field_offset! + const_ptr_offset_from, + const_maybe_uninit_as_ptr, + const_refs_to_cell, )] #![feature(drain_filter)] #![allow( @@ -43,9 +47,7 @@ clippy::vtable_address_comparisons, )] use std::alloc::Layout; -use std::ptr::{NonNull, addr_of_mut}; -use std::mem::{ManuallyDrop}; -#[cfg(feature = "multiple-collectors")] +use std::ptr::{NonNull,}; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering}; #[cfg(not(feature = "multiple-collectors"))] @@ -53,6 +55,7 @@ use std::sync::atomic::AtomicPtr; #[cfg(not(feature = "multiple-collectors"))] use std::marker::PhantomData; use std::any::{TypeId}; +use std::ops::{Deref, DerefMut}; use slog::{Logger, FnValue, debug}; @@ -60,30 +63,30 @@ use zerogc::{GcSafe, Trace, GcVisitor}; use zerogc_context::utils::{ThreadId, MemorySize}; -use crate::alloc::{SmallArenaList, small_object_size}; -use crate::layout::{StaticGcType, GcType}; +use crate::alloc::{SmallArenaList, SmallArena}; +use crate::layout::{StaticGcType, GcType, SimpleVecRepr, DynamicObj, StaticVecType, SimpleMarkData, SimpleMarkDataSnapshot, GcHeader, BigGcObject, HeaderLayout, GcArrayHeader, GcVecHeader, GcTypeLayout}; -use zerogc_context::collector::{RawSimpleAlloc}; +use zerogc_context::collector::{RawSimpleAlloc, RawCollectorImpl}; use zerogc_context::handle::{GcHandleList, RawHandleImpl}; -use zerogc_context::{ - CollectionManager as AbstractCollectionManager, - RawContext as AbstractRawContext -}; -use crate::layout::{SimpleMarkData, SimpleMarkDataSnapshot, GcHeader, BigGcObject}; -use std::ops::{Deref, DerefMut}; +use zerogc_context::{CollectionManager as AbstractCollectionManager, RawContext as AbstractRawContext, CollectorContext}; +use zerogc::vec::{GcRawVec}; +use std::cell::Cell; #[cfg(feature = "small-object-arenas")] mod alloc; #[cfg(not(feature = "small-object-arenas"))] mod alloc { - pub const fn is_small_object() -> bool { + use std::alloc::Layout; + use crate::layout::UnknownHeader; + + pub const fn fits_small_object(_layout: Layout) -> bool { false } - pub const fn small_object_size() -> usize { - unimplemented!() - } - pub struct FakeArena; - impl FakeArena { + pub struct SmallArena; + impl SmallArena { + pub(crate) fn add_free(&self, _free: *mut UnknownHeader) { + unimplemented!() + } pub(crate) fn alloc(&self) -> std::ptr::NonNull { unimplemented!() } @@ -92,11 +95,31 @@ mod alloc { impl SmallArenaList { // Create dummy pub fn new() -> Self { SmallArenaList } - pub fn find(&self) -> Option { None } + pub fn find(&self, _layout: Layout) -> Option<&SmallArena> { None } } } pub mod layout; +/// The configuration for a garbage collection +pub struct GcConfig { + /// Whether to always force a collection at safepoints, + /// regardless of whether the heuristics say. 
+ pub always_force_collect: bool, + /// The initial threshold to trigger garbage collection (in bytes) + pub initial_threshold: usize, +} +impl Default for GcConfig { + fn default() -> Self { + GcConfig { + always_force_collect: false, + initial_threshold: 2048, + } + } +} + +/// The alignment of the singleton empty vector +const EMPTY_VEC_ALIGNMENT: usize = std::mem::align_of::(); + #[cfg(feature = "sync")] type RawContext = zerogc_context::state::sync::RawContext; #[cfg(feature = "sync")] @@ -115,14 +138,78 @@ pub type SimpleCollectorContext = ::zerogc_context::CollectorContext; /// A garbage collected pointer, allocated in the "simple" collector pub type Gc<'gc, T> = ::zerogc::Gc<'gc, T, CollectorId>; +/// A garbage collected array, allocated in the "simple" collector +pub type GcArray<'gc, T> = ::zerogc::vec::GcArray<'gc, T, CollectorId>; +/// A garbage colelcted vector, allocated in the "simple" collector +pub type GcVec<'gc, T> = ::zerogc::vec::GcVec<'gc, T, SimpleCollectorContext>; #[cfg(not(feature = "multiple-collectors"))] static GLOBAL_COLLECTOR: AtomicPtr = AtomicPtr::new(std::ptr::null_mut()); unsafe impl RawSimpleAlloc for RawSimpleCollector { - #[inline] fn alloc<'gc, T>(context: &'gc SimpleCollectorContext, value: T) -> Gc<'gc, T> where T: GcSafe + 'gc { - context.collector().heap.allocator.alloc(value) + unsafe { + let (_header, ptr) = context.collector().heap.allocator.alloc_layout( + GcHeader::LAYOUT, + Layout::new::(), + T::STATIC_TYPE + ); + let ptr = ptr as *mut T; + ptr.write(value); + Gc::from_raw(context.collector().id(), NonNull::new_unchecked(ptr)) + } + } + + unsafe fn alloc_uninit_slice<'gc, T>(context: &'gc CollectorContext, len: usize) -> (CollectorId, *mut T) where T: GcSafe + 'gc { + let (header, ptr) = context.collector().heap.allocator.alloc_layout( + GcArrayHeader::LAYOUT, + Layout::array::(len).unwrap(), + <[T] as StaticGcType>::STATIC_TYPE + ); + (*header).len = len; + (context.collector().id(), ptr.cast()) + } + + #[inline] + fn alloc_vec<'gc, T>(context: &'gc CollectorContext) -> GcVec<'gc, T> where T: GcSafe + 'gc { + if std::mem::align_of::() > EMPTY_VEC_ALIGNMENT { + // We have to do an actual allocation because we want higher alignment :( + return Self::alloc_vec_with_capacity(context, 0) + } + let header = context.collector().heap.empty_vec(); + // NOTE: Assuming header is already initialized + unsafe { + debug_assert_eq!((*header).len.get(), 0); + debug_assert_eq!((*header).capacity, 0); + } + let id = context.collector().id(); + let ptr = unsafe { NonNull::new_unchecked((header as *mut u8) + .add(GcVecHeader::LAYOUT.value_offset(EMPTY_VEC_ALIGNMENT))) }; + GcVec { + raw: unsafe { GcRawVec::from_repr(Gc::from_raw(id, ptr.cast())) }, + context + } + } + + fn alloc_vec_with_capacity<'gc, T>(context: &'gc CollectorContext, capacity: usize) -> GcVec<'gc, T> where T: GcSafe + 'gc { + if capacity == 0 && std::mem::align_of::() <= EMPTY_VEC_ALIGNMENT { + return Self::alloc_vec(context) + } + let (header, value_ptr) = context.collector().heap.allocator.alloc_layout( + GcVecHeader::LAYOUT, + Layout::array::(capacity).unwrap(), + ::STATIC_VEC_TYPE + ); + let ptr = unsafe { + (*header).capacity = capacity; + (*header).len.set(0); + NonNull::new_unchecked(value_ptr as *mut SimpleVecRepr) + }; + let id = context.collector().id(); + GcVec { + raw: unsafe { GcRawVec::from_repr(Gc::from_raw(id, ptr.cast())) }, + context + } } } @@ -175,22 +262,75 @@ unsafe impl DynTrace for GcHandleListWrapper { } -/// The initial memory usage to start a collection -const 
INITIAL_COLLECTION_THRESHOLD: usize = 2048; - struct GcHeap { + config: Arc, threshold: AtomicUsize, - allocator: SimpleAlloc + allocator: SimpleAlloc, + // NOTE: This is only public so it can be traced + cached_empty_vec: Cell> } impl GcHeap { - fn new(threshold: usize) -> GcHeap { + fn new(config: Arc) -> GcHeap { GcHeap { - threshold: AtomicUsize::new(threshold), - allocator: SimpleAlloc::new() + threshold: AtomicUsize::new(if config.always_force_collect { + 0 + } else { + config.initial_threshold + }), + allocator: SimpleAlloc::new(), + config, cached_empty_vec: Cell::new(None) + } + } + #[inline] + pub fn empty_vec(&self) -> *mut GcVecHeader { + match self.cached_empty_vec.get() { + Some(cached) => cached, + None => { + let res = self.create_empty_vec(); + self.cached_empty_vec.set(Some(self.create_empty_vec())); + res + } + } + } + #[cold] + fn create_empty_vec<'gc>(&self) -> *mut GcVecHeader { + const DUMMY_LAYOUT: Layout = unsafe { Layout::from_size_align_unchecked( + 0, EMPTY_VEC_ALIGNMENT + ) }; + const DUMMY_TYPE: GcType = GcType { + layout: GcTypeLayout::Vec { + element_layout: DUMMY_LAYOUT, + }, + value_offset_from_common_header: GcVecHeader::LAYOUT + .value_offset_from_common_header(EMPTY_VEC_ALIGNMENT), + drop_func: None, + trace_func: None + }; + let (header, _) = self.allocator.alloc_layout( + GcVecHeader::LAYOUT, + DUMMY_LAYOUT, + &DUMMY_TYPE + ); + unsafe { + (*header).capacity = 0; + (*header).len.set(0); + header } } #[inline] fn should_collect_relaxed(&self) -> bool { + /* + * Double check that 'config.always_force_collect' implies + * a threshold of zero. + * + * NOTE: This is not part of the ABI, it's only for internal debugging + */ + if cfg!(debug_assertions) && self.config.always_force_collect { + debug_assert_eq!( + self.threshold.load(Ordering::Relaxed), + 0 + ); + } /* * Going with relaxed ordering because it's not essential * that we see updates immediately. @@ -247,11 +387,11 @@ impl From for Lock { } } #[cfg(not(feature = "sync"))] -impl<'a, T> ILock<'a, T> for Lock { +impl<'a, T: 'a> ILock<'a, T> for Lock { type Guard = ::std::cell::RefMut<'a, T>; #[inline] - fn lock(&self) -> Self::Guard { + fn lock(&'a self) -> Self::Guard { self.0.borrow_mut() } @@ -267,13 +407,21 @@ impl<'a, T> ILock<'a, T> for Lock { pub(crate) struct SimpleAlloc { collector_id: Option, small_arenas: SmallArenaList, - big_objects: Lock>>, + big_objects: Lock>, + small_objects: Lock>, /// Whether the meaning of the mark bit is currently inverted. 
/// /// This flips every collection mark_inverted: AtomicBool, allocated_size: AtomicUsize } +#[derive(Debug)] +struct TargetLayout { + value_layout: Layout, + header_layout: HeaderLayout, + value_offset: usize, + overall_layout: Layout +} impl SimpleAlloc { fn new() -> SimpleAlloc { SimpleAlloc { @@ -281,6 +429,7 @@ impl SimpleAlloc { allocated_size: AtomicUsize::new(0), small_arenas: SmallArenaList::new(), big_objects: Lock::from(Vec::new()), + small_objects: Lock::from(Vec::new()), mark_inverted: AtomicBool::new(false), } } @@ -306,112 +455,116 @@ impl SimpleAlloc { } #[inline] - fn alloc(&self, value: T) -> Gc<'_, T> { - if let Some(arena) = self.small_arenas.find::() { - let header = arena.alloc(); - unsafe { - let collector_id_ptr = match self.collector_id { - Some(ref collector) => collector, - None => { - #[cfg(debug_assertions)] { - unreachable!("Invalid collector id") - } - #[cfg(not(debug_assertions))] { - std::hint::unreachable_unchecked() - } - } - }; - let _value_ptr = header.as_ptr().write(GcHeader::new( - T::STATIC_TYPE, - SimpleMarkData::from_snapshot(SimpleMarkDataSnapshot::new( - MarkState::White.to_raw(self.mark_inverted()), - collector_id_ptr as *const _ as *mut _, - )) - )); - let value_ptr = header.as_ref().value().cast::(); - value_ptr.write(value); - self.add_allocated_size(small_object_size(Layout::new::())); - Gc::from_raw( - *collector_id_ptr, - NonNull::new_unchecked(value_ptr), - ) + fn alloc_layout(&self, header_layout: HeaderLayout, value_layout: Layout, static_type: &'static GcType) -> (*mut H, *mut u8) { + let collector_id_ptr = match self.collector_id { + Some(ref collector) => collector, + None => { + #[cfg(debug_assertions)] { + unreachable!("Invalid collector id") + } + #[cfg(not(debug_assertions))] { + unsafe { std::hint::unreachable_unchecked() } + } } + }; + let (mut overall_layout, value_offset) = header_layout.layout() + .extend(value_layout).unwrap(); + debug_assert_eq!(header_layout.value_offset(value_layout.align()), value_offset); + overall_layout = overall_layout.pad_to_align(); + debug_assert_eq!( + value_offset, + header_layout.value_offset(value_layout.align()) + ); + let target_layout = TargetLayout { + header_layout, value_offset, value_layout, overall_layout + }; + let (header, value_ptr) = if let Some(arena) = self.small_arenas.find(target_layout.overall_layout) { + unsafe { self.alloc_layout_small(arena, target_layout) } } else { - self.alloc_big(value) - } - } - fn alloc_big(&self, value: T) -> Gc<'_, T> { - let collector_id = self.collector_id.as_ref().unwrap(); - let mut object = Box::new(BigGcObject { - header: GcHeader::new( - T::STATIC_TYPE, + self.alloc_layout_big(target_layout) + }; + unsafe { + header_layout.common_header(header).write(GcHeader::new( + static_type, SimpleMarkData::from_snapshot(SimpleMarkDataSnapshot::new( MarkState::White.to_raw(self.mark_inverted()), - collector_id as *const _ as *mut _ + collector_id_ptr as *const _ as *mut _, )) - ), - static_value: ManuallyDrop::new(value), - }); - let gc = unsafe { Gc::from_raw( - self.collector_id.unwrap(), - NonNull::new_unchecked(&mut *object.static_value), - ) }; + )); + } + (header, value_ptr) + } + #[inline] + unsafe fn alloc_layout_small(&self, arena: &SmallArena, target_layout: TargetLayout) -> (*mut H, *mut u8) { + let ptr = arena.alloc(); + debug_assert_eq!( + ptr.as_ptr() as usize % target_layout.header_layout.layout().align(), + 0 + ); + self.add_allocated_size(target_layout.overall_layout.size()); + { + let mut lock = self.small_objects.lock(); + 
lock.push((ptr.as_ptr() as *mut u8).add(target_layout.header_layout.common_header_offset).cast()); + } + (ptr.as_ptr().cast(), (ptr.as_ptr() as *mut u8).add(target_layout.value_offset)) + } + fn alloc_layout_big(&self, target_layout: TargetLayout) -> (*mut H, *mut u8) { + let header: *mut H; + let value_ptr = unsafe { + header = std::alloc::alloc(target_layout.overall_layout).cast(); + (header as *mut u8).add(target_layout.value_offset) + }; { - let size = std::mem::size_of::>(); - { + unsafe { let mut objs = self.big_objects.lock(); - objs.push(unsafe { BigGcObject::into_dynamic_box(object) }); + let common_header = (header as *mut u8) + .add(target_layout.header_layout.common_header_offset) + .cast(); + objs.push(BigGcObject::from_ptr(common_header)); } - self.add_allocated_size(size); + self.add_allocated_size(target_layout.overall_layout.size()); } - gc + (header, value_ptr) } unsafe fn sweep(&self) { let mut expected_size = self.allocated_size(); let mut actual_size = 0; // Clear small arenas - #[cfg(feature = "small-object-arenas")] - for arena in self.small_arenas.iter() { - let mut last_free = arena.free.next_free(); - let mark_inverted = self.mark_inverted.load(Ordering::SeqCst); - arena.for_each(|slot| { - if (*slot).is_free() { + let was_mark_inverted = self.mark_inverted.load(Ordering::SeqCst); + self.small_objects.lock().drain_filter(|&mut common_header| { + let total_size = (*common_header).type_info.determine_total_size(common_header); + match (*common_header).raw_mark_state().resolve(was_mark_inverted) { + MarkState::White => { + // Free the object, dropping if necessary + expected_size -= total_size; + true // Drain, implicitly adding to free list + }, + MarkState::Grey => panic!("All grey objects should've been processed"), + MarkState::Black => { /* - * Just ignore this. It's already part of our linked list - * of allocated objects. + * Retain the object + * State will be implicitly set to white + * by inverting mark the meaning of the mark bits. */ - } else { - let total_size = (*slot).header.type_info.determine_total_size(addr_of_mut!((*slot).header)); - match (*slot).header.raw_mark_state().resolve(mark_inverted) { - MarkState::White => { - // Free the object, dropping if necessary - expected_size -= total_size; - if let Some(func) = (*slot).header.type_info.drop_func { - func((*slot).header.value()); - } - // Add to free list - (*slot).mark_free(last_free); - last_free = Some(NonNull::new_unchecked(slot)); - }, - MarkState::Grey => panic!("All grey objects should've been processed"), - MarkState::Black => { - /* - * Retain the object - * State will be implicitly set to white - * by inverting mark the meaning of the mark bits. 
- */ - actual_size += total_size; - }, - } + actual_size += total_size; + false // keep } - }); - arena.free.set_next_free(last_free); - } + } + }).for_each(|freed_common_header| { + let type_info = (*freed_common_header).type_info; + if let Some(func) = type_info.drop_func { + func((*freed_common_header).value()); + } + let overall_layout = type_info.determine_total_layout(freed_common_header); + let actual_start = type_info.header_layout() + .from_common_header(freed_common_header); + self.small_arenas.find(overall_layout).unwrap().add_free(actual_start) + }); // Clear large objects - let was_mark_inverted = self.mark_inverted(); + debug_assert_eq!(was_mark_inverted, self.mark_inverted()); self.big_objects.lock().drain_filter(|big_item| { - let total_size = big_item.header.type_info.determine_total_size(addr_of_mut!(big_item.header)); - match big_item.header.mark_data.load_snapshot().state.resolve(was_mark_inverted) { + let total_size = big_item.header().type_info.determine_total_size(big_item.header.as_ptr()); + match big_item.header().mark_data.load_snapshot().state.resolve(was_mark_inverted) { MarkState::White => { // Free the object expected_size -= total_size; @@ -458,10 +611,12 @@ pub struct RawSimpleCollector { manager: CollectionManager, /// Tracks object handles handle_list: GcHandleList, + config: Arc } unsafe impl ::zerogc_context::collector::RawCollectorImpl for RawSimpleCollector { type DynTracePtr = NonNull; + type Config = GcConfig; #[cfg(feature = "multiple-collectors")] type Ptr = NonNull; @@ -473,6 +628,8 @@ unsafe impl ::zerogc_context::collector::RawCollectorImpl for RawSimpleCollector type RawContext = RawContext; + type RawVecRepr = SimpleVecRepr; + const SINGLETON: bool = cfg!(not(feature = "multiple-collectors")); const SYNC: bool = cfg!(feature = "sync"); @@ -481,8 +638,8 @@ unsafe impl ::zerogc_context::collector::RawCollectorImpl for RawSimpleCollector fn id_for_gc<'a, 'gc, T>(gc: &'a Gc<'gc, T>) -> &'a CollectorId where 'gc: 'a, T: GcSafe + ?Sized + 'gc { #[cfg(feature = "multiple-collectors")] { unsafe { - let mark_data = GcHeader::from_value_ptr(gc.as_raw_ptr()); - &*(&*mark_data).mark_data.load_snapshot().collector_id_ptr + let header = GcHeader::from_value_ptr(gc.as_raw_ptr()); + &*(&*header).mark_data.load_snapshot().collector_id_ptr } } #[cfg(not(feature = "multiple-collectors"))] { @@ -503,14 +660,14 @@ unsafe impl ::zerogc_context::collector::RawCollectorImpl for RawSimpleCollector } #[cfg(not(feature = "multiple-collectors"))] - fn init(_logger: Logger) -> NonNull { + fn init(config: GcConfig, _logger: Logger) -> NonNull { panic!("Not a singleton") } #[cfg(feature = "multiple-collectors")] - fn init(logger: Logger) -> NonNull { + fn init(config: GcConfig, logger: Logger) -> NonNull { // We're assuming its safe to create multiple (as given by config) - let raw = unsafe { RawSimpleCollector::with_logger(logger) }; + let raw = unsafe { RawSimpleCollector::with_logger(config, logger) }; let mut collector = Arc::new(raw); let raw_ptr = unsafe { NonNull::new_unchecked( Arc::as_ptr(&collector) as *mut RawSimpleCollector @@ -567,7 +724,7 @@ unsafe impl zerogc_context::collector::SingletonCollector for RawSimpleCollector GLOBAL_COLLECTOR.load(Ordering::Acquire) } - fn init_global(logger: Logger) { + fn init_global(config: GcConfig, logger: Logger) { let marker_ptr = NonNull::dangling().as_ptr(); /* * There can only be one collector (due to configuration). 
@@ -579,7 +736,7 @@ unsafe impl zerogc_context::collector::SingletonCollector for RawSimpleCollector Ordering::SeqCst, Ordering::SeqCst ), Ok(std::ptr::null_mut()), "Collector already exists"); let mut raw = Box::new( - unsafe { RawSimpleCollector::with_logger(logger) } + unsafe { RawSimpleCollector::with_logger(config, logger) } ); const GLOBAL_ID: CollectorId = unsafe { CollectorId::from_raw(PhantomData) }; raw.heap.allocator.collector_id = Some(GLOBAL_ID); @@ -595,12 +752,14 @@ unsafe impl zerogc_context::collector::SingletonCollector for RawSimpleCollector } } impl RawSimpleCollector { - unsafe fn with_logger(logger: Logger) -> Self { + unsafe fn with_logger(config: GcConfig, logger: Logger) -> Self { + let config = Arc::new(config); RawSimpleCollector { logger, manager: CollectionManager::new(), - heap: GcHeap::new(INITIAL_COLLECTION_THRESHOLD), + heap: GcHeap::new(Arc::clone(&config)), handle_list: GcHandleList::new(), + config } } #[cold] @@ -609,21 +768,44 @@ impl RawSimpleCollector { &self, contexts: &[*mut RawContext] ) { debug_assert!(self.manager.is_collecting()); - let roots: Vec<*mut dyn DynTrace> = contexts.iter() - .flat_map(|ctx| { - (**ctx).assume_valid_shadow_stack() - .reverse_iter().map(NonNull::as_ptr) - }) - .chain(std::iter::once(&self.handle_list + let roots = { + let mut roots: Vec<*mut dyn DynTrace> = Vec::new(); + for ctx in contexts.iter() { + roots.extend((**ctx).assume_valid_shadow_stack() + .reverse_iter().map(NonNull::as_ptr)); + } + roots.push(&self.handle_list // Cast to wrapper type as *const GcHandleList as *const GcHandleListWrapper // Make into virtual pointer as *const dyn DynTrace as *mut dyn DynTrace - )) - .collect(); + ); + #[repr(transparent)] + struct CachedEmptyVec(GcVecHeader); + unsafe impl DynTrace for CachedEmptyVec { + fn trace(&mut self, visitor: &mut MarkVisitor) { + let cached_vec_header = self as *mut Self as *mut GcVecHeader; + unsafe { + let target_repr = (cached_vec_header as *mut u8) + .add(GcVecHeader::LAYOUT.value_offset(EMPTY_VEC_ALIGNMENT)) + .cast::>(); + let mut target_gc = *(&target_repr + as *const *mut SimpleVecRepr<()> + as *const Gc>); + // TODO: Assert not moving? 
+ let Ok(()) = visitor.visit_vec::<(), _>(&mut target_gc); + } + } + } + if let Some(cached_vec_header) = self.heap.cached_empty_vec.get() { + roots.push(cached_vec_header as *mut CachedEmptyVec as *mut dyn DynTrace) + } + roots + }; let num_roots = roots.len(); let mut task = CollectionTask { + config: &*self.config, expected_collector: self.heap.allocator.collector_id.unwrap(), roots, heap: &self.heap, grey_stack: if cfg!(feature = "implicit-grey-stack") { @@ -645,6 +827,7 @@ impl RawSimpleCollector { } } struct CollectionTask<'a> { + config: &'a GcConfig, expected_collector: CollectorId, roots: Vec<*mut dyn DynTrace>, heap: &'a GcHeap, @@ -677,7 +860,7 @@ impl<'a> CollectionTask<'a> { }; if let Some(trace) = (*obj).type_info.trace_func { (trace)( - &mut *(*obj).value(), + (*obj).value(), &mut visitor ); } @@ -688,11 +871,18 @@ impl<'a> CollectionTask<'a> { // Sweep unsafe { self.heap.allocator.sweep() }; let updated_size = self.heap.allocator.allocated_size(); - // Update the threshold to be 150% of currently used size - self.heap.threshold.store( - updated_size + (updated_size / 2), - Ordering::SeqCst - ); + if self.config.always_force_collect { + assert_eq!( + self.heap.threshold.load(Ordering::SeqCst), + 0 + ); + } else { + // Update the threshold to be 150% of currently used size + self.heap.threshold.store( + updated_size + (updated_size / 2), + Ordering::SeqCst + ); + } } } @@ -759,12 +949,43 @@ unsafe impl GcVisitor for MarkVisitor<'_> { Ok(()) } } + + #[inline] + unsafe fn visit_vec<'gc, T, Id>(&mut self, raw: &mut ::zerogc::Gc<'gc, Id::RawVecRepr, Id>) -> Result<(), Self::Err> + where T: GcSafe + 'gc, Id: ::zerogc::CollectorId { + // Just visit our innards as if they were a regular `Gc` reference + self.visit_gc(raw) + } + + #[inline] + unsafe fn visit_array<'gc, T, Id>(&mut self, array: &mut ::zerogc::vec::GcArray<'gc, T, Id>) -> Result<(), Self::Err> + where T: GcSafe + 'gc, Id: ::zerogc::CollectorId { + if TypeId::of::() == TypeId::of::() { + /* + * See comment in 'visit_gc'. + * Essentially this is a checked cast + */ + let array = std::mem::transmute::< + &mut ::zerogc::vec::GcArray<'gc, T, Id>, + &mut ::zerogc::Gc<'gc, [T], crate::CollectorId> + >(array); + /* + * Check the collectors match. Otherwise we're mutating + * other people's data. 
+ */ + assert_eq!(*array.collector_id(), self.expected_collector); + self._visit_own_gc(array); + Ok(()) + } else { + Ok(()) + } + } } impl MarkVisitor<'_> { /// Visit a GC type whose [::zerogc::CollectorId] matches our own /// /// The caller should only use `GcVisitor::visit_gc()` - unsafe fn _visit_own_gc<'gc, T: GcSafe + 'gc>(&mut self, gc: &mut Gc<'gc, T>) { + unsafe fn _visit_own_gc<'gc, T: GcSafe + ?Sized + 'gc>(&mut self, gc: &mut Gc<'gc, T>) { // Verify this again (should be checked by caller) debug_assert_eq!(*gc.collector_id(), self.expected_collector); let header = GcHeader::from_value_ptr(gc.as_raw_ptr()); diff --git a/libs/simple/tests/arrays.rs b/libs/simple/tests/arrays.rs new file mode 100644 index 0000000..d0d0523 --- /dev/null +++ b/libs/simple/tests/arrays.rs @@ -0,0 +1,101 @@ +use slog::Logger; + +use zerogc::GcSimpleAlloc; +use zerogc::safepoint; +use zerogc_derive::Trace; + +use zerogc_simple::{SimpleCollector, GcArray, GcVec, Gc, CollectorId as SimpleCollectorId, GcConfig}; + +fn test_collector() -> SimpleCollector { + let mut config = GcConfig::default(); + config.always_force_collect = true; // Force collections for predictability + SimpleCollector::with_config( + config, Logger::root(::slog::Discard, ::slog::o!()) + ) +} + +#[derive(Trace, Copy, Clone, Debug)] +#[zerogc(copy, collector_id(SimpleCollectorId))] +struct Dummy<'gc> { + val: usize, + inner: Option>> +} + +#[test] +fn array() { + let collector = test_collector(); + let mut context = collector.into_context(); + let array1 = context.alloc_slice_fill_copy(5, 12u32); + assert_eq!(*array1.value(), *vec![12u32; 5]); + safepoint!(context, ()); + const TEXT: &[u8] = b"all cows eat grass"; + let array_text = context.alloc_slice_copy(TEXT); + let array_none: GcArray> = context.alloc_slice_none(12); + for val in array_none.value() { + assert_eq!(*val, None); + } + let array_text = safepoint!(context, array_text); + assert_eq!(array_text.0.value(), TEXT); + let mut nested_trace = Vec::new(); + let mut last = None; + for i in 0..16 { + let obj = context.alloc(Dummy { + val: i, + inner: last + }); + nested_trace.push(obj); + last = Some(obj); + } + let nested_trace = context.alloc_slice_copy(nested_trace.as_slice()); + let nested_trace: GcArray> = safepoint!(context, nested_trace); + for (idx, val) in nested_trace.value().iter().enumerate() { + assert_eq!(val.val, idx, "Invalid val: {:?}", val); + if let Some(last) = val.inner { + assert_eq!(last.val, idx - 1); + } + } +} + +#[test] +fn vec() { + let collector = test_collector(); + let mut context = collector.into_context(); + let mut vec1 = context.alloc_vec(); + for _ in 0..5 { + vec1.push(12u32); + } + assert_eq!(*vec1.as_slice(), *vec![12u32; 5]); + drop(vec1); + safepoint!(context, ()); + const TEXT: &[u8] = b"all cows eat grass"; + let mut vec_text = context.alloc_vec(); + vec_text.extend_from_slice(TEXT); + let mut vec_none: GcVec> = context.alloc_vec_with_capacity(12); + for _ in 0..12 { + vec_none.push(None); + } + for val in vec_none.iter() { + assert_eq!(*val, None); + } + drop(vec_none); + let vec_text: GcVec = GcVec { raw: safepoint!(context, vec_text.into_raw()), context: &context }; + assert_eq!(vec_text.as_slice(), TEXT); + let mut nested_trace: GcVec> = context.alloc_vec_with_capacity(3); + let mut last = None; + for i in 0..16 { + let obj = context.alloc(Dummy { + val: i, + inner: last + }); + nested_trace.push(obj); + last = Some(obj); + } + drop(vec_text); + let nested_trace: GcVec> = GcVec{ raw: safepoint!(context, nested_trace.into_raw()), 
context: &context }; + for (idx, val) in nested_trace.iter().enumerate() { + assert_eq!(val.val, idx, "Invalid val: {:?}", val); + if let Some(last) = val.inner { + assert_eq!(last.val, idx - 1); + } + } +} diff --git a/src/dummy_impl.rs b/src/dummy_impl.rs index 1054dd3..6a9d133 100644 --- a/src/dummy_impl.rs +++ b/src/dummy_impl.rs @@ -118,6 +118,7 @@ unsafe impl TraceImmutable for DummyCollectorId { unsafe impl NullTrace for DummyCollectorId {} unsafe impl CollectorId for DummyCollectorId { type System = DummySystem; + type RawVecRepr = crate::vec::repr::Unsupported; #[inline] fn from_gc_ptr<'a, 'gc, T>(_gc: &'a Gc<'gc, T>) -> &'a Self where T: GcSafe + ?Sized + 'gc, 'gc: 'a { diff --git a/src/lib.rs b/src/lib.rs index 5a3792f..6774ed2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,7 @@ #![feature( const_panic, // RFC 2345 - Const asserts )] +#![feature(maybe_uninit_slice)] #![deny(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] //! Zero overhead tracing garbage collection for rust, @@ -22,7 +23,7 @@ extern crate alloc; * I want this library to use 'mostly' stable features, * unless there's good justification to use an unstable feature. */ -use core::mem; +use core::mem::{self}; use core::ops::{Deref, DerefMut}; use core::ptr::NonNull; use core::marker::PhantomData; @@ -31,11 +32,14 @@ use core::fmt::{self, Debug, Formatter}; use zerogc_derive::unsafe_gc_impl; +use crate::vec::{GcArray, GcVec}; + #[macro_use] mod manually_traced; pub mod cell; pub mod prelude; pub mod dummy_impl; +pub mod vec; /// Invoke the closure with a temporary [GcContext], /// then perform a safepoint afterwards. @@ -321,7 +325,7 @@ pub unsafe trait GcContext: Sized { /// /// Some garbage collectors implement more complex interfaces, /// so implementing this is optional -pub unsafe trait GcSimpleAlloc<'gc, T: GcSafe + 'gc>: GcContext + 'gc { +pub unsafe trait GcSimpleAlloc: GcContext { /// Allocate the specified object in this garbage collector, /// binding it to the lifetime of this collector. /// @@ -334,7 +338,79 @@ pub unsafe trait GcSimpleAlloc<'gc, T: GcSafe + 'gc>: GcContext + 'gc { /// /// This gives a immutable reference to the resulting object. /// Once allocated, the object can only be correctly modified with a `GcCell` - fn alloc(&'gc self, value: T) -> Gc<'gc, T, Self::Id>; + fn alloc<'gc, T>(&'gc self, value: T) -> Gc<'gc, T, Self::Id> + where T: GcSafe + 'gc; + /// Allocate a slice with the specified length, + /// whose memory is uninitialized + /// + /// ## Safety + /// The slice **MUST** be initialized by the next safepoint. + /// By the time the next collection rolls around, + /// the collector will assume its entire contents are initialized. + /// + /// In particular, the traditional way to implement a [Vec] is wrong. + /// It is unsafe to leave the range of memory `[len, capacity)` uninitialized. + unsafe fn alloc_uninit_slice<'gc, T>(&'gc self, len: usize) -> (Self::Id, *mut T) + where T: GcSafe + 'gc; + /// Allocate a slice, copied from the specified input + fn alloc_slice_copy<'gc, T>(&'gc self, src: &[T]) -> GcArray<'gc, T, Self::Id> + where T: GcSafe + Copy + 'gc { + unsafe { + let (id, res_ptr) = self.alloc_uninit_slice::(src.len()); + res_ptr.copy_from_nonoverlapping(src.as_ptr(), src.len()); + GcArray(Gc::from_raw( + id, NonNull::new_unchecked(std::ptr::slice_from_raw_parts_mut( + res_ptr, + src.len() + )) + )) + } + } + /// Allocate a slice by filling it with results from the specified closure. + /// + /// The closure receives the target index as its only argument. 
+ /// + /// ## Safety + /// The closure must always succeed and never panic. + /// + /// Otherwise, the gc may end up tracing the values even though they are uninitialized. + #[inline] + fn alloc_slice_fill_with<'gc, T, F>(&'gc self, len: usize, mut func: F) -> GcArray<'gc, T, Self::Id> + where T: GcSafe + 'gc, F: FnMut(usize) -> T { + unsafe { + let (id, res_ptr) = self.alloc_uninit_slice::(len); + for i in 0..len { + res_ptr.add(i).write(func(i)); + } + GcArray(Gc::from_raw( + id, NonNull::new_unchecked(std::ptr::slice_from_raw_parts_mut( + res_ptr, + len + )) + )) + } + } + /// Allocate a slice of the specified length, + /// initializing everything to `None` + #[inline] + fn alloc_slice_none<'gc, T>(&'gc self, len: usize) -> GcArray<'gc, Option, Self::Id> + where T: GcSafe + 'gc { + self.alloc_slice_fill_with(len, |_idx| None) + } + /// Allocate a slice by repeatedly copying a single value. + #[inline] + fn alloc_slice_fill_copy<'gc, T>(&'gc self, len: usize, val: T) -> GcArray<'gc, T, Self::Id> + where T: GcSafe + Copy + 'gc { + self.alloc_slice_fill_with(len, |_idx| val) + } + /// Create a new [GcVec] with zero initial length, + /// with an implicit reference to this [GcContext]. + fn alloc_vec<'gc, T>(&'gc self) -> GcVec<'gc, T, Self> + where T: GcSafe + 'gc; + /// Allocate a new [GcVec] with the specified capacity + /// and an implcit reference to this [GcContext] + fn alloc_vec_with_capacity<'gc, T>(&'gc self, capacity: usize) -> GcVec<'gc, T, Self> + where T: GcSafe + 'gc; } /// The internal representation of a frozen context /// @@ -375,6 +451,10 @@ impl FrozenContext { pub unsafe trait CollectorId: Copy + Eq + Debug + NullTrace + 'static { /// The type of the garbage collector system type System: GcSystem; + /// The raw representation of vectors in this collector. + /// + /// May be [crate::vec::repr::VecUnsupported] if vectors are unsupported. + type RawVecRepr: crate::vec::repr::GcVecRepr; /// Get the runtime id of the collector that allocated the [Gc] fn from_gc_ptr<'a, 'gc, T>(gc: &'a Gc<'gc, T, Self>) -> &'a Self @@ -383,10 +463,10 @@ pub unsafe trait CollectorId: Copy + Eq + Debug + NullTrace + 'static { /// Perform a write barrier before writing to a garbage collected field /// /// ## Safety - /// Smilar to the [GcDirectBarrier] trait, it can be assumed that + /// Similar to the [GcDirectBarrier] trait, it can be assumed that /// the field offset is correct and the types match. 
- unsafe fn gc_write_barrier<'gc, T: GcSafe + ?Sized + 'gc, V: GcSafe + ?Sized + 'gc>( - owner: &Gc<'gc, T, Self>, + unsafe fn gc_write_barrier<'gc, O: GcSafe + ?Sized + 'gc, V: GcSafe + ?Sized + 'gc>( + owner: &Gc<'gc, O, Self>, value: &Gc<'gc, V, Self>, field_offset: usize ); @@ -503,15 +583,15 @@ unsafe impl<'gc, T: GcSafe + 'gc, Id: CollectorId> GcSafe for Gc<'gc, T, Id> { } /// Rebrand unsafe impl<'gc, 'new_gc, T, Id> GcRebrand<'new_gc, Id> for Gc<'gc, T, Id> - where T: GcSafe + GcRebrand<'new_gc, Id>, + where T: GcSafe + ?Sized + GcRebrand<'new_gc, Id>, >::Branded: GcSafe, - Id: CollectorId, { + Id: CollectorId, Self: Trace { type Branded = Gc<'new_gc, >::Branded, Id>; } unsafe impl<'gc, 'a, T, Id> GcErase<'a, Id> for Gc<'gc, T, Id> - where T: GcSafe + GcErase<'a, Id>, + where T: GcSafe + ?Sized + GcErase<'a, Id>, >::Erased: GcSafe, - Id: CollectorId, { + Id: CollectorId, Self: Trace { type Erased = Gc<'a, >::Erased, Id>; } unsafe impl<'gc, T: GcSafe + 'gc, Id: CollectorId> Trace for Gc<'gc, T, Id> { @@ -526,7 +606,20 @@ unsafe impl<'gc, T: GcSafe + 'gc, Id: CollectorId> Trace for Gc<'gc, T, Id> { } } } -impl<'gc, T: GcSafe + 'gc, Id: CollectorId> Deref for Gc<'gc, T, Id> { +unsafe impl<'gc, T: GcSafe + 'gc, Id: CollectorId> Trace for Gc<'gc, [T], Id> { + const NEEDS_TRACE: bool = true; + + #[inline] + fn visit(&mut self, visitor: &mut V) -> Result<(), ::Err> { + unsafe { + V::visit_array( + visitor, + &mut *(self as *mut Gc<'gc, [T], Id> as *mut GcArray<'gc, T, Id>) + ) + } + } +} +impl<'gc, T: GcSafe + ?Sized + 'gc, Id: CollectorId> Deref for Gc<'gc, T, Id> { type Target = &'gc T; #[inline(always)] @@ -963,4 +1056,20 @@ pub unsafe trait GcVisitor: Sized { unsafe fn visit_gc<'gc, T: GcSafe + 'gc, Id: CollectorId>( &mut self, gc: &mut Gc<'gc, T, Id> ) -> Result<(), Self::Err>; + + /// Visit a garbage collected vector. + /// + /// ## Safety + /// Undefined behavior if the vector is invalid. + unsafe fn visit_vec<'gc, T: GcSafe + 'gc, Id: CollectorId>( + &mut self, raw: &mut Gc<'gc, Id::RawVecRepr, Id> + ) -> Result<(), Self::Err>; + + /// Visit a garbage collected array. + /// + /// ## Safety + /// Undefined behavior if the array is invalid. + unsafe fn visit_array<'gc, T: GcSafe + 'gc, Id: CollectorId>( + &mut self, array: &mut GcArray<'gc, T, Id> + ) -> Result<(), Self::Err>; } diff --git a/src/manually_traced/core.rs b/src/manually_traced/core.rs index a4bdb6f..1f01d3a 100644 --- a/src/manually_traced/core.rs +++ b/src/manually_traced/core.rs @@ -267,9 +267,6 @@ unsafe_gc_impl! { /* * Implements tracing for slices, by tracing all the objects they refer to. - * - * NOTE: We cannot currently implement `GcRebrand` + `GcErase` - * because we are unsized :(( */ unsafe_gc_impl! { target => [T], diff --git a/src/vec.rs b/src/vec.rs new file mode 100644 index 0000000..46552f9 --- /dev/null +++ b/src/vec.rs @@ -0,0 +1,348 @@ +//! The implementation of [GcVec] +//! +//! A [GcVec] is simply a [RawGcVec] that also holds +//! an implicit reference to the owning [GcContext]. + +use core::marker::PhantomData; +use core::ops::{Deref, DerefMut}; +use core::mem::ManuallyDrop; + +use zerogc_derive::{unsafe_gc_impl}; + +use crate::{GcSimpleAlloc, CollectorId, Gc, GcSafe, GcRebrand, GcErase}; +use crate::vec::repr::{GcVecRepr, ReallocFailedError}; + +pub mod repr; + +/// A garbage collected array. 
+/// +/// This is simply an alias for `Gc<[T]>` +#[repr(transparent)] +pub struct GcArray<'gc, T: GcSafe + 'gc, Id: CollectorId>(pub Gc<'gc, [T], Id>); +impl<'gc, T: GcSafe, Id: CollectorId> GcArray<'gc, T, Id> { + /// The value of the array as a slice + #[inline] + pub fn value(self) -> &'gc [T] { + self.0.value() + } +} +impl<'gc, T: GcSafe, Id: CollectorId> Deref for GcArray<'gc, T, Id> { + type Target = &'gc [T]; + + #[inline] + fn deref(&self) -> &Self::Target { + &*self.0 + } +} +impl<'gc, T: GcSafe, Id: CollectorId> Copy for GcArray<'gc, T, Id> {} +impl<'gc, T: GcSafe, Id: CollectorId> Clone for GcArray<'gc, T, Id> { + #[inline] + fn clone(&self) -> Self { + *self + } +} +// Need to implement by hand, because [T] is not GcRebrand +unsafe_gc_impl!( + target => GcArray<'gc, T, Id>, + params => ['gc, T: GcSafe, Id: CollectorId], + collector_id => Id, + bounds => { + TraceImmutable => never, + GcRebrand => { where T: GcRebrand<'new_gc, Id>, >::Branded: GcSafe }, + GcErase => { where T: GcErase<'min, Id>, >::Erased: GcSafe } + }, + null_trace => never, + branded_type => GcArray<'new_gc, >::Branded, Id>, + erased_type => GcArray<'min, >::Erased, Id>, + NEEDS_TRACE => true, + NEEDS_DROP => false, + trace_mut => |self, visitor| { + unsafe { visitor.visit_array(self) } + }, +); + +/// A version of [Vec] for use with garbage collectors. +/// +/// This is simply a thin wrapper around [RawGcVec] +/// that also contains a reference to the owning [GcContext]. +pub struct GcVec<'gc, T: GcSafe, Ctx: GcSimpleAlloc> { + /// A reference to the owning GcContext + pub context: &'gc Ctx, + /// The underlying [RawGcVec], + /// which actually manages the memory + pub raw: GcRawVec<'gc, T, Ctx::Id> +} +impl<'gc, T: GcSafe, Ctx: GcSimpleAlloc> GcVec<'gc, T, Ctx> { + /// Convert this vector into its underlying [GcRawVec] + #[inline] + pub fn into_raw(self) -> GcRawVec<'gc, T, Ctx::Id> { + self.raw + } + /// Reserve enough capacity for the specified number of additional elements. + #[inline] + pub fn reserve(&mut self, amount: usize) { + let remaining = self.raw.capacity() - self.raw.len(); + if remaining < amount { + self.grow(amount); + } + } + /// Extend the vector with elements copied from the specified slice + #[inline] + pub fn extend_from_slice(&mut self, src: &[T]) + where T: Copy { + let old_len = self.len(); + self.reserve(src.len()); + // TODO: Write barriers? 
+ unsafe { + (self.raw.as_repr_mut().ptr() as *mut T).add(self.len()) + .copy_from_nonoverlapping(src.as_ptr(), src.len()); + self.raw.as_repr_mut().set_len(old_len + src.len()); + } + } + /// Push the specified value onto the vector + #[inline] + pub fn push(&mut self, val: T) { + self.reserve(1); + match self.raw.try_push(val) { + Ok(()) => {}, + Err(InsufficientCapacityError { }) => { + unsafe { std::hint::unreachable_unchecked(); } + } + } + } + #[cold] + fn grow(&mut self, amount: usize) { + let requested_capacity = self.len().checked_add(amount).unwrap(); + let new_capacity = self.raw.capacity().checked_mul(2).unwrap() + .max(requested_capacity); + if ::RawVecRepr::SUPPORTS_REALLOC { + match self.raw.repr.value().realloc_in_place(new_capacity) { + Ok(()) => { + return; // success + } + Err(ReallocFailedError::Unsupported) => unreachable!(), + Err(ReallocFailedError::OutOfMemory) => panic!("Out of memory"), + Err(ReallocFailedError::SizeUnsupported) => {} // fallthrough to realloc + } + } + // Just allocate a new one, copying from the old + let mut new_mem = self.context.alloc_vec_with_capacity(new_capacity).raw; + // TODO: Write barriers + unsafe { + (new_mem.as_repr_mut().ptr() as *mut T).copy_from_nonoverlapping( + self.raw.as_ptr() as *const T, + self.raw.len() + ); + new_mem.as_repr_mut().set_len(self.raw.len()); + let mut old_mem = std::mem::replace(&mut self.raw, new_mem); + old_mem.as_repr_mut().set_len(0); // We don't want to drop the old elements + } + } +} +unsafe_gc_impl!( + target => GcVec<'gc, T, Ctx>, + params => ['gc, T: GcSafe, Ctx: GcSimpleAlloc], + bounds => { + TraceImmutable => never, + Trace => { where T: GcSafe }, + GcRebrand => never, + GcErase => never, + }, + null_trace => never, + NEEDS_TRACE => true, + NEEDS_DROP => T::NEEDS_DROP /* if our inner type needs a drop */, + trace_mut => |self, visitor| { + unsafe { visitor.visit_vec::(self.raw.as_repr_mut()) } + }, + collector_id => Ctx::Id +); +impl<'gc, T: GcSafe, Ctx: GcSimpleAlloc> Deref for GcVec<'gc, T, Ctx> { + type Target = GcRawVec<'gc, T, Ctx::Id>; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.raw + } +} + +impl<'gc, T: GcSafe, Ctx: GcSimpleAlloc> DerefMut for GcVec<'gc, T, Ctx> { + #[inline] + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.raw + } +} + +/// Indicates there is insufficient capacity for an operation on a [RawGcVec] +#[derive(Debug)] +pub struct InsufficientCapacityError; + +/// A version of [Vec] for use with garbage collection. +/// +/// Despite using garbage collected memory, this type may only have a single owner +/// in order to ensure unique `&mut` references. +/// +/// If this were the case, one reference could create a `&[T]` slice +/// while another slice references it. +/// +/// Unlike [GcVec], this doesn't contain a reference to the owning [GcContext]. +/// +/// NOTE: This is completely different from the distinction between +/// [Vec] and [RawVec](https://github.com/rust-lang/rust/blob/master/library/alloc/src/raw_vec.rs) +/// in the standard library. +/// In particular, this still contains a `len`. +/// +/// In fact, most [GcVec] methods just delegate here with an additional context. +/// +/// ## Safety +/// To avoid undefined behavior, there can only be a single reference +/// to a [RawGcVec], despite [Gc] implementing `Copy`. 
+#[repr(transparent)] +pub struct GcRawVec<'gc, T: GcSafe + 'gc, Id: CollectorId> { + repr: Gc<'gc, Id::RawVecRepr, Id>, + marker: PhantomData<&'gc [T]> +} +impl<'gc, T: GcSafe, Id: CollectorId> GcRawVec<'gc, T, Id> { + /// Create a [RawGcVec] from the specified repr + /// + /// ## Safety + /// The specified representation must be valid and actually + /// be intended for this type `T`. + #[inline] + pub unsafe fn from_repr(repr: Gc<'gc, Id::RawVecRepr, Id>) -> Self { + GcRawVec { repr, marker: PhantomData } + } + /// View this [RawGcVec] as a [GcVec] for the duration of the specified closure, + /// with an implicit reference to the specified [GcContext] + #[inline] + pub fn with_context_mut(&mut self, ctx: &'gc Ctx, func: F) -> R + where Ctx: GcSimpleAlloc, F: FnOnce(&mut GcVec<'gc, T, Ctx>) -> R { + let vec = ManuallyDrop::new(GcVec { + // NOTE: This is okay, because we will restore any changes via `scopeguard` + raw: unsafe { std::ptr::read(self) }, + context: ctx + }); + let mut guard = scopeguard::guard(vec, |vec| { + // Restore any changes + *self = ManuallyDrop::into_inner(vec).raw; + }); + func(&mut *guard) + } + /// View this [RawGcVec] as a [GcVec] for the duration of the specified closure + #[inline] + pub fn with_context(&self, ctx: &'gc Ctx, func: F) -> R + where Ctx: GcSimpleAlloc, F: FnOnce(&GcVec<'gc, T, Ctx>) -> R { + let vec = ManuallyDrop::new(GcVec { + // NOTE: This is okay, because we will forget it later + raw: unsafe { std::ptr::read(self) }, + context: ctx + }); + let guard = scopeguard::guard(vec, std::mem::forget); + func(&*guard) + } + /// The length of this vector + #[inline] + pub fn len(&self) -> usize { + self.repr.len() + } + /// The capacity of this vector. + #[inline] + pub fn capacity(&self) -> usize { + self.repr.capacity() + } + /// Clear the vector, setting its length to zero + #[inline] + pub fn clear(&mut self) { + unsafe { + let elements = self.as_ptr() as *mut T; + let old_len = self.len(); + // Drop after we set the length, in case it panics + self.repr.set_len(0); + std::ptr::drop_in_place::<[T]>(std::ptr::slice_from_raw_parts_mut( + elements, old_len + )); + } + } + /// Push a value onto this vector, + /// returning an error if there is insufficient space. + #[inline] + pub fn try_push(&mut self, val: T) -> Result<(), InsufficientCapacityError> { + let old_len = self.len(); + if old_len < self.capacity() { + unsafe { + // TODO: Write barriers.... + (self.as_ptr() as *mut T).add(old_len).write(val); + self.repr.set_len(old_len + 1); + } + Ok(()) + } else { + Err(InsufficientCapacityError) + } + } + /// Get the raw representation of this vector as a [GcVecRepr] + /// + /// ## Safety + /// The user must not violate the invariants of the repr. + #[inline] + pub unsafe fn as_repr(&self) -> Gc<'gc, Id::RawVecRepr, Id> { + self.repr + } + /// Get a mutable reference to the raw represnetation of this vector + /// + /// ## Safety + /// The user must preserve the validity of the underlying representation. + #[inline] + pub unsafe fn as_repr_mut(&mut self) -> &mut Gc<'gc, Id::RawVecRepr, Id> { + &mut self.repr + } + /// Interpret this vector as a slice + /// + /// Because the length may change, + /// this is bound to the lifetime of the current value. + #[inline] + pub fn as_slice(&self) -> &[T] { + unsafe { std::slice::from_raw_parts( + self.as_ptr(), + self.len() + ) } + } + /// Give a pointer to the underlying slice of memory + /// + /// ## Safety + /// The returned memory must not be mutated. 
+ #[inline] + pub unsafe fn as_ptr(&self) -> *const T { + self.repr.ptr() as *const T + } +} +impl<'gc, T: GcSafe, Id: CollectorId> Deref for GcRawVec<'gc, T, Id> { + type Target = [T]; + + #[inline] + fn deref(&self) -> &Self::Target { + self.as_slice() // &'gc > &'self + } +} +unsafe_gc_impl!( + target => GcRawVec<'gc, T, Id>, + params => ['gc, T: GcSafe, Id: CollectorId], + collector_id => Id, + bounds => { + TraceImmutable => never, + GcRebrand => { + where T: GcSafe + GcRebrand<'new_gc, Id>, + >::Branded: GcSafe + }, + GcErase => { + where T: GcSafe + GcErase<'min, Id>, + >::Erased: GcSafe + }, + }, + branded_type => GcRawVec<'new_gc, >::Branded, Id>, + erased_type => GcRawVec<'min, >::Erased, Id>, + null_trace => never, + NEEDS_TRACE => true, + NEEDS_DROP => false, // GcVecRepr is responsible for Drop + trace_mut => |self, visitor| { + unsafe { visitor.visit_vec::(self.as_repr_mut()) } + }, +); \ No newline at end of file diff --git a/src/vec/repr.rs b/src/vec/repr.rs new file mode 100644 index 0000000..40e3795 --- /dev/null +++ b/src/vec/repr.rs @@ -0,0 +1,91 @@ +//! The underlying representation of a [GcVec] +//! +//! This is exposed only for use by collector implemetnations. +//! User code should avoid it. + +use core::ffi::c_void; +use core::alloc::Layout; + +use crate::{GcSafe,}; + +use zerogc_derive::unsafe_gc_impl; + +/// A marker error to indicate in-place reallocation failed +#[derive(Debug)] +pub enum ReallocFailedError { + /// Indicates that the operation is unsupported + Unsupported, + /// Indicates that the vector is too large to reallocate in-place + SizeUnsupported, + /// Indicates that the garbage collector is out of memory + OutOfMemory, +} + +/// The underlying representation of a [RawGcVec] +/// +/// This varies from collector to collector. +/// +/// ## Safety +/// This must be implemented consistent with the API of [RawGcVec]. +/// +/// It should only be used by [RawGcVec] and [GcVec]. +pub unsafe trait GcVecRepr: GcSafe { + /// Whether this vector supports in-place reallocation. + const SUPPORTS_REALLOC: bool = false; + /// The layout of the underlying element type + fn element_layout(&self) -> Layout; + /// The length of the vector. + /// + /// This is the number of elements that are actually + /// initialized, as opposed to `capacity`, which is the number + /// of elements that are available in total. + fn len(&self) -> usize; + /// Set the length of the vector. + /// + /// ## Safety + /// The underlying memory must be initialized up to the specified length, + /// otherwise the vector's memory will be traced incorrectly. + /// + /// Undefined behavior if length is greater than capacity. + unsafe fn set_len(&self, len: usize); + /// The total number of elements that are available + fn capacity(&self) -> usize; + /// Attempt to reallocate the vector in-place, + /// without moving the underlying pointer. + fn realloc_in_place(&self, new_capacity: usize) -> Result<(), ReallocFailedError> { + assert!(!Self::SUPPORTS_REALLOC); + drop(new_capacity); + Err(ReallocFailedError::Unsupported) + } + /// A pointer to the underlying memory + /// + /// ## Safety + /// This is marked unsafe, because the type must be interpreted correctly. + /// + /// The returned memory must not be mutated, because it may have multiple owners. 
+ unsafe fn ptr(&self) -> *const c_void; +} +/// Dummy implementation of [GcVecRepr] for collectors which do not support [GcVec] +pub enum Unsupported {} +unsafe_trace_primitive!(Unsupported); +unsafe impl GcVecRepr for Unsupported { + fn element_layout(&self) -> Layout { + unimplemented!() + } + + fn len(&self) -> usize { + unimplemented!() + } + + unsafe fn set_len(&self, _len: usize) { + unimplemented!() + } + + fn capacity(&self) -> usize { + unimplemented!() + } + + unsafe fn ptr(&self) -> *const c_void { + unimplemented!() + } +} \ No newline at end of file
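
The sketches below show how the pieces added in this patch fit together from a user's point of view. They rely only on the public items already exercised by libs/simple/tests/arrays.rs, and the later sketches take a SimpleCollector built the same way as the first one. First, the configuration plumbing: RawCollectorImpl::init now receives a Self::Config, and the simple collector surfaces this as SimpleCollector::with_config. With always_force_collect set, the heap threshold stays pinned at zero, so every safepoint performs a collection:

    use slog::Logger;
    use zerogc::safepoint;
    use zerogc::GcSimpleAlloc;
    use zerogc_simple::{SimpleCollector, GcConfig};

    fn main() {
        // Pinning the threshold at zero makes every safepoint collect,
        // which keeps test behaviour deterministic.
        let mut config = GcConfig::default();
        config.always_force_collect = true;
        let collector = SimpleCollector::with_config(
            config,
            Logger::root(slog::Discard, slog::o!()),
        );
        let mut context = collector.into_context();
        let array = context.alloc_slice_fill_copy(5, 12u32);
        assert_eq!(array.value(), &[12u32; 5]);
        safepoint!(context, ()); // collects immediately under this config
    }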
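
The slice helpers added to GcSimpleAlloc all bottom out in alloc_uninit_slice, but the safe ones guarantee every element is written before they return. GcArray itself is a transparent wrapper over Gc<'gc, [T]>, so it is Copy and exposes the inner pointer through its public .0 field. A sketch, leaving element types to inference or turbofish since the exact GcArray alias parameters are not spelled out here:

    use zerogc::GcSimpleAlloc;
    use zerogc_simple::SimpleCollector;

    fn slices(collector: SimpleCollector) {
        let context = collector.into_context();
        // Fill a fresh array from a closure that receives the target index.
        let squares = context.alloc_slice_fill_with(8, |i| (i as u64) * (i as u64));
        assert_eq!(squares.value()[3], 9);
        // `alloc_slice_none` is shorthand for filling with `None`.
        let blanks = context.alloc_slice_none::<u64>(4);
        assert!(blanks.value().iter().all(Option::is_none));
        // `alloc_slice_copy` copies an existing slice into the GC heap.
        const TEXT: &[u8] = b"all cows eat grass";
        let text = context.alloc_slice_copy(TEXT);
        // GcArray is `#[repr(transparent)]` over Gc<'gc, [T]>: copying it just
        // copies the pointer, and the inner Gc is available as `.0`.
        let alias = text;
        assert_eq!(alias.value(), TEXT);
        assert_eq!(text.0.len(), TEXT.len());
    }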
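
Callers that use alloc_uninit_slice directly must uphold the contract documented above: every slot has to be initialized before the next safepoint, because the collector will trace the whole slice as if it were fully initialized. The sketch below mirrors what alloc_slice_fill_with does internally, written generically over GcSimpleAlloc; the Gc::from_raw call follows the same pattern this patch uses in alloc_slice_copy:

    use std::ptr::NonNull;
    use zerogc::{Gc, GcSimpleAlloc};
    use zerogc::vec::GcArray;

    /// Build a `GcArray` of `0..len` by hand, initializing every element
    /// before returning (and therefore before any safepoint can run).
    fn iota<'gc, C: GcSimpleAlloc>(context: &'gc C, len: usize) -> GcArray<'gc, u32, C::Id> {
        unsafe {
            let (id, ptr) = context.alloc_uninit_slice::<u32>(len);
            for i in 0..len {
                ptr.add(i).write(i as u32);
            }
            GcArray(Gc::from_raw(
                id,
                NonNull::new_unchecked(std::ptr::slice_from_raw_parts_mut(ptr, len)),
            ))
        }
    }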
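
A GcVec carries a &'gc reference to its context, so it cannot itself be passed through a safepoint. The intended pattern, the same one the new vec test uses, is to strip it down to its GcRawVec with into_raw, carry that across the safepoint, and then re-attach the context:

    use zerogc::safepoint;
    use zerogc::GcSimpleAlloc;
    use zerogc_simple::{SimpleCollector, GcVec};

    fn vectors(collector: SimpleCollector) {
        let mut context = collector.into_context();
        const TEXT: &[u8] = b"all cows eat grass";
        let mut bytes = context.alloc_vec();
        bytes.extend_from_slice(TEXT);
        bytes.push(b'!');
        assert_eq!(bytes.len(), TEXT.len() + 1);
        // Strip the context off, survive the safepoint, then re-attach it.
        let raw = safepoint!(context, bytes.into_raw());
        let bytes = GcVec { raw, context: &context };
        assert_eq!(bytes.as_slice().last(), Some(&b'!'));
    }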
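
When a GcRawVec is kept around on its own (for example inside another garbage-collected object), with_context and with_context_mut temporarily re-attach a context to recover the full GcVec API without giving up the single-owner guarantee. A minimal sketch, assuming with_context_mut accepts any GcSimpleAlloc context whose CollectorId matches the vector's, as the bounds in the new vec.rs suggest:

    use zerogc::GcSimpleAlloc;
    use zerogc_simple::SimpleCollector;

    fn raw_vecs(collector: SimpleCollector) {
        let context = collector.into_context();
        // Detach the context: a GcRawVec only keeps the 'gc lifetime.
        let mut raw = context.alloc_vec_with_capacity::<u32>(4).into_raw();
        // Re-attach it temporarily to get the growable GcVec API back.
        raw.with_context_mut(&context, |vec| {
            for i in 0u32..4 {
                vec.push(i);
            }
        });
        raw.with_context(&context, |vec| {
            assert_eq!(vec.as_slice(), &[0u32, 1, 2, 3]);
        });
    }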