From 09a92a4ad789ac3c5d1397837323d4c33237703f Mon Sep 17 00:00:00 2001
From: Josh Junon
Date: Wed, 11 Sep 2024 05:13:41 +0200
Subject: [PATCH] kernel: add `Registry` and `Handle` types and initialize
 Ring registry / root ring

---
 oro-arch-aarch64/src/boot/mod.rs         |   2 +-
 oro-arch-aarch64/src/init.rs             |  16 +-
 oro-arch-aarch64/src/mem/segment.rs      |  29 ++
 oro-arch-x86_64/src/boot/mod.rs          |   2 +-
 oro-arch-x86_64/src/init.rs              |  16 +-
 oro-arch-x86_64/src/mem/segment.rs       |  66 ++--
 oro-kernel/src/lib.rs                    | 105 ++++-
 oro-kernel/src/registry.rs               | 478 +++++++++++++++++++++++
 oro-kernel/src/ring.rs                   |   4 +-
 oro-macro/src/likely.rs                  |  10 +-
 oro-mem/src/mapper.rs                    |  19 +-
 oro-sync/src/spinlock/unfair_critical.rs |   2 +-
 12 files changed, 683 insertions(+), 66 deletions(-)
 create mode 100644 oro-kernel/src/registry.rs

diff --git a/oro-arch-aarch64/src/boot/mod.rs b/oro-arch-aarch64/src/boot/mod.rs
index e206b63..b596457 100644
--- a/oro-arch-aarch64/src/boot/mod.rs
+++ b/oro-arch-aarch64/src/boot/mod.rs
@@ -34,7 +34,7 @@ pub unsafe fn boot_primary() -> ! {
 	oro_debug::init_with_offset(pat.offset());
 
 	// Initialize the primary core.
-	crate::init::initialize_primary(pfa);
+	crate::init::initialize_primary(pat.clone(), pfa);
 	let mut pfa = crate::init::KERNEL_STATE
 		.assume_init_ref()
 		.pfa()
diff --git a/oro-arch-aarch64/src/init.rs b/oro-arch-aarch64/src/init.rs
index 913ca07..aaa9aef 100644
--- a/oro-arch-aarch64/src/init.rs
+++ b/oro-arch-aarch64/src/init.rs
@@ -12,7 +12,14 @@ pub type Pfa = FiloPageFrameAllocator;
 
 /// The global kernel state. Initialized once during boot
 /// and re-used across all cores.
-pub static mut KERNEL_STATE: MaybeUninit<KernelState<Pfa>> = MaybeUninit::uninit();
+pub static mut KERNEL_STATE: MaybeUninit<
+	KernelState<
+		Pfa,
+		OffsetTranslator,
+		crate::mem::address_space::AddressSpaceLayout,
+		crate::sync::InterruptController,
+	>,
+> = MaybeUninit::uninit();
 
 /// Initializes the global state of the architecture.
 ///
@@ -20,7 +27,7 @@ pub static mut KERNEL_STATE: MaybeUninit<KernelState<Pfa>> = MaybeUninit::uninit
 /// Must be called exactly once for the lifetime of the system,
 /// only by the boot processor at boot time (_not_ at any
 /// subsequent bringup).
-pub unsafe fn initialize_primary(pfa: Pfa) {
+pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: Pfa) {
 	#[cfg(debug_assertions)]
 	{
 		use core::sync::atomic::{AtomicBool, Ordering};
@@ -36,7 +43,10 @@ pub unsafe fn initialize_primary(pfa: Pfa) {
 		}
 	}
 
-	KERNEL_STATE.write(KernelState::new(UnfairCriticalSpinlock::new(pfa)));
+	KERNEL_STATE.write(
+		KernelState::new(pat, UnfairCriticalSpinlock::new(pfa))
+			.expect("failed to create global kernel state"),
+	);
 }
 
 /// Main boot sequence for all cores for each bringup
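For orientation, the `#[cfg(debug_assertions)]` block elided by the hunk context above is the usual initialize-once guard. A minimal, self-contained sketch of that pattern — the names here are illustrative, not the kernel's actual items:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Flips to `true` the first time the initializer runs.
static HAS_INITIALIZED: AtomicBool = AtomicBool::new(false);

fn initialize_primary_once() {
    // `swap` returns the previous value, so any second call observes `true`.
    if HAS_INITIALIZED.swap(true, Ordering::SeqCst) {
        panic!("initialize_primary() called more than once");
    }
    // ... one-time global initialization would go here ...
}

fn main() {
    initialize_primary_once(); // OK
    // initialize_primary_once(); // would panic
}
```

Using `swap` rather than a separate load/store keeps the check race-free even if two cores were to reach it concurrently.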
diff --git a/oro-arch-aarch64/src/mem/segment.rs b/oro-arch-aarch64/src/mem/segment.rs
index 50e53e6..f54fa5e 100644
--- a/oro-arch-aarch64/src/mem/segment.rs
+++ b/oro-arch-aarch64/src/mem/segment.rs
@@ -308,6 +308,35 @@ unsafe impl AddressSegment<AddressSpaceHandle> for &'static Segment {
 		(start, end)
 	}
 
+	fn provision_as_shared<A, P>(
+		&self,
+		space: &AddressSpaceHandle,
+		alloc: &mut A,
+		translator: &P,
+	) -> Result<(), MapError>
+	where
+		A: PageFrameAllocate + PageFrameFree,
+		P: Translator,
+	{
+		let top_level = unsafe { &mut *translator.translate_mut::<PageTable>(space.base_phys) };
+
+		for idx in self.valid_range.0..=self.valid_range.1 {
+			let entry = &mut top_level[idx];
+
+			if entry.valid() {
+				return Err(MapError::Exists);
+			}
+
+			let frame_phys_addr = alloc.allocate().ok_or(MapError::OutOfMemory)?;
+			unsafe {
+				(*translator.translate_mut::<PageTable>(frame_phys_addr)).reset();
+			}
+			*entry = self.l0_template.with_address(frame_phys_addr).into();
+		}
+
+		Ok(())
+	}
+
 	fn map(
 		&self,
 		space: &AddressSpaceHandle,
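To make the new `provision_as_shared()` hook concrete: it walks the segment's top-level entry range, fails if any entry is already populated, and points each entry at a freshly allocated, zeroed next-level table. Because every address space shares those top-level entries, anything later mapped beneath them becomes visible everywhere. A toy model of the control flow, with plain integers standing in for page tables and frames (none of these types are the kernel's):

```rust
#[derive(Clone, Copy, PartialEq)]
enum Entry {
    Vacant,
    Table(usize), // physical frame number of a next-level table
}

/// Mirrors the loop shape of `provision_as_shared()`: walk an
/// inclusive range of top-level indices, refusing existing entries.
fn provision_as_shared(
    top_level: &mut [Entry; 512],
    valid_range: (usize, usize),
    alloc: &mut impl FnMut() -> Option<usize>,
) -> Result<(), &'static str> {
    for idx in valid_range.0..=valid_range.1 {
        if top_level[idx] != Entry::Vacant {
            return Err("Exists"); // mirrors MapError::Exists
        }
        let frame = alloc().ok_or("OutOfMemory")?; // mirrors MapError::OutOfMemory
        // The real code zeroes the new table through the translator, then
        // writes the architecture's entry template with the frame address.
        top_level[idx] = Entry::Table(frame);
    }
    Ok(())
}

fn main() {
    let mut top = [Entry::Vacant; 512];
    let mut next_frame = 0usize;
    let mut alloc = || {
        next_frame += 1;
        Some(next_frame)
    };
    assert!(provision_as_shared(&mut top, (400, 403), &mut alloc).is_ok());
    // A second provisioning of the same range must fail:
    assert!(provision_as_shared(&mut top, (400, 403), &mut alloc).is_err());
}
```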
diff --git a/oro-arch-x86_64/src/boot/mod.rs b/oro-arch-x86_64/src/boot/mod.rs
index b40592f..0237183 100644
--- a/oro-arch-x86_64/src/boot/mod.rs
+++ b/oro-arch-x86_64/src/boot/mod.rs
@@ -113,7 +113,7 @@ pub unsafe fn boot_primary() -> ! {
 	let lapic_id = lapic.id();
 	dbg!("local APIC ID: {lapic_id}");
 
-	crate::init::initialize_primary(pfa);
+	crate::init::initialize_primary(pat.clone(), pfa);
 	let mut pfa = crate::init::KERNEL_STATE
 		.assume_init_ref()
 		.pfa()
diff --git a/oro-arch-x86_64/src/init.rs b/oro-arch-x86_64/src/init.rs
index 913ca07..aaa9aef 100644
--- a/oro-arch-x86_64/src/init.rs
+++ b/oro-arch-x86_64/src/init.rs
@@ -12,7 +12,14 @@ pub type Pfa = FiloPageFrameAllocator;
 
 /// The global kernel state. Initialized once during boot
 /// and re-used across all cores.
-pub static mut KERNEL_STATE: MaybeUninit<KernelState<Pfa>> = MaybeUninit::uninit();
+pub static mut KERNEL_STATE: MaybeUninit<
+	KernelState<
+		Pfa,
+		OffsetTranslator,
+		crate::mem::address_space::AddressSpaceLayout,
+		crate::sync::InterruptController,
+	>,
+> = MaybeUninit::uninit();
 
 /// Initializes the global state of the architecture.
 ///
@@ -20,7 +27,7 @@ pub static mut KERNEL_STATE: MaybeUninit<KernelState<Pfa>> = MaybeUninit::uninit
 /// Must be called exactly once for the lifetime of the system,
 /// only by the boot processor at boot time (_not_ at any
 /// subsequent bringup).
-pub unsafe fn initialize_primary(pfa: Pfa) {
+pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: Pfa) {
 	#[cfg(debug_assertions)]
 	{
 		use core::sync::atomic::{AtomicBool, Ordering};
@@ -36,7 +43,10 @@ pub unsafe fn initialize_primary(pfa: Pfa) {
 		}
 	}
 
-	KERNEL_STATE.write(KernelState::new(UnfairCriticalSpinlock::new(pfa)));
+	KERNEL_STATE.write(
+		KernelState::new(pat, UnfairCriticalSpinlock::new(pfa))
+			.expect("failed to create global kernel state"),
+	);
 }
 
 /// Main boot sequence for all cores for each bringup
diff --git a/oro-arch-x86_64/src/mem/segment.rs b/oro-arch-x86_64/src/mem/segment.rs
index 0afe108..9ff597c 100644
--- a/oro-arch-x86_64/src/mem/segment.rs
+++ b/oro-arch-x86_64/src/mem/segment.rs
@@ -332,43 +332,6 @@ impl AddressSegment {
 		})
 	}
 
-	/// Maps the L4/L5 entry for the given segment range to
-	/// empty page tables, without mapping any children.
-	///
-	/// Intended to be used to create shared segments that are
-	/// otherwise empty, for later use.
-	///
-	/// # Safety
-	/// Must only be called once per segment range.
-	///
-	/// Does NOT invalidate the TLB.
-	pub unsafe fn make_top_level_present<A, P>(
-		&self,
-		space: &Handle,
-		alloc: &mut A,
-		translator: &P,
-	) -> Result<(), MapError>
-	where
-		A: PageFrameAllocate,
-		P: Translator,
-	{
-		let top_level = &mut *translator.translate_mut::<PageTable>(space.base_phys());
-
-		for idx in self.valid_range.0..=self.valid_range.1 {
-			let entry = &mut top_level[idx];
-
-			if entry.present() {
-				return Err(MapError::Exists);
-			}
-
-			let frame_phys_addr = alloc.allocate().ok_or(MapError::OutOfMemory)?;
-			*entry = self.entry_template.with_address(frame_phys_addr);
-			(*translator.translate_mut::<PageTable>(frame_phys_addr)).reset();
-		}
-
-		Ok(())
-	}
-
 	/// Unmaps the entire range's top level page tables without
 	/// reclaiming any of the physical memory.
 	///
@@ -411,6 +374,35 @@ unsafe impl Segment<AddressSpaceHandle> for &'static AddressSegment {
 		}
 	}
 
+	fn provision_as_shared<A, P>(
+		&self,
+		space: &AddressSpaceHandle,
+		alloc: &mut A,
+		translator: &P,
+	) -> Result<(), MapError>
+	where
+		A: PageFrameAllocate + PageFrameFree,
+		P: Translator,
+	{
+		let top_level = unsafe { &mut *translator.translate_mut::<PageTable>(space.base_phys()) };
+
+		for idx in self.valid_range.0..=self.valid_range.1 {
+			let entry = &mut top_level[idx];
+
+			if entry.present() {
+				return Err(MapError::Exists);
+			}
+
+			let frame_phys_addr = alloc.allocate().ok_or(MapError::OutOfMemory)?;
+			unsafe {
+				(*translator.translate_mut::<PageTable>(frame_phys_addr)).reset();
+			}
+			*entry = self.entry_template.with_address(frame_phys_addr);
+		}
+
+		Ok(())
+	}
+
 	fn map(
 		&self,
 		space: &AddressSpaceHandle,
diff --git a/oro-kernel/src/lib.rs b/oro-kernel/src/lib.rs
index 72a5633..5d833d5 100644
--- a/oro-kernel/src/lib.rs
+++ b/oro-kernel/src/lib.rs
@@ -10,25 +10,43 @@
 #![allow(incomplete_features)]
 #![feature(adt_const_params)]
 
-use oro_mem::pfa::alloc::{PageFrameAllocate, PageFrameFree};
-use oro_sync::spinlock::unfair_critical::UnfairCriticalSpinlock;
+use oro_mem::{
+	mapper::{AddressSpace, MapError},
+	pfa::alloc::{PageFrameAllocate, PageFrameFree},
+	translate::Translator,
+};
+use oro_sync::spinlock::unfair_critical::{InterruptController, UnfairCriticalSpinlock};
 
 pub mod id;
 pub mod module;
 pub mod port;
 pub mod ring;
 
+pub(crate) mod registry;
+
 /// Core-local instance of the Oro kernel.
 ///
 /// Intended to live on the core's respective stack,
 /// living for the lifetime of the core (and destroyed
 /// and re-created on core powerdown/subsequent bringup).
-pub struct Kernel<Pfa> {
+pub struct Kernel<Pfa, Pat, AddrSpace, IntCtrl>
+where
+	Pfa: PageFrameAllocate + PageFrameFree + 'static,
+	Pat: Translator,
+	AddrSpace: AddressSpace + 'static,
+	IntCtrl: InterruptController + 'static,
+{
 	/// Global reference to the shared kernel state.
-	state: &'static KernelState<Pfa>,
+	state: &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl>,
 }
 
-impl<Pfa> Kernel<Pfa> {
+impl<Pfa, Pat, AddrSpace, IntCtrl> Kernel<Pfa, Pat, AddrSpace, IntCtrl>
+where
+	Pfa: PageFrameAllocate + PageFrameFree + 'static,
+	Pat: Translator,
+	AddrSpace: AddressSpace,
+	IntCtrl: InterruptController,
+{
 	/// Creates a new core-local instance of the Kernel.
 	///
 	/// # Safety
@@ -40,37 +58,96 @@ impl<Pfa> Kernel<Pfa> {
 	/// The `state` given to the kernel must be shared for all
 	/// instances of the kernel that wish to partake in the same
 	/// Oro kernel universe.
-	pub unsafe fn new(state: &'static KernelState<Pfa>) -> Self {
+	pub unsafe fn new(state: &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl>) -> Self {
 		Self { state }
 	}
 
 	/// Returns the underlying [`KernelState`] for this kernel instance.
 	#[must_use]
-	pub fn state(&self) -> &'static KernelState<Pfa> {
+	pub fn state(&self) -> &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl> {
 		self.state
 	}
 }
 
 /// Global state shared by all [`Kernel`] instances across
 /// core boot/powerdown/bringup cycles.
-pub struct KernelState<Pfa> {
+pub struct KernelState<Pfa, Pat, AddrSpace, IntCtrl>
+where
+	Pfa: PageFrameAllocate + PageFrameFree,
+	Pat: Translator,
+	AddrSpace: AddressSpace,
+	IntCtrl: InterruptController,
+{
 	/// The shared, spinlocked page frame allocator (PFA) for the
 	/// entire system.
-	pfa: UnfairCriticalSpinlock<Pfa>,
+	pfa: UnfairCriticalSpinlock<Pfa>,
+	/// Ring registry.
+	ring_registry: registry::Registry<ring::Ring, IntCtrl, AddrSpace, Pat>,
 }
 
-impl<Pfa> KernelState<Pfa>
+impl<Pfa, Pat, AddrSpace, IntCtrl> KernelState<Pfa, Pat, AddrSpace, IntCtrl>
 where
-	Pfa: PageFrameAllocate + PageFrameFree + 'static,
+	Pfa: PageFrameAllocate + PageFrameFree,
+	Pat: Translator,
+	AddrSpace: AddressSpace,
+	IntCtrl: InterruptController,
 {
 	/// Creates a new instance of the kernel state. Meant to be called
 	/// once for all cores at boot time.
-	pub fn new(pfa: UnfairCriticalSpinlock<Pfa>) -> Self {
-		Self { pfa }
+	///
+	/// # Safety
+	/// This function sets up shared page table mappings that MUST be
+	/// shared across cores. The caller MUST initialize the kernel
+	/// state (this struct) prior to booting _any other cores_
+	/// or else registry accesses will page fault.
+	#[allow(clippy::missing_panics_doc)]
+	pub unsafe fn new(pat: Pat, pfa: UnfairCriticalSpinlock<Pfa>) -> Result<Self, MapError> {
+		let ring_registry = {
+			let mut pfa_lock = pfa.lock::<IntCtrl>();
+
+			registry::Registry::new(pat, &mut *pfa_lock, AddrSpace::kernel_ring_registry())?
+		};
+
+		let root_ring_id = ring_registry.insert_permanent(
+			&pfa,
+			ring::Ring {
+				id: 0,
+				parent_id: 0,
+			},
+		)?;
+		assert_eq!(root_ring_id, 0, "root ring ID must be 0");
+
+		Ok(Self { pfa, ring_registry })
 	}
 
 	/// Returns the underlying PFA belonging to the kernel state.
-	pub fn pfa(&self) -> &UnfairCriticalSpinlock<Pfa> {
+	pub fn pfa(&'static self) -> &'static UnfairCriticalSpinlock<Pfa> {
 		&self.pfa
 	}
+
+	/// Returns a [`registry::Handle`] to a [`ring::Ring`] by its ID,
+	/// or `None` if it does not exist.
+	///
+	/// # Safety
+	/// **DO NOT USE THIS FUNCTION FOR ANYTHING SECURITY RELATED.**
+	///
+	/// IDs are re-used by registries when items are dropped, so
+	/// multiple calls to this function with the same ID may return
+	/// handles to different ring items as the IDs get recycled.
+	///
+	/// In almost all cases, you should be using [`registry::Handle`]s
+	/// directly. They are also easier to work with than calling
+	/// this function.
+	pub unsafe fn ring_by_id(&'static self, id: usize) -> Option<registry::Handle<ring::Ring>> {
+		self.ring_registry.get(id)
+	}
+
+	/// Inserts a [`ring::Ring`] into the registry and returns
+	/// its [`registry::Handle`].
+	pub fn insert_ring(
+		&'static self,
+		ring: ring::Ring,
+	) -> Result<registry::Handle<ring::Ring>, MapError> {
+		self.ring_registry.insert(&self.pfa, ring)
+	}
 }
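The `Kernel`/`KernelState` split above follows a common pattern: one `'static` shared state, plus a trivially cheap per-core wrapper that only holds a reference to it. A reduced sketch of the pattern, with the generics and spinlocks stripped out (illustrative types only):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

/// Stands in for `KernelState`: lives for the life of the system.
struct KernelState {
    booted_cores: AtomicUsize,
}

/// Stands in for `Kernel`: core-local, holds only a reference.
struct Kernel {
    state: &'static KernelState,
}

impl Kernel {
    fn new(state: &'static KernelState) -> Self {
        Self { state }
    }

    fn state(&self) -> &'static KernelState {
        self.state
    }
}

static STATE: KernelState = KernelState {
    booted_cores: AtomicUsize::new(0),
};

fn main() {
    // Each core constructs its own `Kernel` against the same state.
    let core0 = Kernel::new(&STATE);
    let core1 = Kernel::new(&STATE);
    core0.state().booted_cores.fetch_add(1, Ordering::Relaxed);
    core1.state().booted_cores.fetch_add(1, Ordering::Relaxed);
    assert_eq!(STATE.booted_cores.load(Ordering::Relaxed), 2);
}
```

Because the wrapper is just a reference, destroying and re-creating it across core powerdown/bringup cycles costs nothing, which is exactly what the doc comment above promises.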
diff --git a/oro-kernel/src/registry.rs b/oro-kernel/src/registry.rs
new file mode 100644
index 0000000..55a3548
--- /dev/null
+++ b/oro-kernel/src/registry.rs
@@ -0,0 +1,478 @@
+//! Implements types for the Oro registries (reference-counted
+//! arena allocators).
+
+// NOTE(qix-): This is an **INCREDIBLY UNSAFE** module. It is designed
+// NOTE(qix-): to be as ergonomic and safe as possible from the outset,
+// NOTE(qix-): especially for how it'll be used within Oro, but it employs
+// NOTE(qix-): some normally very questionable practices to achieve this
+// NOTE(qix-): in order to stay performant in the hot path (optimizing
+// NOTE(qix-): reads over writes, minimizing locking whilst doing so,
+// NOTE(qix-): for example).
+// NOTE(qix-):
+// NOTE(qix-): It makes a ton of assumptions about its usage, and is NOT
+// NOTE(qix-): suitable for use in any context other than the Oro kernel.
+// NOTE(qix-):
+// NOTE(qix-): DO NOT COPY THIS CODE INTO YOUR OWN PROJECTS IN ANY CAPACITY.
+// NOTE(qix-): IT IS NOT SAFE. YOU HAVE BEEN WARNED.
+// NOTE(qix-):
+// NOTE(qix-): Similarly, if you are here to make edits to this code, please
+// NOTE(qix-): be very careful and ensure that you are not introducing any
+// NOTE(qix-): unsafety into the codebase. This is a very delicate module.
+// NOTE(qix-): It will be HEAVILY scrutinized in code review. Be ready.
+
+use core::{
+	marker::PhantomData,
+	mem::{size_of, ManuallyDrop, MaybeUninit},
+	ops::Deref,
+	sync::atomic::{AtomicUsize, Ordering},
+};
+use oro_macro::unlikely;
+use oro_mem::{
+	mapper::{AddressSegment, AddressSpace, MapError},
+	pfa::alloc::{PageFrameAllocate, PageFrameFree},
+	translate::Translator,
+};
+use oro_sync::spinlock::unfair_critical::{InterruptController, UnfairCriticalSpinlock};
+
+/// A registry for reference-counted arena allocation.
+///
+/// The registry is a reference-counted arena allocator that
+/// allows for the allocation of items that are reference-counted
+/// across the system. The registry is designed to be used in
+/// a supervisor space, and is not intended for use in user space.
+///
+/// Registry allocations return [`Handle`]s, which can be cloned
+/// and will free the slot when the final user drops it.
+pub struct Registry<T, IntCtrl, AddrSpace, Pat>
+where
+	T: Sized + 'static,
+	IntCtrl: InterruptController,
+	AddrSpace: AddressSpace,
+	Pat: Translator,
+{
+	/// The base address of the registry.
+	// TODO(qix-): Remove this field once const trait functions are stabilized,
+	// TODO(qix-): replacing it with `segment.range().0 as *mut _` and saving
+	// TODO(qix-): a few bytes.
+	base: *mut MaybeUninit<ItemFrame<T>>,
+	/// Bookkeeping counters used in the registry.
+	bookkeeping: UnfairCriticalSpinlock<RegistryBookkeeping>,
+	/// The segment this registry is in.
+	segment: AddrSpace::SupervisorSegment,
+	/// The mapper for the registry.
+	mapper: AddrSpace::SupervisorHandle,
+	/// The physical address translator (PAT) this registry will use.
+	pat: Pat,
+	/// The interrupt controller for the registry.
+	_interrupt_controller: PhantomData<IntCtrl>,
+	/// The address space for the registry.
+	_address_space: PhantomData<AddrSpace>,
+}
+/// Registry-level bookkeeping fields, protected
+/// behind an [`UnfairCriticalSpinlock`].
+struct RegistryBookkeeping {
+	/// The last free ID in the registry.
+	///
+	/// If this is `usize::MAX`, then there are no free slots.
+	last_free_id: usize,
+	/// The total count of items in the registry.
+	total_count: usize,
+	/// Total page count of the registry.
+	total_page_count: usize,
+}
+
+impl RegistryBookkeeping {
+	/// Creates a new instance of the registry bookkeeping.
+	fn new() -> Self {
+		Self {
+			last_free_id: usize::MAX,
+			total_count: 0,
+			total_page_count: 0,
+		}
+	}
+}
+
+/// A frame in the registry.
+///
+/// Wraps an item `T` with metadata about the slot itself,
+/// used for bookkeeping purposes.
+struct ItemFrame<T: Sized + 'static> {
+	/// A union of the item or the next free index.
+	maybe_item: MaybeItem<T>,
+	/// Count of users of this item.
+	/// In the event that this is zero, the item is free.
+	/// In the event that this count reaches zero, the item gets dropped.
+	user_count: AtomicUsize,
+}
+
+/// A union of either an occupied item slot, or the index of the
+/// next free slot.
+union MaybeItem<T: Sized + 'static> {
+	/// The item itself.
+	item: ManuallyDrop<UnfairCriticalSpinlock<T>>,
+	/// The next free index.
+	next_free: usize,
+}
+
+impl<T, IntCtrl, AddrSpace, Pat> Registry<T, IntCtrl, AddrSpace, Pat>
+where
+	T: Sized + 'static,
+	IntCtrl: InterruptController,
+	AddrSpace: AddressSpace,
+	Pat: Translator,
+{
+	/// Creates a new, empty registry in the given
+	/// segment.
+	///
+	/// Makes the registry available for use across all
+	/// cores in the system.
+	///
+	/// The segment used for the registry must be valid,
+	/// unique to all other registries, and previously
+	/// unpopulated (or this function will error with
+	/// [`MapError::Exists`]).
+	///
+	/// Typically, this function should be called once
+	/// at boot time.
+	pub fn new<Pfa>(
+		pat: Pat,
+		pfa: &mut Pfa,
+		segment: AddrSpace::SupervisorSegment,
+	) -> Result<Self, MapError>
+	where
+		Pfa: PageFrameAllocate + PageFrameFree,
+	{
+		// SAFETY(qix-): We can more or less guarantee that this registry
+		// SAFETY(qix-): is being constructed in the supervisor space.
+		// SAFETY(qix-): Further, we can't guarantee that the segment is
+		// SAFETY(qix-): going to be accessed separately from other segments
+		// SAFETY(qix-): quite yet, but we'll verify that we have exclusive
+		// SAFETY(qix-): access to the segment directly after this call.
+		let mapper = unsafe { AddrSpace::current_supervisor_space(&pat) };
+		segment.provision_as_shared(&mapper, pfa, &pat)?;
+
+		Ok(Self {
+			base: segment.range().0 as *mut _,
+			bookkeeping: UnfairCriticalSpinlock::new(RegistryBookkeeping::new()),
+			pat,
+			segment,
+			mapper,
+			_interrupt_controller: PhantomData,
+			_address_space: PhantomData,
+		})
+	}
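The `MaybeItem` union above is the classic slab/free-list trick: a slot stores either a live item or, once freed, the index of the next free slot, so the free list costs no extra memory. A safe miniature of the same idea using `enum` instead of `union` (the real code needs the union because the discriminant is carried out-of-band by `user_count`; all names here are illustrative):

```rust
/// Safe stand-in for `MaybeItem`: the enum carries its own tag,
/// whereas the registry infers "free" from `user_count == 0`.
enum Slot<T> {
    Occupied(T),
    Free { next_free: usize }, // usize::MAX terminates the list
}

struct Arena<T> {
    slots: Vec<Slot<T>>,
    last_free_id: usize, // mirrors RegistryBookkeeping::last_free_id
}

impl<T> Arena<T> {
    fn new() -> Self {
        Self { slots: Vec::new(), last_free_id: usize::MAX }
    }

    /// Reuse a freed slot if one exists, else append. (The registry
    /// instead maps fresh pages at the end of its segment.)
    fn insert(&mut self, item: T) -> usize {
        if self.last_free_id == usize::MAX {
            self.slots.push(Slot::Occupied(item));
            self.slots.len() - 1
        } else {
            let id = self.last_free_id;
            if let Slot::Free { next_free } = &self.slots[id] {
                self.last_free_id = *next_free;
            }
            self.slots[id] = Slot::Occupied(item);
            id
        }
    }

    /// Thread the slot back onto the free list.
    fn remove(&mut self, id: usize) {
        self.slots[id] = Slot::Free { next_free: self.last_free_id };
        self.last_free_id = id;
    }
}

fn main() {
    let mut arena = Arena::new();
    let a = arena.insert("a");
    let b = arena.insert("b");
    arena.remove(a);
    let c = arena.insert("c"); // recycles slot `a` — IDs are reused
    assert_eq!((a, b, c), (0, 1, 0));
}
```

The ID recycling shown in `main()` is exactly why the doc comments throughout this patch warn against using raw IDs for anything security-sensitive.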
+	/// Allocates and inserts an item `T` into the registry, permanently.
+	/// Returns the `id` rather than a `Handle`. This is useful for
+	/// items that are not intended to be reference-counted and must
+	/// always be valid throughout the lifetime of the kernel.
+	///
+	/// Returns an error if there was a problem allocating the item.
+	///
+	/// Takes a reference to the spinlock itself, since not all
+	/// allocations require locking the PFA.
+	///
+	/// # Safety
+	/// Marked unsafe because misuse of this function can lead to
+	/// memory leaks. You probably want to use [`Self::insert()`] instead.
+	pub unsafe fn insert_permanent<Pfa>(
+		&self,
+		pfa: &UnfairCriticalSpinlock<Pfa>,
+		item: T,
+	) -> Result<usize, MapError>
+	where
+		Pfa: PageFrameAllocate + PageFrameFree,
+	{
+		// SAFETY(qix-): We don't panic in this function.
+		let mut bk = unsafe { self.bookkeeping.lock::<IntCtrl>() };
+
+		if bk.last_free_id == usize::MAX {
+			let byte_offset = bk.total_count * size_of::<MaybeUninit<ItemFrame<T>>>();
+			let byte_offset_end = byte_offset + size_of::<MaybeUninit<ItemFrame<T>>>();
+
+			if unlikely!((self.segment.range().0 + byte_offset_end - 1) > self.segment.range().1) {
+				return Err(MapError::VirtOutOfRange);
+			}
+
+			// TODO(qix-): If PFAs ever support more than 4K pages, this will need to be updated.
+			let new_page_count = (byte_offset_end + 4095) >> 12;
+
+			if new_page_count > bk.total_page_count {
+				// SAFETY(qix-): We don't panic in this function.
+				let mut pfa = unsafe { pfa.lock::<IntCtrl>() };
+
+				for page_id in bk.total_page_count..new_page_count {
+					let page = pfa.allocate().ok_or(MapError::OutOfMemory)?;
+
+					// TODO(qix-): If PFAs ever support more than 4K pages, this will need to be updated.
+					let virt = self.segment.range().0 + page_id * 4096;
+					if let Err(err) =
+						self.segment
+							.map(&self.mapper, &mut *pfa, &self.pat, virt, page)
+					{
+						// SAFETY(qix-): We just allocated this page and the mapper didn't use it.
+						unsafe {
+							pfa.free(page);
+						}
+						return Err(err);
+					}
+
+					// Increment on each loop such that if we fail, a future attempt won't try to
+					// re-map the same virtual addresses.
+					bk.total_page_count += 1;
+				}
+			}
+
+			let id = bk.total_count;
+			bk.total_count += 1;
+
+			let slot = unsafe { &mut *self.base.add(id) };
+			slot.write(ItemFrame {
+				maybe_item: MaybeItem {
+					item: ManuallyDrop::new(UnfairCriticalSpinlock::new(item)),
+				},
+				user_count: AtomicUsize::new(1),
+			});
+
+			Ok(id)
+		} else {
+			let id = bk.last_free_id;
+			let slot = unsafe { (*self.base.add(id)).assume_init_mut() };
+			bk.last_free_id = unsafe { slot.maybe_item.next_free };
+			let last_user_count = slot.user_count.fetch_add(1, Ordering::Relaxed);
+			debug_assert_eq!(last_user_count, 0);
+			slot.maybe_item.item = ManuallyDrop::new(UnfairCriticalSpinlock::new(item));
+
+			Ok(id)
+		}
+	}
+
+	/// Allocates and inserts an item `T` into the registry.
+	///
+	/// Returns an error if there was a problem allocating the item.
+	///
+	/// Takes a reference to the spinlock itself, since not all
+	/// allocations require locking the PFA.
+	pub fn insert<Pfa>(
+		&'static self,
+		pfa: &UnfairCriticalSpinlock<Pfa>,
+		item: T,
+	) -> Result<Handle<T>, MapError>
+	where
+		Pfa: PageFrameAllocate + PageFrameFree,
+	{
+		// SAFETY(qix-): `insert_permanent` simply creates a new item
+		// SAFETY(qix-): with a user count of 1, but doesn't return a handle
+		// SAFETY(qix-): to it. Since this is the only other place that
+		// SAFETY(qix-): a `Handle` can even be constructed, it means
+		// SAFETY(qix-): all other usages really *are* permanent, but ours
+		// SAFETY(qix-): is not and instead piggy-backs off the user count
+		// SAFETY(qix-): being 1 to simply initialize a handle that *does*
+		// SAFETY(qix-): become reference counted.
+		let id = unsafe { self.insert_permanent(pfa, item)? };
+		Ok(Handle { id, registry: self })
+	}
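A quick worked example of the growth arithmetic in `insert_permanent()` above: with 4 KiB pages, the number of pages needed to back `byte_offset_end` bytes is the ceiling of `byte_offset_end / 4096` (computed as `(byte_offset_end + 4095) >> 12`), and only the delta beyond `total_page_count` is freshly allocated and mapped. A standalone check of that math:

```rust
/// Pages required to cover `byte_offset_end` bytes with 4 KiB pages.
fn pages_needed(byte_offset_end: usize) -> usize {
    (byte_offset_end + 4095) >> 12
}

fn main() {
    assert_eq!(pages_needed(1), 1);    // a single byte still needs one page
    assert_eq!(pages_needed(4096), 1); // exactly one page
    assert_eq!(pages_needed(4097), 2); // spills into a second page
    // If the registry already has 1 page mapped and a new slot ends at
    // byte 8200, pages_needed(8200) == 3, so pages 1 and 2 get mapped.
    assert_eq!(pages_needed(8200), 3);
}
```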
+	/// Returns the item at the given ID, or `None` if the ID is invalid.
+	///
+	/// **This function incurs a registry lock.**
+	/// You should use [`Handle`]s wherever possible, which do not
+	/// incur registry locks.
+	///
+	/// # Safety
+	/// **DO NOT PERFORM LOOKUPS BY ID FOR ANYTHING SECURITY-RELATED.**
+	///
+	/// IDs are RE-USABLE and may not refer to the same item if the item
+	/// slot is dropped and re-allocated.
+	///
+	/// For that reason, this function is marked as unsafe.
+	pub unsafe fn get(&'static self, id: usize) -> Option<Handle<T>> {
+		// We have to keep this lock open even during the lookup
+		// since user counts are not locked at the record level
+		// and there is no "fetch_and_increment_unless_zero" atomic
+		// operation.
+		//
+		// NOTE(qix-): We could load and then do a compare-and-swap, but this function
+		// NOTE(qix-): really should be seldom used, and I'm not interested in
+		// NOTE(qix-): fleshing it out further at this time. PR welcome.
+		let bk = self.bookkeeping.lock::<IntCtrl>();
+
+		if id >= bk.total_count {
+			return None;
+		}
+
+		let slot = (*self.base.add(id)).assume_init_ref();
+
+		// NOTE(qix-): Here's the part that could be changed
+		// NOTE(qix-): to a compare-and-swap.
+		if slot.user_count.load(Ordering::Relaxed) == 0 {
+			None
+		} else {
+			slot.user_count.fetch_add(1, Ordering::Relaxed);
+			Some(Handle { id, registry: self })
+		}
+	}
+}
+
+/// Handles item access and dropping in the registry.
+trait RegistryAccess<T: Sized + 'static> {
+	/// Gets the item frame at the given ID.
+	///
+	/// # Safety
+	/// Caller must ensure that the ID is valid.
+	/// This function performs no bounds checks,
+	/// and assumes if an ID is passed in, it is
+	/// valid.
+	unsafe fn get(&self, id: usize) -> &UnfairCriticalSpinlock<T>;
+
+	/// Increments the user count of the item at the given ID.
+	///
+	/// # Safety
+	/// Caller must ensure that the ID is valid.
+	/// This function performs no bounds checks,
+	/// and assumes if an ID is passed in, it is
+	/// valid.
+	///
+	/// The caller must ensure that [`Self::forget_item_at()`]
+	/// is called when the item is no longer needed.
+	unsafe fn lease_item_at(&self, id: usize);
+
+	/// Forgets the item at the given ID.
+	///
+	/// If this is the last user of the item, the item
+	/// will be dropped.
+	///
+	/// # Safety
+	/// Caller must ensure that the ID is valid.
+	/// This function performs no bounds checks,
+	/// and assumes if an ID is passed in, it is
+	/// valid.
+	///
+	/// Any references or handles to the item
+	/// must be dropped before calling this function.
+	unsafe fn forget_item_at(&self, id: usize);
+}
+
+impl<T, IntCtrl, AddrSpace, Pat> RegistryAccess<T> for Registry<T, IntCtrl, AddrSpace, Pat>
+where
+	T: Sized + 'static,
+	IntCtrl: InterruptController,
+	AddrSpace: AddressSpace,
+	Pat: Translator,
+{
+	unsafe fn get(&self, id: usize) -> &UnfairCriticalSpinlock<T> {
+		&(*self.base.add(id)).assume_init_ref().maybe_item.item
+	}
+
+	unsafe fn lease_item_at(&self, id: usize) {
+		let last_user_count = (*self.base.add(id))
+			.assume_init_ref()
+			.user_count
+			.fetch_add(1, Ordering::Relaxed);
+		// A lease is only taken through a live `Handle`, so the
+		// previous count can never be zero here.
+		debug_assert_ne!(last_user_count, 0);
+	}
+
+	unsafe fn forget_item_at(&self, id: usize) {
+		let slot = &mut *self.base.add(id);
+
+		let last_user_count = slot
+			.assume_init_ref()
+			.user_count
+			.fetch_sub(1, Ordering::Relaxed);
+
+		debug_assert_ne!(last_user_count, 0);
+
+		if last_user_count == 1 {
+			let slot = slot.assume_init_mut();
+
+			ManuallyDrop::drop(&mut slot.maybe_item.item);
+
+			// SAFETY(qix-): DO NOT PUT THIS LOCK BEFORE THE ABOVE DROP.
+			// SAFETY(qix-): YOU WILL DEADLOCK THE KERNEL.
+			let mut bk = self.bookkeeping.lock::<IntCtrl>();
+			slot.maybe_item.next_free = bk.last_free_id;
+			bk.last_free_id = id;
+		}
+	}
+}
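The lease/forget pair above is a hand-rolled reference count: `Clone` does a `fetch_add`, `Drop` a `fetch_sub`, and whoever observes the count hit zero performs the destruction and threads the slot back onto the free list. The zero-check subtlety in `get()` exists precisely because there is no single atomic "increment unless zero" operation, hence the registry lock around lookup. A stripped-down sketch of the counting itself (illustrative names):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct Counted {
    user_count: AtomicUsize,
}

impl Counted {
    /// Mirrors `lease_item_at()`: one more user.
    fn lease(&self) {
        self.user_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Mirrors `forget_item_at()`: returns `true` exactly once,
    /// for the caller that dropped the final reference and must
    /// now destroy the item.
    fn forget(&self) -> bool {
        self.user_count.fetch_sub(1, Ordering::Relaxed) == 1
    }
}

fn main() {
    let c = Counted { user_count: AtomicUsize::new(1) };
    c.lease();            // a second handle is cloned
    assert!(!c.forget()); // first drop: item survives
    assert!(c.forget());  // last drop: destroy + free the slot
}
```

Whether `Relaxed` suffices depends on the surrounding locking; for comparison, `Arc` uses a `Release` decrement paired with an `Acquire` fence before running the destructor.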
+/// A lightweight handle to an item in a [`Registry`].
+///
+/// The handle is a reference-counted item in the registry,
+/// and is a thin wrapper around an [`UnfairCriticalSpinlock`]
+/// to the item itself.
+///
+/// Handles can be safely `clone()`d. When the last handle
+/// is dropped, the item is freed from the registry, and
+/// the backing memory is reused for future allocations.
+#[must_use]
+pub struct Handle<T: Sized + 'static> {
+	/// The ID of the item in the registry.
+	///
+	/// This is the offset into the registry's base address.
+	///
+	/// **DO NOT USE THIS ID FOR ANYTHING SECURITY-SENSITIVE.**
+	id: usize,
+	/// The registry the item is in.
+	registry: &'static dyn RegistryAccess<T>,
+}
+
+impl<T: Sized + 'static> Handle<T> {
+	/// Returns the ID of the item in the registry.
+	///
+	/// **DO NOT USE THIS ID FOR ANYTHING SECURITY-SENSITIVE.**
+	/// You should use `Handle`s wherever possible.
+	///
+	/// Note that this ID may go stale if the item is
+	/// dropped and re-allocated. Future lookups
+	/// using the given ID **_MAY_ NOT** refer to the
+	/// same item.
+	///
+	/// **Do not rely on this ID for anything other
+	/// than debugging or logging purposes.**
+	#[must_use]
+	pub fn id(&self) -> usize {
+		self.id
+	}
+}
+
+impl<T: Sized + 'static> Deref for Handle<T> {
+	type Target = UnfairCriticalSpinlock<T>;
+
+	fn deref(&self) -> &Self::Target {
+		// SAFETY(qix-): We can assume that, given this handle
+		// SAFETY(qix-): is even created (and cannot be created
+		// SAFETY(qix-): externally), the ID is valid.
+		unsafe { self.registry.get(self.id) }
+	}
+}
+
+impl<T: Sized + 'static> Clone for Handle<T> {
+	fn clone(&self) -> Self {
+		// SAFETY(qix-): We can assume that, given this handle
+		// SAFETY(qix-): is even created (and cannot be created
+		// SAFETY(qix-): externally), the ID is valid.
+		unsafe {
+			self.registry.lease_item_at(self.id);
+		}
+
+		Self {
+			id: self.id,
+			registry: self.registry,
+		}
+	}
+}
+
+impl<T: Sized + 'static> Drop for Handle<T> {
+	fn drop(&mut self) {
+		// SAFETY(qix-): We can assume that, given this handle
+		// SAFETY(qix-): is even created (and cannot be created
+		// SAFETY(qix-): externally), the ID is valid.
+		unsafe {
+			self.registry.forget_item_at(self.id);
+		}
+	}
+}
diff --git a/oro-kernel/src/ring.rs b/oro-kernel/src/ring.rs
index 1805699..414acc8 100644
--- a/oro-kernel/src/ring.rs
+++ b/oro-kernel/src/ring.rs
@@ -27,5 +27,7 @@ pub struct Ring {
 	///
 	/// This is unique for each ring, but can be re-used if rings are destroyed.
 	/// It is the offset of the arena slot into the arena pool.
-	id: u32,
+	pub id: u64,
+	/// The parent ring ID.
+	pub parent_id: u64,
 }
diff --git a/oro-macro/src/likely.rs b/oro-macro/src/likely.rs
index bdb21d0..aa4f709 100644
--- a/oro-macro/src/likely.rs
+++ b/oro-macro/src/likely.rs
@@ -1,13 +1,19 @@
 //! Likely/unlikely macros for branch prediction hints.
 
+// XXX TODO(qix-): **(UN)LIKELY IS CURRENTLY BUGGED**.
+// XXX TODO(qix-): For now, it's been disabled and will
+// XXX TODO(qix-): simply pass through the expression
+// XXX TODO(qix-): until it's fixed. Tracking issue:
+// XXX TODO(qix-): https://github.com/rust-lang/rust/issues/88767
+
 /// Stub for [`core::intrinsics::likely`].
 #[macro_export]
 macro_rules! likely {
-	($e:expr) => {{ ::core::intrinsics::likely($e) }};
+	($e:expr) => {{ $e }}; // ($e:expr) => {{ ::core::intrinsics::likely($e) }};
 }
 
 /// Stub for [`core::intrinsics::unlikely`].
 #[macro_export]
 macro_rules! unlikely {
-	($e:expr) => {{ ::core::intrinsics::unlikely($e) }};
+	($e:expr) => {{ $e }}; // ($e:expr) => {{ ::core::intrinsics::unlikely($e) }};
 }
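With the intrinsics behind `likely!`/`unlikely!` disabled pending the tracking issue above, the macros are plain passthroughs. For what it's worth, a stable-Rust way to bias a branch is to route the unlikely arm through a `#[cold]` function; this is a known workaround, not what the patch does:

```rust
/// Calling a `#[cold]` function from a branch tells the optimizer
/// that the branch is rarely taken.
#[cold]
#[inline(never)]
fn unlikely_path() {}

/// Saturating add where overflow is treated as the rare case.
fn checked_add_saturating(a: u32, b: u32) -> u32 {
    match a.checked_add(b) {
        Some(v) => v,
        None => {
            unlikely_path(); // hint: overflow is the cold path
            u32::MAX
        }
    }
}

fn main() {
    assert_eq!(checked_add_saturating(1, 2), 3);
    assert_eq!(checked_add_saturating(u32::MAX, 1), u32::MAX);
}
```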
diff --git a/oro-mem/src/mapper.rs b/oro-mem/src/mapper.rs
index 4395de2..183f340 100644
--- a/oro-mem/src/mapper.rs
+++ b/oro-mem/src/mapper.rs
@@ -26,12 +26,12 @@ use crate::{
 /// [`AddressSpace::current_supervisor_space`] method), so as to not incur undefined
 /// behavior under Rust's safety rules regarding multiple mutable references.
 // TODO(qix-): Turn this into a const trait whenever const traits are stabilized.
-pub unsafe trait AddressSpace {
+pub unsafe trait AddressSpace: 'static {
 	/// The type of supervisor address space handle that this address space works with.
 	type SupervisorHandle: Sized + 'static;
 
 	/// The type of [`AddressSegment`] that this address space returns.
-	type SupervisorSegment: AddressSegment<Self::SupervisorHandle> + Sized + 'static;
+	type SupervisorSegment: AddressSegment<Self::SupervisorHandle> + Sized;
 
 	/// Returns the supervisor address space handle for the current CPU.
 	///
@@ -164,12 +164,25 @@ pub unsafe trait AddressSpace {
 /// references.
 ///
 /// Implementations **MUST NOT PANIC** under any circumstance.
-pub unsafe trait AddressSegment<Handle: Sized> {
+pub unsafe trait AddressSegment<Handle: Sized>: 'static {
 	/// Returns the range of virtual addresses that this segment covers.
 	///
 	/// The range is inclusive of the start and end addresses.
 	fn range(&self) -> (usize, usize);
 
+	/// Makes the segment shared across all address spaces.
+	///
+	/// Returns an error if the segment is not empty.
+	fn provision_as_shared<A, P>(
+		&self,
+		space: &Handle,
+		alloc: &mut A,
+		translator: &P,
+	) -> Result<(), MapError>
+	where
+		A: PageFrameAllocate + PageFrameFree,
+		P: Translator;
+
 	/// Maps a physical address into the segment at the given virtual address.
 	/// Fails if the virtual address is already mapped.
 	///
diff --git a/oro-sync/src/spinlock/unfair_critical.rs b/oro-sync/src/spinlock/unfair_critical.rs
index cf0374e..8032b34 100644
--- a/oro-sync/src/spinlock/unfair_critical.rs
+++ b/oro-sync/src/spinlock/unfair_critical.rs
@@ -12,7 +12,7 @@ use core::{
 
 /// Allows for an architecture-specific means of disabling and re-enabling
 /// interrupts.
-pub trait InterruptController {
+pub trait InterruptController: 'static {
 	/// The interrupt state for the architecture.
 	type InterruptState: Copy;
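The `'static` bounds added to `AddressSpace`, `AddressSegment`, and `InterruptController` are what let the registry embed these types (or `PhantomData` of them) inside `'static` structures such as `KERNEL_STATE`. For orientation, here is a toy shape of the `InterruptController` contract as this patch uses it — the method names below are assumptions for illustration, since the trait body is not part of this diff:

```rust
/// Illustrative shape only: the real trait lives in oro-sync and
/// its exact methods are not shown in this patch.
trait InterruptController: 'static {
    type InterruptState: Copy;

    /// Disable interrupts, returning the prior state (hypothetical name).
    fn disable_and_save() -> Self::InterruptState;
    /// Restore a previously saved interrupt state (hypothetical name).
    fn restore(state: Self::InterruptState);
}

/// A no-op controller, e.g. for single-threaded tests.
struct NullController;

impl InterruptController for NullController {
    type InterruptState = ();

    fn disable_and_save() -> Self::InterruptState {}
    fn restore(_state: Self::InterruptState) {}
}

/// Roughly what an "unfair critical" lock does around its critical
/// section: interrupts off, run the body, interrupts restored.
fn with_critical_section<C, R, F>(body: F) -> R
where
    C: InterruptController,
    F: FnOnce() -> R,
{
    let state = C::disable_and_save();
    let result = body();
    C::restore(state);
    result
}

fn main() {
    let v = with_critical_section::<NullController, _, _>(|| 40 + 2);
    assert_eq!(v, 42);
}
```

Keeping the controller as a type parameter (rather than a value) is what allows `lock::<IntCtrl>()` call sites throughout the patch to stay zero-sized.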