Skip to content

Commit

Permalink
kernel: add Registry and Handle types and initialize Ring registry / root ring
Browse files Browse the repository at this point in the history
  • Loading branch information
Qix- committed Sep 11, 2024
1 parent 4c50371 commit 09a92a4
Show file tree
Hide file tree
Showing 12 changed files with 683 additions and 66 deletions.
2 changes: 1 addition & 1 deletion oro-arch-aarch64/src/boot/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ pub unsafe fn boot_primary() -> ! {
oro_debug::init_with_offset(pat.offset());

// Initialize the primary core.
crate::init::initialize_primary(pfa);
crate::init::initialize_primary(pat.clone(), pfa);
let mut pfa = crate::init::KERNEL_STATE
.assume_init_ref()
.pfa()
Expand Down
16 changes: 13 additions & 3 deletions oro-arch-aarch64/src/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,22 @@ pub type Pfa = FiloPageFrameAllocator<OffsetTranslator>;

/// The global kernel state. Initialized once during boot
/// and re-used across all cores.
pub static mut KERNEL_STATE: MaybeUninit<KernelState<Pfa>> = MaybeUninit::uninit();
// NOTE(review): `static mut` + `MaybeUninit` is inherently unsafe; all access
// goes through `unsafe` and assumes `initialize_primary` has run exactly once
// on the boot CPU before any reader touches this — TODO confirm no other
// writer exists outside `initialize_primary`.
pub static mut KERNEL_STATE: MaybeUninit<
KernelState<
Pfa,
OffsetTranslator,
crate::mem::address_space::AddressSpaceLayout,
crate::sync::InterruptController,
>,
> = MaybeUninit::uninit();

/// Initializes the global state of the architecture.
///
/// # Safety
/// Must be called exactly once for the lifetime of the system,
/// only by the boot processor at boot time (_not_ at any
/// subsequent bringup).
pub unsafe fn initialize_primary(pfa: Pfa) {
pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: Pfa) {
#[cfg(debug_assertions)]
{
use core::sync::atomic::{AtomicBool, Ordering};
Expand All @@ -36,7 +43,10 @@ pub unsafe fn initialize_primary(pfa: Pfa) {
}
}

KERNEL_STATE.write(KernelState::new(UnfairCriticalSpinlock::new(pfa)));
KERNEL_STATE.write(
KernelState::new(pat, UnfairCriticalSpinlock::new(pfa))
.expect("failed to create global kernel state"),
);
}

/// Main boot sequence for all cores for each bringup
Expand Down
29 changes: 29 additions & 0 deletions oro-arch-aarch64/src/mem/segment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -308,6 +308,35 @@ unsafe impl AddressSegment<AddressSpaceHandle> for &'static Segment {
(start, end)
}

/// Provisions this segment's top-level (L0) page-table entries with
/// freshly allocated, zeroed page tables, without mapping any children.
///
/// Fails with [`MapError::Exists`] if any entry in the segment's valid
/// range is already valid, and [`MapError::OutOfMemory`] if a backing
/// frame cannot be allocated.
///
/// NOTE(review): on a mid-loop error, frames allocated (and entries
/// written) by earlier iterations are not freed or rolled back, even
/// though `A: PageFrameFree` is in scope — confirm whether a partial
/// provision is acceptable to callers or should be cleaned up.
fn provision_as_shared<A, P>(
&self,
space: &AddressSpaceHandle,
alloc: &mut A,
translator: &P,
) -> Result<(), MapError>
where
A: PageFrameAllocate + PageFrameFree,
P: Translator,
{
// SAFETY: `space.base_phys` is the physical base of this space's top-level
// table; the translator maps it into a writable virtual address. Exclusive
// access is assumed to be guaranteed by the caller — TODO confirm.
let top_level = unsafe { &mut *translator.translate_mut::<PageTable>(space.base_phys) };

// `valid_range` is inclusive on both ends.
for idx in self.valid_range.0..=self.valid_range.1 {
let entry = &mut top_level[idx];

if entry.valid() {
return Err(MapError::Exists);
}

let frame_phys_addr = alloc.allocate().ok_or(MapError::OutOfMemory)?;
// Zero the new table BEFORE publishing it in the entry, so no stale
// data is ever reachable through a valid descriptor.
unsafe {
(*translator.translate_mut::<PageTable>(frame_phys_addr)).reset();
}
*entry = self.l0_template.with_address(frame_phys_addr).into();
}

Ok(())
}

fn map<A, P>(
&self,
space: &AddressSpaceHandle,
Expand Down
2 changes: 1 addition & 1 deletion oro-arch-x86_64/src/boot/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ pub unsafe fn boot_primary() -> ! {
let lapic_id = lapic.id();
dbg!("local APIC ID: {lapic_id}");

crate::init::initialize_primary(pfa);
crate::init::initialize_primary(pat.clone(), pfa);
let mut pfa = crate::init::KERNEL_STATE
.assume_init_ref()
.pfa()
Expand Down
16 changes: 13 additions & 3 deletions oro-arch-x86_64/src/init.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,22 @@ pub type Pfa = FiloPageFrameAllocator<OffsetTranslator>;

/// The global kernel state. Initialized once during boot
/// and re-used across all cores.
pub static mut KERNEL_STATE: MaybeUninit<KernelState<Pfa>> = MaybeUninit::uninit();
// NOTE(review): `static mut` + `MaybeUninit` is inherently unsafe; all access
// goes through `unsafe` and assumes `initialize_primary` has run exactly once
// on the boot CPU before any reader touches this — TODO confirm no other
// writer exists outside `initialize_primary`.
pub static mut KERNEL_STATE: MaybeUninit<
KernelState<
Pfa,
OffsetTranslator,
crate::mem::address_space::AddressSpaceLayout,
crate::sync::InterruptController,
>,
> = MaybeUninit::uninit();

/// Initializes the global state of the architecture.
///
/// # Safety
/// Must be called exactly once for the lifetime of the system,
/// only by the boot processor at boot time (_not_ at any
/// subsequent bringup).
pub unsafe fn initialize_primary(pfa: Pfa) {
pub unsafe fn initialize_primary(pat: OffsetTranslator, pfa: Pfa) {
#[cfg(debug_assertions)]
{
use core::sync::atomic::{AtomicBool, Ordering};
Expand All @@ -36,7 +43,10 @@ pub unsafe fn initialize_primary(pfa: Pfa) {
}
}

KERNEL_STATE.write(KernelState::new(UnfairCriticalSpinlock::new(pfa)));
KERNEL_STATE.write(
KernelState::new(pat, UnfairCriticalSpinlock::new(pfa))
.expect("failed to create global kernel state"),
);
}

/// Main boot sequence for all cores for each bringup
Expand Down
66 changes: 29 additions & 37 deletions oro-arch-x86_64/src/mem/segment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -332,43 +332,6 @@ impl AddressSegment {
})
}

/// Maps the L4/L5 entry for the given segment range to
/// empty page tables, without mapping any children.
///
/// Intended to be used to create shared segments that are
/// otherwise empty, for later use.
///
/// # Errors
/// Returns [`MapError::Exists`] if any entry in the range is already
/// present, and [`MapError::OutOfMemory`] if a frame cannot be allocated.
///
/// # Safety
/// Must only be called once per segment range.
///
/// Does NOT invalidate the TLB.
pub unsafe fn make_top_level_present<A, P, Handle: MapperHandle>(
&self,
space: &Handle,
alloc: &mut A,
translator: &P,
) -> Result<(), MapError>
where
A: PageFrameAllocate,
P: Translator,
{
let top_level = &mut *translator.translate_mut::<PageTable>(space.base_phys());

// Inclusive range of top-level indices covered by this segment.
for idx in self.valid_range.0..=self.valid_range.1 {
let entry = &mut top_level[idx];

if entry.present() {
return Err(MapError::Exists);
}

let frame_phys_addr = alloc.allocate().ok_or(MapError::OutOfMemory)?;
// NOTE(review): the entry is made present BEFORE the new table is
// zeroed below, briefly exposing uninitialized table contents through
// a present descriptor — confirm no other core can observe this window.
*entry = self.entry_template.with_address(frame_phys_addr);
(*translator.translate_mut::<PageTable>(frame_phys_addr)).reset();
}

Ok(())
}

/// Unmaps the entire range's top level page tables without
/// reclaiming any of the physical memory.
///
Expand Down Expand Up @@ -411,6 +374,35 @@ unsafe impl Segment<AddressSpaceHandle> for &'static AddressSegment {
}
}

/// Provisions this segment's top-level (L4/L5) page-table entries with
/// freshly allocated, zeroed page tables, without mapping any children.
///
/// Fails with [`MapError::Exists`] if any entry in the segment's valid
/// range is already present, and [`MapError::OutOfMemory`] if a backing
/// frame cannot be allocated.
///
/// NOTE(review): on a mid-loop error, frames allocated (and entries
/// written) by earlier iterations are not freed or rolled back, even
/// though `A: PageFrameFree` is in scope — confirm whether a partial
/// provision is acceptable to callers or should be cleaned up.
fn provision_as_shared<A, P>(
&self,
space: &AddressSpaceHandle,
alloc: &mut A,
translator: &P,
) -> Result<(), MapError>
where
A: PageFrameAllocate + PageFrameFree,
P: Translator,
{
// SAFETY: `space.base_phys()` is the physical base of this space's
// top-level table; the translator maps it to a writable virtual address.
// Exclusive access is assumed guaranteed by the caller — TODO confirm.
let top_level = unsafe { &mut *translator.translate_mut::<PageTable>(space.base_phys()) };

// `valid_range` is inclusive on both ends.
for idx in self.valid_range.0..=self.valid_range.1 {
let entry = &mut top_level[idx];

if entry.present() {
return Err(MapError::Exists);
}

let frame_phys_addr = alloc.allocate().ok_or(MapError::OutOfMemory)?;
// Zero the new table BEFORE publishing it in the entry, so no stale
// data is ever reachable through a present descriptor.
unsafe {
(*translator.translate_mut::<PageTable>(frame_phys_addr)).reset();
}
*entry = self.entry_template.with_address(frame_phys_addr);
}

Ok(())
}

fn map<A, P>(
&self,
space: &AddressSpaceHandle,
Expand Down
105 changes: 91 additions & 14 deletions oro-kernel/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,25 +10,43 @@
#![allow(incomplete_features)]
#![feature(adt_const_params)]

use oro_mem::pfa::alloc::{PageFrameAllocate, PageFrameFree};
use oro_sync::spinlock::unfair_critical::UnfairCriticalSpinlock;
use oro_mem::{
mapper::{AddressSpace, MapError},
pfa::alloc::{PageFrameAllocate, PageFrameFree},
translate::Translator,
};
use oro_sync::spinlock::unfair_critical::{InterruptController, UnfairCriticalSpinlock};

pub mod id;
pub mod module;
pub mod port;
pub mod ring;

pub(crate) mod registry;

/// Core-local instance of the Oro kernel.
///
/// Intended to live on the core's respective stack,
/// living for the lifetime of the core (and destroyed
/// and re-created on core powerdown/subsequent bringup).
pub struct Kernel<Pfa: 'static> {
pub struct Kernel<Pfa, Pat, AddrSpace, IntCtrl>
where
// Pfa: the system-wide page frame allocator implementation.
Pfa: PageFrameAllocate + PageFrameFree + 'static,
// Pat: physical address translator (phys -> virt mapping strategy).
Pat: Translator,
// AddrSpace: the architecture's address space layout.
AddrSpace: AddressSpace + 'static,
// IntCtrl: interrupt controller used to guard critical spinlock sections.
IntCtrl: InterruptController + 'static,
{
/// Global reference to the shared kernel state.
state: &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl>,
}

impl<Pfa: 'static> Kernel<Pfa> {
impl<Pfa, Pat, AddrSpace, IntCtrl> Kernel<Pfa, Pat, AddrSpace, IntCtrl>
where
Pfa: PageFrameAllocate + PageFrameFree + 'static,
Pat: Translator,
AddrSpace: AddressSpace,
IntCtrl: InterruptController,
{
/// Creates a new core-local instance of the Kernel.
///
/// # Safety
Expand All @@ -40,37 +58,96 @@ impl<Pfa: 'static> Kernel<Pfa> {
/// The `state` given to the kernel must be shared for all
/// instances of the kernel that wish to partake in the same
/// Oro kernel universe.
pub unsafe fn new(state: &'static KernelState<Pfa>) -> Self {
pub unsafe fn new(state: &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl>) -> Self {
// Only stores the shared-state reference; all safety obligations are on
// the caller (see the `# Safety` section above).
Self { state }
}

/// Returns the underlying [`KernelState`] for this kernel instance.
#[must_use]
pub fn state(&self) -> &'static KernelState<Pfa> {
pub fn state(&self) -> &'static KernelState<Pfa, Pat, AddrSpace, IntCtrl> {
// The state reference is 'static, so it can outlive this core-local kernel.
self.state
}
}

/// Global state shared by all [`Kernel`] instances across
/// core boot/powerdown/bringup cycles.
pub struct KernelState<Pfa: 'static> {
pub struct KernelState<Pfa, Pat, AddrSpace, IntCtrl>
where
Pfa: PageFrameAllocate + PageFrameFree,
Pat: Translator,
AddrSpace: AddressSpace,
IntCtrl: InterruptController,
{
/// The shared, spinlocked page frame allocator (PFA) for the
/// entire system.
pfa: UnfairCriticalSpinlock<Pfa>,
pfa: UnfairCriticalSpinlock<Pfa>,
/// Ring registry.
ring_registry: registry::Registry<ring::Ring, IntCtrl, AddrSpace, Pat>,
}

impl<Pfa> KernelState<Pfa>
impl<Pfa, Pat, AddrSpace, IntCtrl> KernelState<Pfa, Pat, AddrSpace, IntCtrl>
where
Pfa: PageFrameAllocate + PageFrameFree + 'static,
Pfa: PageFrameAllocate + PageFrameFree,
Pat: Translator,
AddrSpace: AddressSpace,
IntCtrl: InterruptController,
{
/// Creates a new instance of the kernel state. Meant to be called
/// once for all cores at boot time.
pub fn new(pfa: UnfairCriticalSpinlock<Pfa>) -> Self {
Self { pfa }
///
/// # Safety
/// This function sets up shared page table mappings that MUST be
/// shared across cores. The caller MUST initialize the kernel
/// state (this struct) prior to booting _any other cores_
/// or else registry accesses will page fault.
///
/// # Errors
/// Returns a [`MapError`] if creating the ring registry or inserting
/// the root ring fails (e.g. out of memory or mapping conflicts).
#[allow(clippy::missing_panics_doc)]
pub unsafe fn new(pat: Pat, pfa: UnfairCriticalSpinlock<Pfa>) -> Result<Self, MapError> {
// Scope the PFA lock so the guard is dropped before `insert_permanent`
// below, which takes `&pfa` itself — presumably it re-locks internally,
// so holding the guard across that call would deadlock. TODO confirm.
let ring_registry = {
let mut pfa_lock = pfa.lock::<IntCtrl>();

registry::Registry::new(pat, &mut *pfa_lock, AddrSpace::kernel_ring_registry())?
};

// Install the root ring; it is its own parent (id 0, parent 0).
let root_ring_id = ring_registry.insert_permanent(
&pfa,
ring::Ring {
id: 0,
parent_id: 0,
},
)?;
// The registry is empty at this point, so the first permanent insert
// must hand back slot 0; anything else is a broken invariant.
assert_eq!(root_ring_id, 0, "root ring ID must be 0");

Ok(Self { pfa, ring_registry })
}

/// Returns the underlying PFA belonging to the kernel state.
pub fn pfa(&self) -> &UnfairCriticalSpinlock<Pfa> {
pub fn pfa(&'static self) -> &'static UnfairCriticalSpinlock<Pfa> {
// Requires a 'static receiver so the returned lock reference can be held
// for the lifetime of the system (the state itself lives in a static).
&self.pfa
}

/// Returns a [`registry::Handle`] to a [`ring::Ring`] by its ID,
/// or `None` if it does not exist.
///
/// # Safety
/// **DO NOT USE THIS FUNCTION FOR ANYTHING SECURITY RELATED.**
///
/// IDs are re-used by registries when items are dropped, so
/// multiple calls to this function with the same ID may return
/// handles to different ring items as the IDs get recycled.
///
/// In almost all cases, you should be using [`registry::Handle`]s
/// directly. They are also easier to work with than calling
/// this function.
pub unsafe fn ring_by_id(&'static self, id: usize) -> Option<registry::Handle<ring::Ring>> {
// Thin delegation to the registry; the unsafety is purely the ID-reuse
// contract documented above.
self.ring_registry.get(id)
}

/// Inserts a [`ring::Ring`] into the registry and returns
/// its [`registry::Handle`].
///
/// # Errors
/// Returns a [`MapError`] if the registry needs to grow its backing
/// mappings and that operation fails (e.g. out of memory).
pub fn insert_ring(
&'static self,
ring: ring::Ring,
) -> Result<registry::Handle<ring::Ring>, MapError> {
// The registry borrows the PFA to allocate backing frames as needed.
self.ring_registry.insert(&self.pfa, ring)
}
}
Loading

0 comments on commit 09a92a4

Please sign in to comment.