diff --git a/src/registers/model_specific.rs b/src/registers/model_specific.rs index cf2dd34b..fa1902c9 100644 --- a/src/registers/model_specific.rs +++ b/src/registers/model_specific.rs @@ -67,6 +67,10 @@ pub struct UCet; #[derive(Debug)] pub struct SCet; +/// IA32_PAT: Page Attribute Table. +#[derive(Debug)] +pub struct Pat; + impl Efer { /// The underlying model specific register. pub const MSR: Msr = Msr(0xC000_0080); @@ -112,6 +116,22 @@ impl SCet { pub const MSR: Msr = Msr(0x6A2); } +impl Pat { + /// The underlying model specific register. + pub const MSR: Msr = Msr(0x277); + /// The default PAT configuration following a power up or reset of the processor. + pub const DEFAULT: [PatMemoryType; 8] = [ + PatMemoryType::WriteBack, + PatMemoryType::WriteThrough, + PatMemoryType::Uncacheable, + PatMemoryType::StrongUncacheable, + PatMemoryType::WriteBack, + PatMemoryType::WriteThrough, + PatMemoryType::Uncacheable, + PatMemoryType::StrongUncacheable, + ]; +} + bitflags! { /// Flags of the Extended Feature Enable Register. #[repr(transparent)] @@ -161,6 +181,43 @@ bitflags! { } } +#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)] +/// Memory types used in the [PAT](Pat). +#[repr(u8)] +pub enum PatMemoryType { + /// Uncacheable (UC). + StrongUncacheable = 0x00, + /// Uses a write combining (WC) cache policy. + WriteCombining = 0x01, + /// Uses a write through (WT) cache policy. + WriteThrough = 0x04, + /// Uses a write protected (WP) cache policy. + WriteProtected = 0x05, + /// Uses a write back (WB) cache policy. + WriteBack = 0x06, + /// Same as strong uncacheable, but can be overridden to be write combining by MTRRs (UC-). + Uncacheable = 0x07, +} +impl PatMemoryType { + /// Converts from bits, returning `None` if the value is invalid. 
+ pub const fn from_bits(bits: u8) -> Option<Self> { + match bits { + 0x00 => Some(Self::StrongUncacheable), + 0x01 => Some(Self::WriteCombining), + 0x04 => Some(Self::WriteThrough), + 0x05 => Some(Self::WriteProtected), + 0x06 => Some(Self::WriteBack), + 0x07 => Some(Self::Uncacheable), + _ => None, + } + } + + /// Gets the underlying bits. + pub const fn bits(self) -> u8 { + self as u8 + } +} + #[cfg(all(feature = "instructions", target_arch = "x86_64"))] mod x86_64 { use super::*; @@ -636,4 +693,36 @@ mod x86_64 { Self::write(flags, legacy_bitmap); } } + + impl Pat { + /// Reads IA32_PAT. + /// + /// The PAT must be supported on the CPU, otherwise a general protection exception will + /// occur. Support can be detected using the `cpuid` instruction. + #[inline] + pub fn read() -> [PatMemoryType; 8] { + unsafe { Self::MSR.read() } + .to_ne_bytes() + .map(|bits| PatMemoryType::from_bits(bits).unwrap()) + } + + /// Writes IA32_PAT. + /// + /// The PAT must be supported on the CPU, otherwise a general protection exception will + /// occur. Support can be detected using the `cpuid` instruction. + /// + /// # Safety + /// + /// All affected pages must be flushed from the TLB. Processor caches may also need to be + /// flushed. Additionally, all pages that map to a given frame must have the same memory + /// type. 
+ #[inline] + pub unsafe fn write(table: [PatMemoryType; 8]) { + let bits = u64::from_ne_bytes(table.map(PatMemoryType::bits)); + let mut msr = Self::MSR; + unsafe { + msr.write(bits); + } + } + } } diff --git a/src/structures/paging/mapper/mapped_page_table.rs b/src/structures/paging/mapper/mapped_page_table.rs index b50f072e..dfa3602e 100644 --- a/src/structures/paging/mapper/mapped_page_table.rs +++ b/src/structures/paging/mapper/mapped_page_table.rs @@ -421,7 +421,8 @@ impl Mapper for MappedPageTable<'_, P> { let frame = p1_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; p1_entry.set_unused(); @@ -711,6 +712,9 @@ impl PageTableWalker

{ &self, entry: &'b PageTableEntry, ) -> Result<&'b PageTable, PageTableWalkError> { + if entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(PageTableWalkError::MappedToHugePage); + } let page_table_ptr = self .page_table_frame_mapping .frame_to_pointer(entry.frame()?); @@ -729,6 +733,9 @@ impl PageTableWalker

{ &self, entry: &'b mut PageTableEntry, ) -> Result<&'b mut PageTable, PageTableWalkError> { + if entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(PageTableWalkError::MappedToHugePage); + } let page_table_ptr = self .page_table_frame_mapping .frame_to_pointer(entry.frame()?); @@ -832,7 +839,8 @@ impl From for PageTableWalkError { #[inline] fn from(err: FrameError) -> Self { match err { - FrameError::HugeFrame => PageTableWalkError::MappedToHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), FrameError::FrameNotPresent => PageTableWalkError::NotMapped, } } diff --git a/src/structures/paging/mapper/recursive_page_table.rs b/src/structures/paging/mapper/recursive_page_table.rs index ff427ffa..3906a1a5 100644 --- a/src/structures/paging/mapper/recursive_page_table.rs +++ b/src/structures/paging/mapper/recursive_page_table.rs @@ -322,9 +322,13 @@ impl Mapper for RecursivePageTable<'_> { let p4 = &mut self.p4; let p4_entry = &p4[page.p4_index()]; + if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(UnmapError::ParentEntryHugePage); + } p4_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) }; @@ -441,16 +445,24 @@ impl Mapper for RecursivePageTable<'_> { ) -> Result<(PhysFrame, MapperFlush), UnmapError> { let p4 = &mut self.p4; let p4_entry = &p4[page.p4_index()]; + if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(UnmapError::ParentEntryHugePage); + } p4_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) }; let p3_entry = 
&p3[page.p3_index()]; + if p3_entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(UnmapError::ParentEntryHugePage); + } p3_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) }; @@ -596,23 +608,35 @@ impl Mapper for RecursivePageTable<'_> { ) -> Result<(PhysFrame, MapperFlush), UnmapError> { let p4 = &mut self.p4; let p4_entry = &p4[page.p4_index()]; + if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(UnmapError::ParentEntryHugePage); + } p4_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) }; let p3_entry = &p3[page.p3_index()]; + if p3_entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(UnmapError::ParentEntryHugePage); + } p3_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) }; let p2_entry = &p2[page.p2_index()]; + if p2_entry.flags().contains(PageTableFlags::HUGE_PAGE) { + return Err(UnmapError::ParentEntryHugePage); + } p2_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) }; @@ -620,7 +644,8 @@ impl Mapper for RecursivePageTable<'_> { let frame = 
p1_entry.frame().map_err(|err| match err { FrameError::FrameNotPresent => UnmapError::PageNotMapped, - FrameError::HugeFrame => UnmapError::ParentEntryHugePage, + #[allow(deprecated)] + FrameError::HugeFrame => unreachable!(), })?; p1_entry.set_unused(); @@ -818,9 +843,6 @@ impl Translate for RecursivePageTable<'_> { if p1_entry.is_unused() { return TranslateResult::NotMapped; } - if p1_entry.flags().contains(PageTableFlags::HUGE_PAGE) { - panic!("level 1 entry has huge page bit set") - } let frame = match PhysFrame::from_start_address(p1_entry.addr()) { Ok(frame) => frame, @@ -890,6 +912,9 @@ impl CleanUp for RecursivePageTable<'_> { !(level == PageTableLevel::Four && *i == recursive_index.into()) }) { + if entry.flags().contains(PageTableFlags::HUGE_PAGE) { + continue; + } if let Ok(frame) = entry.frame() { let start = VirtAddr::forward_checked_impl( table_addr, diff --git a/src/structures/paging/page_table.rs b/src/structures/paging/page_table.rs index e9069bcc..6ee1a8dc 100644 --- a/src/structures/paging/page_table.rs +++ b/src/structures/paging/page_table.rs @@ -15,8 +15,8 @@ use bitflags::bitflags; pub enum FrameError { /// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame. FrameNotPresent, - /// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame - /// as return type, so a huge frame can't be returned. + #[deprecated = "`HugeFrame` is no longer returned by the `frame` method"] + /// The entry does have the `HUGE_PAGE` flag set. HugeFrame, } @@ -63,16 +63,12 @@ impl PageTableEntry { /// Returns the following errors: /// /// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set. 
- /// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the - /// `addr` function must be used) #[inline] pub fn frame(&self) -> Result<PhysFrame, FrameError> { - if !self.flags().contains(PageTableFlags::PRESENT) { - Err(FrameError::FrameNotPresent) - } else if self.flags().contains(PageTableFlags::HUGE_PAGE) { - Err(FrameError::HugeFrame) - } else { + if self.flags().contains(PageTableFlags::PRESENT) { + Ok(PhysFrame::containing_address(self.addr())) + } else { + Err(FrameError::FrameNotPresent) + } } @@ -86,7 +82,6 @@ impl PageTableEntry { /// Map the entry to the specified physical frame with the specified flags. #[inline] pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) { - assert!(!flags.contains(PageTableFlags::HUGE_PAGE)); self.set_addr(frame.start_address(), flags) } @@ -128,17 +123,21 @@ bitflags! { /// Controls whether accesses from userspace (i.e. ring 3) are permitted. const USER_ACCESSIBLE = 1 << 2; /// If this bit is set, a “write-through” policy is used for the cache, else a “write-back” - /// policy is used. + /// policy is used. This is referred to as the page-level write-through (PWT) bit. const WRITE_THROUGH = 1 << 3; - /// Disables caching for the pointed entry is cacheable. + /// Disables caching for the pointed entry if it is cacheable. This is referred to as the + /// page-level cache disable (PCD) bit. const NO_CACHE = 1 << 4; /// Set by the CPU when the mapped frame or page table is accessed. const ACCESSED = 1 << 5; /// Set by the CPU on a write to the mapped frame. const DIRTY = 1 << 6; - /// Specifies that the entry maps a huge frame instead of a page table. Only allowed in - /// P2 or P3 tables. + /// Specifies that the entry maps a huge frame instead of a page table. This is the same bit + /// as `PAT_4KIB_PAGE`. const HUGE_PAGE = 1 << 7; + /// This is the PAT bit for page table entries that point to 4KiB pages. This is the same + /// bit as `HUGE_PAGE`. 
+ const PAT_4KIB_PAGE = 1 << 7; /// Indicates that the mapping is present in all address spaces, so it isn't flushed from /// the TLB on an address space switch. const GLOBAL = 1 << 8; @@ -148,6 +147,8 @@ bitflags! { const BIT_10 = 1 << 10; /// Available to the OS, can be used to store additional data, e.g. custom flags. const BIT_11 = 1 << 11; + /// This is the PAT bit for page table entries that point to huge pages. + const PAT_HUGE_PAGE = 1 << 12; /// Available to the OS, can be used to store additional data, e.g. custom flags. const BIT_52 = 1 << 52; /// Available to the OS, can be used to store additional data, e.g. custom flags.