89 changes: 0 additions & 89 deletions src/registers/model_specific.rs
@@ -67,10 +67,6 @@ pub struct UCet;
#[derive(Debug)]
pub struct SCet;

/// IA32_PAT: Page Attribute Table.
#[derive(Debug)]
pub struct Pat;

/// IA32_APIC_BASE: status and location of the local APIC
///
/// IA32_APIC_BASE must be supported on the CPU, otherwise, a general protection exception will occur. Support can be detected using the `cpuid` instruction.
@@ -122,22 +118,6 @@ impl SCet {
pub const MSR: Msr = Msr(0x6A2);
}

impl Pat {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0x277);
/// The default PAT configuration following a power up or reset of the processor.
pub const DEFAULT: [PatMemoryType; 8] = [
PatMemoryType::WriteBack,
PatMemoryType::WriteThrough,
PatMemoryType::Uncacheable,
PatMemoryType::StrongUncacheable,
PatMemoryType::WriteBack,
PatMemoryType::WriteThrough,
PatMemoryType::Uncacheable,
PatMemoryType::StrongUncacheable,
];
}
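For reference, the removed DEFAULT table is exactly the architectural power-up value of IA32_PAT: entry i occupies byte i of the MSR. A quick check of that packing (a sketch, not taken from this PR):

```rust
// Sketch: the eight default entries, encoded as in the removed PatMemoryType
// enum and packed byte-per-entry (entry i = byte i, x86 is little-endian),
// give the architectural power-up value of IA32_PAT.
fn default_pat_value() -> u64 {
    let default_bytes = [
        0x06, 0x04, 0x07, 0x00, // WB, WT, UC-, UC
        0x06, 0x04, 0x07, 0x00, // WB, WT, UC-, UC
    ];
    let value = u64::from_le_bytes(default_bytes);
    assert_eq!(value, 0x0007_0406_0007_0406);
    value
}
```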

impl ApicBase {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0x1B);
@@ -192,43 +172,6 @@ bitflags! {
}
}

#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
/// Memory types used in the [PAT](Pat).
#[repr(u8)]
pub enum PatMemoryType {
/// Uncacheable (UC).
StrongUncacheable = 0x00,
/// Uses a write combining (WC) cache policy.
WriteCombining = 0x01,
/// Uses a write through (WT) cache policy.
WriteThrough = 0x04,
/// Uses a write protected (WP) cache policy.
WriteProtected = 0x05,
/// Uses a write back (WB) cache policy.
WriteBack = 0x06,
/// Same as strong uncacheable, but can be overridden to be write combining by MTRRs (UC-).
Uncacheable = 0x07,
}
impl PatMemoryType {
/// Converts from bits, returning `None` if the value is invalid.
pub const fn from_bits(bits: u8) -> Option<Self> {
match bits {
0x00 => Some(Self::StrongUncacheable),
0x01 => Some(Self::WriteCombining),
0x04 => Some(Self::WriteThrough),
0x05 => Some(Self::WriteProtected),
0x06 => Some(Self::WriteBack),
0x07 => Some(Self::Uncacheable),
_ => None,
}
}

/// Gets the underlying bits.
pub const fn bits(self) -> u8 {
self as u8
}
}

bitflags! {
/// Flags for the Advanced Programmable Interrupt Controler Base Register.
#[repr(transparent)]
@@ -724,38 +667,6 @@ mod x86_64 {
}
}

impl Pat {
/// Reads IA32_PAT.
///
/// The PAT must be supported on the CPU, otherwise a general protection exception will
/// occur. Support can be detected using the `cpuid` instruction.
#[inline]
pub fn read() -> [PatMemoryType; 8] {
unsafe { Self::MSR.read() }
.to_ne_bytes()
.map(|bits| PatMemoryType::from_bits(bits).unwrap())
}

/// Writes IA32_PAT.
///
/// The PAT must be supported on the CPU, otherwise a general protection exception will
/// occur. Support can be detected using the `cpuid` instruction.
///
/// # Safety
///
/// All affected pages must be flushed from the TLB. Processor caches may also need to be
/// flushed. Additionally, all pages that map to a given frame must have the same memory
/// type.
#[inline]
pub unsafe fn write(table: [PatMemoryType; 8]) {
let bits = u64::from_ne_bytes(table.map(PatMemoryType::bits));
let mut msr = Self::MSR;
unsafe {
msr.write(bits);
}
}
}
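With the dedicated `Pat` wrapper removed, code that still needs to program the PAT can go through the generic `Msr` type directly. A minimal sketch (not part of this PR, assuming the caller verifies PAT support via `cpuid` and handles TLB/cache flushing itself):

```rust
use x86_64::registers::model_specific::Msr;

// IA32_PAT lives at MSR 0x277; the generic `Msr` wrapper can still access it.
const IA32_PAT: u32 = 0x277;

fn read_pat() -> u64 {
    // Safety: caller must have checked PAT support via `cpuid`.
    unsafe { Msr::new(IA32_PAT).read() }
}

unsafe fn write_pat(value: u64) {
    // Safety: caller is responsible for flushing the TLB (and caches where
    // required) and for keeping all mappings of a frame at one memory type.
    let mut msr = Msr::new(IA32_PAT);
    unsafe { msr.write(value) };
}
```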

impl ApicBase {
/// Reads the IA32_APIC_BASE MSR.
#[inline]
12 changes: 2 additions & 10 deletions src/structures/paging/mapper/mapped_page_table.rs
@@ -421,8 +421,7 @@ impl<P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'_, P> {

let frame = p1_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

p1_entry.set_unused();
@@ -712,9 +711,6 @@ impl<P: PageTableFrameMapping> PageTableWalker<P> {
&self,
entry: &'b PageTableEntry,
) -> Result<&'b PageTable, PageTableWalkError> {
if entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(PageTableWalkError::MappedToHugePage);
}
let page_table_ptr = self
.page_table_frame_mapping
.frame_to_pointer(entry.frame()?);
@@ -733,9 +729,6 @@ impl<P: PageTableFrameMapping> PageTableWalker<P> {
&self,
entry: &'b mut PageTableEntry,
) -> Result<&'b mut PageTable, PageTableWalkError> {
if entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(PageTableWalkError::MappedToHugePage);
}
let page_table_ptr = self
.page_table_frame_mapping
.frame_to_pointer(entry.frame()?);
@@ -839,8 +832,7 @@ impl From<FrameError> for PageTableWalkError {
#[inline]
fn from(err: FrameError) -> Self {
match err {
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => PageTableWalkError::MappedToHugePage,
FrameError::FrameNotPresent => PageTableWalkError::NotMapped,
}
}
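With `FrameError::HugeFrame` mapped to real errors instead of `unreachable!()`, unmapping a 4 KiB page that is actually covered by a huge mapping now surfaces as `UnmapError::ParentEntryHugePage`. A caller-side sketch (hypothetical helper, not from this PR):

```rust
use x86_64::structures::paging::{mapper::UnmapError, Mapper, Page, Size4KiB};

/// Hypothetical helper: unmap a 4 KiB page, tolerating pages that were never
/// mapped, but surfacing huge-page conflicts to the caller.
fn try_unmap_4kib(
    mapper: &mut impl Mapper<Size4KiB>,
    page: Page<Size4KiB>,
) -> Result<(), UnmapError> {
    match mapper.unmap(page) {
        Ok((_frame, flush)) => {
            flush.flush();
            Ok(())
        }
        // Nothing was mapped here; treat as success in this sketch.
        Err(UnmapError::PageNotMapped) => Ok(()),
        // A parent P2/P3 entry maps a huge page covering this address; with
        // this change it is reported instead of triggering `unreachable!()`.
        Err(err @ UnmapError::ParentEntryHugePage) => Err(err),
        Err(err) => Err(err),
    }
}
```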
45 changes: 10 additions & 35 deletions src/structures/paging/mapper/recursive_page_table.rs
@@ -322,13 +322,9 @@ impl Mapper<Size1GiB> for RecursivePageTable<'_> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];

if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
@@ -445,24 +441,16 @@ impl Mapper<Size2MiB> for RecursivePageTable<'_> {
) -> Result<(PhysFrame<Size2MiB>, MapperFlush<Size2MiB>), UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
@@ -608,44 +596,31 @@ impl Mapper<Size4KiB> for RecursivePageTable<'_> {
) -> Result<(PhysFrame<Size4KiB>, MapperFlush<Size4KiB>), UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
if p4_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
p4_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
if p3_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
p3_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
if p2_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
return Err(UnmapError::ParentEntryHugePage);
}
p2_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
let p1_entry = &mut p1[page.p1_index()];

let frame = p1_entry.frame().map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
#[allow(deprecated)]
FrameError::HugeFrame => unreachable!(),
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

p1_entry.set_unused();
@@ -843,6 +818,9 @@ impl Translate for RecursivePageTable<'_> {
if p1_entry.is_unused() {
return TranslateResult::NotMapped;
}
if p1_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
panic!("level 1 entry has huge page bit set")
}

let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
Ok(frame) => frame,
@@ -912,9 +890,6 @@ impl CleanUp for RecursivePageTable<'_> {
!(level == PageTableLevel::Four && *i == recursive_index.into())
})
{
if entry.flags().contains(PageTableFlags::HUGE_PAGE) {
continue;
}
if let Ok(frame) = entry.frame() {
let start = VirtAddr::forward_checked_impl(
table_addr,
29 changes: 14 additions & 15 deletions src/structures/paging/page_table.rs
@@ -15,8 +15,8 @@ use bitflags::bitflags;
pub enum FrameError {
/// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
FrameNotPresent,
#[deprecated = "`HugeFrame` is no longer returned by the `frame` method"]
/// The entry does have the `HUGE_PAGE` flag set.
/// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
/// as return type, so a huge frame can't be returned.
HugeFrame,
}

@@ -63,12 +63,16 @@ impl PageTableEntry {
/// Returns the following errors:
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
/// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
/// `addr` function must be used)
#[inline]
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
if self.flags().contains(PageTableFlags::PRESENT) {
Ok(PhysFrame::containing_address(self.addr()))
} else {
if !self.flags().contains(PageTableFlags::PRESENT) {
Err(FrameError::FrameNotPresent)
} else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
}
}
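With the reordered checks, callers can tell "not present" from "mapped by a huge page" via the error alone, without inspecting flags themselves. A small sketch of classifying a single entry (hypothetical helper, not from this PR):

```rust
use x86_64::structures::paging::page_table::{FrameError, PageTableEntry};
use x86_64::{structures::paging::PhysFrame, PhysAddr};

/// Hypothetical classification of one entry based on the new `frame()` behavior.
enum EntryKind {
    NotMapped,
    /// Points to a next-level page table (or a 4 KiB frame in a P1 table).
    Table(PhysFrame),
    /// Maps a huge page; its start address must be read via `addr()`.
    Huge(PhysAddr),
}

fn classify(entry: &PageTableEntry) -> EntryKind {
    match entry.frame() {
        Ok(frame) => EntryKind::Table(frame),
        Err(FrameError::FrameNotPresent) => EntryKind::NotMapped,
        Err(FrameError::HugeFrame) => EntryKind::Huge(entry.addr()),
    }
}
```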

@@ -82,6 +86,7 @@ impl PageTableEntry {
/// Map the entry to the specified physical frame with the specified flags.
#[inline]
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
self.set_addr(frame.start_address(), flags)
}
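Since `set_frame` takes a plain `PhysFrame` (i.e. `Size4KiB`), the new assertion rules it out for huge mappings; those go through `set_addr` instead. A sketch (illustrative values, not from this PR):

```rust
use x86_64::structures::paging::{
    page_table::PageTableEntry, PageTableFlags, PhysFrame, Size2MiB,
};

fn set_2mib_entry(entry: &mut PageTableEntry, frame: PhysFrame<Size2MiB>) {
    // `set_frame` would panic here because of the HUGE_PAGE assertion; huge
    // mappings are installed via `set_addr` with the raw start address.
    let flags =
        PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::HUGE_PAGE;
    entry.set_addr(frame.start_address(), flags);
}
```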

@@ -123,21 +128,17 @@ bitflags! {
/// Controls whether accesses from userspace (i.e. ring 3) are permitted.
const USER_ACCESSIBLE = 1 << 2;
/// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
/// policy is used. This referred to as the page-level write-through (PWT) bit.
/// policy is used.
const WRITE_THROUGH = 1 << 3;
/// Disables caching for the pointed entry if it is cacheable. This referred to as the
/// page-level cache disable (PCD) bit.
/// Disables caching for the pointed entry is cacheable.
const NO_CACHE = 1 << 4;
/// Set by the CPU when the mapped frame or page table is accessed.
const ACCESSED = 1 << 5;
/// Set by the CPU on a write to the mapped frame.
const DIRTY = 1 << 6;
/// Specifies that the entry maps a huge frame instead of a page table. This is the same bit
/// as `PAT_4KIB_PAGE`.
/// Specifies that the entry maps a huge frame instead of a page table. Only allowed in
/// P2 or P3 tables.
const HUGE_PAGE = 1 << 7;
/// This is the PAT bit for page table entries that point to 4KiB pages. This is the same
/// bit as `HUGE_PAGE`.
const PAT_4KIB_PAGE = 1 << 7;
/// Indicates that the mapping is present in all address spaces, so it isn't flushed from
/// the TLB on an address space switch.
const GLOBAL = 1 << 8;
@@ -147,8 +148,6 @@ bitflags! {
const BIT_10 = 1 << 10;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_11 = 1 << 11;
/// This is the PAT bit for page table entries that point to huge pages.
const PAT_HUGE_PAGE = 1 << 12;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_52 = 1 << 52;
/// Available to the OS, can be used to store additional data, e.g. custom flags.