Skip to content

Add page attribute table support #548

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: next
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
89 changes: 89 additions & 0 deletions src/registers/model_specific.rs
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,10 @@ pub struct SCet;
#[derive(Debug)]
pub struct ApicBase;

/// IA32_PAT: Page Attribute Table.
///
/// The PAT extends the page-level cache-control bits by mapping each of the
/// eight PAT entries to a [`PatMemoryType`], which page-table entries can then
/// select. See [`Pat::MSR`] for the underlying model specific register.
#[derive(Debug)]
pub struct Pat;

impl Efer {
/// The underlying model specific register.
pub const MSR: Msr = Msr(0xC000_0080);
Expand Down Expand Up @@ -123,6 +127,22 @@ impl ApicBase {
pub const MSR: Msr = Msr(0x1B);
}

impl Pat {
    /// The underlying model specific register.
    pub const MSR: Msr = Msr(0x277);
    /// The default PAT configuration following a power up or reset of the processor.
    ///
    /// Index `i` of this array is PAT entry `i` (byte `i` of the MSR). Entries
    /// 4-7 mirror entries 0-3, which corresponds to the architecturally
    /// documented reset value `0x0007_0406_0007_0406` of IA32_PAT.
    pub const DEFAULT: [PatMemoryType; 8] = [
        // PAT0-PAT3
        PatMemoryType::WriteBack,
        PatMemoryType::WriteThrough,
        PatMemoryType::Uncacheable,
        PatMemoryType::StrongUncacheable,
        // PAT4-PAT7 (identical to PAT0-PAT3)
        PatMemoryType::WriteBack,
        PatMemoryType::WriteThrough,
        PatMemoryType::Uncacheable,
        PatMemoryType::StrongUncacheable,
    ];
}

bitflags! {
/// Flags of the Extended Feature Enable Register.
#[repr(transparent)]
Expand Down Expand Up @@ -189,6 +209,43 @@ bitflags! {
}
}

/// Memory types used in the [PAT](Pat).
///
/// Each variant's discriminant is the hardware encoding stored in one byte of
/// the IA32_PAT MSR. The encodings `0x02`, `0x03`, and everything above `0x07`
/// are reserved and have no corresponding variant.
#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
#[repr(u8)]
pub enum PatMemoryType {
    /// Uncacheable (UC).
    StrongUncacheable = 0x00,
    /// Uses a write combining (WC) cache policy.
    WriteCombining = 0x01,
    /// Uses a write through (WT) cache policy.
    WriteThrough = 0x04,
    /// Uses a write protected (WP) cache policy.
    WriteProtected = 0x05,
    /// Uses a write back (WB) cache policy.
    WriteBack = 0x06,
    /// Same as strong uncacheable, but can be overridden to be write combining by MTRRs (UC-).
    Uncacheable = 0x07,
}

impl PatMemoryType {
    /// Converts from bits, returning `None` if the value is invalid.
    pub const fn from_bits(bits: u8) -> Option<Self> {
        Some(match bits {
            0x00 => Self::StrongUncacheable,
            0x01 => Self::WriteCombining,
            0x04 => Self::WriteThrough,
            0x05 => Self::WriteProtected,
            0x06 => Self::WriteBack,
            0x07 => Self::Uncacheable,
            // Reserved encoding.
            _ => return None,
        })
    }

    /// Gets the underlying bits.
    pub const fn bits(self) -> u8 {
        self as u8
    }
}

#[cfg(all(feature = "instructions", target_arch = "x86_64"))]
mod x86_64 {
use super::*;
Expand Down Expand Up @@ -733,4 +790,36 @@ mod x86_64 {
}
}
}

impl Pat {
    /// Reads IA32_PAT.
    ///
    /// The PAT must be supported on the CPU, otherwise a general protection exception will
    /// occur. Support can be detected using the `cpuid` instruction.
    #[inline]
    pub fn read() -> [PatMemoryType; 8] {
        let raw = unsafe { Self::MSR.read() };
        // Each of the eight PAT entries occupies one byte of the MSR; reserved
        // encodings are never produced by hardware, hence the unwrap.
        raw.to_ne_bytes()
            .map(|entry| PatMemoryType::from_bits(entry).unwrap())
    }

    /// Writes IA32_PAT.
    ///
    /// The PAT must be supported on the CPU, otherwise a general protection exception will
    /// occur. Support can be detected using the `cpuid` instruction.
    ///
    /// # Safety
    ///
    /// All affected pages must be flushed from the TLB. Processor caches may also need to be
    /// flushed. Additionally, all pages that map to a given frame must have the same memory
    /// type.
    #[inline]
    pub unsafe fn write(table: [PatMemoryType; 8]) {
        // Pack the eight one-byte entries back into the 64-bit MSR value.
        let encoded = table.map(PatMemoryType::bits);
        let mut msr = Self::MSR;
        unsafe {
            msr.write(u64::from_ne_bytes(encoded));
        }
    }
}
}
14 changes: 7 additions & 7 deletions src/structures/paging/mapper/mapped_page_table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -484,9 +484,9 @@ impl<P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'_, P> {

let p1_entry = &mut p1[page.p1_index()];

let frame = p1_entry.frame().map_err(|err| match err {
let frame = p1_entry.frame(true).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
FrameError::HugeFrame => unreachable!(),
})?;
let flags = p1_entry.flags();

Expand All @@ -508,9 +508,9 @@ impl<P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'_, P> {

let p1_entry = &mut p1[page.p1_index()];

let frame = match p1_entry.frame() {
let frame = match p1_entry.frame(true) {
Ok(frame) => frame,
Err(FrameError::HugeFrame) => return Err(UnmapError::ParentEntryHugePage),
Err(FrameError::HugeFrame) => unreachable!(),
Err(FrameError::FrameNotPresent) => {
let cloned = p1_entry.clone();
p1_entry.set_unused();
Expand Down Expand Up @@ -763,7 +763,7 @@ impl<P: PageTableFrameMapping> CleanUp for MappedPageTable<'_, P> {
Page::range_inclusive(start, end),
frame_deallocator,
) {
let frame = entry.frame().unwrap();
let frame = entry.frame(false).unwrap();
entry.set_unused();
frame_deallocator.deallocate_frame(frame);
}
Expand Down Expand Up @@ -812,7 +812,7 @@ impl<P: PageTableFrameMapping> PageTableWalker<P> {
) -> Result<&'b PageTable, PageTableWalkError> {
let page_table_ptr = self
.page_table_frame_mapping
.frame_to_pointer(entry.frame()?);
.frame_to_pointer(entry.frame(false)?);
let page_table: &PageTable = unsafe { &*page_table_ptr };

Ok(page_table)
Expand All @@ -830,7 +830,7 @@ impl<P: PageTableFrameMapping> PageTableWalker<P> {
) -> Result<&'b mut PageTable, PageTableWalkError> {
let page_table_ptr = self
.page_table_frame_mapping
.frame_to_pointer(entry.frame()?);
.frame_to_pointer(entry.frame(false)?);
let page_table: &mut PageTable = unsafe { &mut *page_table_ptr };

Ok(page_table)
Expand Down
39 changes: 18 additions & 21 deletions src/structures/paging/mapper/recursive_page_table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ impl<'a> RecursivePageTable<'a> {
{
return Err(InvalidPageTable::NotRecursive);
}
if Ok(Cr3::read().0) != table[recursive_index].frame() {
if Ok(Cr3::read().0) != table[recursive_index].frame(false) {
return Err(InvalidPageTable::NotActive);
}

Expand Down Expand Up @@ -322,7 +322,7 @@ impl Mapper<Size1GiB> for RecursivePageTable<'_> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];

p4_entry.frame().map_err(|err| match err {
p4_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
Expand All @@ -349,7 +349,7 @@ impl Mapper<Size1GiB> for RecursivePageTable<'_> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];

p4_entry.frame().map_err(|err| match err {
p4_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
Expand Down Expand Up @@ -476,14 +476,14 @@ impl Mapper<Size2MiB> for RecursivePageTable<'_> {
) -> Result<(PhysFrame<Size2MiB>, PageTableFlags, MapperFlush<Size2MiB>), UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
p4_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
p3_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
Expand All @@ -509,14 +509,14 @@ impl Mapper<Size2MiB> for RecursivePageTable<'_> {
fn clear(&mut self, page: Page<Size2MiB>) -> Result<UnmappedFrame<Size2MiB>, UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
p4_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
p3_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;
Expand Down Expand Up @@ -672,31 +672,31 @@ impl Mapper<Size4KiB> for RecursivePageTable<'_> {
) -> Result<(PhysFrame<Size4KiB>, PageTableFlags, MapperFlush<Size4KiB>), UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
p4_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
p3_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
p2_entry.frame().map_err(|err| match err {
p2_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
let p1_entry = &mut p1[page.p1_index()];

let frame = p1_entry.frame().map_err(|err| match err {
let frame = p1_entry.frame(true).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
FrameError::HugeFrame => unreachable!(),
})?;
let flags = p1_entry.flags();

Expand All @@ -707,36 +707,36 @@ impl Mapper<Size4KiB> for RecursivePageTable<'_> {
fn clear(&mut self, page: Page<Size4KiB>) -> Result<UnmappedFrame<Size4KiB>, UnmapError> {
let p4 = &mut self.p4;
let p4_entry = &p4[page.p4_index()];
p4_entry.frame().map_err(|err| match err {
p4_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p3 = unsafe { &mut *(p3_ptr(page, self.recursive_index)) };
let p3_entry = &p3[page.p3_index()];
p3_entry.frame().map_err(|err| match err {
p3_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p2 = unsafe { &mut *(p2_ptr(page, self.recursive_index)) };
let p2_entry = &p2[page.p2_index()];
p2_entry.frame().map_err(|err| match err {
p2_entry.frame(false).map_err(|err| match err {
FrameError::FrameNotPresent => UnmapError::PageNotMapped,
FrameError::HugeFrame => UnmapError::ParentEntryHugePage,
})?;

let p1 = unsafe { &mut *(p1_ptr(page, self.recursive_index)) };
let p1_entry = &mut p1[page.p1_index()];

let frame = match p1_entry.frame() {
let frame = match p1_entry.frame(true) {
Ok(frame) => frame,
Err(FrameError::FrameNotPresent) => {
let cloned = p1_entry.clone();
p1_entry.set_unused();
return Ok(UnmappedFrame::NotPresent { entry: cloned });
}
Err(FrameError::HugeFrame) => return Err(UnmapError::ParentEntryHugePage),
Err(FrameError::HugeFrame) => unreachable!(),
};
let flags = p1_entry.flags();

Expand Down Expand Up @@ -939,9 +939,6 @@ impl Translate for RecursivePageTable<'_> {
if p1_entry.is_unused() {
return TranslateResult::NotMapped;
}
if p1_entry.flags().contains(PageTableFlags::HUGE_PAGE) {
panic!("level 1 entry has huge page bit set")
}

let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
Ok(frame) => frame,
Expand Down Expand Up @@ -1011,7 +1008,7 @@ impl CleanUp for RecursivePageTable<'_> {
!(level == PageTableLevel::Four && *i == recursive_index.into())
})
{
if let Ok(frame) = entry.frame() {
if let Ok(frame) = entry.frame(level == PageTableLevel::One) {
let start = VirtAddr::forward_checked_impl(
table_addr,
(offset_per_entry as usize) * i,
Expand Down
21 changes: 13 additions & 8 deletions src/structures/paging/page_table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,12 +64,12 @@ impl PageTableEntry {
///
/// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
/// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
/// `addr` function must be used)
/// `addr` function must be used) and `is_level_1_entry` is `false`
#[inline]
pub fn frame(&self) -> Result<PhysFrame, FrameError> {
pub fn frame(&self, is_level_1_entry: bool) -> Result<PhysFrame, FrameError> {
if !self.flags().contains(PageTableFlags::PRESENT) {
Err(FrameError::FrameNotPresent)
} else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
} else if !is_level_1_entry && self.flags().contains(PageTableFlags::HUGE_PAGE) {
Err(FrameError::HugeFrame)
} else {
Ok(PhysFrame::containing_address(self.addr()))
Expand All @@ -86,7 +86,6 @@ impl PageTableEntry {
/// Map the entry to the specified physical frame with the specified flags.
#[inline]
pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
self.set_addr(frame.start_address(), flags)
}

Expand Down Expand Up @@ -128,17 +127,21 @@ bitflags! {
/// Controls whether accesses from userspace (i.e. ring 3) are permitted.
const USER_ACCESSIBLE = 1 << 2;
/// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
/// policy is used.
/// policy is used. This is referred to as the page-level write-through (PWT) bit.
const WRITE_THROUGH = 1 << 3;
/// Disables caching for the pointed entry is cacheable.
/// Disables caching for the pointed entry if it is cacheable. This is referred to as the
/// page-level cache disable (PCD) bit.
const NO_CACHE = 1 << 4;
/// Set by the CPU when the mapped frame or page table is accessed.
const ACCESSED = 1 << 5;
/// Set by the CPU on a write to the mapped frame.
const DIRTY = 1 << 6;
/// Specifies that the entry maps a huge frame instead of a page table. Only allowed in
/// P2 or P3 tables.
/// Specifies that the entry maps a huge frame instead of a page table. This is the same bit
/// as `PAT_4KIB_PAGE`.
const HUGE_PAGE = 1 << 7;
/// This is the PAT bit for page table entries that point to 4KiB pages. This is the same
/// bit as `HUGE_PAGE`.
const PAT_4KIB_PAGE = 1 << 7;
/// Indicates that the mapping is present in all address spaces, so it isn't flushed from
/// the TLB on an address space switch.
const GLOBAL = 1 << 8;
Expand All @@ -148,6 +151,8 @@ bitflags! {
const BIT_10 = 1 << 10;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_11 = 1 << 11;
/// This is the PAT bit for page table entries that point to huge pages.
const PAT_HUGE_PAGE = 1 << 12;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
const BIT_52 = 1 << 52;
/// Available to the OS, can be used to store additional data, e.g. custom flags.
Expand Down
Loading