Skip to content

Commit

Permalink
arm64: Set the Guarded Page flag in the kernel
Browse files Browse the repository at this point in the history
Now the kernel and modules are built with branch protection we can
enable the Guarded Page flag in the page tables. This causes indirect
branches to a location without a correct landing pad instruction to
raise an exception.

This should help mitigate some attacks where a function pointer is
changed to point somewhere other than the start of the function,
however it doesn't stop an attacker pointing it to an unintended
function.

Reviewed by:	alc, scottph (both earlier version), markj
Sponsored by:	Arm Ltd
Sponsored by:	The FreeBSD Foundation (earlier version)
Differential Revision:	https://reviews.freebsd.org/D42080
  • Loading branch information
zxombie committed Nov 21, 2023
1 parent 0895751 commit 0e9ed7e
Show file tree
Hide file tree
Showing 3 changed files with 48 additions and 10 deletions.
6 changes: 6 additions & 0 deletions sys/arm64/arm64/locore.S
Original file line number Diff line number Diff line change
Expand Up @@ -691,6 +691,9 @@ LENTRY(build_l2_block_pagetable)
orr x12, x7, #L2_BLOCK
orr x12, x12, #(ATTR_DEFAULT)
orr x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
orr x12, x12, #(ATTR_S1_GP)
#endif

/* Only use the output address bits */
lsr x9, x9, #L2_SHIFT
Expand Down Expand Up @@ -760,6 +763,9 @@ LENTRY(build_l3_page_pagetable)
orr x12, x7, #L3_PAGE
orr x12, x12, #(ATTR_DEFAULT)
orr x12, x12, #(ATTR_S1_UXN)
#ifdef __ARM_FEATURE_BTI_DEFAULT
orr x12, x12, #(ATTR_S1_GP)
#endif

/* Only use the output address bits */
lsr x9, x9, #L3_SHIFT
Expand Down
51 changes: 41 additions & 10 deletions sys/arm64/arm64/pmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,12 @@
#define pmap_l1_pindex(v) (NUL2E + ((v) >> L1_SHIFT))
#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)

#define PMAP_SAN_PTE_BITS (ATTR_DEFAULT | ATTR_S1_XN | \
#ifdef __ARM_FEATURE_BTI_DEFAULT
#define ATTR_KERN_GP ATTR_S1_GP
#else
#define ATTR_KERN_GP 0
#endif
#define PMAP_SAN_PTE_BITS (ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP | \
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))

struct pmap_large_md_page {
Expand Down Expand Up @@ -474,6 +479,8 @@ static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);

static pt_entry_t pmap_pte_bti(pmap_t pmap, vm_offset_t va);

/*
* These load the old table data and store the new value.
* They need to be atomic as the System MMU may write to the table at
Expand Down Expand Up @@ -1080,7 +1087,7 @@ pmap_bootstrap_l2_block(struct pmap_bootstrap_state *state, int i)
MPASS((state->pa & L2_OFFSET) == 0);
MPASS(state->l2[l2_slot] == 0);
pmap_store(&state->l2[l2_slot], PHYS_TO_PTE(state->pa) |
ATTR_DEFAULT | ATTR_S1_XN |
ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
}
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
Expand Down Expand Up @@ -1115,7 +1122,7 @@ pmap_bootstrap_l3_page(struct pmap_bootstrap_state *state, int i)
MPASS((state->pa & L3_OFFSET) == 0);
MPASS(state->l3[l3_slot] == 0);
pmap_store(&state->l3[l3_slot], PHYS_TO_PTE(state->pa) |
ATTR_DEFAULT | ATTR_S1_XN |
ATTR_DEFAULT | ATTR_S1_XN | ATTR_KERN_GP |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L3_PAGE);
}
MPASS(state->va == (state->pa - dmap_phys_base + DMAP_MIN_ADDRESS));
Expand Down Expand Up @@ -1156,7 +1163,7 @@ pmap_bootstrap_dmap(vm_paddr_t min_pa)
&bs_state.l1[pmap_l1_index(bs_state.va)],
PHYS_TO_PTE(bs_state.pa) | ATTR_DEFAULT |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
ATTR_S1_XN | L1_BLOCK);
ATTR_S1_XN | ATTR_KERN_GP | L1_BLOCK);
}
MPASS(bs_state.pa <= physmap[i + 1]);

Expand Down Expand Up @@ -1988,7 +1995,7 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
("pmap_kenter: Mapping is not page-sized"));

attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
ATTR_S1_IDX(mode) | L3_PAGE;
ATTR_KERN_GP | ATTR_S1_IDX(mode) | L3_PAGE;
old_l3e = 0;
va = sva;
while (size != 0) {
Expand Down Expand Up @@ -2112,7 +2119,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
m = ma[i];
pa = VM_PAGE_TO_PHYS(m);
attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
ATTR_KERN_GP | ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
pte = pmap_l2_to_l3(pde, va);
old_l3e |= pmap_load_store(pte, PHYS_TO_PTE(pa) | attr);

Expand Down Expand Up @@ -4011,6 +4018,10 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
mask |= ATTR_S1_XN;
nbits |= ATTR_S1_XN;
}
if (pmap == kernel_pmap) {
mask |= ATTR_KERN_GP;
nbits |= ATTR_KERN_GP;
}
if (mask == 0)
return;

Expand Down Expand Up @@ -4439,7 +4450,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
new_l3 = (pt_entry_t)(PHYS_TO_PTE(pa) | ATTR_DEFAULT | L3_PAGE);
new_l3 |= pmap_pte_memattr(pmap, m->md.pv_memattr);
new_l3 |= pmap_pte_prot(pmap, prot);

if ((flags & PMAP_ENTER_WIRED) != 0)
new_l3 |= ATTR_SW_WIRED;
if (pmap->pm_stage == PM_STAGE1) {
Expand Down Expand Up @@ -4481,6 +4491,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,

lock = NULL;
PMAP_LOCK(pmap);
/* Wait until we lock the pmap to protect the bti rangeset */
new_l3 |= pmap_pte_bti(pmap, va);

if ((flags & PMAP_ENTER_LARGEPAGE) != 0) {
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("managed largepage va %#lx flags %#x", va, flags));
Expand Down Expand Up @@ -4749,6 +4762,7 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
new_l2 = (pd_entry_t)(PHYS_TO_PTE(VM_PAGE_TO_PHYS(m)) | ATTR_DEFAULT |
ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
L2_BLOCK);
new_l2 |= pmap_pte_bti(pmap, va);
if ((m->oflags & VPO_UNMANAGED) == 0) {
new_l2 |= ATTR_SW_MANAGED;
new_l2 &= ~ATTR_AF;
Expand Down Expand Up @@ -5120,6 +5134,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pa = VM_PAGE_TO_PHYS(m);
l3_val = PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
l3_val |= pmap_pte_bti(pmap, va);
if ((prot & VM_PROT_EXECUTE) == 0 ||
m->md.pv_memattr == VM_MEMATTR_DEVICE)
l3_val |= ATTR_S1_XN;
Expand Down Expand Up @@ -6565,7 +6580,8 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
l2 = pmap_l1_to_l2(pde, va);
old_l2e |= pmap_load_store(l2,
PHYS_TO_PTE(pa) | ATTR_DEFAULT | ATTR_S1_XN |
ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
ATTR_KERN_GP | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
L2_BLOCK);

va += L2_SIZE;
pa += L2_SIZE;
Expand Down Expand Up @@ -7837,6 +7853,19 @@ pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
}

static pt_entry_t
pmap_pte_bti(pmap_t pmap, vm_offset_t va __diagused)
{
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	MPASS(ADDR_IS_CANONICAL(va));

	/*
	 * Only stage 1 kernel mappings receive the Guarded Page
	 * attribute; everything else maps without it.
	 */
	if (pmap->pm_stage == PM_STAGE1 && pmap == kernel_pmap)
		return (ATTR_KERN_GP);

	return (0);
}

#if defined(KASAN)
static vm_paddr_t pmap_san_early_kernstart;
static pd_entry_t *pmap_san_early_l2;
Expand Down Expand Up @@ -8032,12 +8061,13 @@ sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
break;
}

sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c %6s %d %d %d %d\n",
sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c%c%c %6s %d %d %d %d\n",
range->sva, eva,
(range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
(range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
(range->attrs & ATTR_S1_UXN) != 0 ? '-' : 'X',
(range->attrs & ATTR_S1_AP(ATTR_S1_AP_USER)) != 0 ? 'u' : 's',
(range->attrs & ATTR_S1_GP) != 0 ? 'g' : '-',
mode, range->l1blocks, range->l2blocks, range->l3contig,
range->l3pages);

Expand Down Expand Up @@ -8087,7 +8117,8 @@ sysctl_kmaps_table_attrs(pd_entry_t table)
static pt_entry_t
sysctl_kmaps_block_attrs(pt_entry_t block)
{
	/* Attributes of interest when dumping kernel block mappings. */
	pt_entry_t mask;

	mask = ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK | ATTR_S1_GP;
	return (block & mask);
}

/*
Expand Down
1 change: 1 addition & 0 deletions sys/arm64/include/pte.h
Original file line number Diff line number Diff line change
Expand Up @@ -80,6 +80,7 @@ typedef uint64_t pt_entry_t; /* page table entry */

#define ATTR_CONTIGUOUS (1UL << 52)
#define ATTR_DBM (1UL << 51)
#define ATTR_S1_GP (1UL << 50)
#define ATTR_S1_nG (1 << 11)
#define ATTR_AF (1 << 10)
#define ATTR_SH(x) ((x) << 8)
Expand Down

0 comments on commit 0e9ed7e

Please sign in to comment.