pmap: Skip some superpage promotion attempts that will fail
Implement a simple heuristic to skip pointless promotion attempts by
pmap_enter_quick_locked() and moea64_enter().  Specifically, when
vm_fault() calls pmap_enter_quick() to map neighboring pages at the end
of a copy-on-write fault, there is no point in attempting promotion in
pmap_enter_quick_locked() and moea64_enter().  Promotion will fail
because the base pages have differing protection.

Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D45431
MFC after:	1 week
alcriceedu committed Jun 4, 2024
1 parent 3b35e7e commit f1d73aa
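
For orientation, here is a condensed, standalone sketch of the heuristic described above. It is not the kernel code: the helpers prefault_prot() and try_promotion() and the demo in main() are invented for illustration, only the promotion-skip decision is modeled, and the flag values mirror sys/vm/vm.h.

/*
 * Illustrative, standalone sketch of the promotion-skip heuristic;
 * not FreeBSD kernel code.  Flag values follow sys/vm/vm.h.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_PROT_READ        0x01
#define VM_PROT_WRITE       0x02
#define VM_PROT_EXECUTE     0x04
#define VM_PROT_PRIV_FLAG   0x10
#define VM_PROT_NO_PROMOTE  VM_PROT_PRIV_FLAG

/* Stand-in for the promotion decision in pmap_enter_quick_locked(). */
static bool
try_promotion(unsigned prot, bool ptp_full, bool reserv_full)
{
    /* New check: the caller already knows promotion cannot succeed. */
    if ((prot & VM_PROT_NO_PROMOTE) != 0)
        return (false);
    /* Pre-existing conditions: PTP and reservation fully populated. */
    return (ptp_full && reserv_full);
}

/* Stand-in for the prot that vm_fault_prefault() passes for neighbors. */
static unsigned
prefault_prot(unsigned entry_prot, unsigned fault_prot)
{
    unsigned prot = entry_prot;

    /*
     * If the faulting page was just mapped writable (the end of a
     * copy-on-write fault), the prefaulted neighbors are entered with
     * different protection, so the superpage-sized range has mixed
     * protection and any promotion attempt would fail.
     */
    if ((fault_prot & VM_PROT_WRITE) != 0)
        prot |= VM_PROT_NO_PROMOTE;
    return (prot);
}

int
main(void)
{
    unsigned prot;

    /* Read fault: neighbors keep the plain protection; promotion may run. */
    prot = prefault_prot(VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ);
    printf("read fault:  attempt promotion = %d\n",
        try_promotion(prot, true, true));

    /* COW write fault: neighbors carry VM_PROT_NO_PROMOTE; skip it. */
    prot = prefault_prot(VM_PROT_READ | VM_PROT_WRITE, VM_PROT_WRITE);
    printf("write fault: attempt promotion = %d\n",
        try_promotion(prot, true, true));
    return (0);
}
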
Showing 7 changed files with 26 additions and 7 deletions.
3 changes: 2 additions & 1 deletion sys/amd64/amd64/pmap.c
@@ -7818,7 +7818,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* If both the PTP and the reservation are fully populated, then
* attempt promotion.
*/
- if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+ if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+     (mpte == NULL || mpte->ref_count == NPTEPG) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0) {
if (pde == NULL)
3 changes: 2 additions & 1 deletion sys/arm64/arm64/pmap.c
@@ -6052,7 +6052,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* If both the PTP and the reservation are fully populated, then
* attempt promotion.
*/
- if ((mpte == NULL || mpte->ref_count == NL3PG) &&
+ if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+     (mpte == NULL || mpte->ref_count == NL3PG) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0) {
if (l2 == NULL)
3 changes: 2 additions & 1 deletion sys/i386/i386/pmap.c
@@ -4250,7 +4250,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* If both the PTP and the reservation are fully populated, then
* attempt promotion.
*/
- if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+ if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+     (mpte == NULL || mpte->ref_count == NPTEPG) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0) {
if (pde == NULL)
9 changes: 7 additions & 2 deletions sys/powerpc/aim/mmu_oea64.c
@@ -1755,10 +1755,14 @@ moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
* If the VA of the entered page is not aligned with its PA,
* don't try page promotion as it is not possible.
* This reduces the number of promotion failures dramatically.
+ *
+ * Ignore VM_PROT_NO_PROMOTE unless PMAP_ENTER_QUICK_LOCKED.
*/
if (moea64_ps_enabled(pmap) && pmap != kernel_pmap && pvo != NULL &&
(pvo->pvo_vaddr & PVO_MANAGED) != 0 &&
(va & HPT_SP_MASK) == (pa & HPT_SP_MASK) &&
+     ((prot & VM_PROT_NO_PROMOTE) == 0 ||
+     (flags & PMAP_ENTER_QUICK_LOCKED) == 0) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0)
moea64_sp_promote(pmap, va, m);
@@ -1850,8 +1854,9 @@ moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{

- moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-     PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
+ moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE |
+     VM_PROT_NO_PROMOTE), PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED,
+     0);
}

vm_paddr_t
3 changes: 2 additions & 1 deletion sys/riscv/riscv/pmap.c
@@ -3519,7 +3519,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
* If both the PTP and the reservation are fully populated, then attempt
* promotion.
*/
- if ((mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
+ if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+     (mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
(m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0) {
if (l2 == NULL)
1 change: 1 addition & 0 deletions sys/vm/vm.h
@@ -76,6 +76,7 @@ typedef u_char vm_prot_t; /* protection codes */
#define VM_PROT_COPY ((vm_prot_t) 0x08) /* copy-on-read */
#define VM_PROT_PRIV_FLAG ((vm_prot_t) 0x10)
#define VM_PROT_FAULT_LOOKUP VM_PROT_PRIV_FLAG
+ #define VM_PROT_NO_PROMOTE VM_PROT_PRIV_FLAG
#define VM_PROT_QUICK_NOFAULT VM_PROT_PRIV_FLAG /* same to save bits */

#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
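
Note on the vm.h change above: VM_PROT_NO_PROMOTE reuses the same bit as VM_PROT_FAULT_LOOKUP and VM_PROT_QUICK_NOFAULT ("same to save bits"), which is safe because the private flag lies outside the real protection bits and each alias is only interpreted on its own call path. A minimal standalone illustration follows, assuming the standard vm_prot_t bit values from sys/vm/vm.h; it is not kernel code.

/* Standalone illustration of the private-flag aliasing; not kernel code. */
#include <assert.h>

#define VM_PROT_READ          0x01
#define VM_PROT_WRITE         0x02
#define VM_PROT_EXECUTE       0x04
#define VM_PROT_ALL           (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)
#define VM_PROT_PRIV_FLAG     0x10
#define VM_PROT_FAULT_LOOKUP  VM_PROT_PRIV_FLAG
#define VM_PROT_NO_PROMOTE    VM_PROT_PRIV_FLAG
#define VM_PROT_QUICK_NOFAULT VM_PROT_PRIV_FLAG

/* The shared private bit never collides with an actual permission. */
_Static_assert((VM_PROT_PRIV_FLAG & VM_PROT_ALL) == 0,
    "private flag must not overlap the protection bits");

int
main(void)
{
    /* Tagging a protection value leaves the permissions themselves intact. */
    unsigned prot = (VM_PROT_READ | VM_PROT_WRITE) | VM_PROT_NO_PROMOTE;

    assert((prot & VM_PROT_ALL) == (VM_PROT_READ | VM_PROT_WRITE));
    return (0);
}
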
11 changes: 10 additions & 1 deletion sys/vm/vm_fault.c
@@ -1891,6 +1891,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
vm_offset_t addr, starta;
vm_pindex_t pindex;
vm_page_t m;
+ vm_prot_t prot;
int i;

pmap = fs->map->pmap;
@@ -1906,6 +1907,14 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
if (starta < entry->start)
starta = entry->start;
}
+ prot = entry->protection;
+
+ /*
+  * If pmap_enter() has enabled write access on a nearby mapping, then
+  * don't attempt promotion, because it will fail.
+  */
+ if ((fs->prot & VM_PROT_WRITE) != 0)
+     prot |= VM_PROT_NO_PROMOTE;

/*
* Generate the sequence of virtual addresses that are candidates for
@@ -1949,7 +1958,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
}
if (vm_page_all_valid(m) &&
(m->flags & PG_FICTITIOUS) == 0)
- pmap_enter_quick(pmap, addr, m, entry->protection);
+ pmap_enter_quick(pmap, addr, m, prot);
if (!obj_locked || lobject != entry->object.vm_object)
VM_OBJECT_RUNLOCK(lobject);
}
