diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 5ec1ac97a0d7..db9c1aecf529 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -325,8 +325,14 @@ pmap_pku_mask_bit(pmap_t pmap)
 #if VM_NRESERVLEVEL > 0
 #define	pa_to_pmdp(pa)	(&pv_table[pa_index(pa)])
 #define	pa_to_pvh(pa)	(&(pa_to_pmdp(pa)->pv_page))
-#define	PHYS_TO_PV_LIST_LOCK(pa)	\
-			(&(pa_to_pmdp(pa)->pv_lock))
+#define	PHYS_TO_PV_LIST_LOCK(pa)	({			\
+	struct rwlock *_lock;					\
+	if (__predict_false((pa) > pmap_last_pa))		\
+		_lock = &pv_dummy_large.pv_lock;		\
+	else							\
+		_lock = &(pa_to_pmdp(pa)->pv_lock);		\
+	_lock;							\
+})
 #else
 #define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
 
@@ -349,8 +355,9 @@ pmap_pku_mask_bit(pmap_t pmap)
 	}						\
 } while (0)
 
-#define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
-			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
+#define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	do {	\
+	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m));\
+} while (0)
 
 #define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
 	struct rwlock **_lockp = (lockp);		\
@@ -416,19 +423,21 @@ static int pmap_initialized;
  */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
 static struct mtx __exclusive_cache_line pv_chunks_mutex;
-#if VM_NRESERVLEVEL > 0
 struct pmap_large_md_page {
 	struct rwlock   pv_lock;
 	struct md_page  pv_page;
 	u_long pv_invl_gen;
 };
-static struct pmap_large_md_page *pv_table;
+__exclusive_cache_line static struct pmap_large_md_page pv_dummy_large;
+#define pv_dummy pv_dummy_large.pv_page
+#if VM_NRESERVLEVEL > 0
+__read_mostly static struct pmap_large_md_page *pv_table;
+__read_mostly vm_paddr_t pmap_last_pa;
 #else
 static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
 static u_long pv_invl_gen[NPV_LIST_LOCKS];
 static struct md_page *pv_table;
 #endif
-static struct md_page pv_dummy;
 
 /*
  * All those kernel PT submaps that BSD is so fond of
@@ -1851,13 +1860,16 @@ pmap_init_pv_table(void)
 	/*
 	 * Calculate the size of the array.
 	 */
-	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
+	pmap_last_pa = vm_phys_segs[vm_phys_nsegs - 1].end;
+	pv_npg = howmany(pmap_last_pa, NBPDR);
 	s = (vm_size_t)pv_npg * sizeof(struct pmap_large_md_page);
 	s = round_page(s);
 	pv_table = (struct pmap_large_md_page *)kva_alloc(s);
 	if (pv_table == NULL)
 		panic("%s: kva_alloc failed\n", __func__);
 
+	printf("%s: pv_table %p size %lx (%lx)\n", __func__, pv_table, s, s / (sizeof(*pvd)));
+
 	/*
 	 * Iterate physical segments to allocate space for respective pages.
 	 */
@@ -1894,12 +1906,18 @@ pmap_init_pv_table(void)
 			pvd++;
 		}
 	}
-	TAILQ_INIT(&pv_dummy.pv_list);
+	pvd = &pv_dummy_large;
+	rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
+	TAILQ_INIT(&pvd->pv_page.pv_list);
+	pvd->pv_page.pv_gen = 0;
+	pvd->pv_page.pat_mode = 0;
+	pvd->pv_invl_gen = 0;
 }
 #else
 static void
 pmap_init_pv_table(void)
 {
+	struct pmap_large_md_page *pvd;
 	vm_size_t s;
 	long i, pv_npg;
 
@@ -1922,7 +1940,13 @@ pmap_init_pv_table(void)
 	pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
 	for (i = 0; i < pv_npg; i++)
 		TAILQ_INIT(&pv_table[i].pv_list);
-	TAILQ_INIT(&pv_dummy.pv_list);
+	pvd = &pv_dummy_large;
+	rw_init_flags(&pvd->pv_lock, "pmap pv list dummy", RW_NEW);
+	TAILQ_INIT(&pvd->pv_page.pv_list);
+	pvd->pv_page.pv_gen = 0;
+	pvd->pv_page.pat_mode = 0;
+	pvd->pv_invl_gen = 0;
+
 }
 #endif
 
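
The following is a standalone userspace sketch, not part of the patch, illustrating the lock-selection shape the new PHYS_TO_PV_LIST_LOCK() uses: a GCC statement expression that returns the per-2MB-chunk lock for addresses covered by the table and falls back to a single shared dummy lock (the role pv_dummy_large plays above) for addresses past the covered range. All names here (slot_table, dummy_slot, last_covered_pa, PA_TO_LOCK, CHUNK_SHIFT, NSLOTS) are invented for the example and pthread_rwlock_t stands in for the kernel rwlock; only the macro shape mirrors the diff.

/* Build with: cc -O2 -pthread sketch.c (GCC/Clang; uses statement expressions). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define	CHUNK_SHIFT	21		/* 2MB chunks, analogous to NBPDR */
#define	NSLOTS		16		/* covered range: NSLOTS * 2MB */

struct slot {
	pthread_rwlock_t lock;
	int npages;			/* stand-in for the pv list */
};

static struct slot slot_table[NSLOTS];
static struct slot dummy_slot;		/* plays the role of pv_dummy_large */
static uint64_t last_covered_pa = ((uint64_t)NSLOTS << CHUNK_SHIFT) - 1;

/*
 * Same shape as the patched PHYS_TO_PV_LIST_LOCK(): a statement expression
 * with an unlikely branch (__builtin_expect in place of __predict_false)
 * that selects the dummy lock for addresses beyond the table.
 */
#define	PA_TO_LOCK(pa) ({						\
	struct slot *_s;						\
	if (__builtin_expect((pa) > last_covered_pa, 0))		\
		_s = &dummy_slot;					\
	else								\
		_s = &slot_table[(pa) >> CHUNK_SHIFT];			\
	&_s->lock;							\
})

int
main(void)
{
	uint64_t in_range = (uint64_t)3 << CHUNK_SHIFT;
	uint64_t out_of_range = last_covered_pa + 1;

	for (int i = 0; i < NSLOTS; i++)
		pthread_rwlock_init(&slot_table[i].lock, NULL);
	pthread_rwlock_init(&dummy_slot.lock, NULL);

	/* Covered address: takes the per-chunk lock. */
	pthread_rwlock_wrlock(PA_TO_LOCK(in_range));
	pthread_rwlock_unlock(PA_TO_LOCK(in_range));

	/* Uncovered address: silently shares the dummy lock. */
	printf("out-of-range uses dummy: %d\n",
	    PA_TO_LOCK(out_of_range) == &dummy_slot.lock);
	return (0);
}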