diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index d4d4cd97cb3e..7efb427ada81 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -322,7 +322,7 @@ pmap_pku_mask_bit(pmap_t pmap)
 #define	NPV_LIST_LOCKS	MAXCPU
 
 #define	PHYS_TO_PV_LIST_LOCK(pa)	\
-			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+			(&pv_list_locks[pa_index(pa)])
 
 #define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
 	struct rwlock **_lockp = (lockp);		\
@@ -405,7 +405,7 @@ static int pmap_initialized;
  */
 static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
 static struct mtx __exclusive_cache_line pv_chunks_mutex;
-static struct rwlock __exclusive_cache_line pv_list_locks[NPV_LIST_LOCKS];
+static struct rwlock *pv_list_locks;
 static u_long pv_invl_gen[NPV_LIST_LOCKS];
 static struct md_page *pv_table;
 static struct md_page pv_dummy;
@@ -1903,17 +1903,17 @@ pmap_init(void)
 	 */
 	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
 
-	/*
-	 * Initialize the pool of pv list locks.
-	 */
-	for (i = 0; i < NPV_LIST_LOCKS; i++)
-		rw_init(&pv_list_locks[i], "pmap pv list");
-
 	/*
 	 * Calculate the size of the pv head table for superpages.
 	 */
 	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, NBPDR);
 
+	s = (vm_size_t)(pv_npg * sizeof(struct rwlock));
+	s = round_page(s);
+	pv_list_locks = (struct rwlock *)kmem_malloc(s, M_WAITOK | M_ZERO);
+	for (i = 0; i < pv_npg; i++)
+		rw_init(&pv_list_locks[i], "pmap pv list");
+
 	/*
 	 * Allocate memory for the pv head table for superpages.
 	 */
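
The change replaces the fixed pool of MAXCPU pv list locks, shared via "pa_index(pa) % NPV_LIST_LOCKS", with one lock per 2MB superpage, sized at boot from the end of the last physical segment. Below is a minimal userland sketch of the sizing and indexing arithmetic the pmap_init() hunk performs; PDRSHIFT, NBPDR, and PAGE_SIZE mirror the amd64 values, while the 16GB "end" address and the struct rwlock layout are made-up stand-ins for vm_phys_segs[vm_phys_nsegs - 1].end and the kernel type.

/*
 * Sketch of the pv list lock sizing done in the patched pmap_init().
 * Constants mirror amd64; "end" and struct rwlock are stand-ins.
 */
#include <stdio.h>

#define	PAGE_SIZE	4096UL			/* base page size */
#define	PDRSHIFT	21			/* log2 of a 2MB superpage */
#define	NBPDR		(1UL << PDRSHIFT)	/* bytes per superpage */

#define	howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define	round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define	pa_index(pa)	((pa) >> PDRSHIFT)	/* superpage index of a PA */

struct rwlock {
	unsigned long	opaque[4];	/* stand-in, not the kernel layout */
};

int
main(void)
{
	unsigned long end, pv_npg, s;

	end = 16UL << 30;		/* pretend the last segment ends at 16GB */
	pv_npg = howmany(end, NBPDR);	/* one lock per 2MB superpage */
	s = round_page(pv_npg * sizeof(struct rwlock));

	printf("pv_npg = %lu locks, allocation = %lu bytes\n", pv_npg, s);
	/* PHYS_TO_PV_LIST_LOCK now indexes directly, no "% NPV_LIST_LOCKS". */
	printf("lock index for pa 0x40200000 = %lu\n",
	    pa_index(0x40200000UL));
	return (0);
}

The trade-off shown by the sketch: under the old scheme two superpages whose indices collide modulo MAXCPU contend on the same rwlock, whereas the new scheme removes that false sharing at the cost of memory proportional to physical memory (here 8192 locks for 16GB, one per superpage).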