Index: CURRENT/sys/amd64/amd64/pmap.c
===================================================================
--- CURRENT/sys/amd64/amd64/pmap.c	(revision 242515)
+++ CURRENT/sys/amd64/amd64/pmap.c	(working copy)
@@ -225,17 +225,8 @@ u_int64_t KPML4phys;	/* phys addr of kernel level
 static u_int64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
 static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
+static struct rwlock_padalign pvh_global_lock;
 
-#define	pvh_global_lock	pvh_global.lock
-
 /*
  * Data for the pv entry allocation mechanism
  */
Index: CURRENT/sys/i386/i386/pmap.c
===================================================================
--- CURRENT/sys/i386/i386/pmap.c	(revision 242515)
+++ CURRENT/sys/i386/i386/pmap.c	(working copy)
@@ -224,17 +224,8 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLF
 #define	PAT_INDEX_SIZE	8
 static int pat_index[PAT_INDEX_SIZE];	/* cache mode to PAT index conversion */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
+static struct rwlock_padalign pvh_global_lock;
 
-#define	pvh_global_lock	pvh_global.lock
-
 /*
  * Data for the pv entry allocation mechanism
  */
Index: CURRENT/sys/ia64/ia64/pmap.c
===================================================================
--- CURRENT/sys/ia64/ia64/pmap.c	(revision 242515)
+++ CURRENT/sys/ia64/ia64/pmap.c	(working copy)
@@ -214,17 +214,8 @@ static int pmap_ridmax;
 static uint64_t *pmap_ridmap;
 struct mtx pmap_ridmutex;
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
+static struct rwlock_padalign pvh_global_lock;
 
-#define	pvh_global_lock	pvh_global.lock
-
 /*
  * Data for the pv entry allocation mechanism
  */
Index: CURRENT/sys/mips/mips/pmap.c
===================================================================
--- CURRENT/sys/mips/mips/pmap.c	(revision 242515)
+++ CURRENT/sys/mips/mips/pmap.c	(working copy)
@@ -148,17 +148,8 @@ vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 
 static void pmap_asid_alloc(pmap_t pmap);
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
+static struct rwlock_padalign pvh_global_lock;
 
-#define	pvh_global_lock	pvh_global.lock
-
 /*
  * Data for the pv entry allocation mechanism
  */
Index: CURRENT/sys/powerpc/aim/mmu_oea.c
===================================================================
--- CURRENT/sys/powerpc/aim/mmu_oea.c	(revision 242515)
+++ CURRENT/sys/powerpc/aim/mmu_oea.c	(working copy)
@@ -200,17 +200,8 @@ struct pvo_head *moea_pvo_table;	/* pvo entries b
 struct	pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
 
-/*
- * Isolate the global pv list lock from data and other locks to prevent false
- * sharing within the cache.
- */
-static struct {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-} pvh_global __aligned(CACHE_LINE_SIZE);
+static struct rwlock_padalign pvh_global_lock;
 
-#define	pvh_global_lock	pvh_global.lock
-
 uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
 uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */
 
Index: CURRENT/sys/sparc64/include/pmap.h
===================================================================
--- CURRENT/sys/sparc64/include/pmap.h	(revision 242515)
+++ CURRENT/sys/sparc64/include/pmap.h	(working copy)
@@ -68,11 +68,6 @@ struct pmap {
 	struct	pmap_statistics pm_stats;
 };
 
-struct tte_list_lock {
-	struct rwlock	lock;
-	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
-};
-
 #define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
 #define	PMAP_LOCK_ASSERT(pmap, type)					\
 	mtx_assert(&(pmap)->pm_mtx, (type))
@@ -108,8 +103,7 @@ void	pmap_set_kctx(void);
 
 extern	struct pmap kernel_pmap_store;
 #define	kernel_pmap	(&kernel_pmap_store)
-extern	struct tte_list_lock tte_list_global;
-#define	tte_list_global_lock	tte_list_global.lock
+extern	struct rwlock_padalign tte_list_global_lock;
 extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
Index: CURRENT/sys/sparc64/sparc64/pmap.c
===================================================================
--- CURRENT/sys/sparc64/sparc64/pmap.c	(revision 242515)
+++ CURRENT/sys/sparc64/sparc64/pmap.c	(working copy)
@@ -129,12 +129,7 @@ vm_offset_t vm_max_kernel_address;
  */
 struct pmap kernel_pmap_store;
 
-/*
- * Isolate the global TTE list lock from data and other locks to prevent
- * false sharing within the cache (see also the declaration of struct
- * tte_list_lock).
- */
-struct tte_list_lock tte_list_global __aligned(CACHE_LINE_SIZE);
+struct rwlock_padalign tte_list_global_lock;
 
 /*
  * Allocate physical memory for use in pmap_bootstrap.
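
Note on the replacement type: every file above open-coded the same trick, wrapping
the lock in a struct padded out to CACHE_LINE_SIZE so that no other hot data lands
on the lock's cache line (false sharing). struct rwlock_padalign, declared in
sys/sys/_rwlock.h, centralizes that idiom. The sketch below contrasts the two
forms; the member layout of the padalign struct is quoted from memory of the
contemporary header, so treat it as illustrative rather than authoritative.

/*
 * Hand-rolled form being removed: __aligned() is applied to the
 * variable, which aligns the object but does not round its size up,
 * so the trailing padding member has to be spelled out by hand.
 */
static struct {
	struct rwlock	lock;
	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
} pvh_global __aligned(CACHE_LINE_SIZE);

#define	pvh_global_lock	pvh_global.lock

/*
 * Centralized form: __aligned() on the type makes the compiler both
 * align every instance and round sizeof(struct rwlock_padalign) up to
 * a multiple of CACHE_LINE_SIZE, so no explicit padding is needed.
 * The members must mirror struct rwlock exactly; the rwlock(9) macros
 * reach through to the mirrored members, so a pad-aligned lock is used
 * with the ordinary rw_rlock()/rw_wlock() calls, unchanged.
 */
struct rwlock_padalign {
	struct lock_object	lock_object;	/* common lock properties */
	volatile uintptr_t	rw_lock;	/* owner and flag bits */
} __aligned(CACHE_LINE_SIZE);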