Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c	(revision 242394)
+++ sys/vm/vm_page.c	(working copy)
@@ -116,10 +116,10 @@ __FBSDID("$FreeBSD$");
  */
 
 struct vpgqueues vm_page_queues[PQ_COUNT];
-struct vpglocks vm_page_queue_lock;
-struct vpglocks vm_page_queue_free_lock;
+struct mtx_padalign vm_page_queue_mtx;
+struct mtx_padalign vm_page_queue_free_mtx;
 
-struct vpglocks pa_lock[PA_LOCK_COUNT];
+struct mtx_padalign pa_lock[PA_LOCK_COUNT];
 
 vm_page_t vm_page_array;
 long vm_page_array_size;
@@ -298,7 +298,7 @@ vm_page_startup(vm_offset_t vaddr)
 	    MTX_RECURSE);
 	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
-		mtx_init(&pa_lock[i].data, "vm page", NULL, MTX_DEF);
+		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 
 	/*
 	 * Initialize the queue headers for the hold queue, the active queue,
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h	(revision 242394)
+++ sys/vm/vm_page.h	(working copy)
@@ -187,14 +187,9 @@ struct vpgqueues {
 
 extern struct vpgqueues vm_page_queues[PQ_COUNT];
 
-struct vpglocks {
-	struct mtx	data;
-	char	pad[CACHE_LINE_SIZE - sizeof(struct mtx)];
-} __aligned(CACHE_LINE_SIZE);
+extern struct mtx_padalign vm_page_queue_free_mtx;
+extern struct mtx_padalign pa_lock[];
 
-extern struct vpglocks vm_page_queue_free_lock;
-extern struct vpglocks pa_lock[];
-
 #if defined(__arm__)
 #define	PDRSHIFT	PDR_SHIFT
 #elif !defined(PDRSHIFT)
@@ -202,7 +197,7 @@ extern struct vpgqueues vm_page_queues[PQ_COUNT];
 #endif
 
 #define	pa_index(pa)	((pa) >> PDRSHIFT)
-#define	PA_LOCKPTR(pa)	&pa_lock[pa_index((pa)) % PA_LOCK_COUNT].data
+#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
 #define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
 #define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
 #define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
@@ -235,8 +230,6 @@ extern struct vpgqueues vm_page_queues[PQ_COUNT];
 #define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr((m)), (a))
 #endif
 
-#define	vm_page_queue_free_mtx	vm_page_queue_free_lock.data
-
 /*
  * The vm_page's aflags are updated using atomic operations.  To set or clear
  * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
@@ -327,9 +320,8 @@ vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
 
 vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 
-extern struct vpglocks vm_page_queue_lock;
+extern struct mtx_padalign vm_page_queue_mtx;
 
-#define	vm_page_queue_mtx	vm_page_queue_lock.data
 #define	vm_page_lock_queues()	mtx_lock(&vm_page_queue_mtx)
 #define	vm_page_unlock_queues()	mtx_unlock(&vm_page_queue_mtx)
 
Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c	(revision 242394)
+++ sys/kern/sched_ule.c	(working copy)
@@ -228,8 +228,7 @@ struct tdq {
 	 * tdq_lock is padded to avoid false sharing with tdq_load and
 	 * tdq_cpu_idle.
 	 */
-	struct mtx	tdq_lock;	/* run queue lock. */
-	char		pad[64 - sizeof(struct mtx)];
+	struct mtx_padalign tdq_lock;	/* run queue lock. */
 	struct cpu_group *tdq_cg;	/* Pointer to cpu topology. */
 	volatile int	tdq_load;	/* Aggregate load. */
 	volatile int	tdq_cpu_idle;	/* cpu_idle() is active. */
@@ -292,7 +291,7 @@ static struct tdq	tdq_cpu;
 #define	TDQ_LOCK(t)		mtx_lock_spin(TDQ_LOCKPTR((t)))
 #define	TDQ_LOCK_FLAGS(t, f)	mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
 #define	TDQ_UNLOCK(t)		mtx_unlock_spin(TDQ_LOCKPTR((t)))
-#define	TDQ_LOCKPTR(t)		(&(t)->tdq_lock)
+#define	TDQ_LOCKPTR(t)		((struct mtx *)(&(t)->tdq_lock))
 
 static void sched_priority(struct thread *);
 static void sched_thread_priority(struct thread *, u_char);
Index: sys/kern/kern_timeout.c
===================================================================
--- sys/kern/kern_timeout.c	(revision 242394)
+++ sys/kern/kern_timeout.c	(working copy)
@@ -119,8 +119,8 @@ struct cc_mig_ent {
  * when the callout should be served.
 */
 struct callout_cpu {
+	struct mtx_padalign	cc_lock;
 	struct cc_mig_ent	cc_migrating_entity;
-	struct mtx		cc_lock;
 	struct callout		*cc_callout;
 	struct callout_tailq	*cc_callwheel;
 	struct callout_list	cc_callfree;
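
The patch above collapses two hand-rolled padding idioms (struct vpglocks in vm_page.h, and the explicit char pad[64 - sizeof(struct mtx)] in struct tdq) into the single struct mtx_padalign type, whose alignment pads each mutex out to a full cache line so that a contended lock and the hot fields adjacent to it never share a line. Below is a minimal userland sketch of the same false-sharing-avoidance idiom, offered only as an illustration: pthread_mutex_t stands in for struct mtx, and the names padded_mutex and pa_lockptr(), the PA_LOCK_COUNT of 32, the 64-byte CACHE_LINE_SIZE fallback, and the 21-bit shift standing in for PDRSHIFT are all assumptions of this sketch, not kernel code.

#include <assert.h>
#include <pthread.h>
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

#ifndef CACHE_LINE_SIZE
#define	CACHE_LINE_SIZE	64	/* assumed fallback; machine-dependent in the kernel */
#endif
#define	PA_LOCK_COUNT	32	/* illustrative table size */

/*
 * Counterpart of struct mtx_padalign: aligning the member (and thus the
 * struct) to the cache line size makes sizeof() round up to a whole
 * number of lines, so no explicit pad[] member is needed -- which is
 * exactly what lets the patch delete struct vpglocks' pad array.
 */
struct padded_mutex {
	alignas(CACHE_LINE_SIZE) pthread_mutex_t mtx;
};

static struct padded_mutex pa_lock[PA_LOCK_COUNT];

/*
 * Counterpart of PA_LOCKPTR(): hash a physical-address-like value to a
 * padded lock and hand back a plain mutex pointer.  The 21-bit shift
 * plays the role of PDRSHIFT and is an assumption of this sketch.
 */
static pthread_mutex_t *
pa_lockptr(uintptr_t pa)
{

	return (&pa_lock[(pa >> 21) % PA_LOCK_COUNT].mtx);
}

int
main(void)
{
	int i;

	/* Adjacent table slots must start on distinct cache lines. */
	assert(sizeof(struct padded_mutex) % CACHE_LINE_SIZE == 0);
	assert((uintptr_t)&pa_lock[1] - (uintptr_t)&pa_lock[0] >=
	    CACHE_LINE_SIZE);

	for (i = 0; i < PA_LOCK_COUNT; i++)
		pthread_mutex_init(&pa_lock[i].mtx, NULL);

	pthread_mutex_lock(pa_lockptr(0x200000));
	pthread_mutex_unlock(pa_lockptr(0x200000));
	printf("lock stride: %zu bytes\n", sizeof(struct padded_mutex));
	return (0);
}

Just as PA_LOCKPTR() and TDQ_LOCKPTR() in the patch gain casts to struct mtx *, pa_lockptr() here returns a plain pthread_mutex_t *, so the padded wrapper type never leaks into locking call sites.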