diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c
index 3bdb45e..7b15c8d 100644
--- a/sys/kern/kern_cpuset.c
+++ b/sys/kern/kern_cpuset.c
@@ -769,7 +769,31 @@ cpuset_init(void *arg)
 		panic("Can't set initial cpuset mask.\n");
 	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
 }
-SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL);
+SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_MIDDLE, cpuset_init, NULL);
+
+/*
+ * Apply a new mask to the root (zero) set.  CPU_SET_RDONLY is dropped
+ * for the duration of the update so that cpuset_modify() accepts the
+ * set.  cpuset_modify() acquires cpuset_lock internally (and may sleep
+ * in priv_check()), so the spin lock cannot be held across the call;
+ * the flag is toggled under the lock on either side instead.
+ */
+int
+cpuset_zero_modify(cpuset_t *mask)
+{
+	int err;
+
+	mtx_lock_spin(&cpuset_lock);
+	cpuset_zero->cs_flags &= ~CPU_SET_RDONLY;
+	mtx_unlock_spin(&cpuset_lock);
+
+	err = cpuset_modify(cpuset_zero, mask);
+
+	mtx_lock_spin(&cpuset_lock);
+	cpuset_zero->cs_flags |= CPU_SET_RDONLY;
+	mtx_unlock_spin(&cpuset_lock);
+	return (err);
+}
 
 #ifndef _SYS_SYSPROTO_H_
 struct cpuset_args {
diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h
index 854fa29..de1a7dd 100644
--- a/sys/sys/cpuset.h
+++ b/sys/sys/cpuset.h
@@ -54,6 +54,7 @@ typedef struct _cpuset {
 #define	CPU_COPY(f, t)			(void)(*(t) = *(f))
 #define	CPU_ISSET(n, p)			(((p)->__bits[(n)/_NCPUBITS] & __cpuset_mask(n)) != 0)
 #define	CPU_SET(n, p)			((p)->__bits[(n)/_NCPUBITS] |= __cpuset_mask(n))
+#define	CPU_SETMASK(mask, p)		((p)->__bits[0] = (mask))
 #define	CPU_ZERO(p) do {				\
 	__size_t __i;					\
 	for (__i = 0; __i < _NCPUWORDS; __i++)		\
@@ -178,6 +179,7 @@ extern cpuset_t *cpuset_root;
 
 struct prison;
 struct proc;
+int	cpuset_zero_modify(cpuset_t *mask);
 struct cpuset *cpuset_thread0(void);
 struct cpuset *cpuset_ref(struct cpuset *);
 void	cpuset_rel(struct cpuset *);
diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index d0b0abf..35d2051 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
 #ifdef GPROF
 #include <sys/gmon.h>
 #endif
+#include <sys/cpuset.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
@@ -1442,6 +1443,7 @@ SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
 static int
 sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
 {
+	cpuset_t mask_set;
 	cpumask_t mask;
 	int error;
 
@@ -1450,18 +1452,25 @@ sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
 	if (error || !req->newptr)
 		return (error);
 
+	if (!hyperthreading_allowed)
+		mask |= hyperthreading_cpus_mask;
+	if ((mask & all_cpus) == all_cpus)
+		mask &= ~(1<<0);
+
+	CPU_ZERO(&mask_set);
+	CPU_SETMASK(~mask & all_cpus, &mask_set);
+	error = cpuset_zero_modify(&mask_set);
+	if (error)
+		return (error);
+
 	if (logical_cpus_mask != 0 &&
 	    (mask & logical_cpus_mask) == logical_cpus_mask)
 		hlt_logical_cpus = 1;
 	else
 		hlt_logical_cpus = 0;
 
-	if (! hyperthreading_allowed)
-		mask |= hyperthreading_cpus_mask;
-
-	if ((mask & all_cpus) == all_cpus)
-		mask &= ~(1<<0);
 
 	hlt_cpus_mask = mask;
+
 	return (error);
 }
 SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
@@ -1471,6 +1480,8 @@ SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
 static int
 sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
 {
+	cpuset_t mask_set;
+	cpumask_t mask;
 	int disable, error;
 
 	disable = hlt_logical_cpus;
@@ -1478,17 +1489,24 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
 	if (error || !req->newptr)
 		return (error);
 
+	mask = hlt_cpus_mask;
 	if (disable)
-		hlt_cpus_mask |= logical_cpus_mask;
+		mask |= logical_cpus_mask;
 	else
-		hlt_cpus_mask &= ~logical_cpus_mask;
+		mask &= ~logical_cpus_mask;
 
-	if (! hyperthreading_allowed)
-		hlt_cpus_mask |= hyperthreading_cpus_mask;
+	if (!hyperthreading_allowed)
+		mask |= hyperthreading_cpus_mask;
 
-	if ((hlt_cpus_mask & all_cpus) == all_cpus)
-		hlt_cpus_mask &= ~(1<<0);
+	if ((mask & all_cpus) == all_cpus)
+		mask &= ~(1<<0);
 
+	CPU_ZERO(&mask_set);
+	CPU_SETMASK(~mask & all_cpus, &mask_set);
+	error = cpuset_zero_modify(&mask_set);
+	if (error != 0)
+		return (error);
+	hlt_cpus_mask = mask;
 	hlt_logical_cpus = disable;
 	return (error);
 }
@@ -1496,6 +1514,8 @@ sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
 static int
 sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
 {
+	cpuset_t mask_set;
+	cpumask_t mask;
 	int allowed, error;
 
 	allowed = hyperthreading_allowed;
@@ -1503,30 +1523,27 @@ sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
 	if (error || !req->newptr)
 		return (error);
 
-#ifdef SCHED_ULE
-	/*
-	 * SCHED_ULE doesn't allow enabling/disabling HT cores at
-	 * run-time.
-	 */
-	if (allowed != hyperthreading_allowed)
-		return (ENOTSUP);
-	return (error);
-#endif
-
+	mask = hlt_cpus_mask;
 	if (allowed)
-		hlt_cpus_mask &= ~hyperthreading_cpus_mask;
+		mask &= ~hyperthreading_cpus_mask;
 	else
-		hlt_cpus_mask |= hyperthreading_cpus_mask;
+		mask |= hyperthreading_cpus_mask;
 
 	if (logical_cpus_mask != 0 &&
-	    (hlt_cpus_mask & logical_cpus_mask) == logical_cpus_mask)
+	    (mask & logical_cpus_mask) == logical_cpus_mask)
 		hlt_logical_cpus = 1;
 	else
 		hlt_logical_cpus = 0;
 
-	if ((hlt_cpus_mask & all_cpus) == all_cpus)
-		hlt_cpus_mask &= ~(1<<0);
+	if ((mask & all_cpus) == all_cpus)
+		mask &= ~(1<<0);
 
+	CPU_ZERO(&mask_set);
+	CPU_SETMASK(~mask & all_cpus, &mask_set);
+	error = cpuset_zero_modify(&mask_set);
+	if (error != 0)
+		return (error);
+	hlt_cpus_mask = mask;
 	hyperthreading_allowed = allowed;
 	return (error);
 }
@@ -1534,6 +1551,7 @@ sysctl_hyperthreading_allowed(SYSCTL_HANDLER_ARGS)
 static void
 cpu_hlt_setup(void *dummy __unused)
 {
+	cpuset_t mask_set;
 
 	if (logical_cpus_mask != 0) {
 		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
@@ -1564,6 +1582,9 @@ cpu_hlt_setup(void *dummy __unused)
 			if (! hyperthreading_allowed)
 				hlt_cpus_mask |= hyperthreading_cpus_mask;
 		}
+		CPU_ZERO(&mask_set);
+		CPU_SETMASK(~hlt_cpus_mask & all_cpus, &mask_set);
+		(void)cpuset_zero_modify(&mask_set);
 	}
 }
 SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);