commit 23fbd28362ceb22ee2fb3bab899dda6bac129263 Author: Andriy Gapon Date: Fri Jul 6 20:17:03 2012 +0300 acpi_cpu: add a safe way of disabling acpi_cpu_idle method The new way ensures that there are no CPUs in the idle method after disable_idle() is executed. diff --git a/sys/dev/acpica/acpi_cpu.c b/sys/dev/acpica/acpi_cpu.c index 5aa4097..16445a8 100644 --- a/sys/dev/acpica/acpi_cpu.c +++ b/sys/dev/acpica/acpi_cpu.c @@ -139,6 +139,8 @@ static int cpu_quirks; /* Indicate any hardware bugs. */ /* Runtime state. */ static int cpu_disable_idle; /* Disable entry to idle function */ +static struct mtx cpu_disable_idle_lock; +MTX_SYSINIT(cpu_disable_idle, &cpu_disable_idle_lock, "cpuidle lock", MTX_SPIN); /* Values for sysctl. */ static struct sysctl_ctx_list cpu_sysctl_ctx; @@ -418,6 +420,42 @@ acpi_cpu_postattach(void *unused __unused) SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, acpi_cpu_postattach, NULL); +static void +disable_idle(void) +{ + mtx_lock_spin(&cpu_disable_idle_lock); + cpu_disable_idle = TRUE; + mtx_unlock_spin(&cpu_disable_idle_lock); + + /* + * Ensure that no CPU is in idle state or in acpi_cpu_idle(). + * Note that this code depends on the fact that the rendezvous IPI + * cannot penetrate a context where interrupts are disabled; acpi_cpu_idle + * is called and executed in such a context, and interrupts are re-enabled + * right before return. + */ + smp_rendezvous(smp_no_rendevous_barrier, NULL, smp_no_rendevous_barrier, NULL); +} + +static void +enable_idle(void) +{ + mtx_lock_spin(&cpu_disable_idle_lock); + cpu_disable_idle = FALSE; + mtx_unlock_spin(&cpu_disable_idle_lock); +} + +static int +is_idle_disabled(void) +{ + int ret; + + mtx_lock_spin(&cpu_disable_idle_lock); + ret = cpu_disable_idle; + mtx_unlock_spin(&cpu_disable_idle_lock); + return (ret); +} + /* * Disable any entry to the idle function during suspend and re-enable it * during resume. 
@@ -430,7 +468,7 @@ acpi_cpu_suspend(device_t dev) error = bus_generic_suspend(dev); if (error) return (error); - cpu_disable_idle = TRUE; + disable_idle(); return (0); } @@ -438,7 +476,7 @@ static int acpi_cpu_resume(device_t dev) { - cpu_disable_idle = FALSE; + enable_idle(); return (bus_generic_resume(dev)); } @@ -572,12 +610,13 @@ acpi_cpu_shutdown(device_t dev) bus_generic_shutdown(dev); /* - * Disable any entry to the idle function. There is a small race where - * an idle thread have passed this check but not gone to sleep. This - * is ok since device_shutdown() does not free the softc, otherwise - * we'd have to be sure all threads were evicted before returning. + * Disable any entry to the idle function. + */ + disable_idle(); + + /* + * TODO: properly free all resources. */ - cpu_disable_idle = TRUE; return_VALUE (0); } @@ -860,7 +899,7 @@ acpi_cpu_startup(void *arg) /* Take over idling from cpu_idle_default(). */ cpu_cx_lowest_lim = 0; - cpu_disable_idle = FALSE; + enable_idle(); cpu_idle_hook = acpi_cpu_idle; } @@ -925,7 +964,7 @@ acpi_cpu_idle() int bm_active, cx_next_idx, i; /* If disabled, return immediately. */ - if (cpu_disable_idle) { + if (is_idle_disabled()) { ACPI_ENABLE_IRQS(); return; }