--- sys/amd64/conf/NOTES	15 Aug 2007 19:26:02 -0000	1.69
+++ sys/amd64/conf/NOTES	18 Sep 2007 17:45:23 -0000
@@ -276,12 +276,9 @@
 # kernel environment variables to select initial debugging levels for the
 # Intel ACPICA code.  (Note that the Intel code must also have USE_DEBUGGER
 # defined when it is built).
-#
-# ACPI_NO_SEMAPHORES makes the AcpiOs*Semaphore routines a no-op.
 
 device		acpi
 options 	ACPI_DEBUG
-#!options 	ACPI_NO_SEMAPHORES
 
 # The cpufreq(4) driver provides support for non-ACPI CPU frequency control
 device		cpufreq
--- sys/conf/options	12 Sep 2007 07:43:42 -0000	1.606
+++ sys/conf/options	18 Sep 2007 17:45:24 -0000
@@ -636,8 +636,6 @@
 
 # options for ACPI support
 ACPI_DEBUG		opt_acpi.h
-ACPI_MAX_THREADS	opt_acpi.h
-ACPI_NO_SEMAPHORES	opt_acpi.h
 
 # ISA support
 DEV_ISA			opt_isa.h
--- sys/contrib/dev/acpica/acenv.h	22 Mar 2007 17:58:27 -0000	1.13
+++ sys/contrib/dev/acpica/acenv.h	18 Sep 2007 17:45:26 -0000
@@ -163,6 +163,9 @@
 #ifdef ACPI_APPLICATION
 #define ACPI_USE_SYSTEM_CLIBRARY
 #define ACPI_USE_LOCAL_CACHE
+#ifndef ACPI_MUTEX_USE_SEMAPHORE
+#define ACPI_MUTEX_USE_SEMAPHORE
+#endif
 #endif
 
 #ifdef ACPI_FULL_DEBUG
--- sys/contrib/dev/acpica/acpiosxf.h	22 Mar 2007 17:58:27 -0000	1.19
+++ sys/contrib/dev/acpica/acpiosxf.h	18 Sep 2007 17:45:28 -0000
@@ -254,10 +254,12 @@
 
 /* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */
 
+#ifdef ACPI_MUTEX_USE_SEMAPHORE
 #define AcpiOsCreateMutex(OutHandle)       AcpiOsCreateSemaphore (1, 1, OutHandle)
 #define AcpiOsDeleteMutex(Handle)          (void) AcpiOsDeleteSemaphore (Handle)
 #define AcpiOsAcquireMutex(Handle,Time)    AcpiOsWaitSemaphore (Handle, 1, Time)
 #define AcpiOsReleaseMutex(Handle)         (void) AcpiOsSignalSemaphore (Handle, 1)
+#endif
 
 
 /*
--- sys/dev/acpica/Osd/OsdSchedule.c	22 Mar 2007 18:16:41 -0000	1.39
+++ sys/dev/acpica/Osd/OsdSchedule.c	25 Sep 2007 23:02:15 -0000
@@ -43,6 +43,8 @@
 #include 
 #include 
 
+#include 
+
 #include 
 
 #include 
@@ -56,17 +58,17 @@
 static int acpi_max_threads = ACPI_MAX_THREADS;
 TUNABLE_INT("debug.acpi.max_threads", &acpi_max_threads);
 
-MALLOC_DEFINE(M_ACPITASK, "acpitask", "ACPI deferred task");
+TASKQUEUE_FAST_DEFINE(acpi, taskqueue_thread_enqueue, &taskqueue_acpi,
+    taskqueue_start_threads(&taskqueue_acpi, acpi_max_threads, PWAIT,
+    "acpi_task"));
+
+#define	ACPI_MAX_TASKS	32
 
 struct acpi_task_ctx {
	struct task		at_task;
	ACPI_OSD_EXEC_CALLBACK	at_function;
-	void			*at_context;
-};
-
-TASKQUEUE_DEFINE(acpi, taskqueue_thread_enqueue, &taskqueue_acpi,
-    taskqueue_start_threads(&taskqueue_acpi, acpi_max_threads, PWAIT,
-    "acpi_task"));
+	void			*at_context;
+} acpi_tasks[ACPI_MAX_TASKS];
 
 /*
  * Bounce through this wrapper function since ACPI-CA doesn't understand
@@ -75,11 +77,27 @@
 static void
 acpi_task_execute(void *context, int pending)
 {
+	struct acpi_task_ctx *at = context;
+	ACPI_OSD_EXEC_CALLBACK func;
+	void *args = at->at_context;
+
+	func = (void *)atomic_readandclear_ptr((void *)&at->at_function);
+	func(args);
+}
+
+static struct acpi_task_ctx *
+acpi_alloc_task(ACPI_OSD_EXEC_CALLBACK Function)
+{
	struct acpi_task_ctx *at;
+	int i;
 
-	at = (struct acpi_task_ctx *)context;
-	at->at_function(at->at_context);
-	free(at, M_ACPITASK);
+	for (i = 0; i < ACPI_MAX_TASKS; i++) {
+		at = &acpi_tasks[i];
+		if (atomic_cmpset_ptr((void *)&at->at_function, 0, (uintptr_t)Function))
+			return (at);
+	}
+
+	return (NULL);
 }
 
 /*
@@ -98,12 +116,6 @@
	if (Function == NULL)
		return_ACPI_STATUS (AE_BAD_PARAMETER);
 
-	at = malloc(sizeof(*at), M_ACPITASK, M_NOWAIT);
-	if (at == NULL)
-		return_ACPI_STATUS (AE_NO_MEMORY);
-
-	at->at_function = Function;
-	at->at_context = Context;
	switch (Type) {
	case OSL_GPE_HANDLER:
		pri = 10;
@@ -120,12 +132,14 @@
		pri = 0;
		break;
	default:
-		free(at, M_ACPITASK);
		return_ACPI_STATUS (AE_BAD_PARAMETER);
	}
 
+	if ((at = acpi_alloc_task(Function)) == NULL)
+		return_ACPI_STATUS (AE_NO_MEMORY);
+	at->at_context = Context;
	TASK_INIT(&at->at_task, pri, acpi_task_execute, at);
-	taskqueue_enqueue(taskqueue_acpi, &at->at_task);
+	taskqueue_enqueue_fast(taskqueue_acpi, &at->at_task);
 
	return_ACPI_STATUS (AE_OK);
 }
--- sys/dev/acpica/Osd/OsdSynch.c	26 Mar 2007 23:04:02 -0000	1.32
+++ sys/dev/acpica/Osd/OsdSynch.c	25 Sep 2007 21:25:39 -0000
@@ -1,4 +1,5 @@
 /*-
+ * Copyright (c) 2007 Jung-uk Kim 
  * Copyright (c) 2000 Michael Smith 
  * Copyright (c) 2000 BSDi
  * All rights reserved.
@@ -35,365 +36,440 @@
 #include 
 #include "opt_acpi.h"
+#include 
 #include 
-#include 
-#include 
+#include 
 #include 
+#include 
 #include 
+#include 
+#include 
+#include 
+
+#include 
 
-#define _COMPONENT	ACPI_OS_SERVICES
+#define	_COMPONENT	ACPI_OS_SERVICES
 ACPI_MODULE_NAME("SYNCH")
 
 MALLOC_DEFINE(M_ACPISEM, "acpisem", "ACPI semaphore");
 
-#define AS_LOCK(as)	mtx_lock(&(as)->as_mtx)
-#define AS_UNLOCK(as)	mtx_unlock(&(as)->as_mtx)
+/* Convert microseconds to hz. */
+static int
+timeout2hz(long timo)
+{
+	struct timeval tv;
+
+	tv.tv_sec = timo / 1000000;
+	tv.tv_usec = timo % 1000000;
+
+	return (tvtohz(&tv));
+}
+
+/* Adjust timeout from the previous uptime. */
+static long
+adjust_timeout(long *tmop, struct timeval *tvp)
+{
+	struct timeval now, slept;
+
+	getmicrouptime(&now);
+	slept = now;
+	timevalsub(&slept, tvp);
+	*tmop -= slept.tv_sec * 1000000 + slept.tv_usec;
+	*tvp = now;
+
+	return (*tmop);
+}
 
 /*
- * Simple counting semaphore implemented using a mutex.  (Subsequently used
- * in the OSI code to implement a mutex.  Go figure.)
+ * ACPI_SEMAPHORE: a sleepable counting semaphore
  */
 struct acpi_semaphore {
-	struct mtx	as_mtx;
-	UINT32		as_units;
-	UINT32		as_maxunits;
-	UINT32		as_pendings;
-	UINT32		as_resetting;
-	UINT32		as_timeouts;
+	struct sx	as_lock;
+	char		as_name[32];
+	struct cv	as_cv;
+	UINT32		as_units;
+	UINT32		as_maxunits;
 };
 
-/* Default number of maximum pending threads. */
-#ifndef ACPI_NO_SEMAPHORES
-#ifndef ACPI_SEMAPHORES_MAX_PENDING
-#define ACPI_SEMAPHORES_MAX_PENDING	4
-#endif
-
-static int	acpi_semaphore_debug = 0;
-TUNABLE_INT("debug.acpi_semaphore_debug", &acpi_semaphore_debug);
-SYSCTL_DECL(_debug_acpi);
-SYSCTL_INT(_debug_acpi, OID_AUTO, semaphore_debug, CTLFLAG_RW,
-    &acpi_semaphore_debug, 0, "Enable ACPI semaphore debug messages");
-#endif /* !ACPI_NO_SEMAPHORES */
-
 ACPI_STATUS
 AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits, ACPI_SEMAPHORE *OutHandle)
 {
-#ifndef ACPI_NO_SEMAPHORES
-	struct acpi_semaphore	*as;
+	struct acpi_semaphore	*as;
 
-	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 
-	if (OutHandle == NULL)
-		return_ACPI_STATUS (AE_BAD_PARAMETER);
-	if (InitialUnits > MaxUnits)
-		return_ACPI_STATUS (AE_BAD_PARAMETER);
+	if (OutHandle == NULL || InitialUnits > MaxUnits)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
+
+	if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
+		return_ACPI_STATUS (AE_NO_MEMORY);
 
-	if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
-		return_ACPI_STATUS (AE_NO_MEMORY);
+	snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
+	sx_init(&as->as_lock, as->as_name);
+	cv_init(&as->as_cv, as->as_name);
+	as->as_units = InitialUnits;
+	as->as_maxunits = MaxUnits;
 
-	mtx_init(&as->as_mtx, "ACPI semaphore", NULL, MTX_DEF);
-	as->as_units = InitialUnits;
-	as->as_maxunits = MaxUnits;
-	as->as_pendings = as->as_resetting = as->as_timeouts = 0;
-
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-	    "created semaphore %p max %d, initial %d\n",
-	    as, InitialUnits, MaxUnits));
-
-	*OutHandle = (ACPI_HANDLE)as;
-#else
-	*OutHandle = (ACPI_HANDLE)OutHandle;
-#endif /* !ACPI_NO_SEMAPHORES */
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+	    "created semaphore %p, max %u, initial %u\n",
+	    as, InitialUnits, MaxUnits));
 
-	return_ACPI_STATUS (AE_OK);
+	*OutHandle = (ACPI_SEMAPHORE)as;
+
+	return_ACPI_STATUS (AE_OK);
 }
 
 ACPI_STATUS
 AcpiOsDeleteSemaphore(ACPI_SEMAPHORE Handle)
 {
-#ifndef ACPI_NO_SEMAPHORES
-	struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
+	struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 
-	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete semaphore %p\n", as));
 
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "destroyed semaphore %p\n", as));
-	mtx_destroy(&as->as_mtx);
-	free(Handle, M_ACPISEM);
-#endif /* !ACPI_NO_SEMAPHORES */
+	if (as != NULL) {
+		sx_destroy(&as->as_lock);
+		cv_destroy(&as->as_cv);
+		free(as, M_ACPISEM);
+		return_ACPI_STATUS (AE_OK);
+	} else
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+		    "cannot delete null semaphore\n"));
 
-	return_ACPI_STATUS (AE_OK);
+	return_ACPI_STATUS (AE_BAD_PARAMETER);
 }
 
-/*
- * This implementation has a bug, in that it has to stall for the entire
- * timeout before it will return AE_TIME.  A better implementation would
- * use getmicrotime() to correctly adjust the timeout after being woken up.
- */
 ACPI_STATUS
 AcpiOsWaitSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units, UINT16 Timeout)
 {
-#ifndef ACPI_NO_SEMAPHORES
-	ACPI_STATUS result;
-	struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
-	int rv, tmo;
-	struct timeval timeouttv, currenttv, timelefttv;
+	struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
+	struct timeval tv;
+	long tmo;
+	ACPI_STATUS result;
 
-	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 
-	if (as == NULL)
-		return_ACPI_STATUS (AE_BAD_PARAMETER);
+	if (as == NULL || Units == 0)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
 
-	if (cold)
-		return_ACPI_STATUS (AE_OK);
+	sx_xlock(&as->as_lock);
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+	    "get %u units from semaphore %p (has %u), timeout %u\n",
+	    Units, as, as->as_units, Timeout));
 
-#if 0
-	if (as->as_units < Units && as->as_timeouts > 10) {
-		printf("%s: semaphore %p too many timeouts, resetting\n", __func__, as);
-		AS_LOCK(as);
-		as->as_units = as->as_maxunits;
-		if (as->as_pendings)
-			as->as_resetting = 1;
-		as->as_timeouts = 0;
-		wakeup(as);
-		AS_UNLOCK(as);
-		return_ACPI_STATUS (AE_TIME);
-	}
-
-	if (as->as_resetting)
-		return_ACPI_STATUS (AE_TIME);
-#endif
-
-	/* a timeout of ACPI_WAIT_FOREVER means "forever" */
-	if (Timeout == ACPI_WAIT_FOREVER) {
-		tmo = 0;
-		timeouttv.tv_sec = ((0xffff/1000) + 1);	/* cf. ACPI spec */
-		timeouttv.tv_usec = 0;
-	} else {
-		/* compute timeout using microseconds per tick */
-		tmo = (Timeout * 1000) / (1000000 / hz);
-		if (tmo <= 0)
-			tmo = 1;
-		timeouttv.tv_sec = Timeout / 1000;
-		timeouttv.tv_usec = (Timeout % 1000) * 1000;
-	}
-
-	/* calculate timeout value in timeval */
-	getmicrotime(&currenttv);
-	timevaladd(&timeouttv, &currenttv);
-
-	AS_LOCK(as);
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-	    "get %d units from semaphore %p (has %d), timeout %d\n",
-	    Units, as, as->as_units, Timeout));
-	for (;;) {
	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
-		result = AE_OK;
-		break;
+		sx_xunlock(&as->as_lock);
+		return_ACPI_STATUS (AE_OK);
	}
-	if (as->as_units >= Units) {
-		as->as_units -= Units;
-		result = AE_OK;
-		break;
+	if (as->as_maxunits < Units) {
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "exceeded max units %u\n",
+		    as->as_maxunits));
+		sx_xunlock(&as->as_lock);
+		return_ACPI_STATUS (AE_LIMIT);
	}
+	if (as->as_units >= Units) {
+		as->as_units -= Units;
+		sx_xunlock(&as->as_lock);
+		return_ACPI_STATUS (AE_OK);
+	}
+
+	switch (Timeout) {
+	case ACPI_DO_NOT_WAIT:
+		result = AE_TIME;
+		break;
+	case ACPI_WAIT_FOREVER:
+		do
+			cv_wait(&as->as_cv, &as->as_lock);
+		while (as->as_units < Units);
+		result = AE_OK;
+		break;
+	default:
+		tmo = (long)Timeout * 1000;
+		getmicrouptime(&tv);
+		for (;;) {
+			if (cv_timedwait(&as->as_cv, &as->as_lock,
+			    timeout2hz(tmo)) == EWOULDBLOCK) {
+				result = AE_TIME;
+				break;
+			} else if (as->as_units >= Units) {
+				result = AE_OK;
+				break;
+			} else if (adjust_timeout(&tmo, &tv) <= 0) {
+				result = AE_TIME;
+				break;
+			}
+		}
+	}
+
+	if (result == AE_OK)
+		as->as_units -= Units;
+	sx_xunlock(&as->as_lock);
 
-	/* limit number of pending threads */
-	if (as->as_pendings >= ACPI_SEMAPHORES_MAX_PENDING) {
-		result = AE_TIME;
-		break;
-	}
+	return_ACPI_STATUS (result);
 }
 
-	/* if timeout values of zero is specified, return immediately */
-	if (Timeout == 0) {
-		result = AE_TIME;
-		break;
-	}
+ACPI_STATUS
+AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
+{
+	struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
 
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-	    "semaphore blocked, calling msleep(%p, %p, %d, \"acsem\", %d)\n",
-	    as, &as->as_mtx, PCATCH, tmo));
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 
-	as->as_pendings++;
+	if (as == NULL || Units == 0)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
 
-	if (acpi_semaphore_debug) {
-		printf("%s: Sleep %d, pending %d, semaphore %p, thread %d\n",
-		    __func__, Timeout, as->as_pendings, as, AcpiOsGetThreadId());
+	sx_xlock(&as->as_lock);
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+	    "return %u units to semaphore %p (has %u)\n",
+	    Units, as, as->as_units));
+
+	if (as->as_maxunits == ACPI_NO_UNIT_LIMIT) {
+		sx_xunlock(&as->as_lock);
+		return_ACPI_STATUS (AE_OK);
+	}
+	if (as->as_maxunits < Units ||
+	    as->as_maxunits - Units < as->as_units) {
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "exceeded max units %u\n",
+		    as->as_maxunits));
+		sx_xunlock(&as->as_lock);
+		return_ACPI_STATUS (AE_LIMIT);
	}
 
-	rv = msleep(as, &as->as_mtx, PCATCH, "acsem", tmo);
+	as->as_units += Units;
+	cv_broadcast(&as->as_cv);
+	sx_xunlock(&as->as_lock);
 
-	as->as_pendings--;
+	return_ACPI_STATUS (AE_OK);
+}
 
-#if 0
-	if (as->as_resetting) {
-		/* semaphore reset, return immediately */
-		if (as->as_pendings == 0) {
-			as->as_resetting = 0;
-		}
-		result = AE_TIME;
-		break;
-	}
-#endif
+#ifndef ACPI_MUTEX_USE_SEMAPHORE
+/*
+ * ACPI_MUTEX: a sleepable binary semaphore
+ */
+struct acpi_mutex {
+	struct sx	am_lock;
+	char		am_name[32];
+};
 
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "msleep(%d) returned %d\n", tmo, rv));
-	if (rv == EWOULDBLOCK) {
-		result = AE_TIME;
-		break;
-	}
+ACPI_STATUS
+AcpiOsCreateMutex(ACPI_MUTEX *OutHandle)
+{
+	struct acpi_mutex *am;
 
-	/* check if we already awaited enough */
-	timelefttv = timeouttv;
-	getmicrotime(&currenttv);
-	timevalsub(&timelefttv, &currenttv);
-	if (timelefttv.tv_sec < 0) {
-		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "await semaphore %p timeout\n",
-		    as));
-		result = AE_TIME;
-		break;
-	}
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 
-	/* adjust timeout for the next sleep */
-	tmo = (timelefttv.tv_sec * 1000000 + timelefttv.tv_usec) /
-	    (1000000 / hz);
-	if (tmo <= 0)
-		tmo = 1;
-
-	if (acpi_semaphore_debug) {
-		printf("%s: Wakeup timeleft(%jd, %lu), tmo %u, sem %p, thread %d\n",
-		    __func__, (intmax_t)timelefttv.tv_sec, timelefttv.tv_usec, tmo, as,
-		    AcpiOsGetThreadId());
-	}
-	}
+	if (OutHandle == NULL)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
+	if ((am = malloc(sizeof(*am), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
+		return_ACPI_STATUS (AE_NO_MEMORY);
 
-	if (acpi_semaphore_debug) {
-		if (result == AE_TIME && Timeout > 0) {
-			printf("%s: Timeout %d, pending %d, semaphore %p\n",
-			    __func__, Timeout, as->as_pendings, as);
-		}
-		if (result == AE_OK && (as->as_timeouts > 0 || as->as_pendings > 0)) {
-			printf("%s: Acquire %d, units %d, pending %d, sem %p, thread %d\n",
-			    __func__, Units, as->as_units, as->as_pendings, as,
-			    AcpiOsGetThreadId());
-		}
-	}
+	/* Build a unique name based on the address of the handle. */
+	snprintf(am->am_name, sizeof(am->am_name), "ACPI mutex (%p)",
+	    OutHandle);
+	sx_init(&am->am_lock, am->am_name);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created mutex %p\n", am));
 
-	if (result == AE_TIME)
-		as->as_timeouts++;
-	else
-		as->as_timeouts = 0;
-
-	AS_UNLOCK(as);
-	return_ACPI_STATUS (result);
-#else
-	return_ACPI_STATUS (AE_OK);
-#endif /* !ACPI_NO_SEMAPHORES */
+	*OutHandle = (ACPI_MUTEX)am;
+
+	return_ACPI_STATUS (AE_OK);
 }
 
-ACPI_STATUS
-AcpiOsSignalSemaphore(ACPI_SEMAPHORE Handle, UINT32 Units)
+void
+AcpiOsDeleteMutex(ACPI_MUTEX Handle)
 {
-#ifndef ACPI_NO_SEMAPHORES
-	struct acpi_semaphore *as = (struct acpi_semaphore *)Handle;
+	struct acpi_mutex *am = (struct acpi_mutex *)Handle;
 
-	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 
-	if (as == NULL)
-		return_ACPI_STATUS(AE_BAD_PARAMETER);
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete mutex %p\n", am));
 
-	AS_LOCK(as);
-	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
-	    "return %d units to semaphore %p (has %d)\n",
-	    Units, as, as->as_units));
-	if (as->as_maxunits != ACPI_NO_UNIT_LIMIT) {
-		as->as_units += Units;
-		if (as->as_units > as->as_maxunits)
-			as->as_units = as->as_maxunits;
-	}
+	if (am != NULL) {
+		sx_destroy(&am->am_lock);
+		free(am, M_ACPISEM);
+	} else
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "cannot delete null mutex\n"));
+}
+
+ACPI_STATUS
+AcpiOsAcquireMutex(ACPI_MUTEX Handle, UINT16 Timeout)
+{
+	struct acpi_mutex *am = (struct acpi_mutex *)Handle;
+	struct timeval tv;
+	long tmo;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	if (Handle == NULL)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
+	if (sx_xlocked(&am->am_lock))
+		return_ACPI_STATUS (AE_ALREADY_ACQUIRED);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire mutex %p\n", am));
+
+	if (sx_try_xlock(&am->am_lock) != 0)
+		return_ACPI_STATUS (AE_OK);
+
+	switch (Timeout) {
+	case ACPI_DO_NOT_WAIT:
+		return_ACPI_STATUS (AE_TIME);
+	case ACPI_WAIT_FOREVER:
+		do {
+			/* XXX Timeout cannot be zero without a lock. */
+			tsleep(am, PLOCK, "acpimtx", INT_MAX);
+		} while (sx_try_xlock(&am->am_lock) == 0);
+		return_ACPI_STATUS (AE_OK);
+	default:
+		tmo = (long)Timeout * 1000;
+		getmicrouptime(&tv);
+		for (;;) {
+			if (tsleep(am, PLOCK, "acpimtx",
+			    timeout2hz(tmo)) == EWOULDBLOCK)
+				return_ACPI_STATUS (AE_TIME);
+			if (sx_try_xlock(&am->am_lock) != 0)
+				return_ACPI_STATUS (AE_OK);
+			if (adjust_timeout(&tmo, &tv) <= 0)
+				return_ACPI_STATUS (AE_TIME);
+		}
+	}
+}
+
+void
+AcpiOsReleaseMutex(ACPI_MUTEX Handle)
+{
+	struct acpi_mutex *am = (struct acpi_mutex *)Handle;
 
-	if (acpi_semaphore_debug && (as->as_timeouts > 0 || as->as_pendings > 0)) {
-		printf("%s: Release %d, units %d, pending %d, semaphore %p, thread %d\n",
-		    __func__, Units, as->as_units, as->as_pendings, as, AcpiOsGetThreadId());
-	}
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
 
-	wakeup(as);
-	AS_UNLOCK(as);
-#endif /* !ACPI_NO_SEMAPHORES */
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release mutex %p\n", am));
 
-	return_ACPI_STATUS (AE_OK);
+	if (am != NULL) {
+		if (sx_xlocked(&am->am_lock)) {
+			sx_xunlock(&am->am_lock);
+			wakeup_one(am);
+		} else
+			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+			    "cannot release unowned mutex %p\n", am));
+	} else
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+		    "cannot release null mutex\n"));
 }
+#endif /* ACPI_MUTEX_USE_SEMAPHORE */
 
-/* Combined mutex + mutex name storage since the latter must persist. */
+/*
+ * ACPI_SPINLOCK: a non-sleepable spinlock
+ */
 struct acpi_spinlock {
-	struct mtx	lock;
-	char		name[32];
+	struct mtx	al_lock;
+	char		al_name[32];
+	int		al_nested;
 };
 
 ACPI_STATUS
-AcpiOsCreateLock (ACPI_SPINLOCK *OutHandle)
+AcpiOsCreateLock(ACPI_SPINLOCK *OutHandle)
 {
-	struct acpi_spinlock *h;
+	struct acpi_spinlock *al;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	if (OutHandle == NULL)
+		return_ACPI_STATUS (AE_BAD_PARAMETER);
+	al = malloc(sizeof(*al), M_ACPISEM, M_NOWAIT | M_ZERO);
+	if (al == NULL)
+		return_ACPI_STATUS (AE_NO_MEMORY);
+
+	/* Build a unique name based on the address of the handle. */
+	if (OutHandle == &AcpiGbl_GpeLock)
+		snprintf(al->al_name, sizeof(al->al_name), "ACPI GPE lock");
+	else if (OutHandle == &AcpiGbl_HardwareLock)
+		snprintf(al->al_name, sizeof(al->al_name), "ACPI HW lock");
+	else
+		snprintf(al->al_name, sizeof(al->al_name), "ACPI lock (%p)",
+		    OutHandle);
+	mtx_init(&al->al_lock, al->al_name, NULL, MTX_SPIN);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created spinlock %p\n", al));
+
+	*OutHandle = (ACPI_SPINLOCK)al;
 
-	if (OutHandle == NULL)
-		return (AE_BAD_PARAMETER);
-	h = malloc(sizeof(*h), M_ACPISEM, M_NOWAIT | M_ZERO);
-	if (h == NULL)
-		return (AE_NO_MEMORY);
-
-	/* Build a unique name based on the address of the handle. */
-	if (OutHandle == &AcpiGbl_GpeLock)
-		snprintf(h->name, sizeof(h->name), "acpi subsystem GPE lock");
-	else if (OutHandle == &AcpiGbl_HardwareLock)
-		snprintf(h->name, sizeof(h->name), "acpi subsystem HW lock");
-	else
-		snprintf(h->name, sizeof(h->name), "acpi subsys %p", OutHandle);
-	mtx_init(&h->lock, h->name, NULL, MTX_DEF);
-	*OutHandle = (ACPI_SPINLOCK)h;
-	return (AE_OK);
+	return_ACPI_STATUS (AE_OK);
 }
 
 void
-AcpiOsDeleteLock (ACPI_SPINLOCK Handle)
+AcpiOsDeleteLock(ACPI_SPINLOCK Handle)
 {
-	struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
+	struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "delete spinlock %p\n", al));
 
-	if (Handle == NULL)
-		return;
-	mtx_destroy(&h->lock);
-	free(h, M_ACPISEM);
+	if (al != NULL) {
+		mtx_destroy(&al->al_lock);
+		free(al, M_ACPISEM);
+	} else
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+		    "cannot delete null spinlock\n"));
 }
 
-/*
- * The Flags parameter seems to state whether or not caller is an ISR
- * (and thus can't block) but since we have ithreads, we don't worry
- * about potentially blocking.
- */
-ACPI_NATIVE_UINT
-AcpiOsAcquireLock (ACPI_SPINLOCK Handle)
+ACPI_CPU_FLAGS
+AcpiOsAcquireLock(ACPI_SPINLOCK Handle)
 {
-	struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
+	struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "acquire spinlock %p\n", al));
+
+	if (al != NULL) {
+		if (mtx_owned(&al->al_lock)) {
+			al->al_nested++;
+			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+			    "acquire nested spinlock %p, depth %d\n",
+			    al, al->al_nested));
+		} else
+			mtx_lock_spin(&al->al_lock);
+	} else
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+		    "cannot acquire null spinlock\n"));
 
-	if (Handle == NULL)
	return (0);
-	mtx_lock(&h->lock);
-	return (0);
 }
 
 void
-AcpiOsReleaseLock (ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
+AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
 {
-	struct acpi_spinlock *h = (struct acpi_spinlock *)Handle;
+	struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;
+
+	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release spinlock %p\n", al));
 
-	if (Handle == NULL)
-		return;
-	mtx_unlock(&h->lock);
+	if (al != NULL) {
+		if (mtx_owned(&al->al_lock)) {
+			if (al->al_nested > 0) {
+				ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+				    "release nested spinlock %p, depth %d\n",
+				    al, al->al_nested));
+				al->al_nested--;
+			} else
+				mtx_unlock_spin(&al->al_lock);
+		} else
+			ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+			    "cannot release unowned spinlock %p\n", al));
+	} else
+		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
+		    "cannot release null spinlock\n"));
 }
 
-/* Section 5.2.9.1: global lock acquire/release functions */
-#define GL_ACQUIRED	(-1)
-#define GL_BUSY		0
-#define GL_BIT_PENDING	0x1
-#define GL_BIT_OWNED	0x2
-#define GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
+/* Section 5.2.10.1: global lock acquire/release functions */
+#define	GL_ACQUIRED	(-1)
+#define	GL_BUSY		0
+#define	GL_BIT_PENDING	0x01
+#define	GL_BIT_OWNED	0x02
+#define	GL_BIT_MASK	(GL_BIT_PENDING | GL_BIT_OWNED)
 
 /*
  * Acquire the global lock.  If busy, set the pending bit.  The caller
@@ -403,7 +479,7 @@
 int
 acpi_acquire_global_lock(uint32_t *lock)
 {
-	uint32_t new, old;
+	uint32_t	new, old;
 
	do {
		old = *lock;
@@ -422,7 +498,7 @@
 int
 acpi_release_global_lock(uint32_t *lock)
 {
-	uint32_t new, old;
+	uint32_t	new, old;
 
	do {
		old = *lock;
--- sys/i386/conf/NOTES	15 Aug 2007 19:26:03 -0000	1.1244
+++ sys/i386/conf/NOTES	18 Sep 2007 17:45:31 -0000
@@ -504,14 +504,11 @@
 # Intel ACPICA code.  (Note that the Intel code must also have USE_DEBUGGER
 # defined when it is built).
 #
-# ACPI_NO_SEMAPHORES makes the AcpiOs*Semaphore routines a no-op.
-#
 # Note that building ACPI into the kernel is deprecated; the module is
 # normally loaded automatically by the loader.
 
 device		acpi
 options 	ACPI_DEBUG
-#!options 	ACPI_NO_SEMAPHORES
 
 # ACPI Asus Desktop Extras. (voltage, temp, fan)
 device		acpi_aiboost
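
[Aside for reviewers, not part of the patch: the new AcpiOsWaitSemaphore()/AcpiOsAcquireMutex() code replaces the old behaviour, which could stall for the full timeout, with a retry loop that charges each wakeup against the remaining budget via timeout2hz() and adjust_timeout(). The fragment below is a minimal userland sketch of that bookkeeping pattern only, written against POSIX clock_gettime()/pthread_cond_timedwait() rather than the kernel's getmicrouptime()/cv_timedwait(); all names in it (struct usem, wait_units(), uptime_us()) are invented for the illustration.]

/*
 * Userland sketch of the "charge the time actually slept against the
 * remaining timeout" pattern used by the new AcpiOsWaitSemaphore().
 * Not kernel code; names are invented for this example.
 *
 * Build: cc -o usem usem.c -lpthread
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct usem {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	unsigned	units;
};

/* Monotonic uptime in microseconds (analog of getmicrouptime()). */
static long long
uptime_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((long long)ts.tv_sec * 1000000 + ts.tv_nsec / 1000);
}

/* Take 'units' from the semaphore, waiting at most 'timeout_ms'. */
static int
wait_units(struct usem *s, unsigned units, unsigned timeout_ms)
{
	long long budget = (long long)timeout_ms * 1000;	/* usec left */
	long long mark = uptime_us();
	long long now;
	struct timespec deadline;
	int error = 0;

	pthread_mutex_lock(&s->lock);
	while (s->units < units) {
		/* Sleep no longer than the remaining budget. */
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += budget / 1000000;
		deadline.tv_nsec += (budget % 1000000) * 1000;
		if (deadline.tv_nsec >= 1000000000) {
			deadline.tv_sec++;
			deadline.tv_nsec -= 1000000000;
		}
		if (pthread_cond_timedwait(&s->cv, &s->lock,
		    &deadline) == ETIMEDOUT) {
			error = ETIMEDOUT;
			break;
		}
		/* Woken early: charge the slept time, like adjust_timeout(). */
		now = uptime_us();
		budget -= now - mark;
		mark = now;
		if (budget <= 0) {
			error = ETIMEDOUT;
			break;
		}
	}
	if (error == 0)
		s->units -= units;
	pthread_mutex_unlock(&s->lock);
	return (error);
}

int
main(void)
{
	struct usem s;

	pthread_mutex_init(&s.lock, NULL);
	pthread_cond_init(&s.cv, NULL);
	s.units = 0;

	/* No units and no producer: expect ETIMEDOUT after about 200 ms. */
	printf("wait_units: %d (ETIMEDOUT is %d)\n",
	    wait_units(&s, 1, 200), ETIMEDOUT);
	return (0);
}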