GENERIC HEAD from 2010-04-16 07:02:28 UTC, r206700M, vmcore.433

KDB: debugger backends: ddb
KDB: current backend: ddb
524288K of memory above 4GB ignored
Copyright (c) 1992-2010 The FreeBSD Project.
Copyright (c) 1979, 1980, 1983, 1986, 1988, 1989, 1991, 1992, 1993, 1994
        The Regents of the University of California. All rights reserved.
FreeBSD is a registered trademark of The FreeBSD Foundation.
FreeBSD 9.0-CURRENT #0 r206700M: Mon Apr 19 11:54:42 CEST 2010
    pho@x4.osted.lan:/usr/src/sys/i386/compile/PHO i386
WARNING: WITNESS option enabled, expect reduced performance.
WARNING: DIAGNOSTIC option enabled, expect reduced performance.
Timecounter "i8254" frequency 1193182 Hz quality 0
CPU: AMD Phenom(tm) 9150e Quad-Core Processor (1799.98-MHz 686-class CPU)
  Origin = "AuthenticAMD"  Id = 0x100f23  Family = 10  Model = 2  Stepping = 3
  Features=0x178bfbff
  Features2=0x802009
  AMD Features=0xee500800
  AMD Features2=0x7ff
TSC: P-state invariant
real memory  = 4294967296 (4096 MB)
avail memory = 3536142336 (3372 MB)
:
Trying to mount root from ufs:/dev/ad4s1a
Entropy harvesting: interrupts ethernet point_to_point kickstart.
/dev/ad4s1a: FILE SYSTEM CLEAN; SKIPPING CHECKS
/dev/ad4s1a: clean, 639087 free (3039 frags, 79506 blocks, 0.3% fragmentation)
/dev/ad4s1e: FILE SYSTEM CLEAN; SKIPPING CHECKS
/dev/ad4s1e: clean, 49291069 free (2197 frags, 6161109 blocks, 0.0% fragmentation)
/dev/ad4s1f: FILE SYSTEM CLEAN; SKIPPING CHECKS
/dev/ad4s1f: clean, 3165657 free (379361 frags, 348287 blocks, 1.7% fragmentation)
/dev/ad4s1d: FILE SYSTEM CLEAN; SKIPPING CHECKS
/dev/ad4s1d: clean, 9632681 free (99801 frags, 1191610 blocks, 0.2% fragmentation)
Additional TCP/IP options: rfc1323 extensions=NO no-ipv4-mapped-ipv6.
re0: link state changed to DOWN
Starting Network: lo0 re0 fwe0 fwip0.
Starting Network: fwe0.
Starting Network: fwip0.
add net default: gateway 192.168.1.1
add net ::ffff:0.0.0.0: gateway ::1
add net ::0.0.0.0: gateway ::1
add net fe80::: gateway ::1
add net ff02::: gateway ::1
Additional ABI support: linux.

lock order reversal:
 1st 0xe6d1d9a0 bufwait (bufwait) @ kern/vfs_bio.c:2564
 2nd 0xc7104800 dirhash (dirhash) @ ufs/ufs/ufs_dirhash.c:285
KDB: stack backtrace:
db_trace_self_wrapper(c0cbfa23,f61c8864,c08e9615,c08d97bb,c0cc2a94,...) at db_trace_self_wrapper+0x26
kdb_backtrace(c08d97bb,c0cc2a94,c6d31098,c6d349e0,f61c88c0,...) at kdb_backtrace+0x29
_witness_debugger(c0cc2a94,c7104800,c0ce58e2,c6d349e0,c0ce5574,...) at _witness_debugger+0x25
witness_checkorder(c7104800,9,c0ce556b,11d,0,...) at witness_checkorder+0x839
_sx_xlock(c7104800,0,c0ce556b,11d,c77da6cc,...) at _sx_xlock+0x85
ufsdirhash_acquire(e6d1d940,f61c8a1c,a8,e77dbb70,f61c8990,...) at ufsdirhash_acquire+0x48
ufsdirhash_add(c77da6cc,f61c8a1c,3b70,f61c897c,f61c8980,...) at ufsdirhash_add+0x13
ufs_direnter(c7877984,c7c59ae0,f61c8a1c,f61c8c00,e6d1e160,...) at ufs_direnter+0x779
ufs_mkdir(f61c8c28,c0cfb22f,0,0,f61c8b6c,...) at ufs_mkdir+0x92e
VOP_MKDIR_APV(c0dcbb40,f61c8c28,f61c8c00,f61c8b6c,0,...) at VOP_MKDIR_APV+0xc5
kern_mkdirat(c7872240,ffffff9c,bfbfef5a,0,1ff,...) at kern_mkdirat+0x21b
kern_mkdir(c7872240,bfbfef5a,0,1ff,f61c8d2c,...) at kern_mkdir+0x2e
mkdir(c7872240,f61c8cf8,c0cf7a9f,c0cc3350,c7a122a8,...) at mkdir+0x29
syscall(f61c8d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (136, FreeBSD ELF32, mkdir), eip = 0x28171203, esp = 0xbfbfed6c, ebp = 0xbfbfee38 ---
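Note: the reversal above is WITNESS observing this mkdir(2) path take the dirhash sx lock while already holding a buffer lock (bufwait), the opposite of the order it recorded earlier. This particular bufwait/dirhash report has been seen many times and is generally considered harmless; it is separate from the panic that follows. As a minimal userland sketch (plain pthreads, not kernel code; lock names borrowed from the report) of the hazard WITNESS checks for:

/*
 * Illustration only: the same pair of locks acquired in both orders
 * by different code paths. Run sequentially this is safe; run
 * concurrently the two orders can deadlock, which is why WITNESS
 * records the first order it sees and reports later reversals.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bufwait = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dirhash = PTHREAD_MUTEX_INITIALIZER;

static void path_a(void)            /* establishes order: dirhash -> bufwait */
{
    pthread_mutex_lock(&dirhash);
    pthread_mutex_lock(&bufwait);
    pthread_mutex_unlock(&bufwait);
    pthread_mutex_unlock(&dirhash);
}

static void path_b(void)            /* reversal: bufwait -> dirhash */
{
    pthread_mutex_lock(&bufwait);
    pthread_mutex_lock(&dirhash);   /* WITNESS would flag this acquire */
    pthread_mutex_unlock(&dirhash);
    pthread_mutex_unlock(&bufwait);
}

int main(void)
{
    path_a();
    path_b();
    puts("both orders exercised; concurrent execution could deadlock");
    return 0;
}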
panic: mutex vm page queue mutex not owned at ../../../vm/vm_page.c:742
cpuid = 0
KDB: enter: panic

[ thread pid 1064 tid 100135 ]
Stopped at kdb_enter+0x3a: movl $0,kdb_why
db> run pho
db:0:pho> bt
Tracing pid 1064 tid 100135 td 0xc7c99240
kdb_enter(c0cbc551,c0cbc551,c0cbac54,f6281b20,0,...) at kdb_enter+0x3a
panic(c0cbac54,c0cc2c8e,c0ce8d1b,2e6,f6281b4c,...) at panic+0x136
_mtx_assert(c0f94480,4,c0ce8d1b,2e6,c4610e88,...) at _mtx_assert+0x87
vm_page_remove(c4610e88,0,c7ca32d0,1f,c46110c8,...) at vm_page_remove+0x74
vm_page_rename(c4610e88,c7ca17f8,1,0,c0bdafb3,...) at vm_page_rename+0x12
vm_object_split(c77df828,0,c0ce7c62,b35,28247000,...) at vm_object_split+0x436
vmspace_fork(c7a03910,f6281c48,2,c0f5adb0,c7c992e4,...) at vmspace_fork+0x3f7
fork1(c7c99240,14,0,f6281c78,c7c99240,...) at fork1+0x27b
fork(c7c99240,f6281cf8,c,c7c99240,c7c95000,...) at fork+0x29
syscall(f6281d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (2, FreeBSD ELF32, fork), eip = 0x285c61fb, esp = 0xbfbfe9fc, ebp = 0xbfbfea38 ---
db:0:bt> show allpcpu
Current CPU: 0

cpuid        = 0
dynamic pcpu = 0x63e380
curthread    = 0xc7c99240: pid 1064 "snmpd"
curpcb       = 0xf6281d90
fpcurthread  = none
idlethread   = 0xc6d7c480: pid 11 "idle: cpu0"
APIC ID      = 0
currentldt   = 0x50
spin locks held:

cpuid        = 1
dynamic pcpu = 0x5d23380
curthread    = 0xc6d7c6c0: pid 11 "idle: cpu1"
curpcb       = 0xc6b30d90
fpcurthread  = none
idlethread   = 0xc6d7c6c0: pid 11 "idle: cpu1"
APIC ID      = 1
currentldt   = 0x50
spin locks held:

cpuid        = 2
dynamic pcpu = 0x5d26380
curthread    = 0xc6d7c900: pid 11 "idle: cpu2"
curpcb       = 0xc6b2dd90
fpcurthread  = none
idlethread   = 0xc6d7c900: pid 11 "idle: cpu2"
APIC ID      = 2
currentldt   = 0x50
spin locks held:

cpuid        = 3
dynamic pcpu = 0x5d29380
curthread    = 0xc6d7cb40: pid 11 "idle: cpu3"
curpcb       = 0xc6b2ad90
fpcurthread  = none
idlethread   = 0xc6d7cb40: pid 11 "idle: cpu3"
APIC ID      = 3
currentldt   = 0x50
spin locks held:

db:0:allpcpu> show alllocks
Process 1064 (snmpd) thread 0xc7c99240 (100135)
exclusive sleep mutex vm object (standard object) r = 0 (0xc7ca2a18) locked @ vm/vm_object.c:1382
exclusive sleep mutex vm object (standard object) r = 0 (0xc7ca17f8) locked @ vm/vm_object.c:1381
exclusive sx user map (user map) r = 0 (0xc7b17870) locked @ vm/vm_map.c:2990
exclusive sx user map (user map) r = 0 (0xc7a03958) locked @ vm/vm_map.c:2982
db:0:alllocks> show lockedvnods
Locked vnodes
db:0:lockedvnods> show mount
0xc7793b50 /dev/ad4s1a on / (ufs)
0xc7794000 devfs on /dev (devfs)
0xc79e45a8 /dev/ad4s1e on /tmp (ufs)
0xc79ea000 /dev/ad4s1f on /usr (ufs)
0xc79e9b50 /dev/ad4s1d on /var (ufs)
0xc79e987c procfs on /proc (procfs)
More info: show mount <addr>
db:0:mount> ps
  pid  ppid  pgrp   uid  state   wmesg     wchan       cmd
 1064  1060    21     0  R+      CPU 0                 snmpd
 1060    21    21     0  S+      wait      0xc781c550  sh
 1034     1  1034     0  Ss      rpcsvc    0xc77990d0  NLM: master
 1027     1  1027     0  Ss      select    0xc7886664  rpc.statd
 1020  1019  1019     0  S       (threaded)            nfsd
  100123                 S       rpcsvc    0xc7884bd0  nfsd: service
  100122                 S       rpcsvc    0xc7884c10  nfsd: service
  100121                 S       rpcsvc    0xc7884c50  nfsd: service
  100081                 S       rpcsvc    0xc7815c50  nfsd: master
 1019     1  1019     0  Ss      select    0xc7886764  nfsd
 1010     1  1010     0  Ss      select    0xc7c4e964  mountd
  909     1   909     0  Ss      select    0xc7814524  rpcbind
  883     1   883     0  Ss      select    0xc76f93a4  syslogd
  701     1   701     0  Ss      select    0xc7799b64  devd
   21     1    21     0  Ss+     wait      0xc77e77f8  sh
   20     0     0     0  DL      flowclea  0xc0f883a8  [flowcleaner]
   19     0     0     0  DL      sdflush   0xc0f93ae0  [softdepflush]
   18     0     0     0  DL      syncer    0xc0f881b8  [syncer]
   17     0     0     0  DL      vlruwt    0xc777aaa0  [vnlru]
   16     0     0     0  DL      psleep    0xc0f87ee8  [bufdaemon]
   15     0     0     0  DL      pgzero    0xc0f94914  [pagezero]
    9     0     0     0  DL      psleep    0xc0f94544  [vmdaemon]
    8     0     0     0  DL      psleep    0xc0f9450c  [pagedaemon]
    7     0     0     0  DL      ccb_scan  0xc0de7954  [xpt_thrd]
    6     0     0     0  DL      -         0xc6fade3c  [fdc0]
    5     0     0     0  SL      -         0xc6ff9000  [fw0_probe]
   14     0     0     0  DL      (threaded)            [usb]
  100057                 D       -         0xc6fd9d0c  [usbus5]
  100056                 D       -         0xc6fd9cdc  [usbus5]
  100055                 D       -         0xc6fd9cac  [usbus5]
  100054                 D       -         0xc6fd9c7c  [usbus5]
  100052                 D       -         0xc6fcfb5c  [usbus4]
  100051                 D       -         0xc6fcfb2c  [usbus4]
  100050                 D       -         0xc6fcfafc  [usbus4]
  100049                 D       -         0xc6fcfacc  [usbus4]
  100048                 D       -         0xc6fcbb5c  [usbus3]
  100047                 D       -         0xc6fcbb2c  [usbus3]
  100046                 D       -         0xc6fcbafc  [usbus3]
  100045                 D       -         0xc6fcbacc  [usbus3]
  100044                 D       -         0xc6fc1b5c  [usbus2]
  100043                 D       -         0xc6fc1b2c  [usbus2]
  100042                 D       -         0xc6fc1afc  [usbus2]
  100041                 D       -         0xc6fc1acc  [usbus2]
  100039                 D       -         0xc6fbbb5c  [usbus1]
  100038                 D       -         0xc6fbbb2c  [usbus1]
  100037                 D       -         0xc6fbbafc  [usbus1]
  100036                 D       -         0xc6fbbacc  [usbus1]
  100034                 D       -         0xc6fb6b5c  [usbus0]
  100033                 D       -         0xc6fb6b2c  [usbus0]
  100032                 D       -         0xc6fb6afc  [usbus0]
  100031                 D       -         0xc6fb6acc  [usbus0]
   13     0     0     0  DL      -         0xc0e1b9a4  [yarrow]
    4     0     0     0  DL      -         0xc0e19744  [g_down]
    3     0     0     0  DL      -         0xc0e19740  [g_up]
    2     0     0     0  DL      -         0xc0e19738  [g_event]
   12     0     0     0  WL      (threaded)            [intr]
  100065                 I                             [irq12: psm0]
  100064                 I                             [irq1: atkbd0]
  100062                 I                             [swi0: uart]
  100059                 I                             [irq20: fwohci0]
  100058                 I                             [irq14: ata0]
  100053                 I                             [irq19: ehci0]
  100040                 I                             [irq18: ohci2 ohci4]
  100035                 I                             [irq17: ohci1 ohci3]
  100030                 I                             [irq16: hdac1 ohci0]
  100029                 I                             [irq22: atapci0]
  100028                 I                             [irq256: hdac0]
  100027                 I                             [irq9: acpi0]
  100023                 I                             [swi2: cambio]
  100022                 I                             [swi6: task queue]
  100021                 I                             [swi6: Giant taskq]
  100019                 I                             [swi5: +]
  100012                 I                             [swi1: netisr 0]
  100011                 I                             [swi4: clock]
  100010                 I                             [swi4: clock]
  100009                 I                             [swi4: clock]
  100008                 I                             [swi4: clock]
  100007                 I                             [swi3: vm]
   11     0     0     0  RL      (threaded)            [idle]
  100006                 CanRun                        [idle: cpu0]
  100005                 Run     CPU 1                 [idle: cpu1]
  100004                 Run     CPU 2                 [idle: cpu2]
  100003                 Run     CPU 3                 [idle: cpu3]
    1     0     1     0  SLs     wait      0xc6d7ad48  [init]
   10     0     0     0  DL      audit_wo  0xc0f93400  [audit]
    0     0     0     0  DLs     (threaded)            [kernel]
  100066                 D       deadlkre  0xc0e1b9a4  [deadlkres]
  100060                 D       -         0xc6ff8640  [fw0_taskq]
  100026                 D       -         0xc6f3fd40  [acpi_task_2]
  100025                 D       -         0xc6f3fd40  [acpi_task_1]
  100024                 D       -         0xc6f3fd40  [acpi_task_0]
  100020                 D       -         0xc6f400c0  [thread taskq]
  100018                 D       -         0xc6f40340  [kqueue taskq]
  100013                 D       -         0xc6d61c80  [firmware taskq]
  100000                 D       sched     0xc0e19820  [swapper]
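Note: the panic is an ownership assertion, not a deadlock. vm_page_remove() asserts the vm page queue mutex is owned (frame #14 of the kgdb trace at the end shows m=0xc0f94480, what=0x4, which matches the "not owned" message), while alllocks shows this fork path holding only the two vm object mutexes and the two user map sx locks. A minimal userland analogue of such a "not owned" check, using a POSIX errorcheck mutex (illustrative sketch only, not kernel code):

/*
 * An errorcheck mutex reports EPERM when a thread operates on a
 * mutex it does not own -- the same condition mtx_assert(MA_OWNED)
 * turns into a panic in the kernel.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    pthread_mutex_t m;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&m, &attr);

    /* Unlocking without holding the mutex mirrors "not owned". */
    int error = pthread_mutex_unlock(&m);
    if (error != 0)
        printf("not owned detected: %s\n", strerror(error)); /* EPERM */

    pthread_mutex_destroy(&m);
    pthread_mutexattr_destroy(&attr);
    return 0;
}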
db:0:ps> allt

Tracing command snmpd pid 1064 tid 100135 td 0xc7c99240
kdb_enter(c0cbc551,c0cbc551,c0cbac54,f6281b20,0,...) at kdb_enter+0x3a
panic(c0cbac54,c0cc2c8e,c0ce8d1b,2e6,f6281b4c,...) at panic+0x136
_mtx_assert(c0f94480,4,c0ce8d1b,2e6,c4610e88,...) at _mtx_assert+0x87
vm_page_remove(c4610e88,0,c7ca32d0,1f,c46110c8,...) at vm_page_remove+0x74
vm_page_rename(c4610e88,c7ca17f8,1,0,c0bdafb3,...) at vm_page_rename+0x12
vm_object_split(c77df828,0,c0ce7c62,b35,28247000,...) at vm_object_split+0x436
vmspace_fork(c7a03910,f6281c48,2,c0f5adb0,c7c992e4,...) at vmspace_fork+0x3f7
fork1(c7c99240,14,0,f6281c78,c7c99240,...) at fork1+0x27b
fork(c7c99240,f6281cf8,c,c7c99240,c7c95000,...) at fork+0x29
syscall(f6281d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (2, FreeBSD ELF32, fork), eip = 0x285c61fb, esp = 0xbfbfe9fc, ebp = 0xbfbfea38 ---

Tracing command sh pid 1060 tid 100077 td 0xc7820480
sched_switch(c7820480,0,104,191,7a5ff76b,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,5c,...) at mi_switch+0x200
sleepq_switch(c7820480,0,c0cc0b3d,1a0,5c,...) at sleepq_switch+0x15f
sleepq_catch_signals(c0cc0b3d,160,0,100,100,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c781c550,5c,c0cc3425,100,0,...) at sleepq_wait_sig+0x17
_sleep(c781c550,c781c5d8,15c,c0cc3425,0,...) at _sleep+0x354
kern_wait(c7820480,ffffffff,f615ec74,2,0,...) at kern_wait+0xb76
wait4(c7820480,f615ecf8,c0cf7a9f,c0cc3404,c781c550,...) at wait4+0x3b
syscall(f615ed38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (7, FreeBSD ELF32, wait4), eip = 0x281601db, esp = 0xbfbfdbcc, ebp = 0xbfbfdbe8 ---

Tracing command rpc.lockd pid 1034 tid 100090 td 0xc781d000
sched_switch(c781d000,0,104,191,710a20ef,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c781d000,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(f61939fc,c089350a,c7812600,0,c781d000,...) at sleepq_catch_signals+0xb7
sleepq_timedwait_sig(c77990d0,0,f6193a2c,101,0,...) at sleepq_timedwait_sig+0x1a
_cv_timedwait_sig(c77990d0,c7812600,1388,3b1,c7c47100,...) at _cv_timedwait_sig+0x250
svc_run_internal(c781d168,14,c0ce0945,c0ce003a,f6193b8c,...) at svc_run_internal+0x356
svc_run(c7812600,0,4,c0a8f750,f6193b8c,...) at svc_run+0x7f
nlm_syscall(c781d000,f6193cf8,c0cf7a9f,c0cc3c78,c77e7d48,...) at nlm_syscall+0x77d
syscall(f6193d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (154, FreeBSD ELF32, nlm_syscall), eip = 0x280fad5b, esp = 0xbfbfed4c, ebp = 0xbfbfee18 ---

Tracing command rpc.statd pid 1027 tid 100103 td 0xc7820b40
sched_switch(c7820b40,0,104,191,71098388,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c7820b40,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(f61e6a4c,c089350a,c7886650,0,c7820b40,...) at sleepq_catch_signals+0xb7
sleepq_timedwait_sig(c7886664,0,f61e6a7c,101,0,...) at sleepq_timedwait_sig+0x1a
_cv_timedwait_sig(c7886664,c7886650,7531,621,c7807428,...) at _cv_timedwait_sig+0x250
seltdwait(f61e6c28,f61e6c30,c6d80380,c7820b40,c728e200,...) at seltdwait+0x8a
kern_select(c7820b40,8,bfbfed34,0,0,f61e6c70,20,1e,0) at kern_select+0x504
select(c7820b40,f61e6cf8,c0cf7a9f,c0cc354f,c781c7f8,...) at select+0x66
syscall(f61e6d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (93, FreeBSD ELF32, select), eip = 0x2818ddb3, esp = 0xbfbfec8c, ebp = 0xbfbfedc8 ---

Tracing command nfsd pid 1020 tid 100123 td 0xc77ec6c0
sched_switch(c77ec6c0,0,104,191,62208137,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c77ec6c0,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(f625dbf8,c089350a,c7259980,0,c77ec6c0,...) at sleepq_catch_signals+0xb7
sleepq_timedwait_sig(c7884bd0,0,f625dc28,101,0,...) at sleepq_timedwait_sig+0x1a
_cv_timedwait_sig(c7884bd0,c7259980,1388,3ec,0,...) at _cv_timedwait_sig+0x250
svc_run_internal(f625dd24,c0878d68,c7259980,f625dd38,c0cb78fd,...) at svc_run_internal+0x356
svc_thread_start(c7259980,f625dd38,c0cb78fd,343,c781aaa0,...) at svc_thread_start+0x10
fork_exit(c0a9d7b0,c7259980,f625dd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0x804c72e, eip = 0xc, esp = 0x33, ebp = 0 ---

Tracing command nfsd pid 1020 tid 100122 td 0xc77ec900
sched_switch(c77ec900,0,104,191,62202d31,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c77ec900,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(f625abf8,c089350a,c7259980,0,c77ec900,...) at sleepq_catch_signals+0xb7
sleepq_timedwait_sig(c7884c10,0,f625ac28,101,0,...) at sleepq_timedwait_sig+0x1a
_cv_timedwait_sig(c7884c10,c7259980,1388,3ec,0,...) at _cv_timedwait_sig+0x250
svc_run_internal(f625ad24,c0878d68,c7259980,f625ad38,c0cb78fd,...) at svc_run_internal+0x356
svc_thread_start(c7259980,f625ad38,c0cb78fd,343,c781aaa0,...) at svc_thread_start+0x10
fork_exit(c0a9d7b0,c7259980,f625ad38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0x804c72e, eip = 0xc, esp = 0x33, ebp = 0 ---

Tracing command nfsd pid 1020 tid 100121 td 0xc77ecb40
sched_switch(c77ecb40,0,104,191,621ff0d6,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c77ecb40,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(f6257bf8,c089350a,c7259980,0,c77ecb40,...) at sleepq_catch_signals+0xb7
sleepq_timedwait_sig(c7884c50,0,f6257c28,101,0,...) at sleepq_timedwait_sig+0x1a
_cv_timedwait_sig(c7884c50,c7259980,1388,3b1,0,...) at _cv_timedwait_sig+0x250
svc_run_internal(f6257d24,c0878d68,c7259980,f6257d38,c0cb78fd,...) at svc_run_internal+0x356
svc_thread_start(c7259980,f6257d38,c0cb78fd,343,c781aaa0,...) at svc_thread_start+0x10
fork_exit(c0a9d7b0,c7259980,f6257d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0x804c72e, eip = 0xc, esp = 0x33, ebp = 0 ---

Tracing command nfsd pid 1020 tid 100081 td 0xc781db40
sched_switch(c781db40,0,104,191,622077d8,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c781db40,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(f616eae8,c089350a,c7259980,0,c781db40,...) at sleepq_catch_signals+0xb7
sleepq_timedwait_sig(c7815c50,0,f616eb18,101,0,...) at sleepq_timedwait_sig+0x1a
_cv_timedwait_sig(c7815c50,c7259980,1388,3ec,f616eb60,...) at _cv_timedwait_sig+0x250
svc_run_internal(c781dca8,14,c0ce0945,c0cdf3f9,f616ec3c,...) at svc_run_internal+0x356
svc_run(c7259980,0,c0cdf6fd,1c2,0,...) at svc_run+0x7f
nfssvc_nfsd(bfbfe8b0,f616ec3c,c,c6d80380,f616ec50,...) at nfssvc_nfsd+0xad
nfssvc_nfsserver(c781db40,f616ecf8,bfbfe8b0,c781db40,0,...) at nfssvc_nfsserver+0x24f
nfssvc(c781db40,f616ecf8,c0cf7a9f,c0cc3838,c781aaa0,...) at nfssvc+0x83
syscall(f616ed38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (155, FreeBSD ELF32, nfssvc), eip = 0x280dbd3b, esp = 0xbfbfe86c, ebp = 0xbfbfead8 ---

Tracing command nfsd pid 1019 tid 100082 td 0xc781d900
sched_switch(c781d900,0,104,191,6638ef13,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c781d900,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(c089350a,c7886750,0,c0cbacdc,c781d900,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c7886764,0,f6172a7c,101,0,...) at sleepq_wait_sig+0x17
_cv_wait_sig(c7886764,c7886750,c0cc2e8b,621,c77ee6c8,...) at _cv_wait_sig+0x240
seltdwait(c77ee6c8,58,c6d80380,c781d900,c7b17570,...) at seltdwait+0xa2
kern_select(c781d900,5,bfbfece0,0,0,0,20,bfbfeae0,1) at kern_select+0x504
select(c781d900,f6172cf8,c,c781d900,c781a7f8,...) at select+0x66
syscall(f6172d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (93, FreeBSD ELF32, select), eip = 0x28184db3, esp = 0xbfbfeadc, ebp = 0xbfbfee18 ---

Tracing command mountd pid 1010 tid 100092 td 0xc7872480
sched_switch(c7872480,0,104,191,5e2836d7,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c7872480,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(c089350a,c7c4e950,0,c0cbacdc,c7872480,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c7c4e964,0,f61c5a7c,101,0,...) at sleepq_wait_sig+0x17
_cv_wait_sig(c7c4e964,c7c4e950,c0cc2e8b,621,c77ef888,...) at _cv_wait_sig+0x240
seltdwait(c77ef888,58,c6d80380,c7872480,c0f5b228,...) at seltdwait+0xa2
kern_select(c7872480,9,bfbfed7c,0,0,0,20,bfbfed50,bfbfed7c) at kern_select+0x504
select(c7872480,f61c5cf8,c0cf7a9f,c0cc33c9,c7a12550,...) at select+0x66
syscall(f61c5d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (93, FreeBSD ELF32, select), eip = 0x28197db3, esp = 0xbfbfed4c, ebp = 0xbfbfee18 ---

Tracing command rpcbind pid 909 tid 100088 td 0xc781d240
sched_switch(c781d240,0,104,191,6ff497ad,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c781d240,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(f618aaa8,c089350a,c7814510,0,c781d240,...) at sleepq_catch_signals+0xb7
sleepq_timedwait_sig(c7814524,0,f618aad8,101,0,...) at sleepq_timedwait_sig+0x1a
_cv_timedwait_sig(c7814524,c7814510,7531,621,f618ab8c,...) at _cv_timedwait_sig+0x250
seltdwait(f618ac5c,f618ac64,52f,c781d240,f618ab5c,...) at seltdwait+0x8a
poll(c781d240,f618acf8,c0cf7a9f,c0cc33c9,c781a000,...) at poll+0x300
syscall(f618ad38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (209, FreeBSD ELF32, poll), eip = 0x28145d0f, esp = 0xbfbfcbdc, ebp = 0xbfbfeda8 ---

Tracing command syslogd pid 883 tid 100078 td 0xc7820240
sched_switch(c7820240,0,104,191,6cbff445,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c7820240,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(c089350a,c76f9390,0,c0cbacdc,c7820240,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c76f93a4,0,f6162a7c,101,0,...) at sleepq_wait_sig+0x17
_cv_wait_sig(c76f93a4,c76f9390,c0cc2e8b,621,c77ee000,...) at _cv_wait_sig+0x240
seltdwait(c77ee000,58,c6d80380,c7820240,3,...) at seltdwait+0xa2
kern_select(c7820240,9,2847f0ac,0,0,0,20,bfbfe2b0,0) at kern_select+0x504
select(c7820240,f6162cf8,c0cf7a9f,c0cc3314,c781c2a8,...) at select+0x66
syscall(f6162d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (93, FreeBSD ELF32, select), eip = 0x28196db3, esp = 0xbfbfe2ac, ebp = 0xbfbfee18 ---

Tracing command devd pid 701 tid 100086 td 0xc7872900
sched_switch(c7872900,0,104,191,5085098f,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c7872900,0,c0cc0b3d,1a0,0,...) at sleepq_switch+0x15f
sleepq_catch_signals(c089350a,c7799b50,0,c0cbacdc,c7872900,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c7799b64,0,f6182a7c,101,0,...) at sleepq_wait_sig+0x17
_cv_wait_sig(c7799b64,c7799b50,c0cc2e8b,621,c77efe70,...) at _cv_wait_sig+0x240
seltdwait(c77efe70,58,c6d80380,c7872900,c7821828,...) at seltdwait+0xa2
kern_select(c7872900,6,bfbfe9a0,0,0,0,20,bfbfe970,10) at kern_select+0x504
select(c7872900,f6182cf8,c,c7872900,c6f42aa0,...) at select+0x66
syscall(f6182d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (93, FreeBSD ELF32, select), eip = 0x808c273, esp = 0xbfbfe96c, ebp = 0xbfbfee48 ---

Tracing command sh pid 21 tid 100076 td 0xc77ec000
sched_switch(c77ec000,0,104,191,765d8618,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,5c,...) at mi_switch+0x200
sleepq_switch(c77ec000,0,c0cc0b3d,1a0,5c,...) at sleepq_switch+0x15f
sleepq_catch_signals(c0cc0b3d,160,0,100,100,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c77e77f8,5c,c0cc3425,100,0,...) at sleepq_wait_sig+0x17
_sleep(c77e77f8,c77e7880,15c,c0cc3425,0,...) at _sleep+0x354
kern_wait(c77ec000,ffffffff,f615ac74,2,0,...) at kern_wait+0xb76
wait4(c77ec000,f615acf8,c0cf7a9f,c0cc3404,c77e77f8,...) at wait4+0x3b
syscall(f615ad38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (7, FreeBSD ELF32, wait4), eip = 0x281601db, esp = 0xbfbfe97c, ebp = 0xbfbfe998 ---

Tracing command flowcleaner pid 20 tid 100075 td 0xc70f5480
sched_switch(c70f5480,0,104,191,72436f52,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c70f5480,0,c0cc0b3d,283,c70f5480,...) at sleepq_switch+0x15f
sleepq_timedwait(c0f883a8,0,f3f6acc4,1,0,...) at sleepq_timedwait+0x6b
_cv_timedwait(c0f883a8,c0f883b0,4e20,627,0,...) at _cv_timedwait+0x250
flowtable_cleaner(0,f3f6ad38,c0cb78fd,343,c777a2a8,...) at flowtable_cleaner+0x1bc
fork_exit(c094f720,0,f3f6ad38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f6ad70, ebp = 0 ---

Tracing command softdepflush pid 19 tid 100074 td 0xc70f56c0
sched_switch(c70f56c0,0,104,191,a1b47e5f,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,44,...) at mi_switch+0x200
sleepq_switch(c70f56c0,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f
sleepq_timedwait(c0f93ae0,44,c0ce4489,0,0,...) at sleepq_timedwait+0x6b
_sleep(c0f93ae0,c0f93a84,44,c0ce4489,3e8,...) at _sleep+0x339
softdep_flush(0,f3f67d38,c0cb78fd,343,c777a550,...) at softdep_flush+0x244
fork_exit(c0ade8a0,0,f3f67d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f67d70, ebp = 0 ---

Tracing command syncer pid 18 tid 100073 td 0xc70f5900
sched_switch(c70f5900,0,104,191,a1465b6f,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c70f5900,0,c0cc0b3d,283,c70f5900,...) at sleepq_switch+0x15f
sleepq_timedwait(c0f881b8,0,f3f64c88,1,0,...) at sleepq_timedwait+0x6b
_cv_timedwait(c0f881b8,c0f881a4,3e8,6d4,4e20,...) at _cv_timedwait+0x250
sched_sync(0,f3f64d38,c0cb78fd,343,c777a7f8,...) at sched_sync+0x502
fork_exit(c093a610,0,f3f64d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f64d70, ebp = 0 ---

Tracing command vnlru pid 17 tid 100072 td 0xc70f5b40
sched_switch(c70f5b40,0,104,191,a12aee63,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,50,...) at mi_switch+0x200
sleepq_switch(c70f5b40,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f
sleepq_timedwait(c777aaa0,50,c0ccab50,0,0,...) at sleepq_timedwait+0x6b
_sleep(c777aaa0,c0f88178,250,c0ccab50,3e8,...) at _sleep+0x339
vnlru_proc(0,f3f61d38,c0cb78fd,343,c777aaa0,...) at vnlru_proc+0xe7
fork_exit(c093b1e0,0,f3f61d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f61d70, ebp = 0 ---

Tracing command bufdaemon pid 16 tid 100071 td 0xc70f5d80
sched_switch(c70f5d80,0,104,191,a198bf5a,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,44,...) at mi_switch+0x200
sleepq_switch(c70f5d80,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f
sleepq_timedwait(c0f87ee8,44,c0cc802d,0,0,...) at sleepq_timedwait+0x6b
_sleep(c0f87ee8,c0f87eec,44,c0cc802d,3e8,...) at _sleep+0x339
buf_daemon(0,f3f5ed38,c0cb78fd,343,c777ad48,...) at buf_daemon+0x138
fork_exit(c0922040,0,f3f5ed38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f5ed70, ebp = 0 ---

Tracing command pagezero pid 15 tid 100070 td 0xc70f7000
sched_switch(c70f7000,0,104,191,705381df,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c70f7000,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f
sleepq_timedwait(c0f94914,0,c0cea400,0,0,...) at sleepq_timedwait+0x6b
_sleep(c0f94914,c0f94400,0,c0cea400,493e0,...) at _sleep+0x339
vm_pagezero(0,f3f5bd38,c0cb78fd,343,c6d7b2a8,...) at vm_pagezero+0xdc
fork_exit(c0b1f730,0,f3f5bd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f5bd70, ebp = 0 ---

Tracing command vmdaemon pid 9 tid 100069 td 0xc70f7240
sched_switch(c70f7240,0,104,191,704e599c,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,68,...) at mi_switch+0x200
sleepq_switch(c70f7240,0,c0cc0b3d,260,0,...) at sleepq_switch+0x15f
sleepq_wait(c0f94544,68,c0cc802d,0,0,...) at sleepq_wait+0x63
_sleep(c0f94544,c0f94548,68,c0cc802d,0,...) at _sleep+0x36b
vm_daemon(0,f3f58d38,c0cb78fd,343,c6d7b550,...) at vm_daemon+0x59
fork_exit(c0b19720,0,f3f58d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f58d70, ebp = 0 ---

Tracing command pagedaemon pid 8 tid 100068 td 0xc70f7480
sched_switch(c70f7480,0,104,191,a19f0840,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,44,...) at mi_switch+0x200
sleepq_switch(c70f7480,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f
sleepq_timedwait(c0f9450c,44,c0cc802d,0,0,...) at sleepq_timedwait+0x6b
_sleep(c0f9450c,c0f94400,44,c0cc802d,1388,...) at _sleep+0x339
vm_pageout(0,f3f55d38,c0cb78fd,343,c6d7b7f8,...) at vm_pageout+0x2bb
fork_exit(c0b1a610,0,f3f55d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3f55d70, ebp = 0 ---

Tracing command xpt_thrd pid 7 tid 100067 td 0xc70f76c0
sched_switch(c70f76c0,0,104,191,482717f2,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,4c,...) at mi_switch+0x200
sleepq_switch(c70f76c0,0,c0cc0b3d,260,0,...) at sleepq_switch+0x15f
sleepq_wait(c0de7954,4c,c0c55adb,0,0,...) at sleepq_wait+0x63
_sleep(c0de7954,c0de7998,4c,c0c55adb,0,...) at _sleep+0x36b
xpt_scanner_thread(0,f3ef2d38,c0cb78fd,343,c6d7baa0,...) at xpt_scanner_thread+0x47
fork_exit(c0484250,0,f3ef2d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ef2d70, ebp = 0 ---

Tracing command fdc0 pid 6 tid 100063 td 0xc6fc0240
sched_switch(c6fc0240,0,104,191,6cef58fc,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,4c,...) at mi_switch+0x200
sleepq_switch(c6fc0240,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f
sleepq_timedwait(c6fade3c,4c,c0cb1b3b,0,0,...) at sleepq_timedwait+0x6b
_sleep(c6fade3c,c6fadef0,4c,c0cb1b3b,3e8,...) at _sleep+0x339
fdc_thread(c6fade00,f3ee6d38,c0cb78fd,343,c6d7bd48,...) at fdc_thread+0x27d
fork_exit(c0b9ce00,c6fade00,f3ee6d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ee6d70, ebp = 0 ---

Tracing command fw0_probe pid 5 tid 100061 td 0xc6fc06c0
sched_switch(c6fc06c0,0,104,191,b53829d8,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,5c,...) at mi_switch+0x200
sleepq_switch(c6fc06c0,0,c0cc0b3d,1a0,5c,...) at sleepq_switch+0x15f
sleepq_catch_signals(c0cc0b3d,160,0,100,100,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c6ff9000,5c,c0cb1b3b,100,0,...) at sleepq_wait_sig+0x17
_sleep(c6ff9000,c6ffd488,15c,c0cb1b3b,0,...) at _sleep+0x354
fw_bus_probe_thread(c6ff9000,f3ed6d38,c0cb78fd,343,c6f42000,...) at fw_bus_probe_thread+0xa08
fork_exit(c06689e0,c6ff9000,f3ed6d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ed6d70, ebp = 0 ---

Tracing command usb pid 14 tid 100057 td 0xc6fd4000
sched_switch(c6fd4000,0,104,191,4a8a536e,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fd4000,0,c0cc0b3d,260,c6fd4000,...) at sleepq_switch+0x15f
sleepq_wait(c6fd9d0c,0,f3ebfcbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fd9d0c,c6fd9dac,c0caa764,6c,c6fd9d14,...) at _cv_wait+0x240
usb_process(c6fd9d04,f3ebfd38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fd9d04,f3ebfd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ebfd70, ebp = 0 ---

Tracing command usb pid 14 tid 100056 td 0xc6fd4240
sched_switch(c6fd4240,0,104,191,8d1a1244,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fd4240,0,c0cc0b3d,260,c6fd4240,...) at sleepq_switch+0x15f
sleepq_wait(c6fd9cdc,0,f3ebccbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fd9cdc,c6fd9dac,c0caa764,6c,c6fd9ce4,...) at _cv_wait+0x240
usb_process(c6fd9cd4,f3ebcd38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fd9cd4,f3ebcd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ebcd70, ebp = 0 ---

Tracing command usb pid 14 tid 100055 td 0xc6fd4480
sched_switch(c6fd4480,0,104,191,4a232ba4,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fd4480,0,c0cc0b3d,260,c6fd4480,...) at sleepq_switch+0x15f
sleepq_wait(c6fd9cac,0,f3eb9cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fd9cac,c6fd9dac,c0caa764,6c,c6fd9cb4,...) at _cv_wait+0x240
usb_process(c6fd9ca4,f3eb9d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fd9ca4,f3eb9d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3eb9d70, ebp = 0 ---

Tracing command usb pid 14 tid 100054 td 0xc6fd46c0
sched_switch(c6fd46c0,0,104,191,4a231a55,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fd46c0,0,c0cc0b3d,260,c6fd46c0,...) at sleepq_switch+0x15f
sleepq_wait(c6fd9c7c,0,f3eb6cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fd9c7c,c6fd9dac,c0caa764,6c,c6fd9c84,...) at _cv_wait+0x240
usb_process(c6fd9c74,f3eb6d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fd9c74,f3eb6d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3eb6d70, ebp = 0 ---

Tracing command usb pid 14 tid 100052 td 0xc6fd4b40
sched_switch(c6fd4b40,0,104,191,4a2303d8,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fd4b40,0,c0cc0b3d,260,c6fd4b40,...) at sleepq_switch+0x15f
sleepq_wait(c6fcfb5c,0,f3eaecbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcfb5c,c6fcfbfc,c0caa764,6c,c6fcfb64,...) at _cv_wait+0x240
usb_process(c6fcfb54,f3eaed38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcfb54,f3eaed38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3eaed70, ebp = 0 ---

Tracing command usb pid 14 tid 100051 td 0xc6fd4d80
sched_switch(c6fd4d80,0,104,191,7e2f78e1,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fd4d80,0,c0cc0b3d,260,c6fd4d80,...) at sleepq_switch+0x15f
sleepq_wait(c6fcfb2c,0,f3eabcbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcfb2c,c6fcfbfc,c0caa764,6c,c6fcfb34,...) at _cv_wait+0x240
usb_process(c6fcfb24,f3eabd38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcfb24,f3eabd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3eabd70, ebp = 0 ---

Tracing command usb pid 14 tid 100050 td 0xc6f966c0
sched_switch(c6f966c0,0,104,191,49beb366,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f966c0,0,c0cc0b3d,260,c6f966c0,...) at sleepq_switch+0x15f
sleepq_wait(c6fcfafc,0,f3ea8cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcfafc,c6fcfbfc,c0caa764,6c,c6fcfb04,...) at _cv_wait+0x240
usb_process(c6fcfaf4,f3ea8d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcfaf4,f3ea8d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ea8d70, ebp = 0 ---

Tracing command usb pid 14 tid 100049 td 0xc6f96900
sched_switch(c6f96900,0,104,191,49bea217,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f96900,0,c0cc0b3d,260,c6f96900,...) at sleepq_switch+0x15f
sleepq_wait(c6fcfacc,0,f3ea5cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcfacc,c6fcfbfc,c0caa764,6c,c6fcfad4,...) at _cv_wait+0x240
usb_process(c6fcfac4,f3ea5d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcfac4,f3ea5d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ea5d70, ebp = 0 ---

Tracing command usb pid 14 tid 100048 td 0xc6f96b40
sched_switch(c6f96b40,0,104,191,49be8ea9,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f96b40,0,c0cc0b3d,260,c6f96b40,...) at sleepq_switch+0x15f
sleepq_wait(c6fcbb5c,0,f3ea1cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcbb5c,c6fcbbfc,c0caa764,6c,c6fcbb64,...) at _cv_wait+0x240
usb_process(c6fcbb54,f3ea1d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcbb54,f3ea1d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ea1d70, ebp = 0 ---

Tracing command usb pid 14 tid 100047 td 0xc6f96d80
sched_switch(c6f96d80,0,104,191,7ddd1452,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f96d80,0,c0cc0b3d,260,c6f96d80,...) at sleepq_switch+0x15f
sleepq_wait(c6fcbb2c,0,f3e9ecbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcbb2c,c6fcbbfc,c0caa764,6c,c6fcbb34,...) at _cv_wait+0x240
usb_process(c6fcbb24,f3e9ed38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcbb24,f3e9ed38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e9ed70, ebp = 0 ---

Tracing command usb pid 14 tid 100046 td 0xc6fbf000
sched_switch(c6fbf000,0,104,191,495a3aaa,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fbf000,0,c0cc0b3d,260,c6fbf000,...) at sleepq_switch+0x15f
sleepq_wait(c6fcbafc,0,f3e9bcbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcbafc,c6fcbbfc,c0caa764,6c,c6fcbb04,...) at _cv_wait+0x240
usb_process(c6fcbaf4,f3e9bd38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcbaf4,f3e9bd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e9bd70, ebp = 0 ---

Tracing command usb pid 14 tid 100045 td 0xc6fbf240
sched_switch(c6fbf240,0,104,191,495a28dd,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fbf240,0,c0cc0b3d,260,c6fbf240,...) at sleepq_switch+0x15f
sleepq_wait(c6fcbacc,0,f3e98cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fcbacc,c6fcbbfc,c0caa764,6c,c6fcbad4,...) at _cv_wait+0x240
usb_process(c6fcbac4,f3e98d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fcbac4,f3e98d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e98d70, ebp = 0 ---

Tracing command usb pid 14 tid 100044 td 0xc6fbf480
sched_switch(c6fbf480,0,104,191,495a1673,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fbf480,0,c0cc0b3d,260,c6fbf480,...) at sleepq_switch+0x15f
sleepq_wait(c6fc1b5c,0,f3e94cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fc1b5c,c6fc1bfc,c0caa764,6c,c6fc1b64,...) at _cv_wait+0x240
usb_process(c6fc1b54,f3e94d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fc1b54,f3e94d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e94d70, ebp = 0 ---

Tracing command usb pid 14 tid 100043 td 0xc6fbf6c0
sched_switch(c6fbf6c0,0,104,191,7d8aa883,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fbf6c0,0,c0cc0b3d,260,c6fbf6c0,...) at sleepq_switch+0x15f
sleepq_wait(c6fc1b2c,0,f3e91cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fc1b2c,c6fc1bfc,c0caa764,6c,c6fc1b34,...) at _cv_wait+0x240
usb_process(c6fc1b24,f3e91d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fc1b24,f3e91d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e91d70, ebp = 0 ---

Tracing command usb pid 14 tid 100042 td 0xc6fbf900
sched_switch(c6fbf900,0,104,191,48f59ab1,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fbf900,0,c0cc0b3d,260,c6fbf900,...) at sleepq_switch+0x15f
sleepq_wait(c6fc1afc,0,f3e8ecbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fc1afc,c6fc1bfc,c0caa764,6c,c6fc1b04,...) at _cv_wait+0x240
usb_process(c6fc1af4,f3e8ed38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fc1af4,f3e8ed38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e8ed70, ebp = 0 ---

Tracing command usb pid 14 tid 100041 td 0xc6fbfb40
sched_switch(c6fbfb40,0,104,191,48f58980,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fbfb40,0,c0cc0b3d,260,c6fbfb40,...) at sleepq_switch+0x15f
sleepq_wait(c6fc1acc,0,f3e8bcbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fc1acc,c6fc1bfc,c0caa764,6c,c6fc1ad4,...) at _cv_wait+0x240
usb_process(c6fc1ac4,f3e8bd38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fc1ac4,f3e8bd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e8bd70, ebp = 0 ---

Tracing command usb pid 14 tid 100039 td 0xc6fc0000
sched_switch(c6fc0000,0,104,191,48f576e8,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fc0000,0,c0cc0b3d,260,c6fc0000,...) at sleepq_switch+0x15f
sleepq_wait(c6fbbb5c,0,f3e84cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fbbb5c,c6fbbbfc,c0caa764,6c,c6fbbb64,...) at _cv_wait+0x240
usb_process(c6fbbb54,f3e84d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fbbb54,f3e84d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e84d70, ebp = 0 ---

Tracing command usb pid 14 tid 100038 td 0xc6f54d80
sched_switch(c6f54d80,0,104,191,7d53bc15,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f54d80,0,c0cc0b3d,260,c6f54d80,...) at sleepq_switch+0x15f
sleepq_wait(c6fbbb2c,0,f3e81cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fbbb2c,c6fbbbfc,c0caa764,6c,c6fbbb34,...) at _cv_wait+0x240
usb_process(c6fbbb24,f3e81d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fbbb24,f3e81d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e81d70, ebp = 0 ---

Tracing command usb pid 14 tid 100037 td 0xc6f94000
sched_switch(c6f94000,0,104,191,489139fa,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f94000,0,c0cc0b3d,260,c6f94000,...) at sleepq_switch+0x15f
sleepq_wait(c6fbbafc,0,f3e7ecbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fbbafc,c6fbbbfc,c0caa764,6c,c6fbbb04,...) at _cv_wait+0x240
usb_process(c6fbbaf4,f3e7ed38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fbbaf4,f3e7ed38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e7ed70, ebp = 0 ---

Tracing command usb pid 14 tid 100036 td 0xc6f94240
sched_switch(c6f94240,0,104,191,48912862,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f94240,0,c0cc0b3d,260,c6f94240,...) at sleepq_switch+0x15f
sleepq_wait(c6fbbacc,0,f3e7bcbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fbbacc,c6fbbbfc,c0caa764,6c,c6fbbad4,...) at _cv_wait+0x240
usb_process(c6fbbac4,f3e7bd38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fbbac4,f3e7bd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e7bd70, ebp = 0 ---

Tracing command usb pid 14 tid 100034 td 0xc6f946c0
sched_switch(c6f946c0,0,104,191,48911603,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f946c0,0,c0cc0b3d,260,c6f946c0,...) at sleepq_switch+0x15f
sleepq_wait(c6fb6b5c,0,f3e74cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fb6b5c,c6fb6bfc,c0caa764,6c,c6fb6b64,...) at _cv_wait+0x240
usb_process(c6fb6b54,f3e74d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fb6b54,f3e74d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e74d70, ebp = 0 ---

Tracing command usb pid 14 tid 100033 td 0xc6f94900
sched_switch(c6f94900,0,104,191,7d0181e9,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f94900,0,c0cc0b3d,260,c6f94900,...) at sleepq_switch+0x15f
sleepq_wait(c6fb6b2c,0,f3e71cbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fb6b2c,c6fb6bfc,c0caa764,6c,c6fb6b34,...) at _cv_wait+0x240
usb_process(c6fb6b24,f3e71d38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fb6b24,f3e71d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e71d70, ebp = 0 ---

Tracing command usb pid 14 tid 100032 td 0xc6f94b40
sched_switch(c6f94b40,0,104,191,482c0341,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f94b40,0,c0cc0b3d,260,c6f94b40,...) at sleepq_switch+0x15f
sleepq_wait(c6fb6afc,0,f3e6ecbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fb6afc,c6fb6bfc,c0caa764,6c,c6fb6b04,...) at _cv_wait+0x240
usb_process(c6fb6af4,f3e6ed38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fb6af4,f3e6ed38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e6ed70, ebp = 0 ---

Tracing command usb pid 14 tid 100031 td 0xc6f94d80
sched_switch(c6f94d80,0,104,191,482befb3,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6f94d80,0,c0cc0b3d,260,c6f94d80,...) at sleepq_switch+0x15f
sleepq_wait(c6fb6acc,0,f3e6bcbc,1,0,...) at sleepq_wait+0x63
_cv_wait(c6fb6acc,c6fb6bfc,c0caa764,6c,c6fb6ad4,...) at _cv_wait+0x240
usb_process(c6fb6ac4,f3e6bd38,c0cb78fd,343,c6f422a8,...) at usb_process+0x193
fork_exit(c07d2750,c6fb6ac4,f3e6bd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e6bd70, ebp = 0 ---

Tracing command yarrow pid 13 tid 100017 td 0xc6d7e240
sched_switch(c6d7e240,0,104,191,a31b2956,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6d7e240,0,c0cc0b3d,283,2,...) at sleepq_switch+0x15f
sleepq_timedwait(c0e1b9a4,0,c0cb1b3b,2,0,...) at sleepq_timedwait+0x6b
_sleep(c0e1b9a4,0,0,c0cb1b3b,64,...) at _sleep+0x339
pause(c0cb1b3b,64,c0c9e25c,111,0,...) at pause+0x47
random_kthread(0,c6b55d38,c0cb78fd,343,c6f42550,...) at random_kthread+0x1ef
fork_exit(c0745e60,0,c6b55d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b55d70, ebp = 0 ---

Tracing command g_down pid 4 tid 100016 td 0xc6d7e480
sched_switch(c6d7e480,0,104,191,aa83f2eb,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,4c,...) at mi_switch+0x200
sleepq_switch(c6d7e480,0,c0cc0b3d,260,0,...) at sleepq_switch+0x15f
sleepq_wait(c0e19744,4c,c0cb1b3b,0,0,...) at sleepq_wait+0x63
_sleep(c0e19744,c0e196a8,24c,c0cb1b3b,0,...) at _sleep+0x36b
g_io_schedule_down(c6d7e480,0,c0cb324e,74,0,...) at g_io_schedule_down+0x56
g_down_procbody(0,c6b52d38,c0cb78fd,343,c6d7a000,...) at g_down_procbody+0x8d
fork_exit(c083f8f0,0,c6b52d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b52d70, ebp = 0 ---

Tracing command g_up pid 3 tid 100015 td 0xc6d7e6c0
sched_switch(c6d7e6c0,0,104,191,aa8dcb32,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,4c,...) at mi_switch+0x200
sleepq_switch(c6d7e6c0,0,c0cc0b3d,260,0,...) at sleepq_switch+0x15f
sleepq_wait(c0e19740,4c,c0cb1b3b,0,0,...) at sleepq_wait+0x63
_sleep(c0e19740,c0e196c8,24c,c0cb1b3b,0,...) at _sleep+0x36b
g_io_schedule_up(c6d7e6c0,0,c0cb324e,5d,0,...) at g_io_schedule_up+0x11e
g_up_procbody(0,c6b4fd38,c0cb78fd,343,c6d7a2a8,...) at g_up_procbody+0x8d
fork_exit(c083f980,0,c6b4fd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b4fd70, ebp = 0 ---

Tracing command g_event pid 2 tid 100014 td 0xc6d7e900
sched_switch(c6d7e900,0,104,191,a5ed6ee6,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,4c,...) at mi_switch+0x200
sleepq_switch(c6d7e900,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f
sleepq_timedwait(c0e19738,4c,c0cb1b3b,0,0,...) at sleepq_timedwait+0x6b
_sleep(c0e19738,0,4c,c0cb1b3b,64,...) at _sleep+0x339
g_event_procbody(0,c6b4cd38,c0cb78fd,343,c6d7a550,...) at g_event_procbody+0xcb
fork_exit(c083fa10,0,c6b4cd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b4cd70, ebp = 0 ---

Tracing command intr pid 12 tid 100065 td 0xc70f7b40
fork_trampoline() at fork_trampoline

Tracing command intr pid 12 tid 100064 td 0xc70f7d80
sched_switch(c70f7d80,0,109,191,48273c31,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d802f0,...) at mi_switch+0x200
ithread_loop(c70f05a0,f3ee9d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c70f05a0,f3ee9d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ee9d70, ebp = 0 ---

Tracing command intr pid 12 tid 100062 td 0xc6fc0480
sched_switch(c6fc0480,0,109,191,6c192f3,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c7002470,...) at mi_switch+0x200
ithread_loop(c70e46a0,f3ed9d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c70e46a0,f3ed9d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ed9d70, ebp = 0 ---

Tracing command intr pid 12 tid 100059 td 0xc6fc0b40
sched_switch(c6fc0b40,0,109,191,47e353de,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f870,...) at mi_switch+0x200
ithread_loop(c6fdb8c0,f3ecdd38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6fdb8c0,f3ecdd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ecdd70, ebp = 0 ---

Tracing command intr pid 12 tid 100058 td 0xc6fc0d80
sched_switch(c6fc0d80,0,109,191,b7d5ffa6,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7fb70,...) at mi_switch+0x200
ithread_loop(c6faaba0,f3ec5d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6faaba0,f3ec5d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3ec5d70, ebp = 0 ---

Tracing command intr pid 12 tid 100053 td 0xc6fd4900
fork_trampoline() at fork_trampoline

Tracing command intr pid 12 tid 100040 td 0xc6fbfd80
sched_switch(c6fbfd80,0,109,191,6db6addd,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f970,...) at mi_switch+0x200
ithread_loop(c6faa6b0,f3e88d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6faa6b0,f3e88d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e88d70, ebp = 0 ---

Tracing command intr pid 12 tid 100035 td 0xc6f94480
sched_switch(c6f94480,0,109,191,6d200220,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f9f0,...) at mi_switch+0x200
ithread_loop(c6fa5ab0,f3e78d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6fa5ab0,f3e78d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e78d70, ebp = 0 ---

Tracing command intr pid 12 tid 100030 td 0xc6f96000
sched_switch(c6f96000,0,109,191,cb4faaad,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7fa70,...) at mi_switch+0x200
ithread_loop(c6fa5870,f3e68d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6fa5870,f3e68d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3e68d70, ebp = 0 ---

Tracing command intr pid 12 tid 100029 td 0xc6f96240
sched_switch(c6f96240,0,109,191,aa8cefa4,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f770,...) at mi_switch+0x200
ithread_loop(c6fa5080,f3d59d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6fa5080,f3d59d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3d59d70, ebp = 0 ---

Tracing command intr pid 12 tid 100028 td 0xc6f96480
sched_switch(c6f96480,0,109,191,c410aeeb,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6f80a70,...) at mi_switch+0x200
ithread_loop(c6f909f0,c6ba4d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6f909f0,c6ba4d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6ba4d70, ebp = 0 ---

Tracing command intr pid 12 tid 100027 td 0xc6dc26c0
sched_switch(c6dc26c0,0,109,191,4827e254,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7fdf0,...) at mi_switch+0x200
ithread_loop(c6d79660,c6b9ad38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d79660,c6b9ad38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b9ad70, ebp = 0 ---

Tracing command intr pid 12 tid 100023 td 0xc6f54000
sched_switch(c6f54000,0,109,191,6ed1d4fe,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d80870,...) at mi_switch+0x200
ithread_loop(c6d79410,c6b67d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d79410,c6b67d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b67d70, ebp = 0 ---

Tracing command intr pid 12 tid 100022 td 0xc6f54240
sched_switch(c6f54240,0,109,191,4c6cd4a9,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d80a70,...) at mi_switch+0x200
ithread_loop(c6d0fa60,c6b64d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d0fa60,c6b64d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b64d70, ebp = 0 ---

Tracing command intr pid 12 tid 100021 td 0xc6f54480
fork_trampoline() at fork_trampoline

Tracing command intr pid 12 tid 100019 td 0xc6f54900
sched_switch(c6f54900,0,109,191,ee90613b,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d80b70,...) at mi_switch+0x200
ithread_loop(c6d0fa90,c6b5bd38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d0fa90,c6b5bd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b5bd70, ebp = 0 ---

Tracing command intr pid 12 tid 100012 td 0xc6d7ed80
sched_switch(c6d7ed80,0,109,191,710991f9,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f070,...) at mi_switch+0x200
ithread_loop(c6d79210,c6b46d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d79210,c6b46d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b46d70, ebp = 0 ---

Tracing command intr pid 12 tid 100011 td 0xc6dc2000
sched_switch(c6dc2000,0,109,191,a31956d1,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f0f0,...) at mi_switch+0x200
ithread_loop(c6d79220,c6b43d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d79220,c6b43d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b43d70, ebp = 0 ---

Tracing command intr pid 12 tid 100010 td 0xc6dc2240
sched_switch(c6dc2240,0,109,191,5af32652,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f170,...) at mi_switch+0x200
ithread_loop(c6d79230,c6b40d38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d79230,c6b40d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b40d70, ebp = 0 ---

Tracing command intr pid 12 tid 100009 td 0xc6dc2480
sched_switch(c6dc2480,0,109,191,a5ed6cb0,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f570,...) at mi_switch+0x200
ithread_loop(c6d79240,c6b3dd38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d79240,c6b3dd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b3dd70, ebp = 0 ---

Tracing command intr pid 12 tid 100008 td 0xc6d7c000
sched_switch(c6d7c000,0,109,191,ab470937,...) at sched_switch+0x36a
mi_switch(109,0,c0cb7b7c,52d,c6d7f5f0,...) at mi_switch+0x200
ithread_loop(c6d79250,c6b3ad38,c0cb78fd,343,c6d7a7f8,...) at ithread_loop+0x1f6
fork_exit(c087bd50,c6d79250,c6b3ad38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b3ad70, ebp = 0 ---

Tracing command intr pid 12 tid 100007 td 0xc6d7c240
fork_trampoline() at fork_trampoline

Tracing command idle pid 11 tid 100006 td 0xc6d7c480
sched_switch(c6d7c480,0,60c,18c,aa8dc4d3,...) at sched_switch+0x36a
mi_switch(60c,0,c0cbe0b5,806,0,...) at mi_switch+0x200
sched_preempt(c6d7c480,0,1f4,c6b33c6c,c0bc086e,...) at sched_preempt+0x9f
ipi_bitmap_handler(8,28,28,c6d49e18,c6d49e00,...) at ipi_bitmap_handler+0x34
Xipi_intr_bitmap_handler() at Xipi_intr_bitmap_handler+0x2e
--- interrupt, eip = 0xc0bb7f55, esp = 0xc6b33c6c, ebp = 0xc6b33c6c ---
acpi_cpu_c1(0,c6b33cd4,0,ffffffff,c6b33cb4,...) at acpi_cpu_c1+0x5
acpi_cpu_idle(2710,c0e1f540,c6b33cb4,c0bcc24b,1,...) at acpi_cpu_idle+0x11c
cpu_idle_amdc1e(1,c6b33cf8,c08c849e,1,c6b33cd4,...) at cpu_idle_amdc1e+0x56
cpu_idle(1,c6b33cd4,c0cbe0b5,9fa,c6d7c480,...) at cpu_idle+0x1b
sched_idletd(0,c6b33d38,c0cb78fd,343,c6d7aaa0,...) at sched_idletd+0x23e
fork_exit(c08c8260,0,c6b33d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b33d70, ebp = 0 ---

Tracing command idle pid 11 tid 100005 td 0xc6d7c6c0
cpustop_handler(2,c6b30c20,c0bde7c6,c6b30bc0,c07a639c,...) at cpustop_handler+0x32
ipi_nmi_handler(c6b30bc0,c07a639c,c6feb600,c0e1bbbc,c6d7aaa0,...) at ipi_nmi_handler+0x2f
trap(c6b30c2c) at trap+0x36
calltrap() at calltrap+0x6
--- trap 0x13, eip = 0xc0bb7f55, esp = 0xc6b30c6c, ebp = 0xc6b30c6c ---
acpi_cpu_c1(28,c6b30cd4,1,ffffffff,c6b30cb4,...) at acpi_cpu_c1+0x5
acpi_cpu_idle(ffffffff,0,c6b30cb4,c0bcc24b,0,...) at acpi_cpu_idle+0x11c
cpu_idle_amdc1e(0,c6b30cf8,c08c849e,0,c6b30cd4,...) at cpu_idle_amdc1e+0x56
cpu_idle(0,c6b30cd4,c0cbe0b5,9fa,c6d7c6c0,...) at cpu_idle+0x1b
sched_idletd(0,c6b30d38,c0cb78fd,343,c6d7aaa0,...) at sched_idletd+0x23e
fork_exit(c08c8260,0,c6b30d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b30d70, ebp = 0 ---

Tracing command idle pid 11 tid 100004 td 0xc6d7c900
cpustop_handler(4,c6b2dc20,c0bde7c6,c0e1bbf8,c6b2dbb4,...) at cpustop_handler+0x32
ipi_nmi_handler(c0e1bbf8,c6b2dbb4,c08930e4,c0e1bbf8,c6d7aaa0,...) at ipi_nmi_handler+0x2f
trap(c6b2dc2c) at trap+0x36
calltrap() at calltrap+0x6
--- trap 0x13, eip = 0xc0bb7f55, esp = 0xc6b2dc6c, ebp = 0xc6b2dc6c ---
acpi_cpu_c1(c0bc072f,c6b2dcd4,2,ffffffff,c6b2dcb4,...) at acpi_cpu_c1+0x5
acpi_cpu_idle(ffffffff,0,c6b2dcb4,c0bcc24b,0,...) at acpi_cpu_idle+0x11c
cpu_idle_amdc1e(0,c6b2dcf8,c08c849e,0,c6b2dcd4,...) at cpu_idle_amdc1e+0x56
cpu_idle(0,c6b2dcd4,c0cbe0b5,9fa,c6d7c900,...) at cpu_idle+0x1b
sched_idletd(0,c6b2dd38,c0cb78fd,343,c6d7aaa0,...) at sched_idletd+0x23e
fork_exit(c08c8260,0,c6b2dd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b2dd70, ebp = 0 ---

Tracing command idle pid 11 tid 100003 td 0xc6d7cb40
cpustop_handler(8,c6b2ac20,c0bde7c6,c0e1bc34,c6b2abb4,...) at cpustop_handler+0x32
ipi_nmi_handler(c0e1bc34,c6b2abb4,c08930e4,c0e1bc34,c6d7aaa0,...) at ipi_nmi_handler+0x2f
trap(c6b2ac2c) at trap+0x36
calltrap() at calltrap+0x6
--- trap 0x13, eip = 0xc0bb7f55, esp = 0xc6b2ac6c, ebp = 0xc6b2ac6c ---
acpi_cpu_c1(3,c6b2acd4,3,ffffffff,c6b2acb4,...) at acpi_cpu_c1+0x5
acpi_cpu_idle(ffffffff,0,c6b2acb4,c0bcc24b,0,...) at acpi_cpu_idle+0x11c
cpu_idle_amdc1e(0,c6b2acf8,c08c849e,0,c6b2acd4,...) at cpu_idle_amdc1e+0x56
cpu_idle(0,c6b2acd4,c0cbe0b5,9fa,c6d7cb40,...) at cpu_idle+0x1b
sched_idletd(0,c6b2ad38,c0cb78fd,343,c6d7aaa0,...) at sched_idletd+0x23e
fork_exit(c08c8260,0,c6b2ad38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b2ad70, ebp = 0 ---

Tracing command init pid 1 tid 100002 td 0xc6d7cd80
sched_switch(c6d7cd80,0,104,191,6cbb1fa4,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,5c,...) at mi_switch+0x200
sleepq_switch(c6d7cd80,0,c0cc0b3d,1a0,5c,...) at sleepq_switch+0x15f
sleepq_catch_signals(c0cc0b3d,160,0,100,100,...) at sleepq_catch_signals+0xb7
sleepq_wait_sig(c6d7ad48,5c,c0cc3425,100,0,...) at sleepq_wait_sig+0x17
_sleep(c6d7ad48,c6d7add0,15c,c0cc3425,0,...) at _sleep+0x354
kern_wait(c6d7cd80,ffffffff,c6b26c74,2,0,...) at kern_wait+0xb76
wait4(c6d7cd80,c6b26cf8,c,c6d7cd80,c6d7ad48,...) at wait4+0x3b
syscall(c6b26d38) at syscall+0x230
Xint0x80_syscall() at Xint0x80_syscall+0x20
--- syscall (7, FreeBSD ELF32, wait4), eip = 0x8054cc3, esp = 0xbfbfe79c, ebp = 0xbfbfe7b8 ---

Tracing command audit pid 10 tid 100001 td 0xc6d7e000
sched_switch(c6d7e000,0,104,191,4829743c,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6d7e000,0,c0cc0b3d,260,c6d7e000,...) at sleepq_switch+0x15f
sleepq_wait(c0f93400,0,c6b23c9c,1,0,...) at sleepq_wait+0x63
_cv_wait(c0f93400,c0f933e4,c0ce16ac,194,0,...) at _cv_wait+0x240
audit_worker(0,c6b23d38,c0cb78fd,343,c6d7b000,...) at audit_worker+0x84
fork_exit(c0aad930,0,c6b23d38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xc6b23d70, ebp = 0 ---

Tracing command kernel pid 0 tid 100066 td 0xc70f7900
sched_switch(c70f7900,0,104,191,c44240f9,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c70f7900,0,c0cc0b3d,283,2,...) at sleepq_switch+0x15f
sleepq_timedwait(c0e1b9a4,0,c0cb5eb8,2,0,...) at sleepq_timedwait+0x6b
_sleep(c0e1b9a4,0,0,c0cb5eb8,bb8,...) at _sleep+0x339
pause(c0cb5eb8,bb8,11e,11c,c0e198a8,...) at pause+0x47
deadlkres(0,f3eefd38,c0cb78fd,343,c0e19820,...) at deadlkres+0x2f2
fork_exit(c085f0a0,0,f3eefd38) at fork_exit+0xb8
fork_trampoline() at fork_trampoline+0x8
--- trap 0, eip = 0, esp = 0xf3eefd70, ebp = 0 ---

Tracing command kernel pid 0 tid 100060 td 0xc6fc0900
sched_switch(c6fc0900,0,104,191,47e39362,...) at sched_switch+0x36a
mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200
sleepq_switch(c6fc0900,0,c0cc0b3d,260,c6fc0900,...) at sleepq_switch+0x15f
sleepq_wait(c6ff8640,0,c0cbd0b5,c0cb1b3b,0,...) at sleepq_wait+0x63
msleep_spin(c6ff8640,c6ff8658,c0cb1b3b,0,c0cbacdc,...) at msleep_spin+0x21d
taskqueue_thread_loop(c6ffd49c,f3ed3d38,c0cb78fd,343,c0e19820,...) at taskqueue_thread_loop+0x94
at taskqueue_thread_loop+0x94 fork_exit(c08e1e60,c6ffd49c,f3ed3d38) at fork_exit+0xb8 fork_trampoline() at fork_trampoline+0x8 --- trap 0, eip = 0, esp = 0xf3ed3d70, ebp = 0 --- Tracing command kernel pid 0 tid 100026 td 0xc6dc2900 sched_switch(c6dc2900,0,104,191,b544d4ce,...) at sched_switch+0x36a mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200 sleepq_switch(c6dc2900,0,c0cc0b3d,260,c6dc2900,...) at sleepq_switch+0x15f sleepq_wait(c6f3fd40,0,c0cbd0b5,c0cb1b3b,0,...) at sleepq_wait+0x63 msleep_spin(c6f3fd40,c6f3fd58,c0cb1b3b,0,c0cbacdc,...) at msleep_spin+0x21d taskqueue_thread_loop(c0dea800,c6b70d38,c0cb78fd,343,c0e19820,...) at taskqueue_thread_loop+0x94 fork_exit(c08e1e60,c0dea800,c6b70d38) at fork_exit+0xb8 fork_trampoline() at fork_trampoline+0x8 --- trap 0, eip = 0, esp = 0xc6b70d70, ebp = 0 --- Tracing command kernel pid 0 tid 100025 td 0xc6dc2b40 sched_switch(c6dc2b40,0,104,191,b544c610,...) at sched_switch+0x36a mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200 sleepq_switch(c6dc2b40,0,c0cc0b3d,260,c6dc2b40,...) at sleepq_switch+0x15f sleepq_wait(c6f3fd40,0,c0cbd0b5,c0cb1b3b,0,...) at sleepq_wait+0x63 msleep_spin(c6f3fd40,c6f3fd58,c0cb1b3b,0,c0cbacdc,...) at msleep_spin+0x21d taskqueue_thread_loop(c0dea800,c6b6dd38,c0cb78fd,343,c0e19820,...) at taskqueue_thread_loop+0x94 fork_exit(c08e1e60,c0dea800,c6b6dd38) at fork_exit+0xb8 fork_trampoline() at fork_trampoline+0x8 --- trap 0, eip = 0, esp = 0xc6b6dd70, ebp = 0 --- Tracing command kernel pid 0 tid 100024 td 0xc6dc2d80 sched_switch(c6dc2d80,0,104,191,b544b5a2,...) at sched_switch+0x36a mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200 sleepq_switch(c6dc2d80,0,c0cc0b3d,260,c6dc2d80,...) at sleepq_switch+0x15f sleepq_wait(c6f3fd40,0,c0cbd0b5,c0cb1b3b,0,...) at sleepq_wait+0x63 msleep_spin(c6f3fd40,c6f3fd58,c0cb1b3b,0,c0cbacdc,...) at msleep_spin+0x21d taskqueue_thread_loop(c0dea800,c6b6ad38,c0cb78fd,343,c0e19820,...) at taskqueue_thread_loop+0x94 fork_exit(c08e1e60,c0dea800,c6b6ad38) at fork_exit+0xb8 fork_trampoline() at fork_trampoline+0x8 --- trap 0, eip = 0, esp = 0xc6b6ad70, ebp = 0 --- Tracing command kernel pid 0 tid 100020 td 0xc6f546c0 sched_switch(c6f546c0,0,104,191,6ed359b0,...) at sched_switch+0x36a mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200 sleepq_switch(c6f546c0,0,c0cc0b3d,260,0,...) at sleepq_switch+0x15f sleepq_wait(c6f400c0,0,c0cb1b3b,0,0,...) at sleepq_wait+0x63 _sleep(c6f400c0,c6f400d8,0,c0cb1b3b,0,...) at _sleep+0x36b taskqueue_thread_loop(c0e2e248,c6b5ed38,c0cb78fd,343,c0e19820,...) at taskqueue_thread_loop+0xba fork_exit(c08e1e60,c0e2e248,c6b5ed38) at fork_exit+0xb8 fork_trampoline() at fork_trampoline+0x8 --- trap 0, eip = 0, esp = 0xc6b5ed70, ebp = 0 --- Tracing command kernel pid 0 tid 100018 td 0xc6f54b40 sched_switch(c6f54b40,0,104,191,b53869cb,...) at sched_switch+0x36a mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200 sleepq_switch(c6f54b40,0,c0cc0b3d,260,0,...) at sleepq_switch+0x15f sleepq_wait(c6f40340,0,c0cb1b3b,0,0,...) at sleepq_wait+0x63 _sleep(c6f40340,c6f40358,0,c0cb1b3b,0,...) at _sleep+0x36b taskqueue_thread_loop(c0e1a0b8,c6b58d38,c0cb78fd,343,c0e19820,...) at taskqueue_thread_loop+0xba fork_exit(c08e1e60,c0e1a0b8,c6b58d38) at fork_exit+0xb8 fork_trampoline() at fork_trampoline+0x8 --- trap 0, eip = 0, esp = 0xc6b58d70, ebp = 0 --- Tracing command kernel pid 0 tid 100013 td 0xc6d7eb40 sched_switch(c6d7eb40,0,104,191,73da3a35,...) at sched_switch+0x36a mi_switch(104,0,c0cc0b3d,1eb,0,...) at mi_switch+0x200 sleepq_switch(c6d7eb40,0,c0cc0b3d,260,0,...) 
at sleepq_switch+0x15f sleepq_wait(c6d61c80,0,c0cb1b3b,0,0,...) at sleepq_wait+0x63 _sleep(c6d61c80,c6d61c98,0,c0cb1b3b,0,...) at _sleep+0x36b taskqueue_thread_loop(c0e2cce0,c6b49d38,c0cb78fd,343,c0e19820,...) at taskqueue_thread_loop+0xba fork_exit(c08e1e60,c0e2cce0,c6b49d38) at fork_exit+0xb8 fork_trampoline() at fork_trampoline+0x8 --- trap 0, eip = 0, esp = 0xc6b49d70, ebp = 0 --- Tracing command kernel pid 0 tid 100000 td 0xc0e19ad0 sched_switch(c0e19ad0,0,104,191,a2b23ab4,...) at sched_switch+0x36a mi_switch(104,0,c0cc0b3d,1eb,44,...) at mi_switch+0x200 sleepq_switch(c0e19ad0,0,c0cc0b3d,283,0,...) at sleepq_switch+0x15f sleepq_timedwait(c0e19820,44,c0cbe92a,0,0,...) at sleepq_timedwait+0x6b _sleep(c0e19820,0,44,c0cbe92a,2710,...) at _sleep+0x339 scheduler(0,141ec00,141ec00,141e000,1425000,...) at scheduler+0x23e mi_startup() at mi_startup+0x96 begin() at begin+0x2c
db:0:allt> call doadump
Physical memory: 3439 MB
Dumping 113 MB: 98 82 66 50 34 18 2
Dump complete = 0xf
db:0:doadump> reset
(kgdb) bt
#0  doadump () at pcpu.h:246
#1  0xc04d2289 in db_fncall (dummy1=0xc08d555a, dummy2=0x0, dummy3=0xffffffff, dummy4=0xf62817c0 "Ô\027(ö") at ../../../ddb/db_command.c:548
#2  0xc04d26bf in db_command (last_cmdp=0xc0de89dc, cmd_table=0x0, dopager=0x0) at ../../../ddb/db_command.c:445
#3  0xc04d2774 in db_command_script (command=0xc0de98e8 "call doadump") at ../../../ddb/db_command.c:516
#4  0xc04d6940 in db_script_exec (scriptname=0xc0de9240 "doadump", warnifnotfound=Variable "warnifnotfound" is not available.) at ../../../ddb/db_script.c:302
#5  0xc04d69d1 in db_run_cmd (addr=0x1, have_addr=0x0, count=0xc0fe8180, modif=0xf62818f8 "") at ../../../ddb/db_script.c:375
#6  0xc04d2681 in db_command (last_cmdp=0xc0de89dc, cmd_table=0x0, dopager=0x1) at ../../../ddb/db_command.c:445
#7  0xc04d27da in db_command_loop () at ../../../ddb/db_command.c:498
#8  0xc04d467d in db_trap (type=0x3, code=0x0) at ../../../ddb/db_main.c:229
#9  0xc08d53d6 in kdb_trap (type=0x3, code=0x0, tf=0xf6281aa0) at ../../../kern/subr_kdb.c:535
#10 0xc0bdedcb in trap (frame=0xf6281aa0) at ../../../i386/i386/trap.c:694
#11 0xc0bc014b in calltrap () at ../../../i386/i386/exception.s:165
#12 0xc08d555a in kdb_enter (why=0xc0cbc551 "panic", msg=0xc0cbc551 "panic") at cpufunc.h:71
#13 0xc08a2e96 in panic (fmt=0xc0cbac54 "mutex %s not owned at %s:%d") at ../../../kern/kern_shutdown.c:562
#14 0xc0892d57 in _mtx_assert (m=0xc0f94480, what=0x4, file=0xc0ce8d1b "../../../vm/vm_page.c", line=0x2e6) at ../../../kern/kern_mutex.c:706
#15 0xc0b173b4 in vm_page_remove (m=0xc4610e88) at ../../../vm/vm_page.c:742
#16 0xc0b18472 in vm_page_rename (m=0xc4610e88, new_object=0xc7ca17f8, new_pindex=0x1) at ../../../vm/vm_page.c:821
#17 0xc0b15ba6 in vm_object_split (entry=0xc77df828) at ../../../vm/vm_object.c:1441
#18 0xc0b0dcc7 in vmspace_fork (vm1=0xc7a03910, fork_charge=0xf6281c48) at ../../../vm/vm_map.c:2876
#19 0xc087915b in fork1 (td=0xc7c99240, flags=Variable "flags" is not available.) at ../../../kern/kern_fork.c:304
#20 0xc087a629 in fork (td=0xc7c99240, uap=0xf6281cf8) at ../../../kern/kern_fork.c:107
#21 0xc0bde540 in syscall (frame=0xf6281d38) at ../../../i386/i386/trap.c:1113
#22 0xc0bc01b0 in Xint0x80_syscall () at ../../../i386/i386/exception.s:261
#23 0x00000033 in ?? ()
(kgdb) f 15
#15 0xc0b173b4 in vm_page_remove (m=0xc4610e88) at ../../../vm/vm_page.c:742
742		vm_page_lock_assert(m, MA_OWNED);
(kgdb) p *m
$2569 = {pageq = {tqe_next = 0xc517dfa8, tqe_prev = 0xc4610e40},
  listq = {tqe_next = 0xc46110c8, tqe_prev = 0xc7ca2a40}, left = 0x0,
  right = 0xc46110c8, object = 0xc7ca2a18, pindex = 0x93,
  phys_addr = 0xab78a000, md = {pv_list = {tqh_first = 0xc52b2394,
      tqh_last = 0xc52b2398}, pat_mode = 0x6}, queue = 0x2, segind = 0x2,
  flags = 0x90, order = 0xb, pool = 0x0, cow = 0x0, wire_count = 0x0,
  hold_count = 0x0, oflags = 0x0, act_count = 0x5, busy = 0x0,
  valid = 0xff, dirty = 0xff}
(kgdb)
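Frame 15 shows the assertion that fired: vm_page_remove() now expects the caller to hold the per-page lock, and the fork()/vmspace_fork()/vm_object_split() path in frames #15-#18 appears to reach vm_page_rename() without it. The working-copy diff below is the patch that was under test; it converts callers of the global page-queue mutex over to a per-page vm_page_lock()/vm_page_unlock() API. A minimal sketch of the conversion pattern, using the API names from the diff (the two wrapper functions are hypothetical, for illustration only):

/*
 * Sketch of the locking conversion applied throughout the patch below;
 * not part of the patch.  free_pages_old()/free_pages_new() are made-up
 * names.  Old callers serialized all page state changes under the one
 * global queue mutex; the new API takes a lock private to each page,
 * exactly as the hunks below do around vm_page_free() and friends.
 */
static void
free_pages_old(vm_page_t *pages, int npages)
{
	int i;

	vm_page_lock_queues();		/* one global mutex for every page */
	for (i = 0; i < npages; i++)
		vm_page_free(pages[i]);
	vm_page_unlock_queues();
}

static void
free_pages_new(vm_page_t *pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		vm_page_lock(pages[i]);	/* lock private to this page */
		vm_page_free(pages[i]);
		vm_page_unlock(pages[i]);
	}
}

Note that the vm_object_split() hunk in the vm_object.c part of the diff adds a vm_page_unlock(m) after the rename loop, but no matching vm_page_lock(m) is visible in the hunk context shown, which would be consistent with the unowned-lock panic above.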
$ svn diff -x -p /usr/src/sys
Index: /usr/src/sys/nfsclient/nfs_bio.c =================================================================== --- /usr/src/sys/nfsclient/nfs_bio.c (revision 206700) +++ /usr/src/sys/nfsclient/nfs_bio.c (working copy) @@ -131,12 +131,13 @@ nfs_getpages(struct vop_getpages_args *ap) */ VM_OBJECT_LOCK(object); if (pages[ap->a_reqpage]->valid != 0) { - vm_page_lock_queues(); for (i = 0; i < npages; ++i) { - if (i != ap->a_reqpage) + if (i != ap->a_reqpage) { + vm_page_lock(pages[i]); vm_page_free(pages[i]); + vm_page_unlock(pages[i]); + } } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return (0); } @@ -171,12 +172,13 @@ nfs_getpages(struct vop_getpages_args *ap) if (error && (uio.uio_resid == count)) { nfs_printf("nfs_getpages: error %d\n", error); VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0; i < npages; ++i) { - if (i != ap->a_reqpage) + if (i != ap->a_reqpage) { + vm_page_lock(pages[i]); vm_page_free(pages[i]); + vm_page_unlock(pages[i]); + } } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return (VM_PAGER_ERROR); } @@ -189,12 +191,12 @@ nfs_getpages(struct vop_getpages_args *ap) size = count - uio.uio_resid; VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { vm_page_t m; nextoff = toff + PAGE_SIZE; m = pages[i]; + vm_page_lock(m); if (nextoff <= size) { /* * Read operation filled an entire page @@ -241,8 +243,8 @@ nfs_getpages(struct vop_getpages_args *ap) vm_page_free(m); } } + vm_page_unlock(m); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return (0); } @@ -367,7 +369,7 @@ nfs_putpages(struct vop_putpages_args *ap) * attributes this could be forced by setting n_attrstamp to 0 before * the VOP_GETATTR() call. */ -static inline int +static __inline int nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred) { int error = 0;
Index: /usr/src/sys/ufs/ffs/ffs_vnops.c =================================================================== --- /usr/src/sys/ufs/ffs/ffs_vnops.c (revision 206700) +++ /usr/src/sys/ufs/ffs/ffs_vnops.c (working copy) @@ -859,13 +859,13 @@ ffs_getpages(ap) if (mreq->valid) { if (mreq->valid != VM_PAGE_BITS_ALL) vm_page_zero_invalid(mreq, TRUE); - vm_page_lock_queues(); for (i = 0; i < pcount; i++) { if (i != ap->a_reqpage) { + vm_page_lock(ap->a_m[i]); vm_page_free(ap->a_m[i]); + vm_page_unlock(ap->a_m[i]); } } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(mreq->object); return VM_PAGER_OK; }
Index: /usr/src/sys/kern/uipc_syscalls.c =================================================================== --- /usr/src/sys/kern/uipc_syscalls.c (revision 206700) +++ /usr/src/sys/kern/uipc_syscalls.c (working copy) @@ -1715,7 +1715,7 @@ sf_buf_mext(void *addr, void *args) m = sf_buf_page(args); sf_buf_free(args); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, 0); /* * Check for the object going away on us. This can @@ -1724,7 +1724,7 @@ sf_buf_mext(void *addr, void *args) */ if (m->wire_count == 0 && m->object == NULL) vm_page_free(m); - vm_page_unlock_queues(); + vm_page_unlock(m); if (addr == NULL) return; sfs = addr; @@ -2108,7 +2108,7 @@ retry_space: mbstat.sf_iocnt++; } if (error) { - vm_page_lock_queues(); + vm_page_lock(pg); vm_page_unwire(pg, 0); /* * See if anyone else might know about @@ -2120,7 +2120,7 @@ retry_space: pg->hold_count == 0) { vm_page_free(pg); } - vm_page_unlock_queues(); + vm_page_unlock(pg); VM_OBJECT_UNLOCK(obj); if (error == EAGAIN) error = 0; /* not a real error */ @@ -2134,14 +2134,14 @@ retry_space: if ((sf = sf_buf_alloc(pg, (mnw ? SFB_NOWAIT : SFB_CATCH))) == NULL) { mbstat.sf_allocfail++; - vm_page_lock_queues(); + vm_page_lock(pg); vm_page_unwire(pg, 0); /* * XXX: Not same check as above!? */ if (pg->wire_count == 0 && pg->object == NULL) vm_page_free(pg); - vm_page_unlock_queues(); + vm_page_unlock(pg); error = (mnw ? EAGAIN : EINTR); break; }
Index: /usr/src/sys/kern/vfs_bio.c =================================================================== --- /usr/src/sys/kern/vfs_bio.c (revision 206700) +++ /usr/src/sys/kern/vfs_bio.c (working copy) @@ -1337,9 +1337,9 @@ brelse(struct buf *bp) (PAGE_SIZE - poffset) : resid; KASSERT(presid >= 0, ("brelse: extra page")); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_set_invalid(m, poffset, presid); - vm_page_unlock_queues(); + vm_page_unlock(m); if (had_bogus) printf("avoided corruption bug in bogus_page/brelse code\n"); } @@ -1547,10 +1547,10 @@ vfs_vmio_release(struct buf *bp) vm_page_t m; VM_OBJECT_LOCK(bp->b_bufobj->bo_object); - vm_page_lock_queues(); for (i = 0; i < bp->b_npages; i++) { m = bp->b_pages[i]; bp->b_pages[i] = NULL; + vm_page_lock(m); /* * In order to keep page LRU ordering consistent, put * everything on the inactive queue. @@ -1561,8 +1561,10 @@ vfs_vmio_release(struct buf *bp) * the responsibility of the process that * busied the pages to deal with them. */ - if ((m->oflags & VPO_BUSY) || (m->busy != 0)) + if ((m->oflags & VPO_BUSY) || (m->busy != 0)) { + vm_page_unlock(m); continue; + } if (m->wire_count == 0) { /* @@ -1579,8 +1581,8 @@ vfs_vmio_release(struct buf *bp) vm_page_try_to_cache(m); } } + vm_page_unlock(m); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); @@ -2425,13 +2427,17 @@ vfs_setdirty_locked_object(struct buf *bp) vm_offset_t boffset; vm_offset_t eoffset; - vm_page_lock_queues(); /* * test the pages to see if they have been modified directly * by users through the VM system. */ - for (i = 0; i < bp->b_npages; i++) + for (i = 0; i < bp->b_npages; i++) { + vm_page_lock_assert(bp->b_pages[i], MA_NOTOWNED); + vm_page_lock(bp->b_pages[i]); vm_page_test_dirty(bp->b_pages[i]); + vm_page_unlock(bp->b_pages[i]); + vm_page_lock_assert(bp->b_pages[i], MA_NOTOWNED); + } /* * Calculate the encompassing dirty range, boffset and eoffset, @@ -2451,7 +2457,6 @@ vfs_setdirty_locked_object(struct buf *bp) } eoffset = ((i + 1) << PAGE_SHIFT) - (bp->b_offset & PAGE_MASK); - vm_page_unlock_queues(); /* * Fit it to the buffer.
*/ @@ -2920,7 +2925,6 @@ allocbuf(struct buf *bp, int size) vm_page_t m; VM_OBJECT_LOCK(bp->b_bufobj->bo_object); - vm_page_lock_queues(); for (i = desiredpages; i < bp->b_npages; i++) { /* * the page is not freed here -- it @@ -2930,13 +2934,14 @@ allocbuf(struct buf *bp, int size) m = bp->b_pages[i]; KASSERT(m != bogus_page, ("allocbuf: bogus page found")); + vm_page_lock(m); while (vm_page_sleep_if_busy(m, TRUE, "biodep")) - vm_page_lock_queues(); + vm_page_lock(m); bp->b_pages[i] = NULL; vm_page_unwire(m, 0); + vm_page_unlock(m); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); @@ -3008,9 +3013,11 @@ allocbuf(struct buf *bp, int size) /* * We have a good page. */ - vm_page_lock_queues(); + vm_page_lock_assert(m, MA_NOTOWNED); + vm_page_lock(m); vm_page_wire(m); - vm_page_unlock_queues(); + vm_page_unlock(m); + vm_page_lock_assert(m, MA_NOTOWNED); bp->b_pages[bp->b_npages] = m; ++bp->b_npages; } @@ -3482,7 +3489,7 @@ vfs_page_set_validclean(struct buf *bp, vm_ooffset { vm_ooffset_t soff, eoff; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); /* * Start and end offsets in buffer. eoff - soff may not cross a * page boundry or cross the end of the buffer. The end of the @@ -3545,11 +3552,11 @@ retry: goto retry; } bogus = 0; - if (clear_modify) - vm_page_lock_queues(); for (i = 0; i < bp->b_npages; i++) { m = bp->b_pages[i]; + if (clear_modify) + vm_page_lock(m); if ((bp->b_flags & B_CLUSTER) == 0) { vm_object_pip_add(obj, 1); vm_page_io_start(m); @@ -3572,6 +3579,7 @@ retry: if (clear_modify) { pmap_remove_write(m); vfs_page_set_validclean(bp, foff, m); + vm_page_unlock(m); } else if (m->valid == VM_PAGE_BITS_ALL && (bp->b_flags & B_CACHE) == 0) { bp->b_pages[i] = bogus_page; @@ -3579,8 +3587,6 @@ retry: } foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; } - if (clear_modify) - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(obj); if (bogus) pmap_qenter(trunc_page((vm_offset_t)bp->b_data), @@ -3609,7 +3615,6 @@ vfs_clean_pages(struct buf *bp) KASSERT(bp->b_offset != NOOFFSET, ("vfs_clean_pages: no buffer offset")); VM_OBJECT_LOCK(bp->b_bufobj->bo_object); - vm_page_lock_queues(); for (i = 0; i < bp->b_npages; i++) { m = bp->b_pages[i]; noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK; @@ -3617,11 +3622,12 @@ vfs_clean_pages(struct buf *bp) if (eoff > bp->b_offset + bp->b_bufsize) eoff = bp->b_offset + bp->b_bufsize; + vm_page_lock(m); vfs_page_set_validclean(bp, foff, m); + vm_page_unlock(m); /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */ foff = noff; } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object); } @@ -3838,12 +3844,15 @@ vmapbuf(struct buf *bp) retry: if (vm_fault_quick(addr >= bp->b_data ? 
addr : bp->b_data, prot) < 0) { - vm_page_lock_queues(); for (i = 0; i < pidx; ++i) { - vm_page_unhold(bp->b_pages[i]); + vm_page_t m; + + m = bp->b_pages[i]; bp->b_pages[i] = NULL; + vm_page_lock(m); + vm_page_unhold(m); + vm_page_unlock(m); } - vm_page_unlock_queues(); return(-1); } m = pmap_extract_and_hold(pmap, (vm_offset_t)addr, prot); @@ -3869,15 +3878,18 @@ retry: void vunmapbuf(struct buf *bp) { + vm_page_t m; int pidx; int npages; npages = bp->b_npages; pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages); - vm_page_lock_queues(); - for (pidx = 0; pidx < npages; pidx++) - vm_page_unhold(bp->b_pages[pidx]); - vm_page_unlock_queues(); + for (pidx = 0; pidx < npages; pidx++) { + m = bp->b_pages[pidx]; + vm_page_lock(m); + vm_page_unhold(m); + vm_page_unlock(m); + } bp->b_data = bp->b_saveaddr; } Index: /usr/src/sys/kern/subr_uio.c =================================================================== --- /usr/src/sys/kern/subr_uio.c (revision 206700) +++ /usr/src/sys/kern/subr_uio.c (working copy) @@ -104,9 +104,9 @@ retry: if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) { if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco")) goto retry; - vm_page_lock_queues(); + vm_page_lock(user_pg); pmap_remove_all(user_pg); - vm_page_free(user_pg); + vm_page_unlock(user_pg); } else { /* * Even if a physical page does not exist in the @@ -115,11 +115,11 @@ retry: */ if (uobject->backing_object != NULL) pmap_remove(map->pmap, uaddr, uaddr + PAGE_SIZE); - vm_page_lock_queues(); } + vm_page_lock(kern_pg); vm_page_insert(kern_pg, uobject, upindex); vm_page_dirty(kern_pg); - vm_page_unlock_queues(); + vm_page_unlock(kern_pg); VM_OBJECT_UNLOCK(uobject); vm_map_lookup_done(map, entry); return(KERN_SUCCESS); Index: /usr/src/sys/kern/kern_exec.c =================================================================== --- /usr/src/sys/kern/kern_exec.c (revision 206700) +++ /usr/src/sys/kern/kern_exec.c (working copy) @@ -949,17 +949,17 @@ exec_map_first_page(imgp) ma[0] = vm_page_lookup(object, 0); if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) { if (ma[0]) { - vm_page_lock_queues(); + vm_page_lock(ma[0]); vm_page_free(ma[0]); - vm_page_unlock_queues(); + vm_page_unlock(ma[0]); } VM_OBJECT_UNLOCK(object); return (EIO); } } - vm_page_lock_queues(); + vm_page_lock(ma[0]); vm_page_hold(ma[0]); - vm_page_unlock_queues(); + vm_page_unlock(ma[0]); vm_page_wakeup(ma[0]); VM_OBJECT_UNLOCK(object); @@ -979,9 +979,9 @@ exec_unmap_first_page(imgp) m = sf_buf_page(imgp->firstpage); sf_buf_free(imgp->firstpage); imgp->firstpage = NULL; - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unhold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } } Index: /usr/src/sys/kern/uipc_shm.c =================================================================== --- /usr/src/sys/kern/uipc_shm.c (revision 206700) +++ /usr/src/sys/kern/uipc_shm.c (working copy) @@ -304,9 +304,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length) */ base = roundup2(base, DEV_BSIZE); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_clear_dirty(m, base, PAGE_SIZE - base); - vm_page_unlock_queues(); + vm_page_unlock(m); } else if ((length & PAGE_MASK) && __predict_false(object->cache != NULL)) { vm_page_cache_free(object, OFF_TO_IDX(length), Index: /usr/src/sys/kern/sys_pipe.c =================================================================== --- /usr/src/sys/kern/sys_pipe.c (revision 206700) +++ /usr/src/sys/kern/sys_pipe.c (working copy) @@ -766,17 +766,16 @@ pipe_build_write_buffer(wpipe, uio) if (endaddr < addr) return (EFAULT); for (i 
= 0; addr < endaddr; addr += PAGE_SIZE, i++) { - /* - * vm_fault_quick() can sleep. Consequently, - * vm_page_lock_queue() and vm_page_unlock_queue() - * should not be performed outside of this loop. - */ race: if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) { - vm_page_lock_queues(); - for (j = 0; j < i; j++) - vm_page_unhold(wpipe->pipe_map.ms[j]); - vm_page_unlock_queues(); + for (j = 0; j < i; j++) { + vm_page_t m; + + m = wpipe->pipe_map.ms[j]; + vm_page_lock(m); + vm_page_unhold(m); + vm_page_unlock(m); + } return (EFAULT); } wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr, @@ -813,14 +812,16 @@ static void pipe_destroy_write_buffer(wpipe) struct pipe *wpipe; { + vm_page_t m; int i; PIPE_LOCK_ASSERT(wpipe, MA_OWNED); - vm_page_lock_queues(); for (i = 0; i < wpipe->pipe_map.npages; i++) { + m = wpipe->pipe_map.ms[i]; + vm_page_lock(m); vm_page_unhold(wpipe->pipe_map.ms[i]); + vm_page_unlock(m); } - vm_page_unlock_queues(); wpipe->pipe_map.npages = 0; } Index: /usr/src/sys/kern/uipc_cow.c =================================================================== --- /usr/src/sys/kern/uipc_cow.c (revision 206700) +++ /usr/src/sys/kern/uipc_cow.c (working copy) @@ -80,7 +80,7 @@ socow_iodone(void *addr, void *args) pp = sf_buf_page(sf); sf_buf_free(sf); /* remove COW mapping */ - vm_page_lock_queues(); + vm_page_lock(pp); vm_page_cowclear(pp); vm_page_unwire(pp, 0); /* @@ -90,7 +90,7 @@ socow_iodone(void *addr, void *args) */ if (pp->wire_count == 0 && pp->object == NULL) vm_page_free(pp); - vm_page_unlock_queues(); + vm_page_unlock(pp); socow_stats.iodone++; } @@ -128,10 +128,10 @@ socow_setup(struct mbuf *m0, struct uio *uio) /* * set up COW */ - vm_page_lock_queues(); + vm_page_lock(pp); if (vm_page_cowsetup(pp) != 0) { vm_page_unhold(pp); - vm_page_unlock_queues(); + vm_page_unlock(pp); return (0); } @@ -140,14 +140,14 @@ socow_setup(struct mbuf *m0, struct uio *uio) */ vm_page_wire(pp); vm_page_unhold(pp); - vm_page_unlock_queues(); + vm_page_unlock(pp); /* * Allocate an sf buf */ sf = sf_buf_alloc(pp, SFB_CATCH); if (!sf) { - vm_page_lock_queues(); + vm_page_lock(pp); vm_page_cowclear(pp); vm_page_unwire(pp, 0); /* @@ -157,7 +157,7 @@ socow_setup(struct mbuf *m0, struct uio *uio) */ if (pp->wire_count == 0 && pp->object == NULL) vm_page_free(pp); - vm_page_unlock_queues(); + vm_page_unlock(pp); socow_stats.fail_sf_buf++; return(0); } Index: /usr/src/sys/kern/sys_process.c =================================================================== --- /usr/src/sys/kern/sys_process.c (revision 206700) +++ /usr/src/sys/kern/sys_process.c (working copy) @@ -328,9 +328,9 @@ proc_rwmem(struct proc *p, struct uio *uio) /* * Hold the page in memory. */ - vm_page_lock_queues(); + vm_page_lock(m); vm_page_hold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); /* * We're done with tmap now. @@ -349,9 +349,9 @@ proc_rwmem(struct proc *p, struct uio *uio) /* * Release the page. 
*/ - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unhold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } while (error == 0 && uio->uio_resid > 0); Index: /usr/src/sys/kern/subr_witness.c =================================================================== --- /usr/src/sys/kern/subr_witness.c (revision 206700) +++ /usr/src/sys/kern/subr_witness.c (working copy) @@ -597,6 +597,15 @@ static struct witness_order_list_entry order_lists { "cdev", &lock_class_mtx_sleep }, { NULL, NULL }, /* + * VM + * + */ + { "vm object", &lock_class_mtx_sleep }, + { "page lock", &lock_class_mtx_sleep }, + { "vm page queue mutex", &lock_class_mtx_sleep }, + { "pmap", &lock_class_mtx_sleep }, + { NULL, NULL }, + /* * kqueue/VFS interaction */ { "kqueue", &lock_class_mtx_sleep }, Index: /usr/src/sys/fs/nfsclient/nfs_clbio.c =================================================================== --- /usr/src/sys/fs/nfsclient/nfs_clbio.c (revision 206700) +++ /usr/src/sys/fs/nfsclient/nfs_clbio.c (working copy) @@ -134,12 +134,13 @@ ncl_getpages(struct vop_getpages_args *ap) */ VM_OBJECT_LOCK(object); if (pages[ap->a_reqpage]->valid != 0) { - vm_page_lock_queues(); for (i = 0; i < npages; ++i) { - if (i != ap->a_reqpage) + if (i != ap->a_reqpage) { + vm_page_lock(pages[i]); vm_page_free(pages[i]); + vm_page_unlock(pages[i]); + } } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return (0); } @@ -174,12 +175,13 @@ ncl_getpages(struct vop_getpages_args *ap) if (error && (uio.uio_resid == count)) { ncl_printf("nfs_getpages: error %d\n", error); VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0; i < npages; ++i) { - if (i != ap->a_reqpage) + if (i != ap->a_reqpage) { + vm_page_lock(pages[i]); vm_page_free(pages[i]); + vm_page_unlock(pages[i]); + } } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return (VM_PAGER_ERROR); } @@ -192,12 +194,13 @@ ncl_getpages(struct vop_getpages_args *ap) size = count - uio.uio_resid; VM_OBJECT_LOCK(object); - vm_page_lock_queues(); + for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { vm_page_t m; nextoff = toff + PAGE_SIZE; m = pages[i]; + vm_page_lock(m); if (nextoff <= size) { /* * Read operation filled an entire page @@ -239,13 +242,15 @@ ncl_getpages(struct vop_getpages_args *ap) vm_page_activate(m); else vm_page_deactivate(m); + vm_page_unlock(m); vm_page_wakeup(m); } else { vm_page_free(m); + vm_page_unlock(m); } - } + } else + vm_page_unlock(m); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return (0); } Index: /usr/src/sys/fs/tmpfs/tmpfs_vnops.c =================================================================== --- /usr/src/sys/fs/tmpfs/tmpfs_vnops.c (revision 206700) +++ /usr/src/sys/fs/tmpfs/tmpfs_vnops.c (working copy) @@ -460,9 +460,9 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t id error = uiomove_fromphys(&m, offset, tlen, uio); VM_OBJECT_LOCK(tobj); out: - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, TRUE); - vm_page_unlock_queues(); + vm_page_unlock(m); vm_page_wakeup(m); vm_object_pip_subtract(tobj, 1); VM_OBJECT_UNLOCK(tobj); @@ -630,9 +630,9 @@ lookupvpg: if (vm_page_sleep_if_busy(vpg, FALSE, "tmfsmw")) goto lookupvpg; vm_page_busy(vpg); - vm_page_lock_queues(); + vm_page_lock(vpg); vm_page_undirty(vpg); - vm_page_unlock_queues(); + vm_page_unlock(vpg); VM_OBJECT_UNLOCK(vobj); error = uiomove_fromphys(&vpg, offset, tlen, uio); } else { @@ -667,14 +667,14 @@ nocache: out: if (vobj != NULL) VM_OBJECT_LOCK(vobj); - vm_page_lock_queues(); + vm_page_lock(tpg); if (error == 0) { KASSERT(tpg->valid == 
VM_PAGE_BITS_ALL, ("parts of tpg invalid")); vm_page_dirty(tpg); } vm_page_unwire(tpg, TRUE); - vm_page_unlock_queues(); + vm_page_unlock(tpg); vm_page_wakeup(tpg); if (vpg != NULL) vm_page_wakeup(vpg); Index: /usr/src/sys/fs/smbfs/smbfs_io.c =================================================================== --- /usr/src/sys/fs/smbfs/smbfs_io.c (revision 206700) +++ /usr/src/sys/fs/smbfs/smbfs_io.c (working copy) @@ -450,12 +450,13 @@ smbfs_getpages(ap) VM_OBJECT_LOCK(object); if (m->valid != 0) { - vm_page_lock_queues(); for (i = 0; i < npages; ++i) { - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(pages[i]); vm_page_free(pages[i]); + vm_page_unlock(pages[i]); + } } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return 0; } @@ -488,24 +489,26 @@ smbfs_getpages(ap) VM_OBJECT_LOCK(object); if (error && (uio.uio_resid == count)) { printf("smbfs_getpages: error %d\n",error); - vm_page_lock_queues(); for (i = 0; i < npages; i++) { - if (reqpage != i) + if (reqpage != i) { + vm_page_lock(pages[i]); vm_page_free(pages[i]); + vm_page_unlock(pages[i]); + } + } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return VM_PAGER_ERROR; } size = count - uio.uio_resid; - vm_page_lock_queues(); for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { vm_page_t m; nextoff = toff + PAGE_SIZE; m = pages[i]; + vm_page_lock(m); if (nextoff <= size) { /* * Read operation filled an entire page @@ -548,13 +551,15 @@ smbfs_getpages(ap) vm_page_activate(m); else vm_page_deactivate(m); + vm_page_unlock(m); vm_page_wakeup(m); } else { vm_page_free(m); + vm_page_unlock(m); } - } + } else + vm_page_unlock(m); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return 0; #endif /* SMBFS_RWGENERIC */ @@ -644,12 +649,13 @@ smbfs_putpages(ap) if (!error) { int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE; - vm_page_lock_queues(); + for (i = 0; i < nwritten; i++) { rtvals[i] = VM_PAGER_OK; + vm_page_lock(pages[i]); vm_page_undirty(pages[i]); + vm_page_unlock(pages[i]); } - vm_page_unlock_queues(); } return rtvals[0]; #endif /* SMBFS_RWGENERIC */ Index: /usr/src/sys/fs/nwfs/nwfs_io.c =================================================================== --- /usr/src/sys/fs/nwfs/nwfs_io.c (revision 206700) +++ /usr/src/sys/fs/nwfs/nwfs_io.c (working copy) @@ -437,24 +437,27 @@ nwfs_getpages(ap) VM_OBJECT_LOCK(object); if (error && (uio.uio_resid == count)) { printf("nwfs_getpages: error %d\n",error); - vm_page_lock_queues(); + for (i = 0; i < npages; i++) { - if (ap->a_reqpage != i) - vm_page_free(pages[i]); + if (ap->a_reqpage != i) { + vm_page_lock(pages[i]); + vm_page_free(pages[i]); + vm_page_unlock(pages[i]); + } } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return VM_PAGER_ERROR; } size = count - uio.uio_resid; - vm_page_lock_queues(); + for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { vm_page_t m; nextoff = toff + PAGE_SIZE; m = pages[i]; + vm_page_lock(m); if (nextoff <= size) { m->valid = VM_PAGE_BITS_ALL; KASSERT(m->dirty == 0, @@ -484,13 +487,15 @@ nwfs_getpages(ap) vm_page_activate(m); else vm_page_deactivate(m); + vm_page_unlock(m); vm_page_wakeup(m); } else { vm_page_free(m); + vm_page_unlock(m); } - } + } else + vm_page_unlock(m); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); return 0; #endif /* NWFS_RWCACHE */ @@ -574,12 +579,12 @@ nwfs_putpages(ap) if (!error) { int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE; - vm_page_lock_queues(); for (i = 0; i < nwritten; i++) { rtvals[i] = VM_PAGER_OK; + vm_page_lock(pages[i]); 
vm_page_undirty(pages[i]); + vm_page_unlock(pages[i]); } - vm_page_unlock_queues(); } return rtvals[0]; #endif /* NWFS_RWCACHE */ Index: /usr/src/sys/dev/agp/agp.c =================================================================== --- /usr/src/sys/dev/agp/agp.c (revision 206700) +++ /usr/src/sys/dev/agp/agp.c (working copy) @@ -623,9 +623,9 @@ bad: m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k)); if (k >= i) vm_page_wakeup(m); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, 0); - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(mem->am_obj); @@ -657,9 +657,9 @@ agp_generic_unbind_memory(device_t dev, struct agp VM_OBJECT_LOCK(mem->am_obj); for (i = 0; i < mem->am_size; i += PAGE_SIZE) { m = vm_page_lookup(mem->am_obj, atop(i)); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, 0); - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(mem->am_obj); Index: /usr/src/sys/dev/agp/agp_i810.c =================================================================== --- /usr/src/sys/dev/agp/agp_i810.c (revision 206700) +++ /usr/src/sys/dev/agp/agp_i810.c (working copy) @@ -1011,9 +1011,9 @@ agp_i810_free_memory(device_t dev, struct agp_memo VM_OBJECT_LOCK(mem->am_obj); m = vm_page_lookup(mem->am_obj, 0); VM_OBJECT_UNLOCK(mem->am_obj); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, 0); - vm_page_unlock_queues(); + vm_page_unlock(m); } else { contigfree(sc->argb_cursor, mem->am_size, M_AGP); sc->argb_cursor = NULL; Index: /usr/src/sys/dev/ti/if_ti.c =================================================================== --- /usr/src/sys/dev/ti/if_ti.c (revision 206700) +++ /usr/src/sys/dev/ti/if_ti.c (working copy) @@ -1488,10 +1488,10 @@ ti_newbuf_jumbo(sc, idx, m_old) } sf[i] = sf_buf_alloc(frame, SFB_NOWAIT); if (sf[i] == NULL) { - vm_page_lock_queues(); + vm_page_lock(frame); vm_page_unwire(frame, 0); vm_page_free(frame); - vm_page_unlock_queues(); + vm_page_unlock(frame); device_printf(sc->ti_dev, "buffer allocation " "failed -- packet dropped!\n"); printf(" index %d page %d\n", idx, i); Index: /usr/src/sys/dev/md/md.c =================================================================== --- /usr/src/sys/dev/md/md.c (revision 206700) +++ /usr/src/sys/dev/md/md.c (working copy) @@ -665,11 +665,11 @@ mdstart_swap(struct md_s *sc, struct bio *bp) sf_buf_free(sf); sched_unpin(); vm_page_wakeup(m); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_activate(m); if (bp->bio_cmd == BIO_WRITE) vm_page_dirty(m); - vm_page_unlock_queues(); + vm_page_unlock(m); /* Actions on further pages start at offset 0 */ p += PAGE_SIZE - offs; Index: /usr/src/sys/dev/drm/via_dmablit.c =================================================================== --- /usr/src/sys/dev/drm/via_dmablit.c (revision 206700) +++ /usr/src/sys/dev/drm/via_dmablit.c (working copy) @@ -178,9 +178,9 @@ via_free_sg_info(drm_via_sg_info_t *vsg) case dr_via_pages_locked: for (i=0; i < vsg->num_pages; ++i) { if ( NULL != (page = vsg->pages[i])) { - vm_page_lock_queues(); + vm_page_lock(page); vm_page_unwire(page, 0); - vm_page_unlock_queues(); + vm_page_unlock(page); } } case dr_via_pages_alloc: @@ -248,10 +248,10 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, dr (vm_offset_t)xfer->mem_addr + IDX_TO_OFF(i), VM_PROT_RW); if (m == NULL) break; - vm_page_lock_queues(); + vm_page_lock(m); vm_page_wire(m); vm_page_unhold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); vsg->pages[i] = m; } vsg->state = dr_via_pages_locked; Index: /usr/src/sys/vm/vm_kern.c 
=================================================================== --- /usr/src/sys/vm/vm_kern.c (revision 206700) +++ /usr/src/sys/vm/vm_kern.c (working copy) @@ -351,10 +351,10 @@ retry: i -= PAGE_SIZE; m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i)); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, 0); vm_page_free(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(kmem_object); vm_map_delete(map, addr, addr + size);
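The vm_pageout.c changes that follow are the most invasive part of the patch: the inactive and active scan loops now take each page's lock opportunistically and skip pages they cannot lock, rather than relying on the queue mutex alone. A sketch of that loop shape, assuming the vm_page_trylock() API the hunks use (the wrapper function is hypothetical, and the real hunks fall back to vm_pageout_fallback_object_lock() instead of simply skipping). The *_locked queue helpers the hunks call, vm_page_requeue_locked(), vm_page_activate_locked(), vm_page_cache_locked(), vm_page_deactivate_locked() and vm_page_free_locked(), are not defined in any hunk shown here and presumably come from a vm_page.c/vm_page.h change not included in this capture.

/*
 * Sketch only; not part of the patch.  Pages whose lock cannot be
 * taken without blocking are skipped, so the pageout daemon never
 * sleeps on a page lock while it walks a page queue.
 */
static void
scan_inactive_sketch(void)
{
	vm_page_t m, next;
	vm_object_t object;

	for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl);
	    m != NULL; m = next) {
		next = TAILQ_NEXT(m, pageq);
		if (!vm_page_trylock(m) || (object = m->object) == NULL)
			continue;	/* skip; revisit on a later pass */
		if (!VM_OBJECT_TRYLOCK(object)) {
			vm_page_unlock(m);
			continue;
		}
		/* Page and object are now stable: clean, free, or requeue. */
		vm_page_unlock(m);
		VM_OBJECT_UNLOCK(object);
	}
}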
Index: /usr/src/sys/vm/vm_pageout.c =================================================================== --- /usr/src/sys/vm/vm_pageout.c (revision 206700) +++ /usr/src/sys/vm/vm_pageout.c (working copy) @@ -252,7 +252,9 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_pa TAILQ_INSERT_AFTER(&vm_page_queues[queue].pl, m, &marker, pageq); vm_page_unlock_queues(); + vm_page_unlock(m); VM_OBJECT_LOCK(object); + vm_page_lock(m); vm_page_lock_queues(); /* Page queue might have changed. */ @@ -275,8 +277,7 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_pa * late and we cannot do anything that will mess with the page. */ static int -vm_pageout_clean(m) - vm_page_t m; +vm_pageout_clean(vm_page_t m) { vm_object_t object; vm_page_t mc[2*vm_pageout_page_count]; @@ -284,7 +285,8 @@ static int int ib, is, page_base; vm_pindex_t pindex = m->pindex; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_NOTOWNED); + vm_page_lock(m); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); /* @@ -301,9 +303,9 @@ static int */ if ((m->hold_count != 0) || ((m->busy != 0) || (m->oflags & VPO_BUSY))) { + vm_page_unlock(m); return 0; } - mc[vm_pageout_page_count] = m; pageout_count = 1; page_base = vm_pageout_page_count; @@ -395,6 +397,7 @@ more: if (ib && pageout_count < vm_pageout_page_count) goto more; + vm_page_unlock(m); /* * we allow reads during pageouts... */ @@ -418,12 +421,8 @@ vm_pageout_flush(vm_page_t *mc, int count, int fla int numpagedout = 0; int i; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); /* - * Initiate I/O. Bump the vm_page_t->busy counter and - * mark the pages read-only. - * * We do not have to fixup the clean/dirty bits here... we can * allow the pager to do it after the I/O completes. * @@ -435,17 +434,18 @@ vm_pageout_flush(vm_page_t *mc, int count, int fla ("vm_pageout_flush: partially invalid page %p index %d/%d", mc[i], i, count)); vm_page_io_start(mc[i]); + vm_page_lock(mc[i]); pmap_remove_write(mc[i]); + vm_page_unlock(mc[i]); } - vm_page_unlock_queues(); vm_object_pip_add(object, count); vm_pager_put_pages(object, mc, count, flags, pageout_status); - vm_page_lock_queues(); for (i = 0; i < count; i++) { vm_page_t mt = mc[i]; + vm_page_lock(mt); KASSERT(pageout_status[i] == VM_PAGER_PEND || (mt->flags & PG_WRITEABLE) == 0, ("vm_pageout_flush: page %p is not write protected", mt)); @@ -487,6 +487,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int fla if (vm_page_count_severe()) vm_page_try_to_cache(mt); } + vm_page_unlock(mt); } return numpagedout; } @@ -531,20 +532,19 @@ vm_pageout_object_deactivate_pages(pmap, first_obj */ rcount = object->resident_page_count; p = TAILQ_FIRST(&object->memq); - vm_page_lock_queues(); while (p && (rcount-- > 0)) { - if (pmap_resident_count(pmap) <= desired) { - vm_page_unlock_queues(); + if (pmap_resident_count(pmap) <= desired) goto unlock_return; - } next = TAILQ_NEXT(p, listq); cnt.v_pdpages++; + vm_page_lock(p); if (p->wire_count != 0 || p->hold_count != 0 || p->busy != 0 || (p->oflags & VPO_BUSY) || (p->flags & PG_UNMANAGED) || !pmap_page_exists_quick(pmap, p)) { + vm_page_unlock(p); p = next; continue; } @@ -569,18 +569,20 @@ vm_pageout_object_deactivate_pages(pmap, first_obj vm_page_requeue(p); } } else { - vm_page_activate(p); + vm_page_lock_queues(); + vm_page_activate_locked(p); vm_page_flag_clear(p, PG_REFERENCED); if (p->act_count < (ACT_MAX - ACT_ADVANCE)) p->act_count += ACT_ADVANCE; - vm_page_requeue(p); + vm_page_requeue_locked(p); + vm_page_unlock_queues(); } } else if (p->queue == PQ_INACTIVE) { pmap_remove_all(p); } + vm_page_unlock(p); p = next; } - vm_page_unlock_queues(); if ((backing_object = object->backing_object) == NULL) goto unlock_return; VM_OBJECT_LOCK(backing_object); @@ -735,19 +737,16 @@ vm_pageout_scan(int pass) rescan0: addl_page_shortage = addl_page_shortage_init; maxscan = cnt.v_inactive_count; - for (m = TAILQ_FIRST(&vm_page_queues[PQ_INACTIVE].pl); m != NULL && maxscan-- > 0 && page_shortage > 0; m = next) { cnt.v_pdpages++; - if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) { + if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) goto rescan0; - } next = TAILQ_NEXT(m, pageq); - object = m->object; /* * skip marker pages @@ -759,22 +758,29 @@ rescan0: * A held page may be undergoing I/O, so skip it. */ if (m->hold_count) { - vm_page_requeue(m); + vm_page_requeue_locked(m); addl_page_shortage++; continue; } + + if (!vm_page_trylock(m) || (object = m->object) == NULL) { + addl_page_shortage++; + continue; + } + /* * Don't mess with busy pages, keep in the front of the * queue, most likely are being paged out.
*/ if (!VM_OBJECT_TRYLOCK(object) && - (!vm_pageout_fallback_object_lock(m, &next) || - m->hold_count != 0)) { + !vm_pageout_fallback_object_lock(m, &next)) { VM_OBJECT_UNLOCK(object); + vm_page_unlock(m); addl_page_shortage++; continue; } - if (m->busy || (m->oflags & VPO_BUSY)) { + if (m->busy || (m->oflags & VPO_BUSY) || m->hold_count) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); addl_page_shortage++; continue; @@ -800,9 +806,10 @@ rescan0: */ } else if (((m->flags & PG_REFERENCED) == 0) && (actcount = pmap_ts_referenced(m))) { - vm_page_activate(m); + vm_page_activate_locked(m); + m->act_count += (actcount + ACT_ADVANCE); + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); - m->act_count += (actcount + ACT_ADVANCE); continue; } @@ -815,9 +822,10 @@ rescan0: if ((m->flags & PG_REFERENCED) != 0) { vm_page_flag_clear(m, PG_REFERENCED); actcount = pmap_ts_referenced(m); - vm_page_activate(m); + vm_page_activate_locked(m); + m->act_count += (actcount + ACT_ADVANCE + 1); + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); - m->act_count += (actcount + ACT_ADVANCE + 1); continue; } @@ -852,7 +860,7 @@ rescan0: /* * Invalid pages can be easily freed */ - vm_page_free(m); + vm_page_free_locked(m); cnt.v_dfree++; --page_shortage; } else if (m->dirty == 0) { @@ -860,7 +868,7 @@ rescan0: * Clean pages can be placed onto the cache queue. * This effectively frees them. */ - vm_page_cache(m); + vm_page_cache_locked(m); --page_shortage; } else if ((m->flags & PG_WINATCFLS) == 0 && pass == 0) { /* @@ -876,7 +884,7 @@ rescan0: * the thrash point for a heavily loaded machine. */ vm_page_flag_set(m, PG_WINATCFLS); - vm_page_requeue(m); + vm_page_requeue_locked(m); } else if (maxlaunder > 0) { /* * We always want to try to flush some dirty pages if @@ -903,8 +911,9 @@ rescan0: * Those objects are in a "rundown" state. */ if (!swap_pageouts_ok || (object->flags & OBJ_DEAD)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); - vm_page_requeue(m); + vm_page_requeue_locked(m); continue; } @@ -942,6 +951,8 @@ rescan0: * of time. */ if (object->type == OBJT_VNODE) { + vm_page_unlock_queues(); + vm_page_unlock(m); vp = object->handle; if (vp->v_type == VREG && vn_start_write(vp, &mp, V_NOWAIT) != 0) { @@ -949,25 +960,26 @@ rescan0: ++pageout_lock_miss; if (object->flags & OBJ_MIGHTBEDIRTY) vnodes_skipped++; + vm_page_lock_queues(); goto unlock_and_continue; } KASSERT(mp != NULL, ("vp %p with NULL v_mount", vp)); - vm_page_unlock_queues(); vm_object_reference_locked(object); VM_OBJECT_UNLOCK(object); vfslocked = VFS_LOCK_GIANT(vp->v_mount); if (vget(vp, LK_EXCLUSIVE | LK_TIMELOCK, curthread)) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); ++pageout_lock_miss; if (object->flags & OBJ_MIGHTBEDIRTY) vnodes_skipped++; vp = NULL; + vm_page_lock_queues(); goto unlock_and_continue; } VM_OBJECT_LOCK(object); + vm_page_lock(m); vm_page_lock_queues(); /* * The page might have been moved to another @@ -978,6 +990,7 @@ rescan0: if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE || m->object != object || TAILQ_NEXT(m, pageq) != &marker) { + vm_page_unlock(m); if (object->flags & OBJ_MIGHTBEDIRTY) vnodes_skipped++; goto unlock_and_continue; @@ -990,6 +1003,7 @@ rescan0: * statistics are more correct if we don't. 
*/ if (m->busy || (m->oflags & VPO_BUSY)) { + vm_page_unlock(m); goto unlock_and_continue; } @@ -998,12 +1012,15 @@ rescan0: * be undergoing I/O, so skip it */ if (m->hold_count) { - vm_page_requeue(m); + vm_page_unlock(m); + vm_page_requeue_locked(m); if (object->flags & OBJ_MIGHTBEDIRTY) vnodes_skipped++; goto unlock_and_continue; } } + vm_page_unlock(m); + /* * If a page is dirty, then it is either being washed @@ -1015,11 +1032,14 @@ rescan0: * the (future) cleaned page. Otherwise we could wind * up laundering or cleaning too many pages. */ + vm_page_unlock_queues(); if (vm_pageout_clean(m) != 0) { --page_shortage; --maxlaunder; } + vm_page_lock_queues(); unlock_and_continue: + vm_page_lock_assert(m, MA_NOTOWNED); VM_OBJECT_UNLOCK(object); if (mp != NULL) { vm_page_unlock_queues(); @@ -1033,8 +1053,10 @@ unlock_and_continue: next = TAILQ_NEXT(&marker, pageq); TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq); + vm_page_lock_assert(m, MA_NOTOWNED); continue; } + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); } @@ -1053,6 +1075,7 @@ unlock_and_continue: */ pcount = cnt.v_active_count; m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); + mtx_assert(&vm_page_queue_mtx, MA_OWNED); while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) { @@ -1061,25 +1084,33 @@ unlock_and_continue: next = TAILQ_NEXT(m, pageq); object = m->object; + if ((m->flags & PG_MARKER) != 0) { m = next; continue; } + + if (!vm_page_trylock(m) || (object = m->object) == NULL) { + m = next; + continue; + } + if (!VM_OBJECT_TRYLOCK(object) && !vm_pageout_fallback_object_lock(m, &next)) { VM_OBJECT_UNLOCK(object); + vm_page_unlock(m); m = next; continue; } - /* * Don't deactivate pages that are busy. */ if ((m->busy != 0) || (m->oflags & VPO_BUSY) || (m->hold_count != 0)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); - vm_page_requeue(m); + vm_page_requeue_locked(m); m = next; continue; } @@ -1116,7 +1147,7 @@ unlock_and_continue: * page activation count stats. */ if (actcount && (object->ref_count != 0)) { - vm_page_requeue(m); + vm_page_requeue_locked(m); } else { m->act_count -= min(m->act_count, ACT_DECLINE); if (vm_pageout_algorithm || @@ -1126,16 +1157,17 @@ unlock_and_continue: if (object->ref_count == 0) { pmap_remove_all(m); if (m->dirty == 0) - vm_page_cache(m); + vm_page_cache_locked(m); else - vm_page_deactivate(m); + vm_page_deactivate_locked(m); } else { - vm_page_deactivate(m); + vm_page_deactivate_locked(m); } } else { - vm_page_requeue(m); + vm_page_requeue_locked(m); } } + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); m = next; } @@ -1321,9 +1353,15 @@ vm_pageout_page_stats() m = next; continue; } + vm_page_lock_assert(m, MA_NOTOWNED); + if (vm_page_trylock(m) == 0 || (object = m->object) == NULL) { + m = next; + continue; + } if (!VM_OBJECT_TRYLOCK(object) && !vm_pageout_fallback_object_lock(m, &next)) { VM_OBJECT_UNLOCK(object); + vm_page_unlock(m); m = next; continue; } @@ -1334,8 +1372,9 @@ vm_pageout_page_stats() if ((m->busy != 0) || (m->oflags & VPO_BUSY) || (m->hold_count != 0)) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); - vm_page_requeue(m); + vm_page_requeue_locked(m); m = next; continue; } @@ -1351,7 +1390,7 @@ vm_pageout_page_stats() m->act_count += ACT_ADVANCE + actcount; if (m->act_count > ACT_MAX) m->act_count = ACT_MAX; - vm_page_requeue(m); + vm_page_requeue_locked(m); } else { if (m->act_count == 0) { /* @@ -1364,12 +1403,13 @@ vm_pageout_page_stats() * of doing the operation. 
*/ pmap_remove_all(m); - vm_page_deactivate(m); + vm_page_deactivate_locked(m); } else { m->act_count -= min(m->act_count, ACT_DECLINE); - vm_page_requeue(m); + vm_page_requeue_locked(m); } } + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); m = next; } Index: /usr/src/sys/vm/vm_map.c =================================================================== --- /usr/src/sys/vm/vm_map.c (revision 206700) +++ /usr/src/sys/vm/vm_map.c (working copy) @@ -1767,25 +1767,31 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, p_start = p; } } else if (p_start != NULL) { +#ifndef VM_PAGE_LOCK if (!are_queues_locked) { are_queues_locked = TRUE; vm_page_lock_queues(); } +#endif pmap_enter_object(map->pmap, start, addr + ptoa(tmpidx), p_start, prot); p_start = NULL; } } if (p_start != NULL) { +#ifndef VM_PAGE_LOCK if (!are_queues_locked) { are_queues_locked = TRUE; vm_page_lock_queues(); } +#endif pmap_enter_object(map->pmap, start, addr + ptoa(psize), p_start, prot); } +#ifndef VM_PAGE_LOCK if (are_queues_locked) vm_page_unlock_queues(); +#endif unlock_return: VM_OBJECT_UNLOCK(object); } Index: /usr/src/sys/vm/sg_pager.c =================================================================== --- /usr/src/sys/vm/sg_pager.c (revision 206700) +++ /usr/src/sys/vm/sg_pager.c (working copy) @@ -198,10 +198,11 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m TAILQ_INSERT_TAIL(&object->un_pager.sgp.sgp_pglist, page, pageq); /* Free the original pages and insert this fake page into the object. */ - vm_page_lock_queues(); - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } vm_page_insert(page, object, offset); m[reqpage] = page; page->valid = VM_PAGE_BITS_ALL; Index: /usr/src/sys/vm/swap_pager.c =================================================================== --- /usr/src/sys/vm/swap_pager.c (revision 206700) +++ /usr/src/sys/vm/swap_pager.c (working copy) @@ -1130,12 +1130,16 @@ swap_pager_getpages(vm_object_t object, vm_page_t if (0 < i || j < count) { int k; - vm_page_lock_queues(); - for (k = 0; k < i; ++k) + for (k = 0; k < i; ++k) { + vm_page_lock(m[k]); vm_page_free(m[k]); - for (k = j; k < count; ++k) + vm_page_unlock(m[k]); + } + for (k = j; k < count; ++k) { + vm_page_lock(m[k]); vm_page_free(m[k]); - vm_page_unlock_queues(); + vm_page_unlock(m[k]); + } } /* @@ -1493,7 +1497,6 @@ swp_pager_async_iodone(struct buf *bp) object = bp->b_pages[0]->object; VM_OBJECT_LOCK(object); } - vm_page_lock_queues(); /* * cleanup pages. If an error occurs writing to swap, we are in * very serious trouble. If it happens to be a disk error, though, @@ -1505,6 +1508,7 @@ swp_pager_async_iodone(struct buf *bp) for (i = 0; i < bp->b_npages; ++i) { vm_page_t m = bp->b_pages[i]; + vm_page_lock(m); m->oflags &= ~VPO_SWAPINPROG; if (bp->b_ioflags & BIO_ERROR) { @@ -1601,8 +1605,8 @@ swp_pager_async_iodone(struct buf *bp) if (vm_page_count_severe()) vm_page_try_to_cache(m); } + vm_page_unlock(m); } - vm_page_unlock_queues(); /* * adjust pip. 
NOTE: the original parent may still have its own @@ -1698,10 +1702,10 @@ swp_pager_force_pagein(vm_object_t object, vm_pind m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL|VM_ALLOC_RETRY); if (m->valid == VM_PAGE_BITS_ALL) { vm_object_pip_subtract(object, 1); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_activate(m); vm_page_dirty(m); - vm_page_unlock_queues(); + vm_page_unlock(m); vm_page_wakeup(m); vm_pager_page_unswapped(m); return; @@ -1710,10 +1714,10 @@ swp_pager_force_pagein(vm_object_t object, vm_pind if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK) panic("swap_pager_force_pagein: read from swap failed");/*XXX*/ vm_object_pip_subtract(object, 1); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_dirty(m); vm_page_dontneed(m); - vm_page_unlock_queues(); + vm_page_unlock(m); vm_page_wakeup(m); vm_pager_page_unswapped(m); } Index: /usr/src/sys/vm/vm_mmap.c =================================================================== --- /usr/src/sys/vm/vm_mmap.c (revision 206700) +++ /usr/src/sys/vm/vm_mmap.c (working copy) @@ -866,7 +866,7 @@ RestartScan: */ if (m != NULL && m->valid != 0) { mincoreinfo = MINCORE_INCORE; - vm_page_lock_queues(); + vm_page_lock(m); if (m->dirty || pmap_is_modified(m)) mincoreinfo |= MINCORE_MODIFIED_OTHER; @@ -875,7 +875,7 @@ RestartScan: vm_page_flag_set(m, PG_REFERENCED); mincoreinfo |= MINCORE_REFERENCED_OTHER; } - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(current->object.vm_object); } Index: /usr/src/sys/vm/vm_glue.c =================================================================== --- /usr/src/sys/vm/vm_glue.c (revision 206700) +++ /usr/src/sys/vm/vm_glue.c (working copy) @@ -257,16 +257,16 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset if (m == NULL) goto out; if (rv != VM_PAGER_OK) { - vm_page_lock_queues(); + vm_page_lock(m); vm_page_free(m); - vm_page_unlock_queues(); + vm_page_unlock(m); m = NULL; goto out; } } - vm_page_lock_queues(); + vm_page_lock(m); vm_page_hold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); vm_page_wakeup(m); out: VM_OBJECT_UNLOCK(object); @@ -300,9 +300,9 @@ vm_imgact_unmap_page(struct sf_buf *sf) m = sf_buf_page(sf); sf_buf_free(sf); sched_unpin(); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unhold(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } void @@ -425,10 +425,10 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offs m = vm_page_lookup(ksobj, i); if (m == NULL) panic("vm_thread_dispose: kstack already missing?"); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_unwire(m, 0); vm_page_free(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(ksobj); vm_object_deallocate(ksobj); Index: /usr/src/sys/vm/pmap.h =================================================================== --- /usr/src/sys/vm/pmap.h (revision 206700) +++ /usr/src/sys/vm/pmap.h (working copy) @@ -139,6 +139,7 @@ void pmap_zero_page_area(vm_page_t, int off, int void pmap_zero_page_idle(vm_page_t); int pmap_mincore(pmap_t pmap, vm_offset_t addr); void pmap_activate(struct thread *td); +struct mtx *pmap_page_lockptr(vm_page_t); #define pmap_resident_count(pm) ((pm)->pm_stats.resident_count) #define pmap_wired_count(pm) ((pm)->pm_stats.wired_count) Index: /usr/src/sys/vm/vm_object.c =================================================================== --- /usr/src/sys/vm/vm_object.c (revision 206700) +++ /usr/src/sys/vm/vm_object.c (working copy) @@ -717,8 +717,8 @@ vm_object_terminate(vm_object_t object) * removes them from paging queues. 
Don't free wired pages, just * remove them from the object. */ - vm_page_lock_queues(); while ((p = TAILQ_FIRST(&object->memq)) != NULL) { + vm_page_lock(p); KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0, ("vm_object_terminate: freeing busy page %p " "p->busy = %d, p->oflags %x\n", p, p->busy, p->oflags)); @@ -728,8 +728,8 @@ vm_object_terminate(vm_object_t object) } else { vm_page_remove(p); } + vm_page_unlock(p); } - vm_page_unlock_queues(); #if VM_NRESERVLEVEL > 0 if (__predict_false(!LIST_EMPTY(&object->rvq))) @@ -789,7 +789,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex tend = end; } - vm_page_lock_queues(); /* * If the caller is smart and only msync()s a range he knows is * dirty, we may be able to avoid an object scan. This results in @@ -818,8 +817,10 @@ vm_object_page_clean(vm_object_t object, vm_pindex ++tscan; continue; } + vm_page_lock(p); vm_page_test_dirty(p); if (p->dirty == 0) { + vm_page_unlock(p); if (--scanlimit == 0) break; ++tscan; @@ -830,6 +831,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex * this is a nosync page, we can't continue. */ if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) { + vm_page_unlock(p); if (--scanlimit == 0) break; ++tscan; @@ -839,7 +841,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex /* * This returns 0 if it was unable to busy the first - * page (i.e. had to sleep). + * page (i.e. had to sleep) and always unlocks p. */ tscan += vm_object_page_collect_flush(object, p, curgeneration, pagerflags); } @@ -851,7 +853,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex * return immediately. */ if (tscan >= tend && (tstart || tend < object->size)) { - vm_page_unlock_queues(); vm_object_clear_flag(object, OBJ_CLEANING); return; } @@ -869,10 +870,13 @@ vm_object_page_clean(vm_object_t object, vm_pindex clearobjflags = 1; TAILQ_FOREACH(p, &object->memq, listq) { p->oflags |= VPO_CLEANCHK; - if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) + if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) { clearobjflags = 0; - else + } else { + vm_page_lock(p); pmap_remove_write(p); + vm_page_unlock(p); + } } if (clearobjflags && (tstart == 0) && (tend == object->size)) @@ -895,8 +899,10 @@ again: continue; } + vm_page_lock(p); vm_page_test_dirty(p); if (p->dirty == 0) { + vm_page_unlock(p); p->oflags &= ~VPO_CLEANCHK; continue; } @@ -907,10 +913,11 @@ again: * not cleared in this case so we do not have to set them. */ if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) { + vm_page_unlock(p); p->oflags &= ~VPO_CLEANCHK; continue; } - + /* Always unlocks p. 
*/ n = vm_object_page_collect_flush(object, p, curgeneration, pagerflags); if (n == 0) @@ -928,7 +935,6 @@ again: goto again; } } - vm_page_unlock_queues(); #if 0 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc); #endif @@ -950,14 +956,16 @@ vm_object_page_collect_flush(vm_object_t object, v vm_page_t mab[vm_pageout_page_count]; vm_page_t ma[vm_pageout_page_count]; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(p, MA_OWNED); pi = p->pindex; while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) { - vm_page_lock_queues(); if (object->generation != curgeneration) { return(0); } + vm_page_lock(p); } + vm_page_unlock(p); + maxf = 0; for(i = 1; i < vm_pageout_page_count; i++) { vm_page_t tp; @@ -968,11 +976,14 @@ vm_object_page_collect_flush(vm_object_t object, v (tp->oflags & VPO_CLEANCHK) == 0) || (tp->busy != 0)) break; + vm_page_lock(tp); vm_page_test_dirty(tp); if (tp->dirty == 0) { + vm_page_unlock(tp); tp->oflags &= ~VPO_CLEANCHK; break; } + vm_page_unlock(tp); maf[ i - 1 ] = tp; maxf++; continue; @@ -992,11 +1003,14 @@ vm_object_page_collect_flush(vm_object_t object, v (tp->oflags & VPO_CLEANCHK) == 0) || (tp->busy != 0)) break; + vm_page_lock(tp); vm_page_test_dirty(tp); if (tp->dirty == 0) { + vm_page_unlock(tp); tp->oflags &= ~VPO_CLEANCHK; break; } + vm_page_unlock(tp); mab[ i - 1 ] = tp; maxb++; continue; @@ -1022,7 +1036,9 @@ vm_object_page_collect_flush(vm_object_t object, v vm_pageout_flush(ma, runlen, pagerflags); for (i = 0; i < runlen; i++) { if (ma[i]->dirty) { + vm_page_lock(ma[i]); pmap_remove_write(ma[i]); + vm_page_unlock(ma[i]); ma[i]->oflags |= VPO_CLEANCHK; /* @@ -1196,12 +1212,12 @@ shadowlookup: * page queues to mess with. Things can break if we mess * with pages in any of the below states. */ - vm_page_lock_queues(); + vm_page_lock(m); if (m->hold_count || m->wire_count || (m->flags & PG_UNMANAGED) || m->valid != VM_PAGE_BITS_ALL) { - vm_page_unlock_queues(); + vm_page_unlock(m); goto unlock_tobject; } if ((m->oflags & VPO_BUSY) || m->busy) { @@ -1239,7 +1255,7 @@ shadowlookup: m->act_count = 0; vm_page_dontneed(m); } - vm_page_unlock_queues(); + vm_page_unlock(m); if (advise == MADV_FREE && tobject->type == OBJT_SWAP) swap_pager_freespace(tobject, tpindex, 1); unlock_tobject: @@ -1402,7 +1418,6 @@ retry: m = TAILQ_NEXT(m, listq); } } - vm_page_lock_queues(); for (; m != NULL && (idx = m->pindex - offidxstart) < size; m = m_next) { m_next = TAILQ_NEXT(m, listq); @@ -1426,8 +1441,8 @@ retry: vm_page_rename(m, new_object, idx); /* page automatically made dirty by rename and cache handled */ vm_page_busy(m); + vm_page_unlock(m); } - vm_page_unlock_queues(); if (orig_object->type == OBJT_SWAP) { /* * swap_pager_copy() can sleep, in which case the orig_object's @@ -1598,14 +1613,14 @@ vm_object_backing_scan(vm_object_t object, int op) * Page is out of the parent object's range, we * can simply destroy it. 
*/ - vm_page_lock_queues(); + vm_page_lock(p); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); if (p->wire_count == 0) vm_page_free(p); else vm_page_remove(p); - vm_page_unlock_queues(); + vm_page_unlock(p); p = next; continue; } @@ -1622,14 +1637,14 @@ vm_object_backing_scan(vm_object_t object, int op) * * Leave the parent's page alone */ - vm_page_lock_queues(); + vm_page_lock(p); KASSERT(!pmap_page_is_mapped(p), ("freeing mapped page %p", p)); if (p->wire_count == 0) vm_page_free(p); else vm_page_remove(p); - vm_page_unlock_queues(); + vm_page_unlock(p); p = next; continue; } @@ -1649,9 +1664,9 @@ vm_object_backing_scan(vm_object_t object, int op) * If the page was mapped to a process, it can remain * mapped through the rename. */ - vm_page_lock_queues(); + vm_page_lock(p); vm_page_rename(p, object, new_pindex); - vm_page_unlock_queues(); + vm_page_unlock(p); /* page automatically made dirty by rename */ } p = next; @@ -1916,7 +1931,7 @@ again: p = TAILQ_NEXT(p, listq); } } - vm_page_lock_queues(); + /* * Assert: the variable p is either (1) the page with the * least pindex greater than or equal to the parameter pindex @@ -1935,6 +1950,7 @@ again: * cannot be freed. They can, however, be invalidated * if "clean_only" is FALSE. */ + vm_page_lock(p); if ((wirings = p->wire_count) != 0 && (wirings = pmap_page_wired_mappings(p)) != p->wire_count) { /* Fictitious pages do not have managed mappings. */ @@ -1946,6 +1962,7 @@ again: p->valid = 0; vm_page_undirty(p); } + vm_page_unlock(p); continue; } if (vm_page_sleep_if_busy(p, TRUE, "vmopar")) @@ -1954,16 +1971,18 @@ again: ("vm_object_page_remove: page %p is fictitious", p)); if (clean_only && p->valid) { pmap_remove_write(p); - if (p->dirty) + if (p->dirty) { + vm_page_unlock(p); continue; + } } pmap_remove_all(p); /* Account for removal of managed, wired mappings. 
*/ if (wirings != 0) p->wire_count -= wirings; vm_page_free(p); + vm_page_unlock(p); } - vm_page_unlock_queues(); vm_object_pip_wakeup(object); skipmemq: if (__predict_false(object->cache != NULL)) @@ -1998,9 +2017,9 @@ vm_object_populate(vm_object_t object, vm_pindex_t if (m == NULL) break; if (rv != VM_PAGER_OK) { - vm_page_lock_queues(); + vm_page_lock(m); vm_page_free(m); - vm_page_unlock_queues(); + vm_page_unlock(m); break; } } Index: /usr/src/sys/vm/vm_fault.c =================================================================== --- /usr/src/sys/vm/vm_fault.c (revision 206700) +++ /usr/src/sys/vm/vm_fault.c (working copy) @@ -135,12 +135,15 @@ struct faultstate { static inline void release_page(struct faultstate *fs) { + vm_page_t m; - vm_page_wakeup(fs->m); - vm_page_lock_queues(); - vm_page_deactivate(fs->m); - vm_page_unlock_queues(); + m = fs->m; fs->m = NULL; + vm_page_wakeup(m); + vm_page_lock(m); + vm_page_deactivate(m); + vm_page_unlock(m); + } static inline void @@ -156,17 +159,19 @@ unlock_map(struct faultstate *fs) static void unlock_and_deallocate(struct faultstate *fs) { + vm_page_t m; vm_object_pip_wakeup(fs->object); VM_OBJECT_UNLOCK(fs->object); if (fs->object != fs->first_object) { VM_OBJECT_LOCK(fs->first_object); - vm_page_lock_queues(); - vm_page_free(fs->first_m); - vm_page_unlock_queues(); + m = fs->first_m; + fs->first_m = NULL; + vm_page_lock(m); + vm_page_free(m); + vm_page_unlock(m); vm_object_pip_wakeup(fs->first_object); VM_OBJECT_UNLOCK(fs->first_object); - fs->first_m = NULL; } vm_object_deallocate(fs->first_object); unlock_map(fs); @@ -305,12 +310,12 @@ RetryFault:; * removes the page from the backing object, * which is not what we want. */ - vm_page_lock_queues(); + vm_page_lock(fs.m); if ((fs.m->cow) && (fault_type & VM_PROT_WRITE) && (fs.object == fs.first_object)) { vm_page_cowfault(fs.m); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); unlock_and_deallocate(&fs); goto RetryFault; } @@ -332,13 +337,13 @@ RetryFault:; * to pmap it. */ if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) { - vm_page_unlock_queues(); + vm_page_unlock(fs.m); VM_OBJECT_UNLOCK(fs.object); if (fs.object != fs.first_object) { VM_OBJECT_LOCK(fs.first_object); - vm_page_lock_queues(); + vm_page_lock(fs.first_m); vm_page_free(fs.first_m); - vm_page_unlock_queues(); + vm_page_unlock(fs.first_m); vm_object_pip_wakeup(fs.first_object); VM_OBJECT_UNLOCK(fs.first_object); fs.first_m = NULL; @@ -357,7 +362,7 @@ RetryFault:; goto RetryFault; } vm_pageq_remove(fs.m); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); /* * Mark page busy for other processes, and the @@ -479,22 +484,20 @@ readrest: if (mt->busy || (mt->oflags & VPO_BUSY)) continue; - if (!are_queues_locked) { - are_queues_locked = TRUE; - vm_page_lock_queues(); - } + if (mt->hold_count || mt->wire_count) continue; + vm_page_lock(mt); pmap_remove_all(mt); if (mt->dirty) { vm_page_deactivate(mt); } else { vm_page_cache(mt); } + vm_page_unlock(mt); + } - if (are_queues_locked) - vm_page_unlock_queues(); ahead += behind; behind = 0; } @@ -623,17 +626,17 @@ vnode_locked: */ if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) { - vm_page_lock_queues(); + vm_page_lock(fs.m); vm_page_free(fs.m); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); fs.m = NULL; unlock_and_deallocate(&fs); return ((rv == VM_PAGER_ERROR) ? 
KERN_FAILURE : KERN_PROTECTION_FAILURE); } if (fs.object != fs.first_object) { - vm_page_lock_queues(); + vm_page_lock(fs.m); vm_page_free(fs.m); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); fs.m = NULL; /* * XXX - we cannot just fall out at this @@ -746,18 +749,20 @@ vnode_locked: * We don't chase down the shadow chain */ fs.object == fs.first_object->backing_object) { - vm_page_lock_queues(); /* * get rid of the unnecessary page */ + vm_page_lock(fs.first_m); vm_page_free(fs.first_m); + vm_page_unlock(fs.first_m); /* * grab the page and put it into the * process'es object. The page is * automatically made dirty. */ + vm_page_lock(fs.m); vm_page_rename(fs.m, fs.first_object, fs.first_pindex); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); vm_page_busy(fs.m); fs.first_m = fs.m; fs.m = NULL; @@ -770,10 +775,13 @@ vnode_locked: fs.first_m->valid = VM_PAGE_BITS_ALL; if (wired && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) { - vm_page_lock_queues(); + vm_page_lock(fs.first_m); vm_page_wire(fs.first_m); + vm_page_unlock(fs.first_m); + + vm_page_lock(fs.m); vm_page_unwire(fs.m, FALSE); - vm_page_unlock_queues(); + vm_page_unlock(fs.m); } /* * We no longer need the old page or object. @@ -923,7 +931,7 @@ vnode_locked: if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0) vm_fault_prefault(fs.map->pmap, vaddr, fs.entry); VM_OBJECT_LOCK(fs.object); - vm_page_lock_queues(); + vm_page_lock(fs.m); vm_page_flag_set(fs.m, PG_REFERENCED); /* @@ -938,7 +946,7 @@ vnode_locked: } else { vm_page_activate(fs.m); } - vm_page_unlock_queues(); + vm_page_unlock(fs.m); vm_page_wakeup(fs.m); /* @@ -1015,9 +1023,9 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, } if (m->valid == VM_PAGE_BITS_ALL && (m->flags & PG_FICTITIOUS) == 0) { - vm_page_lock_queues(); + vm_page_lock(m); pmap_enter_quick(pmap, addr, m, entry->protection); - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(lobject); } @@ -1080,6 +1088,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, v { vm_paddr_t pa; vm_offset_t va; + vm_page_t m; pmap_t pmap; pmap = vm_map_pmap(map); @@ -1093,9 +1102,10 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, v if (pa != 0) { pmap_change_wiring(pmap, va, FALSE); if (!fictitious) { - vm_page_lock_queues(); - vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1); - vm_page_unlock_queues(); + m = PHYS_TO_VM_PAGE(pa); + vm_page_lock(m); + vm_page_unwire(m, 1); + vm_page_unlock(m); } } } @@ -1238,13 +1248,19 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src * Mark it no longer busy, and put it on the active list. 
*/ VM_OBJECT_LOCK(dst_object); - vm_page_lock_queues(); if (upgrade) { + vm_page_lock(src_m); vm_page_unwire(src_m, 0); + vm_page_unlock(src_m); + + vm_page_lock(dst_m); vm_page_wire(dst_m); - } else + vm_page_unlock(dst_m); + } else { + vm_page_lock(dst_m); vm_page_activate(dst_m); - vm_page_unlock_queues(); + vm_page_unlock(dst_m); + } vm_page_wakeup(dst_m); } VM_OBJECT_UNLOCK(dst_object); Index: /usr/src/sys/vm/device_pager.c =================================================================== --- /usr/src/sys/vm/device_pager.c (revision 206700) +++ /usr/src/sys/vm/device_pager.c (working copy) @@ -251,12 +251,13 @@ dev_pager_getpages(object, m, count, reqpage) VM_OBJECT_LOCK(object); dev_pager_updatefake(page, paddr, memattr); if (count > 1) { - vm_page_lock_queues(); for (i = 0; i < count; i++) { - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); vm_page_free(m[i]); + vm_page_unlock(m[i]); + } } - vm_page_unlock_queues(); } } else { /* @@ -266,10 +267,11 @@ dev_pager_getpages(object, m, count, reqpage) page = dev_pager_getfake(paddr, memattr); VM_OBJECT_LOCK(object); TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq); - vm_page_lock_queues(); - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } vm_page_insert(page, object, offset); m[reqpage] = page; } Index: /usr/src/sys/vm/vm_page.c =================================================================== --- /usr/src/sys/vm/vm_page.c (revision 206700) +++ /usr/src/sys/vm/vm_page.c (working copy) @@ -148,7 +148,7 @@ TUNABLE_INT("vm.boot_pages", &boot_pages); SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0, "number of pages allocated for bootstrapping the VM system"); -static void vm_page_enqueue(int queue, vm_page_t m); +static void _vm_page_free_toq(vm_page_t m, boolean_t locked); /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. 
*/ #if PAGE_SIZE == 32768 @@ -406,7 +406,7 @@ void vm_page_flag_set(vm_page_t m, unsigned short bits) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); m->flags |= bits; } @@ -414,7 +414,7 @@ void vm_page_flag_clear(vm_page_t m, unsigned short bits) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); m->flags &= ~bits; } @@ -489,7 +489,7 @@ void vm_page_hold(vm_page_t mem) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(mem, MA_OWNED); mem->hold_count++; } @@ -497,11 +497,11 @@ void vm_page_unhold(vm_page_t mem) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(mem, MA_OWNED); --mem->hold_count; KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!")); if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD)) - vm_page_free_toq(mem); + _vm_page_free_toq(mem, TRUE); } /* @@ -517,6 +517,14 @@ vm_page_free(vm_page_t m) vm_page_free_toq(m); } +void +vm_page_free_locked(vm_page_t m) +{ + + m->flags &= ~PG_ZERO; + _vm_page_free_toq(m, 1); +} + /* * vm_page_free_zero: * @@ -542,10 +550,10 @@ vm_page_sleep(vm_page_t m, const char *msg) { VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); - if (!mtx_owned(&vm_page_queue_mtx)) - vm_page_lock_queues(); + if (!mtx_owned(vm_page_lockptr(m))) + vm_page_lock(m); vm_page_flag_set(m, PG_REFERENCED); - vm_page_unlock_queues(); + vm_page_unlock(m); /* * It's possible that while we sleep, the page will get @@ -731,7 +739,7 @@ vm_page_remove(vm_page_t m) m->oflags &= ~VPO_BUSY; vm_page_flash(m); } - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); /* * Now remove from the object's list of backed pages. @@ -1251,18 +1259,28 @@ vm_waitpfault(void) * The page queues must be locked. */ void -vm_page_requeue(vm_page_t m) +vm_page_requeue_locked(vm_page_t m) { - int queue = VM_PAGE_GETQUEUE(m); + int queue; struct vpgqueues *vpq; + queue = VM_PAGE_GETQUEUE(m); if (queue != PQ_NONE) { + mtx_assert(&vm_page_queue_mtx, MA_OWNED); vpq = &vm_page_queues[queue]; TAILQ_REMOVE(&vpq->pl, m, pageq); TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); } } +void +vm_page_requeue(vm_page_t m) +{ + vm_page_lock_queues(); + vm_page_requeue_locked(m); + vm_page_unlock_queues(); +} + /* * vm_pageq_remove: * @@ -1271,20 +1289,41 @@ void * The queue containing the given page must be locked. * This routine may not block. */ -void -vm_pageq_remove(vm_page_t m) +static void +_vm_pageq_remove(vm_page_t m, boolean_t locked) { - int queue = VM_PAGE_GETQUEUE(m); + int queue; struct vpgqueues *pq; + queue = VM_PAGE_GETQUEUE(m); if (queue != PQ_NONE) { + if (locked == FALSE) + vm_page_lock_queues(); VM_PAGE_SETQUEUE2(m, PQ_NONE); pq = &vm_page_queues[queue]; TAILQ_REMOVE(&pq->pl, m, pageq); (*pq->cnt)--; + if (locked == FALSE) + vm_page_unlock_queues(); } } +void +vm_pageq_remove_locked(vm_page_t m) +{ + + mtx_assert(&vm_page_queue_mtx, MA_OWNED); + _vm_pageq_remove(m, TRUE); +} + +void +vm_pageq_remove(vm_page_t m) +{ + + vm_page_lock_queues_assert_notowned(); + _vm_pageq_remove(m, FALSE); +} + /* * vm_page_enqueue: * @@ -1293,14 +1332,26 @@ void * The page queues must be locked. 
*/ static void -vm_page_enqueue(int queue, vm_page_t m) +_vm_page_enqueue(int queue, vm_page_t m, boolean_t locked) { struct vpgqueues *vpq; + if (locked == FALSE) { + vm_page_lock_queues_assert_notowned(); + vm_page_lock_queues(); + } +#ifdef INVARIANTS + /* avoid dangling else */ + else + mtx_assert(&vm_page_queue_mtx, MA_OWNED); +#endif + vpq = &vm_page_queues[queue]; VM_PAGE_SETQUEUE2(m, queue); TAILQ_INSERT_TAIL(&vpq->pl, m, pageq); ++*vpq->cnt; + if (locked == FALSE) + vm_page_unlock_queues(); } /* @@ -1314,16 +1365,17 @@ static void * This routine may not block. */ void -vm_page_activate(vm_page_t m) +vm_page_activate_locked(vm_page_t m) { + vm_page_lock_assert(m, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) { - vm_pageq_remove(m); + vm_pageq_remove_locked(m); if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; - vm_page_enqueue(PQ_ACTIVE, m); + _vm_page_enqueue(PQ_ACTIVE, m, TRUE); } } else { if (m->act_count < ACT_INIT) @@ -1331,6 +1383,14 @@ void } } +void +vm_page_activate(vm_page_t m) +{ + vm_page_lock_queues(); + vm_page_activate_locked(m); + vm_page_unlock_queues(); +} + /* * vm_page_free_wakeup: * @@ -1376,15 +1436,18 @@ vm_page_free_wakeup(void) * This routine may not block. */ -void -vm_page_free_toq(vm_page_t m) +static void +_vm_page_free_toq(vm_page_t m, boolean_t locked) { - if (VM_PAGE_GETQUEUE(m) != PQ_NONE) - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - KASSERT(!pmap_page_is_mapped(m), - ("vm_page_free_toq: freeing mapped page %p", m)); PCPU_INC(cnt.v_tfree); +#ifdef INVARIANTS + if (VM_PAGE_GETQUEUE(m) != PQ_NONE || m->object != NULL || + m->hold_count) { + vm_page_lock_assert(m, MA_OWNED); + KASSERT(!pmap_page_is_mapped(m), + ("vm_page_free_toq: freeing mapped page %p", m)); + } if (m->busy || VM_PAGE_IS_FREE(m)) { printf( @@ -1396,6 +1459,10 @@ vm_page_free_wakeup(void) else panic("vm_page_free: freeing busy page"); } + KASSERT(m->wire_count == 0, + ("vm_page_free: freeing wired page. Count: %d, pindex: 0x%lx", + m->wire_count, (long)m->pindex)); +#endif /* * unqueue, then remove page. Note that we cannot destroy @@ -1403,7 +1470,6 @@ vm_page_free_wakeup(void) * callback routine until after we've put the page on the * appropriate free queue. */ - vm_pageq_remove(m); vm_page_remove(m); /* @@ -1426,8 +1492,10 @@ vm_page_free_wakeup(void) } if (m->hold_count != 0) { m->flags &= ~PG_ZERO; - vm_page_enqueue(PQ_HOLD, m); + _vm_pageq_remove(m, locked); + _vm_page_enqueue(PQ_HOLD, m, locked); } else { + _vm_pageq_remove(m, locked); /* * Restore the default memory attribute to the page. */ @@ -1456,6 +1524,13 @@ vm_page_free_wakeup(void) } } +void +vm_page_free_toq(vm_page_t m) +{ + + _vm_page_free_toq(m, FALSE); +} + /* * vm_page_wire: * @@ -1475,7 +1550,7 @@ vm_page_wire(vm_page_t m) * and only unqueue the page if it is on some queue (if it is unmanaged * it is already off the queues). 
*/ - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->flags & PG_FICTITIOUS) return; if (m->wire_count == 0) { @@ -1519,7 +1594,7 @@ void vm_page_unwire(vm_page_t m, int activate) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->flags & PG_FICTITIOUS) return; if (m->wire_count > 0) { @@ -1529,10 +1604,10 @@ vm_page_unwire(vm_page_t m, int activate) if (m->flags & PG_UNMANAGED) { ; } else if (activate) - vm_page_enqueue(PQ_ACTIVE, m); + _vm_page_enqueue(PQ_ACTIVE, m, FALSE); else { vm_page_flag_clear(m, PG_WINATCFLS); - vm_page_enqueue(PQ_INACTIVE, m); + _vm_page_enqueue(PQ_INACTIVE, m, FALSE); } } } else { @@ -1556,7 +1631,7 @@ _vm_page_deactivate(vm_page_t m, int athead) { mtx_assert(&vm_page_queue_mtx, MA_OWNED); - + vm_page_lock_assert(m, MA_OWNED); /* * Ignore if already inactive. */ @@ -1564,7 +1639,7 @@ _vm_page_deactivate(vm_page_t m, int athead) return; if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { vm_page_flag_clear(m, PG_WINATCFLS); - vm_pageq_remove(m); + vm_pageq_remove_locked(m); if (athead) TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq); else @@ -1575,9 +1650,19 @@ _vm_page_deactivate(vm_page_t m, int athead) } void +vm_page_deactivate_locked(vm_page_t m) +{ + + _vm_page_deactivate(m, 0); +} + +void vm_page_deactivate(vm_page_t m) { - _vm_page_deactivate(m, 0); + + vm_page_lock_queues(); + _vm_page_deactivate(m, 0); + vm_page_unlock_queues(); } /* @@ -1589,7 +1674,7 @@ int vm_page_try_to_cache(vm_page_t m) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->dirty || m->hold_count || m->busy || m->wire_count || (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) { @@ -1612,7 +1697,7 @@ int vm_page_try_to_free(vm_page_t m) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->object != NULL) VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (m->dirty || m->hold_count || m->busy || m->wire_count || @@ -1633,13 +1718,13 @@ vm_page_try_to_free(vm_page_t m) * * This routine may not block. */ -void -vm_page_cache(vm_page_t m) +static void +_vm_page_cache(vm_page_t m, boolean_t locked) { vm_object_t object; vm_page_t root; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); object = m->object; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy || @@ -1667,7 +1752,7 @@ vm_page_try_to_free(vm_page_t m) /* * Remove the page from the paging queues. 
*/ - vm_pageq_remove(m); + _vm_pageq_remove(m, locked); /* * Remove the page from the object's collection of resident @@ -1743,6 +1828,20 @@ vm_page_try_to_free(vm_page_t m) } } +void +vm_page_cache(vm_page_t m) +{ + + _vm_page_cache(m, FALSE); +} + +void +vm_page_cache_locked(vm_page_t m) +{ + + _vm_page_cache(m, TRUE); +} + /* * vm_page_dontneed * @@ -1771,7 +1870,7 @@ vm_page_dontneed(vm_page_t m) int dnw; int head; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); dnw = ++dnweight; /* @@ -1807,7 +1906,9 @@ vm_page_dontneed(vm_page_t m) */ head = 1; } + vm_page_lock_queues(); _vm_page_deactivate(m, head); + vm_page_unlock_queues(); } /* @@ -1832,9 +1933,9 @@ retrylookup: goto retrylookup; } else { if ((allocflags & VM_ALLOC_WIRED) != 0) { - vm_page_lock_queues(); + vm_page_lock(m); vm_page_wire(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } if ((allocflags & VM_ALLOC_NOBUSY) == 0) vm_page_busy(m); @@ -1953,7 +2054,7 @@ vm_page_set_validclean(vm_page_t m, int base, int int frag; int endoff; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if (size == 0) /* handle degenerate case */ return; @@ -2012,7 +2113,7 @@ void vm_page_clear_dirty(vm_page_t m, int base, int size) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); m->dirty &= ~vm_page_bits(base, size); } @@ -2031,7 +2132,7 @@ vm_page_set_invalid(vm_page_t m, int base, int siz VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); bits = vm_page_bits(base, size); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->valid == VM_PAGE_BITS_ALL && bits != 0) pmap_remove_all(m); m->valid &= ~bits; @@ -2133,6 +2234,8 @@ vm_page_cowfault(vm_page_t m) vm_object_t object; vm_pindex_t pindex; + /* XXX Not properly locked. */ + panic("vm_page_cowfault: Not properly locked\n"); object = m->object; VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->paging_in_progress != 0, @@ -2146,18 +2249,18 @@ vm_page_cowfault(vm_page_t m) mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY); if (mnew == NULL) { vm_page_insert(m, object, pindex); - vm_page_unlock_queues(); + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); VM_WAIT; VM_OBJECT_LOCK(object); if (m == vm_page_lookup(object, pindex)) { - vm_page_lock_queues(); + vm_page_lock(m); goto retry_alloc; } else { /* * Page disappeared during the wait. 
*/ - vm_page_lock_queues(); + vm_page_lock(m); return; } } @@ -2184,7 +2287,7 @@ void vm_page_cowclear(vm_page_t m) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->cow) { m->cow--; /* @@ -2200,7 +2303,7 @@ int vm_page_cowsetup(vm_page_t m) { - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (m->cow == USHRT_MAX - 1) return (EBUSY); m->cow++; Index: /usr/src/sys/vm/vm_page.h =================================================================== --- /usr/src/sys/vm/vm_page.h (revision 206700) +++ /usr/src/sys/vm/vm_page.h (working copy) @@ -271,7 +271,21 @@ extern struct vpglocks vm_page_queue_lock; #define vm_page_queue_mtx vm_page_queue_lock.data #define vm_page_lock_queues() mtx_lock(&vm_page_queue_mtx) #define vm_page_unlock_queues() mtx_unlock(&vm_page_queue_mtx) +#define vm_page_trylock_queues() mtx_trylock(&vm_page_queue_mtx) +#ifdef VM_PAGE_LOCK +#define vm_page_lockptr(m) pmap_page_lockptr(m) +#define vm_page_lock_queues_assert_notowned() mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED) +#else +#define vm_page_lockptr(m) (&vm_page_queue_mtx) +#define vm_page_lock_queues_assert_notowned() +#endif +#define vm_page_lock(m) mtx_lock(vm_page_lockptr((m))) +#define vm_page_unlock(m) mtx_unlock(vm_page_lockptr((m))) +#define vm_page_trylock(m) mtx_trylock(vm_page_lockptr((m))) +#define vm_page_lock_assert(m, a) mtx_assert(vm_page_lockptr((m)), (a)) + + #if PAGE_SIZE == 4096 #define VM_PAGE_BITS_ALL 0xffu #elif PAGE_SIZE == 8192 @@ -310,10 +324,13 @@ void vm_page_dirty(vm_page_t m); void vm_page_wakeup(vm_page_t m); void vm_pageq_remove(vm_page_t m); +void vm_pageq_remove_locked(vm_page_t m); void vm_page_activate (vm_page_t); +void vm_page_activate_locked (vm_page_t); vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int); vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int); +void vm_page_cache_locked(vm_page_t); void vm_page_cache(vm_page_t); void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t); void vm_page_cache_remove(vm_page_t); @@ -322,11 +339,13 @@ int vm_page_try_to_cache (vm_page_t); int vm_page_try_to_free (vm_page_t); void vm_page_dontneed(vm_page_t); void vm_page_deactivate (vm_page_t); +void vm_page_deactivate_locked (vm_page_t); void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t); vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t); void vm_page_remove (vm_page_t); void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t); void vm_page_requeue(vm_page_t m); +void vm_page_requeue_locked(vm_page_t m); void vm_page_set_valid(vm_page_t m, int base, int size); void vm_page_sleep(vm_page_t m, const char *msg); vm_page_t vm_page_splay(vm_pindex_t, vm_page_t); @@ -341,6 +360,7 @@ void vm_page_test_dirty (vm_page_t); int vm_page_bits (int, int); void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid); void vm_page_free_toq(vm_page_t m); +void vm_page_free_locked(vm_page_t m); void vm_page_zero_idle_wakeup(void); void vm_page_cowfault (vm_page_t); int vm_page_cowsetup(vm_page_t); @@ -349,7 +369,7 @@ void vm_page_cowclear (vm_page_t); /* * vm_page_sleep_if_busy: * - * Sleep and release the page queues lock if VPO_BUSY is set or, + * Sleep and release the page lock if VPO_BUSY is set or, * if also_m_busy is TRUE, busy is non-zero. Returns TRUE if the * thread slept and the page lock was released. * Otherwise, retains the page lock and returns FALSE.
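The vm_page_lock()/vm_page_unlock() macros added above are the pivot of the whole patch: with VM_PAGE_LOCK defined (amd64, below), vm_page_lockptr() resolves to pmap_page_lockptr(), which hashes the page's physical address into a fixed array of mutexes; on unconverted platforms it falls back to the single page queue mutex, so every caller compiles unchanged either way. A minimal user-space model of that indirection, with pthread mutexes standing in for struct mtx; page_lockptr(), NLOCKS and PER_PAGE_LOCK are illustrative names, not part of the patch, and the shift by 21 assumes amd64's 2MB PDRSHIFT:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NLOCKS 256                      /* mirrors PA_LOCK_COUNT in pmap.c */
#define PER_PAGE_LOCK 1                 /* stands in for VM_PAGE_LOCK */

struct page {
	uintptr_t pa;                   /* physical address of the frame */
	int wire_count;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t locktab[NLOCKS];

#ifdef PER_PAGE_LOCK
/*
 * Hash the frame address to one of NLOCKS mutexes; the patch hashes by
 * PDRSHIFT (one lock per 2MB region) through pmap_page_lockptr().
 */
static pthread_mutex_t *
page_lockptr(struct page *m)
{
	return (&locktab[(m->pa >> 21) % NLOCKS]);
}
#else
/* Fallback: every page still shares the one old page queue mutex. */
static pthread_mutex_t *
page_lockptr(struct page *m)
{
	(void)m;
	return (&queue_lock);
}
#endif

#define page_lock(m)	pthread_mutex_lock(page_lockptr(m))
#define page_unlock(m)	pthread_mutex_unlock(page_lockptr(m))

/* A caller converted the way the vm_page_wire() call sites are above. */
static void
page_wire(struct page *m)
{
	page_lock(m);
	m->wire_count++;
	page_unlock(m);
}

int
main(void)
{
	struct page p = { 0x1234000, 0 };
	int i;

	for (i = 0; i < NLOCKS; i++)
		pthread_mutex_init(&locktab[i], NULL);
	page_wire(&p);
	printf("wire_count = %d\n", p.wire_count);
	return (0);
}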
Index: /usr/src/sys/vm/vm_contig.c =================================================================== --- /usr/src/sys/vm/vm_contig.c (revision 206700) +++ /usr/src/sys/vm/vm_contig.c (working copy) @@ -105,6 +105,11 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *nex VM_OBJECT_UNLOCK(object); return (EAGAIN); } + if (vm_page_trylock(m) == 0) { + VM_OBJECT_UNLOCK(object); + return (EAGAIN); + } + vm_page_unlock_queues(); if (vm_page_sleep_if_busy(m, TRUE, "vpctw0")) { VM_OBJECT_UNLOCK(object); vm_page_lock_queues(); @@ -115,11 +120,13 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *nex pmap_remove_all(m); if (m->dirty) { if ((object->flags & OBJ_DEAD) != 0) { + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); + vm_page_lock_queues(); return (EAGAIN); } if (object->type == OBJT_VNODE) { - vm_page_unlock_queues(); + vm_page_unlock(m); vp = object->handle; vm_object_reference_locked(object); VM_OBJECT_UNLOCK(object); @@ -140,11 +147,14 @@ vm_contig_launder_page(vm_page_t m, vm_page_t *nex m_tmp = m; vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC); VM_OBJECT_UNLOCK(object); + vm_page_lock_queues(); return (0); } } else if (m->hold_count == 0) vm_page_cache(m); + vm_page_unlock(m); VM_OBJECT_UNLOCK(object); + vm_page_lock_queues(); return (0); } @@ -257,9 +267,9 @@ retry: i -= PAGE_SIZE; m = vm_page_lookup(object, OFF_TO_IDX(offset + i)); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_free(m); - vm_page_unlock_queues(); + vm_page_unlock(m); } VM_OBJECT_UNLOCK(object); vm_map_delete(map, addr, addr + size); Index: /usr/src/sys/vm/vnode_pager.c =================================================================== --- /usr/src/sys/vm/vnode_pager.c (revision 206700) +++ /usr/src/sys/vm/vnode_pager.c (working copy) @@ -429,9 +429,9 @@ vnode_pager_setsize(vp, nsize) * bits. This would prevent bogus_page * replacement from working properly. 
*/ - vm_page_lock_queues(); + vm_page_lock(m); vm_page_clear_dirty(m, base, PAGE_SIZE - base); - vm_page_unlock_queues(); + vm_page_unlock(m); } else if ((nsize & PAGE_MASK) && __predict_false(object->cache != NULL)) { vm_page_cache_free(object, OFF_TO_IDX(nsize), @@ -719,11 +719,12 @@ vnode_pager_generic_getpages(vp, m, bytecount, req error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL); if (error == EOPNOTSUPP) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodepgsin); error = vnode_pager_input_old(object, m[reqpage]); @@ -731,11 +732,12 @@ vnode_pager_generic_getpages(vp, m, bytecount, req return (error); } else if (error != 0) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return (VM_PAGER_ERROR); @@ -747,11 +749,12 @@ vnode_pager_generic_getpages(vp, m, bytecount, req } else if ((PAGE_SIZE / bsize) > 1 && (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); PCPU_INC(cnt.v_vnodein); PCPU_INC(cnt.v_vnodepgsin); @@ -765,11 +768,12 @@ vnode_pager_generic_getpages(vp, m, bytecount, req */ VM_OBJECT_LOCK(object); if (m[reqpage]->valid == VM_PAGE_BITS_ALL) { - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return VM_PAGER_OK; } else if (reqblock == -1) { @@ -777,11 +781,12 @@ vnode_pager_generic_getpages(vp, m, bytecount, req KASSERT(m[reqpage]->dirty == 0, ("vnode_pager_generic_getpages: page %p is dirty", m)); m[reqpage]->valid = VM_PAGE_BITS_ALL; - vm_page_lock_queues(); for (i = 0; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return (VM_PAGER_OK); } @@ -800,11 +805,12 @@ vnode_pager_generic_getpages(vp, m, bytecount, req if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr, &runpg) != 0) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (; i < count; i++) - if (i != reqpage) + if (i != reqpage) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); return (VM_PAGER_ERROR); } @@ -818,9 +824,9 @@ vnode_pager_generic_getpages(vp, m, bytecount, req (object->un_pager.vnp.vnp_size >> 32), (uintmax_t)object->un_pager.vnp.vnp_size); } - vm_page_lock_queues(); + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); VM_OBJECT_UNLOCK(object); runend = i + 1; first = runend; @@ -829,18 +835,20 @@ vnode_pager_generic_getpages(vp, m, bytecount, req runend = i + runpg; if (runend <= reqpage) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); - for (j = i; j < runend; j++) + for (j = i; j < runend; j++) { + vm_page_lock(m[j]); vm_page_free(m[j]); - vm_page_unlock_queues(); + vm_page_unlock(m[j]); + } VM_OBJECT_UNLOCK(object); } else { if 
(runpg < (count - first)) { VM_OBJECT_LOCK(object); - vm_page_lock_queues(); - for (i = first + runpg; i < count; i++) + for (i = first + runpg; i < count; i++) { + vm_page_lock(m[i]); vm_page_free(m[i]); - vm_page_unlock_queues(); + vm_page_unlock(m[i]); + } VM_OBJECT_UNLOCK(object); count = first + runpg; } @@ -931,13 +939,13 @@ vnode_pager_generic_getpages(vp, m, bytecount, req relpbuf(bp, &vnode_pbuf_freecnt); VM_OBJECT_LOCK(object); - vm_page_lock_queues(); for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) { vm_page_t mt; nextoff = tfoff + PAGE_SIZE; mt = m[i]; + vm_page_lock(mt); if (nextoff <= object->un_pager.vnp.vnp_size) { /* * Read filled up entire page. @@ -989,8 +997,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, req vm_page_free(mt); } } + vm_page_unlock(mt); } - vm_page_unlock_queues(); VM_OBJECT_UNLOCK(object); if (error) { printf("vnode_pager_getpages: I/O read error\n"); @@ -1113,10 +1121,13 @@ vnode_pager_generic_putpages(vp, m, bytecount, fla maxsize = object->un_pager.vnp.vnp_size - poffset; ncount = btoc(maxsize); if ((pgoff = (int)maxsize & PAGE_MASK) != 0) { - vm_page_lock_queues(); - vm_page_clear_dirty(m[ncount - 1], pgoff, + vm_page_t p; + + p = m[ncount - 1]; + vm_page_lock(p); + vm_page_clear_dirty(p, pgoff, PAGE_SIZE - pgoff); - vm_page_unlock_queues(); + vm_page_unlock(p); } } else { maxsize = 0; Index: /usr/src/sys/vm/uma_core.c =================================================================== --- /usr/src/sys/vm/uma_core.c (revision 206700) +++ /usr/src/sys/vm/uma_core.c (working copy) @@ -1022,10 +1022,10 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *fl while (pages != startpages) { pages--; p = TAILQ_LAST(&object->memq, pglist); - vm_page_lock_queues(); + vm_page_lock(p); vm_page_unwire(p, 0); vm_page_free(p); - vm_page_unlock_queues(); + vm_page_unlock(p); } retkva = 0; goto done; Index: /usr/src/sys/net/bpf_zerocopy.c =================================================================== --- /usr/src/sys/net/bpf_zerocopy.c (revision 206700) +++ /usr/src/sys/net/bpf_zerocopy.c (working copy) @@ -112,11 +112,11 @@ static void zbuf_page_free(vm_page_t pp) { - vm_page_lock_queues(); + vm_page_lock(pp); vm_page_unwire(pp, 0); - if (pp->wire_count == 0 && pp->object == NULL) + if (pp->wire_count == 0 && pp->object == NULL) vm_page_free(pp); - vm_page_unlock_queues(); + vm_page_unlock(pp); } /* @@ -168,10 +168,10 @@ zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uad VM_PROT_WRITE); if (pp == NULL) return (NULL); - vm_page_lock_queues(); + vm_page_lock(pp); vm_page_wire(pp); vm_page_unhold(pp); - vm_page_unlock_queues(); + vm_page_unlock(pp); sf = sf_buf_alloc(pp, SFB_NOWAIT); if (sf == NULL) { zbuf_page_free(pp); Index: /usr/src/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c =================================================================== --- /usr/src/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c (revision 206700) +++ /usr/src/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c (working copy) @@ -341,9 +341,9 @@ again: goto again; fsize = obj->un_pager.vnp.vnp_size; vm_page_busy(m); - vm_page_lock_queues(); + vm_page_lock(m); vm_page_undirty(m); - vm_page_unlock_queues(); + vm_page_unlock(m); VM_OBJECT_UNLOCK(obj); if (dirbytes > 0) { error = dmu_write_uio(os, zp->z_id, uio, Index: /usr/src/sys/amd64/include/pmap.h =================================================================== --- /usr/src/sys/amd64/include/pmap.h (revision 206700) +++ /usr/src/sys/amd64/include/pmap.h (working copy) @@ -230,6 +230,7 @@ 
extern pt_entry_t pg_nx; */ struct pv_entry; struct pv_chunk; +TAILQ_HEAD(pv_list_head, pv_entry); struct md_page { TAILQ_HEAD(,pv_entry) pv_list; @@ -248,6 +249,7 @@ struct pmap { /* spare u_int here due to padding */ struct pmap_statistics pm_stats; /* pmap statistics */ vm_page_t pm_root; /* spare page table pages */ + vm_page_t pm_free; /* Temporary free pages. */ }; typedef struct pmap *pmap_t; @@ -257,7 +259,9 @@ extern struct pmap kernel_pmap_store; #define kernel_pmap (&kernel_pmap_store) #define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) -#define PMAP_LOCK_ASSERT(pmap, type) \ +#define PMAP_LOCKPTR(pmap) (&(pmap)->pm_mtx) + +#define PMAP_LOCK_ASSERT(pmap, type) \ mtx_assert(&(pmap)->pm_mtx, (type)) #define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) #define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ Index: /usr/src/sys/amd64/include/vmparam.h =================================================================== --- /usr/src/sys/amd64/include/vmparam.h (revision 206700) +++ /usr/src/sys/amd64/include/vmparam.h (working copy) @@ -88,6 +88,12 @@ #define UMA_MD_SMALL_ALLOC /* + * We support hashing pages to an entry in array of locks on amd64 + */ + +#define VM_PAGE_LOCK + +/* * The physical address space is densely populated. */ #define VM_PHYSSEG_DENSE Index: /usr/src/sys/amd64/amd64/pmap.c =================================================================== --- /usr/src/sys/amd64/amd64/pmap.c (revision 206700) +++ /usr/src/sys/amd64/amd64/pmap.c (working copy) @@ -160,16 +160,33 @@ __FBSDID("$FreeBSD$"); #define PMAP_INLINE #endif -#define PV_STATS #ifdef PV_STATS #define PV_STAT(x) do { x ; } while (0) #else #define PV_STAT(x) do { } while (0) #endif +#define PA_LOCK_PAD CACHE_LINE_SIZE + +struct vp_lock { + struct mtx vp_lock; + unsigned char pad[(PA_LOCK_PAD - sizeof(struct mtx))]; +}; + #define pa_index(pa) ((pa) >> PDRSHIFT) #define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) +#define PA_LOCKPTR(pa) &pa_lock[pa_index((pa)) % PA_LOCK_COUNT].vp_lock +#define PA_LOCK(pa) mtx_lock(PA_LOCKPTR(pa)) +#define PA_TRYLOCK(pa) mtx_trylock(PA_LOCKPTR(pa)) +#define PA_UNLOCK(pa) mtx_unlock(PA_LOCKPTR(pa)) +#define PA_LOCK_ASSERT(pa, a) mtx_assert(PA_LOCKPTR(pa), (a)) + +#define PA_LOCK_COUNT 256 + +struct vp_lock pa_lock[PA_LOCK_COUNT] __aligned(CACHE_LINE_SIZE); + + struct pmap kernel_pmap_store; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ @@ -188,6 +205,15 @@ static int pg_ps_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN, &pg_ps_enabled, 0, "Are large page mappings enabled?"); +static uint64_t pmap_tryrelock_calls; +SYSCTL_QUAD(_vm_pmap, OID_AUTO, tryrelock_calls, CTLFLAG_RD, + &pmap_tryrelock_calls, 0, "Number of tryrelock calls"); + +static int pmap_tryrelock_restart; +SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_restart, CTLFLAG_RD, + &pmap_tryrelock_restart, 0, "Number of tryrelock restarts"); + + static u_int64_t KPTphys; /* phys addr of kernel level 1 */ static u_int64_t KPDphys; /* phys addr of kernel level 2 */ u_int64_t KPDPphys; /* phys addr of kernel level 3 */ @@ -199,7 +225,8 @@ static u_int64_t DMPDPphys; /* phys addr of direct /* * Data for the pv entry allocation mechanism */ -static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; +static int pv_entry_count __aligned(CACHE_LINE_SIZE); +static int pv_entry_max = 0, pv_entry_high_water = 0; static struct md_page *pv_table; static int shpgperproc = PMAP_SHPGPERPROC; @@ -215,8 +242,9 @@ caddr_t CADDR1 = 0; static caddr_t crashdumpmap; 
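The struct vp_lock defined above pads every mutex in pa_lock[] out to PA_LOCK_PAD (one cache line) and aligns the array, so that two hot page locks never share a line and bounce it between cores; pv_entry_count gets its own __aligned(CACHE_LINE_SIZE) line for the same reason. A short C11 illustration of the layout trick, assuming a 64-byte line; struct padded_lock and the printed sizes are illustrative only, not from the patch:

#include <stdalign.h>
#include <pthread.h>
#include <stdio.h>

#define CACHE_LINE 64	/* assumed; the kernel uses CACHE_LINE_SIZE */

/*
 * One mutex per cache line, the same layout struct vp_lock builds by
 * hand with its pad[] member; alignas() is the portable C11 spelling.
 */
struct padded_lock {
	alignas(CACHE_LINE) pthread_mutex_t mtx;
};

static struct padded_lock tab[4];

int
main(void)
{
	printf("sizeof(pthread_mutex_t)    = %zu\n", sizeof(pthread_mutex_t));
	printf("sizeof(struct padded_lock) = %zu\n", sizeof(struct padded_lock));
	printf("stride tab[0] -> tab[1]    = %zu\n",
	    (size_t)((char *)&tab[1] - (char *)&tab[0]));
	return (0);
}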
static void free_pv_entry(pmap_t pmap, pv_entry_t pv); -static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try); -static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); +static pv_entry_t get_pv_entry(pmap_t locked_pmap); +static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, + struct pv_list_head *pv_list); static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); @@ -225,7 +253,8 @@ static pv_entry_t pmap_pvh_remove(struct md_page * static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode); -static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); +static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, + struct pv_list_head *pv_list); static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va); static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, @@ -244,7 +273,7 @@ static boolean_t pmap_protect_pde(pmap_t pmap, pd_ vm_prot_t prot); static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, - vm_page_t *free); + vm_page_t *free, struct pv_list_head *pv_list); static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva, pd_entry_t ptepde, vm_page_t *free); static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte); @@ -252,17 +281,17 @@ static void pmap_remove_page(pmap_t pmap, vm_offse vm_page_t *free); static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); -static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde); static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde); -static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags); -static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); +static vm_page_t pmap_allocpde(pmap_t pmap, vm_paddr_t pa, vm_offset_t va, int flags); +static vm_page_t pmap_allocpte(pmap_t pmap, vm_paddr_t pa, vm_offset_t va, int flags); -static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags); +static vm_page_t _pmap_allocpte(pmap_t pmap, vm_paddr_t pa, + vm_pindex_t ptepindex, int flags); static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t* free); static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t, vm_page_t *); @@ -271,6 +300,82 @@ static vm_offset_t pmap_kmem_choose(vm_offset_t ad CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t)); CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t)); + +#define LS_MAX 4 +struct lock_stack { + struct mtx *ls_array[LS_MAX]; + int ls_top; +}; + +static void +ls_init(struct lock_stack *ls) +{ + + ls->ls_top = 0; +} + + +#define ls_push(ls, m) _ls_push((ls), (m), LOCK_FILE, LOCK_LINE) + +static void +_ls_push(struct lock_stack *ls, struct mtx *lock, char *file, int line) +{ + + KASSERT(ls->ls_top < LS_MAX, ("lock stack overflow")); + + ls->ls_array[ls->ls_top] = lock; + ls->ls_top++; +#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE) + _mtx_lock_flags(lock, 0, file, line); +#else + _get_sleep_lock(lock, curthread, 0, file, line); +#endif +} + 
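The _ls_push() helper above, together with ls_trypush() and ls_popa() that follow, gives pmap_enter() a small stack of held locks: the page lock and the pmap lock are pushed in acquisition order, and when an allocation must sleep the whole prefix is dropped with a single ls_popa() and retaken afterwards, as the pmap_enter() hunk near the end of this patch does around VM_WAIT. A user-space sketch of the helper, assuming pthread mutexes in place of struct mtx:

#include <assert.h>
#include <pthread.h>

#define LS_MAX 4

struct lock_stack {
	pthread_mutex_t *ls_array[LS_MAX];
	int ls_top;
};

static void
ls_init(struct lock_stack *ls)
{

	ls->ls_top = 0;
}

static void
ls_push(struct lock_stack *ls, pthread_mutex_t *lock)
{

	assert(ls->ls_top < LS_MAX);	/* the KASSERT in the kernel version */
	ls->ls_array[ls->ls_top++] = lock;
	pthread_mutex_lock(lock);
}

/* Drop every held lock, most recently acquired first. */
static void
ls_popa(struct lock_stack *ls)
{

	while (ls->ls_top > 0)
		pthread_mutex_unlock(ls->ls_array[--ls->ls_top]);
}

int
main(void)
{
	pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;
	struct lock_stack ls;

	ls_init(&ls);
	ls_push(&ls, &page_lock);
	ls_push(&ls, &pmap_lock);
	/* Allocation failed, must sleep: release everything at once. */
	ls_popa(&ls);
	return (0);
}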
+static int +ls_trypush(struct lock_stack *ls, struct mtx *lock) +{ + + KASSERT(ls->ls_top < LS_MAX, ("lock stack overflow")); + + if (mtx_trylock(lock) == 0) + return (0); + + ls->ls_array[ls->ls_top] = lock; + ls->ls_top++; + return (1); +} + +#ifdef notyet +static void +ls_pop(struct lock_stack *ls) +{ + struct mtx *lock; + + KASSERT(ls->ls_top > 0, ("lock stack underflow")); + + ls->ls_top--; + lock = ls->ls_array[ls->ls_top]; + mtx_unlock(lock); +} +#endif + +static void +ls_popa(struct lock_stack *ls) +{ + struct mtx *lock; + + KASSERT(ls->ls_top > 0, ("lock stack underflow")); + + while (ls->ls_top > 0) { + ls->ls_top--; + lock = ls->ls_array[ls->ls_top]; + mtx_unlock(lock); + } +} +#ifdef INVARIANTS +extern void kdb_backtrace(void); +#endif /* * Move the kernel virtual free pointer to the next * 2MB. This is used to help improve performance @@ -420,6 +525,37 @@ vtopde(vm_offset_t va) return (PDmap + ((va >> PDRSHIFT) & mask)); } +/* + * Try to acquire a physical address lock while a pmap is locked. If we + * fail to trylock we unlock and lock the pmap directly and cache the + * locked pa in *locked. The caller should then restart their loop in case + * the virtual to physical mapping has changed. + */ +static int +pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) +{ + vm_paddr_t lockpa; + + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + atomic_add_long((volatile long *)&pmap_tryrelock_calls, 1); + lockpa = *locked; + *locked = pa; + if (lockpa) { + PA_LOCK_ASSERT(lockpa, MA_OWNED); + if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa)) + return (0); + PA_UNLOCK(lockpa); + } + if (PA_TRYLOCK(pa)) + return 0; + PMAP_UNLOCK(pmap); + atomic_add_int((volatile int *)&pmap_tryrelock_restart, 1); + PA_LOCK(pa); + PMAP_LOCK(pmap); + + return (EAGAIN); +} + static u_int64_t allocpages(vm_paddr_t *firstaddr, int n) { @@ -529,6 +665,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr) { vm_offset_t va; pt_entry_t *pte, *unused; + int i; /* * Create an initial set of page tables to run the kernel in. @@ -578,6 +715,12 @@ pmap_bootstrap(vm_paddr_t *firstaddr) /* Initialize the PAT MSR. */ pmap_init_pat(); + + /* Setup page locks. */ + for (i = 0; i < PA_LOCK_COUNT; i++) + mtx_init(&pa_lock[i].vp_lock, "page lock", NULL, + MTX_DEF | MTX_RECURSE | MTX_DUPOK); + } /* @@ -651,6 +794,14 @@ pmap_page_init(vm_page_t m) m->md.pat_mode = PAT_WRITE_BACK; } +struct mtx * +pmap_page_lockptr(vm_page_t m) +{ + + KASSERT(m != NULL, ("pmap_page_lockptr: NULL page")); + return (PA_LOCKPTR(VM_PAGE_TO_PHYS(m))); +} + /* * Initialize the pmap module. 
* Called by vm_init, to initialize any structures that the pmap @@ -1184,15 +1335,20 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, { pd_entry_t pde, *pdep; pt_entry_t pte; + vm_paddr_t pa; vm_page_t m; + pa = 0; m = NULL; - vm_page_lock_queues(); PMAP_LOCK(pmap); +retry: pdep = pmap_pde(pmap, va); if (pdep != NULL && (pde = *pdep)) { if (pde & PG_PS) { if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) { + if (pa_tryrelock(pmap, pde & PG_PS_FRAME, &pa)) + goto retry; + m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) | (va & PDRMASK)); vm_page_hold(m); @@ -1201,12 +1357,15 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, pte = *pmap_pde_to_pte(pdep, va); if ((pte & PG_V) && ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) { + if (pa_tryrelock(pmap, pte & PG_FRAME, &pa)) + goto retry; m = PHYS_TO_VM_PAGE(pte & PG_FRAME); vm_page_hold(m); } } } - vm_page_unlock_queues(); + if (pa) + PA_UNLOCK(pa); PMAP_UNLOCK(pmap); return (m); } @@ -1604,7 +1763,7 @@ pmap_pinit(pmap_t pmap) * race conditions. */ static vm_page_t -_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags) +_pmap_allocpte(pmap_t pmap, vm_paddr_t pa, vm_pindex_t ptepindex, int flags) { vm_page_t m, pdppg, pdpg; @@ -1619,9 +1778,9 @@ static vm_page_t VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { if (flags & M_WAITOK) { PMAP_UNLOCK(pmap); - vm_page_unlock_queues(); + PA_UNLOCK(pa); VM_WAIT; - vm_page_lock_queues(); + PA_LOCK(pa); PMAP_LOCK(pmap); } @@ -1661,7 +1820,7 @@ static vm_page_t pml4 = &pmap->pm_pml4[pml4index]; if ((*pml4 & PG_V) == 0) { /* Have to allocate a new pdp, recurse */ - if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index, + if (_pmap_allocpte(pmap, pa, NUPDE + NUPDPE + pml4index, flags) == NULL) { --m->wire_count; atomic_subtract_int(&cnt.v_wire_count, 1); @@ -1694,7 +1853,7 @@ static vm_page_t pml4 = &pmap->pm_pml4[pml4index]; if ((*pml4 & PG_V) == 0) { /* Have to allocate a new pd, recurse */ - if (_pmap_allocpte(pmap, NUPDE + pdpindex, + if (_pmap_allocpte(pmap, pa, NUPDE + pdpindex, flags) == NULL) { --m->wire_count; atomic_subtract_int(&cnt.v_wire_count, 1); @@ -1708,7 +1867,7 @@ static vm_page_t pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)]; if ((*pdp & PG_V) == 0) { /* Have to allocate a new pd, recurse */ - if (_pmap_allocpte(pmap, NUPDE + pdpindex, + if (_pmap_allocpte(pmap, pa, NUPDE + pdpindex, flags) == NULL) { --m->wire_count; atomic_subtract_int(&cnt.v_wire_count, @@ -1735,7 +1894,7 @@ static vm_page_t } static vm_page_t -pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags) +pmap_allocpde(pmap_t pmap, vm_paddr_t pa, vm_offset_t va, int flags) { vm_pindex_t pdpindex, ptepindex; pdp_entry_t *pdpe; @@ -1754,7 +1913,7 @@ retry: /* Allocate a pd page. */ ptepindex = pmap_pde_pindex(va); pdpindex = ptepindex >> NPDPEPGSHIFT; - pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, flags); + pdpg = _pmap_allocpte(pmap, pa, NUPDE + pdpindex, flags); if (pdpg == NULL && (flags & M_WAITOK)) goto retry; } @@ -1762,11 +1921,12 @@ retry: } static vm_page_t -pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) +pmap_allocpte(pmap_t pmap, vm_paddr_t pa, vm_offset_t va, int flags) { vm_pindex_t ptepindex; pd_entry_t *pd; vm_page_t m; + struct pv_list_head pv_list; KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, @@ -1787,7 +1947,8 @@ retry: * normal 4K page. 
*/ if (pd != NULL && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) { - if (!pmap_demote_pde(pmap, pd, va)) { + TAILQ_INIT(&pv_list); + if (!pmap_demote_pde(pmap, pd, va, &pv_list)) { /* * Invalidation of the 2MB page mapping may have caused * the deallocation of the underlying PD page. @@ -1808,7 +1969,7 @@ retry: * Here if the pte page isn't mapped, or if it has been * deallocated. */ - m = _pmap_allocpte(pmap, ptepindex, flags); + m = _pmap_allocpte(pmap, pa, ptepindex, flags); if (m == NULL && (flags & M_WAITOK)) goto retry; } @@ -2014,6 +2175,7 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active * allocate per-page pv entries until repromotion occurs, thereby * exacerbating the shortage of free pv entries. */ +#ifdef nomore static void pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq) { @@ -2029,8 +2191,8 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues if (m->hold_count || m->busy) continue; TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { + pmap = PV_PMAP(pv); va = pv->pv_va; - pmap = PV_PMAP(pv); /* Avoid deadlock and lock recursion. */ if (pmap > locked_pmap) PMAP_LOCK(pmap); @@ -2064,8 +2226,8 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues } } } +#endif - /* * free the pv_entry back to the free list */ @@ -2076,11 +2238,11 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv) struct pv_chunk *pc; int idx, field, bit; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); + + atomic_add_int(&pv_entry_count, -1); PV_STAT(pv_entry_frees++); PV_STAT(pv_entry_spare++); - pv_entry_count--; pc = pv_to_chunk(pv); idx = pv - &pc->pc_pventry[0]; field = idx / 64; @@ -2099,7 +2261,9 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv) /* entire chunk is free, return it */ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); dump_drop_page(m->phys_addr); - vm_page_unwire(m, 0); + KASSERT(m->wire_count == 1, ("wire_count == %d", m->wire_count)); + m->wire_count--; + atomic_subtract_int(&cnt.v_wire_count, 1); vm_page_free(m); } @@ -2108,7 +2272,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv) * when needed. */ static pv_entry_t -get_pv_entry(pmap_t pmap, int try) +get_pv_entry(pmap_t pmap) { static const struct timeval printinterval = { 60, 0 }; static struct timeval lastprint; @@ -2120,16 +2284,15 @@ static pv_entry_t vm_page_t m; PMAP_LOCK_ASSERT(pmap, MA_OWNED); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + + atomic_add_int(&pv_entry_count, 1); PV_STAT(pv_entry_allocs++); - pv_entry_count++; if (pv_entry_count > pv_entry_high_water) if (ratecheck(&lastprint, &printinterval)) printf("Approaching the limit on PV entries, consider " "increasing either the vm.pmap.shpgperproc or the " "vm.pmap.pv_entry_max sysctl.\n"); pq = NULL; -retry: pc = TAILQ_FIRST(&pmap->pm_pvchunk); if (pc != NULL) { for (field = 0; field < _NPCM; field++) { @@ -2156,26 +2319,9 @@ static pv_entry_t VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); if (m == NULL) { - if (try) { - pv_entry_count--; - PV_STAT(pc_chunk_tryfail++); - return (NULL); - } - /* - * Reclaim pv entries: At first, destroy mappings to inactive - * pages. After that, if a pv chunk entry is still needed, - * destroy mappings to active pages. 
- */ - if (pq == NULL) { - PV_STAT(pmap_collect_inactive++); - pq = &vm_page_queues[PQ_INACTIVE]; - } else if (pq == &vm_page_queues[PQ_INACTIVE]) { - PV_STAT(pmap_collect_active++); - pq = &vm_page_queues[PQ_ACTIVE]; - } else - panic("get_pv_entry: increase vm.pmap.shpgperproc"); - pmap_collect(pmap, pq); - goto retry; + PV_STAT(pc_chunk_tryfail++); + atomic_add_int(&pv_entry_count, -1); + return (NULL); } PV_STAT(pc_chunk_count++); PV_STAT(pc_chunk_allocs++); @@ -2189,9 +2335,63 @@ static pv_entry_t pv = &pc->pc_pventry[0]; TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); PV_STAT(pv_entry_spare += _NPCPV - 1); + return (pv); } +static void +pmap_pv_list_free(pmap_t pmap, struct pv_list_head *pv_list) +{ + pv_entry_t pv; + + while (!TAILQ_EMPTY(pv_list)) { + pv = TAILQ_FIRST(pv_list); + TAILQ_REMOVE(pv_list, pv, pv_list); + free_pv_entry(pmap, pv); + } +} + +static boolean_t +pmap_pv_list_alloc(pmap_t pmap, int count, struct pv_list_head *pv_list) +{ + pv_entry_t pv; + int i; + boolean_t slept; + + slept = FALSE; + for (i = 0; i < count; i++) { + while ((pv = get_pv_entry(pmap)) == NULL) { + PMAP_UNLOCK(pmap); + slept = TRUE; + VM_WAIT; + PMAP_LOCK(pmap); + } + TAILQ_INSERT_HEAD(pv_list, pv, pv_list); + } + + return (slept); +} + +static boolean_t +pmap_pv_list_try_alloc(pmap_t pmap, int count, struct pv_list_head *pv_list) +{ + pv_entry_t pv; + int i; + boolean_t success; + + success = TRUE; + for (i = 0; i < count; i++) { + if ((pv = get_pv_entry(pmap)) == NULL) { + success = FALSE; + pmap_pv_list_free(pmap, pv_list); + goto done; + } + TAILQ_INSERT_HEAD(pv_list, pv, pv_list); + } +done: + return (success); +} + /* * First find and then remove the pv entry for the specified pmap and virtual * address from the specified pv list. Returns the pv entry if found and NULL @@ -2203,7 +2403,8 @@ pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, { pv_entry_t pv; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { if (pmap == PV_PMAP(pv) && va == pv->pv_va) { TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); @@ -2219,27 +2420,37 @@ pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, * entries for each of the 4KB page mappings. */ static void -pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) +pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, + struct pv_list_head *pv_list) { struct md_page *pvh; pv_entry_t pv; vm_offset_t va_last; vm_page_t m; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + PA_LOCK_ASSERT(pa, MA_OWNED); KASSERT((pa & PDRMASK) == 0, ("pmap_pv_demote_pde: pa is not 2mpage aligned")); - /* - * Transfer the 2mpage's pv entry for this mapping to the first - * page's pv list. - */ + /* Transfer the 2mpage's pv entry for this mapping to the first + * page's pv list. + */ pvh = pa_to_pvh(pa); va = trunc_2mpage(va); pv = pmap_pvh_remove(pvh, pmap, va); KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found")); m = PHYS_TO_VM_PAGE(pa); +#ifdef INVARIANTS + if (va == 0) { + printf("inserting va==0\n"); + kdb_backtrace(); + } +#endif + vm_page_lock(m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); + vm_page_unlock(m); + /* Instantiate the remaining NPTEPG - 1 pv entries. 
*/ va_last = va + NBPDR - PAGE_SIZE; do { @@ -2247,8 +2458,20 @@ static void KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_pv_demote_pde: page %p is not managed", m)); va += PAGE_SIZE; - pmap_insert_entry(pmap, va, m); + pv = TAILQ_FIRST(pv_list); + TAILQ_REMOVE(pv_list, pv, pv_list); +#ifdef INVARIANTS + if (va == 0) { + printf("inserting va==0\n"); + kdb_backtrace(); + } +#endif + pv->pv_va = va; + vm_page_lock(m); + TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); + vm_page_unlock(m); } while (va < va_last); + } /* @@ -2264,7 +2487,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, v vm_offset_t va_last; vm_page_t m; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + PA_LOCK_ASSERT(pa, MA_OWNED); KASSERT((pa & PDRMASK) == 0, ("pmap_pv_promote_pde: pa is not 2mpage aligned")); @@ -2310,7 +2533,8 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_off { struct md_page *pvh; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); + pmap_pvh_free(&m->md, pmap, va); if (TAILQ_EMPTY(&m->md.pv_list)) { pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); @@ -2320,22 +2544,6 @@ pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_off } /* - * Create a pv entry for page at pa for - * (pmap, va). - */ -static void -pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) -{ - pv_entry_t pv; - - PMAP_LOCK_ASSERT(pmap, MA_OWNED); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); - pv = get_pv_entry(pmap, FALSE); - pv->pv_va = va; - TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); -} - -/* * Conditionally create a pv entry. */ static boolean_t @@ -2344,9 +2552,15 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + vm_page_lock_assert(m, MA_OWNED); if (pv_entry_count < pv_entry_high_water && - (pv = get_pv_entry(pmap, TRUE)) != NULL) { + (pv = get_pv_entry(pmap)) != NULL) { +#ifdef INVARIANTS + if (va == 0) { + printf("inserting va==0\n"); + kdb_backtrace(); + } +#endif pv->pv_va = va; TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); return (TRUE); @@ -2363,9 +2577,16 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm struct md_page *pvh; pv_entry_t pv; - mtx_assert(&vm_page_queue_mtx, MA_OWNED); + PA_LOCK_ASSERT(pa, MA_OWNED); + PMAP_LOCK_ASSERT(pmap, MA_OWNED); if (pv_entry_count < pv_entry_high_water && - (pv = get_pv_entry(pmap, TRUE)) != NULL) { + (pv = get_pv_entry(pmap)) != NULL) { +#ifdef INVARIANTS + if (va == 0) { + printf("inserting va==0\n"); + kdb_backtrace(); + } +#endif pv->pv_va = va; pvh = pa_to_pvh(pa); TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list); @@ -2393,7 +2614,8 @@ pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t new * mapping is invalidated. */ static boolean_t -pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) +pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, + struct pv_list_head *pv_list) { pd_entry_t newpde, oldpde; pt_entry_t *firstpte, newpte; @@ -2429,7 +2651,7 @@ static boolean_t DMAP_MAX_ADDRESS ? 
VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { free = NULL; - pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free); + pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free, pv_list); pmap_invalidate_page(pmap, trunc_2mpage(va)); pmap_free_zero_pages(free); CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx" @@ -2439,6 +2661,10 @@ static boolean_t if (va < VM_MAXUSER_ADDRESS) pmap->pm_stats.resident_count++; } + if (TAILQ_EMPTY(pv_list) && ((oldpde & PG_MANAGED) != 0)) { + if (pmap_pv_list_try_alloc(pmap, NPTEPG-1, pv_list) == FALSE) + return (FALSE); + } mptepa = VM_PAGE_TO_PHYS(mpte); firstpte = (pt_entry_t *)PHYS_TO_DMAP(mptepa); newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V; @@ -2496,7 +2722,7 @@ static boolean_t * the 2mpage to referencing the page table page. */ if ((oldpde & PG_MANAGED) != 0) - pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME); + pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME, pv_list); pmap_pde_demotions++; CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx" @@ -2509,7 +2735,7 @@ static boolean_t */ static int pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, - vm_page_t *free) + vm_page_t *free, struct pv_list_head *pv_list) { struct md_page *pvh; pd_entry_t oldpde; @@ -2536,6 +2762,10 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_o eva = sva + NBPDR; for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME); va < eva; va += PAGE_SIZE, m++) { + /* + * XXX do we need to individually lock each page? + * + */ if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if (oldpde & PG_A) @@ -2546,7 +2776,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_o } } if (pmap == kernel_pmap) { - if (!pmap_demote_pde(pmap, pdq, sva)) + if (!pmap_demote_pde(pmap, pdq, sva, pv_list)) panic("pmap_remove_pde: failed demotion"); } else { mpte = pmap_lookup_pt_page(pmap, sva); @@ -2563,6 +2793,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_o return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free)); } + /* * pmap_remove_pte: do the things to unmap a page in a process */ @@ -2586,6 +2817,7 @@ pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_o pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME); + vm_page_lock_assert(m, MA_OWNED); if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) vm_page_dirty(m); if (oldpte & PG_A) @@ -2602,6 +2834,7 @@ static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, vm_page_t *free) { pt_entry_t *pte; + vm_paddr_t pa = 0; PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((*pde & PG_V) == 0) @@ -2609,10 +2842,89 @@ pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_e pte = pmap_pde_to_pte(pde, va); if ((*pte & PG_V) == 0) return; + if (*pte & PG_MANAGED) + (void)pa_tryrelock(pmap, *pte & PG_FRAME, &pa); + pmap_remove_pte(pmap, pte, va, *pde, free); + if (pa) + PA_UNLOCK(pa); pmap_invalidate_page(pmap, va); } +static void +pmap_prealloc_pv_list(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, + struct pv_list_head *pv_list) +{ + vm_offset_t va_next; + pml4_entry_t *pml4e; + pdp_entry_t *pdpe; + pd_entry_t ptpaddr, *pde; + pt_entry_t *pte; + int i, alloc_count; + + alloc_count = 0; + PMAP_LOCK(pmap); + for (; sva < eva; sva = va_next) { + + pml4e = pmap_pml4e(pmap, sva); + if ((*pml4e & PG_V) == 0) { + va_next = (sva + NBPML4) & ~PML4MASK; + if (va_next < sva) + va_next = eva; + continue; + } + + pdpe = pmap_pml4e_to_pdpe(pml4e, sva); + if ((*pdpe & PG_V) == 0) { + va_next = (sva + NBPDP) & ~PDPMASK; + if 
+static void
+pmap_prealloc_pv_list(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
+    struct pv_list_head *pv_list)
+{
+	vm_offset_t va_next;
+	pml4_entry_t *pml4e;
+	pdp_entry_t *pdpe;
+	pd_entry_t ptpaddr, *pde;
+	pt_entry_t *pte;
+	int i, alloc_count;
+
+	alloc_count = 0;
+	PMAP_LOCK(pmap);
+	for (; sva < eva; sva = va_next) {
+
+		pml4e = pmap_pml4e(pmap, sva);
+		if ((*pml4e & PG_V) == 0) {
+			va_next = (sva + NBPML4) & ~PML4MASK;
+			if (va_next < sva)
+				va_next = eva;
+			continue;
+		}
+
+		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
+		if ((*pdpe & PG_V) == 0) {
+			va_next = (sva + NBPDP) & ~PDPMASK;
+			if (va_next < sva)
+				va_next = eva;
+			continue;
+		}
+
+		/*
+		 * Calculate index for next page table.
+		 */
+		va_next = (sva + NBPDR) & ~PDRMASK;
+		if (va_next < sva)
+			va_next = eva;
+
+		pde = pmap_pdpe_to_pde(pdpe, sva);
+		ptpaddr = *pde;
+
+		/*
+		 * Weed out invalid mappings.
+		 */
+		if (ptpaddr == 0)
+			continue;
+
+		/*
+		 * Check for large page.
+		 */
+		if ((ptpaddr & PG_PS) != 0) {
+			alloc_count++;
+			continue;
+		}
+		/*
+		 * Limit our scan to either the end of the va represented
+		 * by the current page table page, or to the end of the
+		 * range being removed.
+		 */
+		if (va_next > eva)
+			va_next = eva;
+
+		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
+		    sva += PAGE_SIZE) {
+			if (*pte == 0)
+				continue;
+		}
+	}
+	for (i = 0; i < alloc_count; i++)
+		pmap_pv_list_alloc(pmap, NPTEPG-1, pv_list);
+
+	PMAP_UNLOCK(pmap);
+}
+
 /*
  * Remove the given range of addresses from the specified map.
  *
@@ -2627,7 +2939,9 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offse
 	pdp_entry_t *pdpe;
 	pd_entry_t ptpaddr, *pde;
 	pt_entry_t *pte;
+	vm_paddr_t pa;
 	vm_page_t free = NULL;
+	struct pv_list_head pv_list;
 	int anyvalid;
 
 	/*
@@ -2636,11 +2950,19 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offse
 	if (pmap->pm_stats.resident_count == 0)
 		return;
 
-	anyvalid = 0;
+	pa = anyvalid = 0;
+	TAILQ_INIT(&pv_list);
 
-	vm_page_lock_queues();
+	/*
+	 * pre-allocate pvs
+	 *
+	 */
+	if ((pmap == kernel_pmap) &&
+	    (sva + PAGE_SIZE != eva))
+		pmap_prealloc_pv_list(pmap, sva, eva, &pv_list);
+
 	PMAP_LOCK(pmap);
-
+restart:
 	/*
 	 * special handling of removing one page.  a very
 	 * common operation and easy to short circuit some
@@ -2695,6 +3017,11 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offse
 		 * Check for large page.
 		 */
 		if ((ptpaddr & PG_PS) != 0) {
+			if (pa_tryrelock(pmap, ptpaddr & PG_FRAME, &pa)) {
+				va_next = sva;
+				continue;
+			}
+
 			/*
 			 * Are we removing the entire large page?  If not,
 			 * demote the mapping and fall through.
@@ -2706,9 +3033,9 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offse
 				 */
 				if ((ptpaddr & PG_G) == 0)
 					anyvalid = 1;
-				pmap_remove_pde(pmap, pde, sva, &free);
+				pmap_remove_pde(pmap, pde, sva, &free, &pv_list);
 				continue;
-			} else if (!pmap_demote_pde(pmap, pde, sva)) {
+			} else if (!pmap_demote_pde(pmap, pde, sva, &pv_list)) {
 				/* The large page mapping was destroyed. */
 				continue;
 			} else
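Note: two different recovery strategies coexist in pmap_remove(). A failed relock on a 2MB mapping only re-examines the same pde (va_next = sva; continue), while a failed relock on a 4KB pte, shown in the next hunk, jumps all the way back to restart, since the pmap lock was dropped and the walk state is stale. Condensed:

	restart:
		for (; sva < eva; sva = va_next) {
			/* ... locate the pde for sva ... */
			if ((ptpaddr & PG_PS) != 0) {
				if (pa_tryrelock(pmap, ptpaddr & PG_FRAME, &pa)) {
					va_next = sva;	/* retry this pde */
					continue;
				}
				/* ... remove or demote the 2MB mapping ... */
			}
			/* ... per-pte loop ... */
			if ((*pte & PG_MANAGED) &&
			    pa_tryrelock(pmap, *pte & PG_FRAME, &pa))
				goto restart;	/* pmap lock was dropped */
		}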
@@ -2725,23 +3052,35 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offse
 		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
 		    sva += PAGE_SIZE) {
+			int ret;
+
 			if (*pte == 0)
 				continue;
+			if ((*pte & PG_MANAGED) &&
+			    pa_tryrelock(pmap, *pte & PG_FRAME, &pa))
+				goto restart;
+
 			/*
 			 * The TLB entry for a PG_G mapping is invalidated
 			 * by pmap_remove_pte().
 			 */
 			if ((*pte & PG_G) == 0)
 				anyvalid = 1;
-			if (pmap_remove_pte(pmap, pte, sva, ptpaddr, &free))
+			ret = pmap_remove_pte(pmap, pte, sva, ptpaddr, &free);
+
+			if (ret)
 				break;
 		}
 	}
 out:
+	if (pa)
+		PA_UNLOCK(pa);
 	if (anyvalid)
 		pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
+	if (!TAILQ_EMPTY(&pv_list))
+		pmap_pv_list_free(pmap, &pv_list);
+
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -2769,17 +3108,19 @@ pmap_remove_all(vm_page_t m)
 	pd_entry_t *pde;
 	vm_offset_t va;
 	vm_page_t free;
-
+	struct pv_list_head pv_list;
+
+	TAILQ_INIT(&pv_list);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_remove_all: page %p is fictitious", m));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_assert(m, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
-		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
+		va = pv->pv_va;
 		pde = pmap_pde(pmap, va);
-		(void)pmap_demote_pde(pmap, pde, va);
+		(void)pmap_demote_pde(pmap, pde, va, &pv_list);
 		PMAP_UNLOCK(pmap);
 	}
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -2873,6 +3214,8 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offs
 	pd_entry_t ptpaddr, *pde;
 	pt_entry_t *pte;
 	int anychanged;
+	vm_paddr_t pa;
+	struct pv_list_head pv_list;
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
 		pmap_remove(pmap, sva, eva);
@@ -2883,10 +3226,11 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offs
 	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
 		return;
 
+	TAILQ_INIT(&pv_list);
+	pa = 0;
 	anychanged = 0;
-
-	vm_page_lock_queues();
 	PMAP_LOCK(pmap);
+restart:
 	for (; sva < eva; sva = va_next) {
 
 		pml4e = pmap_pml4e(pmap, sva);
@@ -2934,7 +3278,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offs
 			if (pmap_protect_pde(pmap, pde, sva, prot))
 				anychanged = 1;
 			continue;
-		} else if (!pmap_demote_pde(pmap, pde, sva)) {
+		} else if (!pmap_demote_pde(pmap, pde, sva, &pv_list)) {
 			/* The large page mapping was destroyed. */
 			continue;
 		}
@@ -2954,6 +3298,8 @@ retry:
 				continue;
 			if (pbits & PG_MANAGED) {
 				m = NULL;
+				if (pa_tryrelock(pmap, pbits & PG_FRAME, &pa))
+					goto restart;
 				if (pbits & PG_A) {
 					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
 					vm_page_flag_set(m, PG_REFERENCED);
@@ -2982,9 +3328,10 @@ retry:
 			}
 		}
 	}
+	if (pa)
+		PA_UNLOCK(pa);
 	if (anychanged)
 		pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 }
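Note: the mtx_assert(&vm_page_queue_mtx, ...) assertions are being rewritten as vm_page_lock_assert(m, ...) throughout. The new vm_page_lock family is not defined in this diff; presumably the macros resolve to a mutex selected by the page's physical address, the same lock the PA_LOCK*/PA_LOCKPTR macros name, along these lines (assumed definitions, not necessarily the real vm/vm_page.h):

	#define	vm_page_lockptr(m)	PA_LOCKPTR(VM_PAGE_TO_PHYS(m))
	#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr(m))
	#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr(m))
	#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr(m))
	#define	vm_page_lock_assert(m, a)	mtx_assert(vm_page_lockptr(m), (a))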
@@ -3123,28 +3470,43 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 	vm_paddr_t pa;
 	pd_entry_t *pde;
 	pt_entry_t *pte;
-	vm_paddr_t opa;
+	vm_paddr_t opa, lockedpa;
 	pt_entry_t origpte, newpte;
 	vm_page_t mpte, om;
-	boolean_t invlva;
+	boolean_t invlva, opalocked;
+	pv_entry_t pv;
+	struct lock_stack ls;
 
 	va = trunc_page(va);
+
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 	    va));
 
 	mpte = NULL;
-
-	vm_page_lock_queues();
-	PMAP_LOCK(pmap);
-
+	pv = NULL;
+	lockedpa = pa = VM_PAGE_TO_PHYS(m);
+	opa = 0;
+	opalocked = FALSE;
+	ls_init(&ls);
+	ls_push(&ls, PA_LOCKPTR(lockedpa));
+	ls_push(&ls, PMAP_LOCKPTR(pmap));
+	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+		while ((pv = get_pv_entry(pmap)) == NULL) {
+			ls_popa(&ls);
+			VM_WAIT;
+			ls_push(&ls, PA_LOCKPTR(lockedpa));
+			ls_push(&ls, PMAP_LOCKPTR(pmap));
+		}
+	}
+
+restart:
 	/*
 	 * In the case that a page table page is not
 	 * resident, we are creating it here.
 	 */
-	if (va < VM_MAXUSER_ADDRESS) {
-		mpte = pmap_allocpte(pmap, va, M_WAITOK);
-	}
+	if (va < VM_MAXUSER_ADDRESS && mpte == NULL)
+		mpte = pmap_allocpte(pmap, lockedpa, va, M_WAITOK);
 
 	pde = pmap_pde(pmap, va);
 	if (pde != NULL && (*pde & PG_V) != 0) {
@@ -3154,11 +3516,35 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 	} else
 		panic("pmap_enter: invalid page directory va=%#lx", va);
 
-	pa = VM_PAGE_TO_PHYS(m);
 	om = NULL;
 	origpte = *pte;
+	if (opa && (opa != (origpte & PG_FRAME))) {
+		ls_popa(&ls);
+		ls_push(&ls, PA_LOCKPTR(lockedpa));
+		ls_push(&ls, PMAP_LOCKPTR(pmap));
+		opalocked = FALSE;
+		opa = 0;
+		goto restart;
+	}
+
 	opa = origpte & PG_FRAME;
-
+	if (opa && (opa != lockedpa) && (opalocked == FALSE)) {
+		opalocked = TRUE;
+		if (ls_trypush(&ls, PA_LOCKPTR(opa)) == 0) {
+			ls_popa(&ls);
+			if ((uintptr_t)PA_LOCKPTR(lockedpa) <
+			    (uintptr_t)PA_LOCKPTR(opa)) {
+				ls_push(&ls, PA_LOCKPTR(lockedpa));
+				ls_push(&ls, PA_LOCKPTR(opa));
+			} else {
+				ls_push(&ls, PA_LOCKPTR(opa));
+				ls_push(&ls, PA_LOCKPTR(lockedpa));
+			}
+			ls_push(&ls, PMAP_LOCKPTR(pmap));
+			goto restart;
+		}
+	}
+
 	/*
 	 * Mapping has not changed, must be protection or wiring change.
 	 */
@@ -3190,6 +3576,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 		}
 		goto validate;
 	}
+
 	/*
 	 * Mapping has changed, invalidate old range and fall through to
 	 * handle validating new mapping.
@@ -3199,6 +3586,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 			pmap->pm_stats.wired_count--;
 		if (origpte & PG_MANAGED) {
 			om = PHYS_TO_VM_PAGE(opa);
+			vm_page_lock_assert(om, MA_OWNED);
 			pmap_remove_entry(pmap, om, va);
 		}
 		if (mpte != NULL) {
@@ -3216,8 +3604,16 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
-		pmap_insert_entry(pmap, va, m);
+#ifdef INVARIANTS
+		if (va == 0) {
+			printf("inserting va==0\n");
+			kdb_backtrace();
+		}
+#endif
+		pv->pv_va = va;
+		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 		pa |= PG_MANAGED;
+		pv = NULL;
 	}
 
 	/*
@@ -3227,6 +3623,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 		pmap->pm_stats.wired_count++;
 
 validate:
+	vm_page_lock_assert(m, MA_OWNED);
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 */
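Note: struct lock_stack and the ls_* helpers are also new in this series and their definitions are not included here. From their use in pmap_enter() above, the implied interface is a small LIFO of lock pointers, so that everything held can be dropped around a blocking point (VM_WAIT, a contended page lock) and re-acquired in a safe order. A reconstruction of the presumed interface:

	#define	LS_MAX	4		/* pmap_enter() pushes at most three */

	struct lock_stack {
		struct mtx	*ls_array[LS_MAX];
		int		ls_top;
	};

	static void	ls_init(struct lock_stack *ls);
	/* Acquire m and record it. */
	static void	ls_push(struct lock_stack *ls, struct mtx *m);
	/* Try to acquire m; returns 0 on failure, nonzero and recorded on success. */
	static int	ls_trypush(struct lock_stack *ls, struct mtx *m);
	/* Release every recorded lock in LIFO order. */
	static void	ls_popa(struct lock_stack *ls);

The opa relock path above then sorts the two page locks by pointer address before re-pushing them, the usual way of imposing a global order on a lock pair whose identities are only known at run time.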
@@ -3282,8 +3679,9 @@ validate:
 	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
 		pmap_promote_pde(pmap, pde, va);
 
-	vm_page_unlock_queues();
-	PMAP_UNLOCK(pmap);
+	if (pv != NULL)
+		free_pv_entry(pmap, pv);
+	ls_popa(&ls);
 }
 
 /*
@@ -3297,10 +3695,12 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_pag
 {
 	pd_entry_t *pde, newpde;
 	vm_page_t free, mpde;
+	vm_paddr_t pa;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_assert(m, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((mpde = pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
+	pa = VM_PAGE_TO_PHYS(m);
+	if ((mpde = pmap_allocpde(pmap, pa, va, M_NOWAIT)) == NULL) {
 		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
 		    " in pmap %p", va, pmap);
 		return (FALSE);
@@ -3379,9 +3779,10 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start,
 	psize = atop(end - start);
 	mpte = NULL;
 	m = m_start;
-	PMAP_LOCK(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		va = start + ptoa(diff);
+		vm_page_lock(m);
+		PMAP_LOCK(pmap);
 		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
 		    (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
 		    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
@@ -3390,9 +3791,10 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start,
 		else
 			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
 			    mpte);
+		PMAP_UNLOCK(pmap);
+		vm_page_unlock(m);
 		m = TAILQ_NEXT(m, listq);
 	}
-	PMAP_UNLOCK(pmap);
 }
 
 /*
@@ -3408,6 +3810,7 @@ void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
+	vm_page_lock_assert(m, MA_OWNED);
 	PMAP_LOCK(pmap);
 	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
 	PMAP_UNLOCK(pmap);
@@ -3424,7 +3827,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t v
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_assert(m, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	/*
@@ -3457,7 +3860,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t v
 			mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
 			mpte->wire_count++;
 		} else {
-			mpte = _pmap_allocpte(pmap, ptepindex,
+			pa = VM_PAGE_TO_PHYS(m);
+			mpte = _pmap_allocpte(pmap, pa, ptepindex,
 			    M_NOWAIT);
 			if (mpte == NULL)
 				return (mpte);
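Note: pmap_enter_object() now locks each page before taking the pmap lock and drops both before advancing, rather than holding PMAP_LOCK across the whole walk. The shape of the new loop, condensed from the hunks above:

	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		va = start + ptoa(diff);
		vm_page_lock(m);	/* per-page lock replaces the queue lock */
		PMAP_LOCK(pmap);
		/* ... pmap_enter_pde() or pmap_enter_quick_locked() ... */
		PMAP_UNLOCK(pmap);
		vm_page_unlock(m);	/* both dropped before the next page */
		m = TAILQ_NEXT(m, listq);
	}

This keeps the page lock ordered before the pmap lock, matching the order pmap_enter() establishes with its lock stack.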
@@ -3584,7 +3988,8 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 		PMAP_LOCK(pmap);
 		for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
 		    size; pa += NBPDR) {
-			pdpg = pmap_allocpde(pmap, addr, M_NOWAIT);
+			pa = VM_PAGE_TO_PHYS(p);
+			pdpg = pmap_allocpde(pmap, pa, addr, M_NOWAIT);
 			if (pdpg == NULL) {
 				/*
 				 * The creation of mappings below is only an
@@ -3629,28 +4034,33 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, bo
 {
 	pd_entry_t *pde;
 	pt_entry_t *pte;
-	boolean_t are_queues_locked;
+	vm_paddr_t pa;
+	boolean_t slept;
+	struct pv_list_head pv_list;
 
-	are_queues_locked = FALSE;
+	TAILQ_INIT(&pv_list);
 	/*
 	 * Wiring is not a hardware characteristic so there is no need to
 	 * invalidate TLB.
 	 */
+	pa = 0;
+	PMAP_LOCK(pmap);
retry:
-	PMAP_LOCK(pmap);
+	slept = FALSE;
 	pde = pmap_pde(pmap, va);
+	if ((*pde & PG_PS) && (!wired != ((*pde & PG_W) == 0))) {
+		if (TAILQ_EMPTY(&pv_list))
+			slept = pmap_pv_list_alloc(pmap, NPTEPG-1, &pv_list);
+		if (slept)
+			goto retry;
+
+		if (pa_tryrelock(pmap, *pde & PG_FRAME, &pa))
+			goto retry;
+	}
 	if ((*pde & PG_PS) != 0) {
 		if (!wired != ((*pde & PG_W) == 0)) {
-			if (!are_queues_locked) {
-				are_queues_locked = TRUE;
-				if (!mtx_trylock(&vm_page_queue_mtx)) {
-					PMAP_UNLOCK(pmap);
-					vm_page_lock_queues();
-					goto retry;
-				}
-			}
-			if (!pmap_demote_pde(pmap, pde, va))
+			if (!pmap_demote_pde(pmap, pde, va, &pv_list))
 				panic("pmap_change_wiring: demotion failed");
 		} else
 			goto out;
@@ -3664,13 +4074,13 @@ retry:
 		atomic_clear_long(pte, PG_W);
 	}
 out:
-	if (are_queues_locked)
-		vm_page_unlock_queues();
+	if (pa)
+		PA_UNLOCK(pa);
+	if (!TAILQ_EMPTY(&pv_list))
+		pmap_pv_list_free(pmap, &pv_list);
 	PMAP_UNLOCK(pmap);
 }
-
-
 /*
  * Copy the range specified by src_addr/len
  * from the source map to the range dst_addr/len
@@ -3687,11 +4097,12 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 	vm_offset_t addr;
 	vm_offset_t end_addr = src_addr + len;
 	vm_offset_t va_next;
+	vm_paddr_t pa;
 
 	if (dst_addr != src_addr)
 		return;
 
-	vm_page_lock_queues();
+	free = NULL;
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -3735,7 +4146,11 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 			continue;
 
 		if (srcptepaddr & PG_PS) {
-			dstmpde = pmap_allocpde(dst_pmap, addr, M_NOWAIT);
+			pa = srcptepaddr & PG_PS_FRAME;
+			if (PA_TRYLOCK(pa) == 0)
+				continue;
+
+			dstmpde = pmap_allocpde(dst_pmap, pa, addr, M_NOWAIT);
 			if (dstmpde == NULL)
 				break;
 			pde = (pd_entry_t *)
@@ -3749,6 +4164,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 				    NBPDR / PAGE_SIZE;
 			} else
 				dstmpde->wire_count--;
+			PA_UNLOCK(pa);
 			continue;
 		}
 
@@ -3770,12 +4186,17 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 			 * we only virtual copy managed pages
 			 */
 			if ((ptetemp & PG_MANAGED) != 0) {
+				pa = ptetemp & PG_FRAME;
+				if (PA_TRYLOCK(pa) == 0)
+					break;
 				if (dstmpte != NULL &&
 				    dstmpte->pindex == pmap_pde_pindex(addr))
 					dstmpte->wire_count++;
 				else if ((dstmpte = pmap_allocpte(dst_pmap,
-				    addr, M_NOWAIT)) == NULL)
+				    pa, addr, M_NOWAIT)) == NULL) {
+					PA_UNLOCK(pa);
 					goto out;
+				}
 				dst_pte = (pt_entry_t *)
 				    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
 				dst_pte = &dst_pte[pmap_pte_index(addr)];
@@ -3800,6 +4221,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 					}
 					goto out;
 				}
+				PA_UNLOCK(pa);
 				if (dstmpte->wire_count >= srcmpte->wire_count)
 					break;
 			}
@@ -3808,7 +4230,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 		}
 	}
 out:
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }
@@ -3888,7 +4309,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 
 	if (m->flags & PG_FICTITIOUS)
 		return FALSE;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_assert(m, MA_OWNED);
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		if (PV_PMAP(pv) == pmap) {
 			return TRUE;
@@ -3924,6 +4345,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return (count);
+	vm_page_lock_assert(m, MA_OWNED);
 	count = pmap_pvh_wired_mappings(&m->md, count);
 	return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
 }
@@ -3940,7 +4362,6 @@ pmap_pvh_wired_mappings(struct md_page *pvh, int c
 	pt_entry_t *pte;
 	pv_entry_t pv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
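Note: pmap_copy() gets the cheapest treatment. Since pre-copying mappings at fork time is purely an optimization, a contended page lock is simply not worth waiting for; condensed from the hunks above:

	if ((ptetemp & PG_MANAGED) != 0) {
		pa = ptetemp & PG_FRAME;
		if (PA_TRYLOCK(pa) == 0)
			break;	/* give up; the child will just fault */
		/* ... duplicate the pte and its pv entry ... */
		PA_UNLOCK(pa);
	}

The 2MB-mapping branch does the same with continue, skipping only that one entry.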
@@ -3963,7 +4384,7 @@ pmap_page_is_mapped(vm_page_t m)
 	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (FALSE);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	vm_page_lock_assert(m, MA_OWNED);
 	if (TAILQ_EMPTY(&m->md.pv_list)) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 		return (!TAILQ_EMPTY(&pvh->pv_list));
@@ -3986,10 +4407,11 @@ pmap_remove_pages(pmap_t pmap)
 	pt_entry_t *pte, tpte;
 	vm_page_t free = NULL;
 	vm_page_t m, mpte, mt;
+	vm_paddr_t pa;
 	pv_entry_t pv;
 	struct md_page *pvh;
 	struct pv_chunk *pc, *npc;
-	int field, idx;
+	int field, idx, iter;
 	int64_t bit;
 	uint64_t inuse, bitmask;
 	int allfree;
@@ -3998,11 +4420,14 @@ pmap_remove_pages(pmap_t pmap)
 		printf("warning: pmap_remove_pages called with non-current pmap\n");
 		return;
 	}
-	vm_page_lock_queues();
+	pa = 0;
+	iter = 0;
 	PMAP_LOCK(pmap);
+restart:
 	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
 		allfree = 1;
 		for (field = 0; field < _NPCM; field++) {
+			iter++;
 			inuse = (~(pc->pc_map[field])) & pc_freemask[field];
 			while (inuse != 0) {
 				bit = bsfq(inuse);
@@ -4023,7 +4448,8 @@ pmap_remove_pages(pmap_t pmap)
 					tpte = *pte & ~PG_PTE_PAT;
 				}
 				if ((tpte & PG_V) == 0)
-					panic("bad pte");
+					panic("bad pte tpte=%ld va=%lx idx=%d iter=%d",
+					    tpte, pv->pv_va, idx, iter);
 
 /*
  * We cannot remove wired pages from a process' mapping at this time
@@ -4033,6 +4459,9 @@ pmap_remove_pages(pmap_t pmap)
 					continue;
 				}
 
+				if (pa_tryrelock(pmap, tpte & PG_FRAME, &pa))
+					goto restart;
+
 				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
 				KASSERT(m->phys_addr == (tpte & PG_FRAME),
 				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
@@ -4055,11 +4484,10 @@ pmap_remove_pages(pmap_t pmap)
 					} else
 						vm_page_dirty(m);
 				}
-
 				/* Mark free */
 				PV_STAT(pv_entry_frees++);
 				PV_STAT(pv_entry_spare++);
-				pv_entry_count--;
+				atomic_add_int(&pv_entry_count, -1);
 				pc->pc_map[field] |= bitmask;
 				if ((tpte & PG_PS) != 0) {
 					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
@@ -4099,12 +4527,17 @@ pmap_remove_pages(pmap_t pmap)
 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
 			dump_drop_page(m->phys_addr);
-			vm_page_unwire(m, 0);
+			KASSERT(m->wire_count == 1,
+			    ("wire_count == %d", m->wire_count));
+			m->wire_count = 0;
+			atomic_subtract_int(&cnt.v_wire_count, 1);
 			vm_page_free(m);
 		}
 	}
+	if (pa)
+		PA_UNLOCK(pa);
+
 	pmap_invalidate_all(pmap);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
 	pmap_free_zero_pages(free);
 }
@@ -4139,7 +4572,6 @@ pmap_is_modified_pvh(struct md_page *pvh)
 	pmap_t pmap;
 	boolean_t rv;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	rv = FALSE;
 	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
@@ -4189,19 +4621,22 @@ pmap_remove_write(vm_page_t m)
 	pd_entry_t *pde;
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
+	struct pv_list_head pv_list;
 
 	if ((m->flags & PG_FICTITIOUS) != 0 ||
 	    (m->flags & PG_WRITEABLE) == 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
+	TAILQ_INIT(&pv_list);
+	vm_page_lock_assert(m, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
-		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
+		va = pv->pv_va;
 		pde = pmap_pde(pmap, va);
 		if ((*pde & PG_RW) != 0)
-			(void)pmap_demote_pde(pmap, pde, va);
+			(void)pmap_demote_pde(pmap, pde, va, &pv_list);
 		PMAP_UNLOCK(pmap);
 	}
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
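Note: two knock-on effects of losing the global queue mutex show up in pmap_remove_pages(): pv_entry_count becomes an atomically updated, and therefore only approximately consistent, counter, and the page-table page is freed by hand, presumably because vm_page_unwire() would assert a page lock that is not held at that point:

	/* was: pv_entry_count--; serialized by vm_page_queue_mtx */
	atomic_add_int(&pv_entry_count, -1);

	/* was: vm_page_unwire(m, 0); the wire count is known to be 1 here */
	KASSERT(m->wire_count == 1, ("wire_count == %d", m->wire_count));
	m->wire_count = 0;
	atomic_subtract_int(&cnt.v_wire_count, 1);
	vm_page_free(m);

The readers of pv_entry_count, the pv_entry_high_water checks earlier in the patch, tolerate a slightly stale value, so no stronger synchronization appears to be needed.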
@@ -4248,19 +4683,22 @@ pmap_ts_referenced(vm_page_t m)
 	pt_entry_t *pte;
 	vm_offset_t va;
 	int rtval = 0;
+	struct pv_list_head pv_list;
 
 	if (m->flags & PG_FICTITIOUS)
 		return (rtval);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
+	TAILQ_INIT(&pv_list);
+	vm_page_lock_assert(m, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
-		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
+		va = pv->pv_va;
 		pde = pmap_pde(pmap, va);
 		oldpde = *pde;
 		if ((oldpde & PG_A) != 0) {
-			if (pmap_demote_pde(pmap, pde, va)) {
+			if (pmap_demote_pde(pmap, pde, va, &pv_list)) {
 				if ((oldpde & PG_W) == 0) {
 					/*
 					 * Remove the mapping to a single page
@@ -4320,19 +4758,21 @@ pmap_clear_modify(vm_page_t m)
 	pd_entry_t oldpde, *pde;
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
+	struct pv_list_head pv_list;
 
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	TAILQ_INIT(&pv_list);
+	vm_page_lock_assert(m, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
-		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
+		va = pv->pv_va;
 		pde = pmap_pde(pmap, va);
 		oldpde = *pde;
 		if ((oldpde & PG_RW) != 0) {
-			if (pmap_demote_pde(pmap, pde, va)) {
+			if (pmap_demote_pde(pmap, pde, va, &pv_list)) {
 				if ((oldpde & PG_W) == 0) {
 					/*
 					 * Write protect the mapping to a
@@ -4385,19 +4825,21 @@ pmap_clear_reference(vm_page_t m)
 	pd_entry_t oldpde, *pde;
 	pt_entry_t *pte;
 	vm_offset_t va;
+	struct pv_list_head pv_list;
 
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	TAILQ_INIT(&pv_list);
+	vm_page_lock_assert(m, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
-		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
+		va = pv->pv_va;
 		pde = pmap_pde(pmap, va);
 		oldpde = *pde;
 		if ((oldpde & PG_A) != 0) {
-			if (pmap_demote_pde(pmap, pde, va)) {
+			if (pmap_demote_pde(pmap, pde, va, &pv_list)) {
 				/*
 				 * Remove the mapping to a single page so
 				 * that a subsequent access may repromote.
@@ -4644,6 +5086,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t
 	pt_entry_t *pte;
 	int cache_bits_pte, cache_bits_pde, error;
 	boolean_t changed;
+	struct pv_list_head pv_list;
 
 	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
 	base = trunc_page(va);
@@ -4660,6 +5103,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t
 	cache_bits_pde = pmap_cache_bits(mode, 1);
 	cache_bits_pte = pmap_cache_bits(mode, 0);
 	changed = FALSE;
+	TAILQ_INIT(&pv_list);
 
 	/*
 	 * Pages that aren't mapped aren't supported.  Also break down 2MB pages
@@ -4717,7 +5161,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t
 				tmpva += NBPDR;
 				continue;
 			}
-			if (!pmap_demote_pde(kernel_pmap, pde, tmpva))
+			if (!pmap_demote_pde(kernel_pmap, pde, tmpva, &pv_list))
 				return (ENOMEM);
 		}
 		pte = pmap_pde_to_pte(pde, tmpva);
@@ -4877,10 +5321,10 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
 			/*
 			 * Modified by someone else
 			 */
-			vm_page_lock_queues();
+			vm_page_lock(m);
 			if (m->dirty || pmap_is_modified(m))
 				val |= MINCORE_MODIFIED_OTHER;
-			vm_page_unlock_queues();
+			vm_page_unlock(m);
 		}
 		/*
 		 * Referenced by us
@@ -4891,13 +5335,13 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
 			/*
 			 * Referenced by someone else
 			 */
-			vm_page_lock_queues();
+			vm_page_lock(m);
 			if ((m->flags & PG_REFERENCED) ||
 			    pmap_ts_referenced(m)) {
 				val |= MINCORE_REFERENCED_OTHER;
 				vm_page_flag_set(m, PG_REFERENCED);
 			}
-			vm_page_unlock_queues();
+			vm_page_unlock(m);
 		}
 	}
 	return val;
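Note: pmap_mincore() shows the conversion in its simplest form, a one-for-one swap of the global queue lock for the lock of the page being inspected:

	vm_page_lock(m);		/* was: vm_page_lock_queues() */
	if (m->dirty || pmap_is_modified(m))
		val |= MINCORE_MODIFIED_OTHER;
	vm_page_unlock(m);		/* was: vm_page_unlock_queues() */

Every call site in this patch follows one of the patterns annotated above: a straight lock swap, a trylock-and-skip, or a tryrelock-and-restart.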