Index: vm_map.c
===================================================================
RCS file: /usr/repo/src/sys/vm/vm_map.c,v
retrieving revision 1.379
diff -u -p -r1.379 vm_map.c
--- vm_map.c	21 Oct 2006 21:02:04 -0000	1.379
+++ vm_map.c	3 Dec 2006 12:51:37 -0000
@@ -437,6 +437,16 @@ _vm_map_unlock(vm_map_t map, const char
 	_sx_xunlock(&map->lock, file, line);
 }
 
+static int
+vm_map_locked(vm_map_t map)
+{
+
+	if (map->system_map)
+		return (mtx_owned(&map->system_mtx));
+	else
+		return (sx_xlocked(&map->lock));
+}
+
 void
 _vm_map_lock_read(vm_map_t map, const char *file, int line)
 {
@@ -2775,6 +2785,7 @@ vm_map_growstack(struct proc *p, vm_offs
 	size_t grow_amount, max_grow;
 	rlim_t stacklim, vmemlim;
 	int is_procstack, rv;
+	int unlock;
 
 Retry:
 	PROC_LOCK(p);
@@ -2782,11 +2793,17 @@ Retry:
 	vmemlim = lim_cur(p, RLIMIT_VMEM);
 	PROC_UNLOCK(p);
 
-	vm_map_lock_read(map);
+	if (vm_map_locked(map))
+		unlock = 0;
+	else {
+		vm_map_lock_read(map);
+		unlock = 1;
+	}
 
 	/* If addr is already in the entry range, no need to grow.*/
 	if (vm_map_lookup_entry(map, addr, &prev_entry)) {
-		vm_map_unlock_read(map);
+		if (unlock)
+			vm_map_unlock_read(map);
 		return (KERN_SUCCESS);
 	}
 
@@ -2799,7 +2816,8 @@ Retry:
 	 * never a growable entry, so it suffices to check the flags.
 	 */
 	if (!(next_entry->eflags & MAP_ENTRY_GROWS_DOWN)) {
-		vm_map_unlock_read(map);
+		if (unlock)
+			vm_map_unlock_read(map);
 		return (KERN_SUCCESS);
 	}
 	stack_entry = next_entry;
@@ -2840,7 +2858,8 @@ Retry:
 	}
 
 	if (grow_amount > stack_entry->avail_ssize) {
-		vm_map_unlock_read(map);
+		if (unlock)
+			vm_map_unlock_read(map);
 		return (KERN_NO_SPACE);
 	}
 
@@ -2854,12 +2873,13 @@ Retry:
 	 * intended by limiting the stack size.
 	 */
 	if (grow_amount > max_grow) {
-		if (vm_map_lock_upgrade(map))
+		if (unlock && vm_map_lock_upgrade(map))
 			goto Retry;
 
 		stack_entry->avail_ssize = max_grow;
 
-		vm_map_unlock(map);
+		if (unlock)
+			vm_map_unlock(map);
 		return (KERN_NO_SPACE);
 	}
 
@@ -2870,7 +2890,8 @@ Retry:
 	 * limit.
 	 */
 	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
-		vm_map_unlock_read(map);
+		if (unlock)
+			vm_map_unlock_read(map);
 		return (KERN_NO_SPACE);
 	}
 
@@ -2884,11 +2905,12 @@ Retry:
 
 	/* If we would blow our VMEM resource limit, no go */
 	if (map->size + grow_amount > vmemlim) {
-		vm_map_unlock_read(map);
+		if (unlock)
+			vm_map_unlock_read(map);
 		return (KERN_NO_SPACE);
 	}
 
-	if (vm_map_lock_upgrade(map))
+	if (unlock && vm_map_lock_upgrade(map))
 		goto Retry;
 
 	if (stack_entry == next_entry) {
@@ -2963,7 +2985,8 @@ Retry:
 	if (rv == KERN_SUCCESS && is_procstack)
 		vm->vm_ssize += btoc(grow_amount);
 
-	vm_map_unlock(map);
+	if (unlock)
+		vm_map_unlock(map);
 
 	/*
 	 * Heed the MAP_WIREFUTURE flag if it was set for this process.
@@ -3064,16 +3087,23 @@ vm_map_lookup(vm_map_t *var_map,	/* IN/
 	vm_map_t map = *var_map;
 	vm_prot_t prot;
 	vm_prot_t fault_type = fault_typea;
+	int unlock;
 
 RetryLookup:;
 	/*
 	 * Lookup the faulting address.
 	 */
-	vm_map_lock_read(map);
+	if (vm_map_locked(map))
+		unlock = 0;
+	else {
+		vm_map_lock_read(map);
+		unlock = 1;
+	}
 
 #define	RETURN(why) \
 	{ \
-	vm_map_unlock_read(map); \
+	if (unlock) \
+		vm_map_unlock_read(map); \
 	return (why); \
 	}
 
@@ -3102,7 +3132,8 @@ RetryLookup:;
 		vm_map_t old_map = map;
 
 		*var_map = map = entry->object.sub_map;
-		vm_map_unlock_read(old_map);
+		if (unlock)
+			vm_map_unlock_read(old_map);
 		goto RetryLookup;
 	}
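
Both hunks follow the same pattern: vm_map_growstack() and vm_map_lookup() no longer assume they are the ones taking the map lock. The new vm_map_locked() helper reports whether the current thread already holds the lock (the mutex for system maps, the sx lock otherwise), the answer is cached in "unlock", and every exit path then drops only a lock this call actually acquired. Likewise, the changed upgrade sites ("if (unlock && vm_map_lock_upgrade(map))") only attempt an upgrade when this call took the read lock itself; when unlock is 0 the caller is assumed to hold the map exclusively already, so the code falls through and modifies the entry under the caller's lock, presumably so these functions can be entered from a path that already has the map locked.

Below is a minimal user-space sketch of that idiom, assuming nothing beyond POSIX threads; the names (fake_map, fake_map_locked, fake_map_op) are hypothetical stand-ins for vm_map_t, vm_map_locked() and the vm_map_lock*() macros, not FreeBSD code:

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for vm_map_t. */
struct fake_map {
	pthread_mutex_t	lock;
	pthread_t	owner;		/* meaningful only while locked */
	bool		locked;
};

/* Analogue of vm_map_locked(): does the calling thread hold the lock? */
static bool
fake_map_locked(struct fake_map *m)
{
	/* Simplified; in the kernel, mtx_owned()/sx_xlocked() do this safely. */
	return (m->locked && pthread_equal(m->owner, pthread_self()));
}

/* An operation that may be entered with or without the lock already held. */
static void
fake_map_op(struct fake_map *m)
{
	int unlock;

	if (fake_map_locked(m))
		unlock = 0;		/* caller already holds the lock */
	else {
		pthread_mutex_lock(&m->lock);
		m->owner = pthread_self();
		m->locked = true;
		unlock = 1;
	}

	/* ... work that requires the map to be locked ... */

	if (unlock) {			/* drop only what we acquired here */
		m->locked = false;
		pthread_mutex_unlock(&m->lock);
	}
}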