diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 6bcebd543096..5b4efef23aa8 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1954,7 +1954,9 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	vm_map_entry_t current, entry;
 	vm_object_t obj;
 	struct ucred *cred;
+	vm_offset_t saved_start;
 	vm_prot_t old_prot;
+	u_int last_timestamp;
 
 	if (start == end)
 		return (KERN_SUCCESS);
@@ -1972,8 +1974,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	/*
 	 * Make a first pass to check for protection violations.
 	 */
-	current = entry;
-	while ((current != &map->header) && (current->start < end)) {
+	for (current = entry; current != &map->header && current->start < end;
+	    current = current->next) {
 		if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
 			vm_map_unlock(map);
 			return (KERN_INVALID_ARGUMENT);
 		}
@@ -1982,17 +1984,15 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 			vm_map_unlock(map);
 			return (KERN_PROTECTION_FAILURE);
 		}
-		current = current->next;
 	}
-
 	/*
 	 * Do an accounting pass for private read-only mappings that
 	 * now will do cow due to allowed write (e.g. debugger sets
 	 * breakpoint on text segment)
	 */
-	for (current = entry; (current != &map->header) &&
-	    (current->start < end); current = current->next) {
+	for (current = entry; current != &map->header && current->start < end;
+	    current = current->next) {
 
 		vm_map_clip_end(map, current, end);
 
@@ -2042,11 +2042,12 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	}
 
 	/*
-	 * Go back and fix up protections. [Note that clipping is not
-	 * necessary the second time.]
+	 * Go back and fix up protections.
 	 */
 	current = entry;
-	while ((current != &map->header) && (current->start < end)) {
+restart:
+	for (; current != &map->header && current->start < end;
+	    current = current->next) {
 		old_prot = current->protection;
 
 		if (set_max)
@@ -2064,8 +2065,26 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 		 */
 		if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
 		    (current->protection & VM_PROT_WRITE) != 0 &&
-		    (old_prot & VM_PROT_WRITE) == 0)
+		    (old_prot & VM_PROT_WRITE) == 0) {
+			if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0) {
+				current->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
+				last_timestamp = map->timestamp;
+				saved_start = current->start;
+				(void)vm_map_unlock_and_wait(map, 0);
+				vm_map_lock(map);
+				if (map->timestamp + 1 != last_timestamp) {
+					if (!vm_map_lookup_entry(map,
+					    saved_start, &current))
+						current = current->next;
+					else
+						vm_map_clip_start(map, current,
+						    saved_start);
+					vm_map_clip_end(map, current, end);
+					goto restart;
+				}
+			}
 			vm_fault_copy_entry(map, map, current, current, NULL);
+		}
 
 		/*
 		 * When restricting access, update the physical map. Worry
@@ -2080,7 +2099,6 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 #undef	MASK
 		}
 		vm_map_simplify_entry(map, current);
-		current = current->next;
 	}
 	vm_map_unlock(map);
 	return (KERN_SUCCESS);