diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 6bcebd543096..bb38d121a2ff 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -1655,6 +1655,7 @@ _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start) vm_map_entry_t new_entry; VM_MAP_ASSERT_LOCKED(map); + KASSERT(entry->end > start, ("invalid clip of entry %p", entry)); /* * Split off the front portion -- note that we must insert the new @@ -1739,6 +1740,7 @@ _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end) vm_map_entry_t new_entry; VM_MAP_ASSERT_LOCKED(map); + KASSERT(entry->start < end, ("invalid clip of entry %p", entry)); /* * If there is no object backing this entry, we might as well create @@ -1954,7 +1956,9 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_entry_t current, entry; vm_object_t obj; struct ucred *cred; + vm_offset_t saved_start; vm_prot_t old_prot; + u_int last_timestamp; if (start == end) return (KERN_SUCCESS); @@ -1972,8 +1976,8 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, /* * Make a first pass to check for protection violations. */ - current = entry; - while ((current != &map->header) && (current->start < end)) { + for (current = entry; current != &map->header && current->start < end; + current = current->next) { if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { vm_map_unlock(map); return (KERN_INVALID_ARGUMENT); @@ -1982,17 +1986,15 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, vm_map_unlock(map); return (KERN_PROTECTION_FAILURE); } - current = current->next; } - /* * Do an accounting pass for private read-only mappings that * now will do cow due to allowed write (e.g. 
debugger sets * breakpoint on text segment) */ - for (current = entry; (current != &map->header) && - (current->start < end); current = current->next) { + for (current = entry; current != &map->header && current->start < end; + current = current->next) { vm_map_clip_end(map, current, end); @@ -2042,19 +2044,22 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, } /* - * Go back and fix up protections. [Note that clipping is not - * necessary the second time.] + * Go back and fix up protections. */ current = entry; - while ((current != &map->header) && (current->start < end)) { +restart: + for (; current != &map->header && current->start < end; + current = current->next) { old_prot = current->protection; - if (set_max) - current->protection = - (current->max_protection = new_prot) & - old_prot; - else - current->protection = new_prot; + if ((current->eflags & MAP_ENTRY_IN_TRANSITION) == 0) { + if (set_max) + current->protection = + (current->max_protection = new_prot) & + old_prot; + else + current->protection = new_prot; + } /* * For user wired map entries, the normal lazy evaluation of @@ -2062,10 +2067,33 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, * undesirable. Instead, immediately copy any pages that are * copy-on-write and enable write access in the physical map. 
 */ - if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 && + if ((current->eflags & (MAP_ENTRY_USER_WIRED | + MAP_ENTRY_IN_TRANSITION)) != 0 && (current->protection & VM_PROT_WRITE) != 0 && - (old_prot & VM_PROT_WRITE) == 0) - vm_fault_copy_entry(map, map, current, current, NULL); + (old_prot & VM_PROT_WRITE) == 0) { + if ((current->eflags & MAP_ENTRY_IN_TRANSITION) != 0) { + current->eflags |= MAP_ENTRY_NEEDS_WAKEUP; + last_timestamp = map->timestamp; + saved_start = current->start; + (void)vm_map_unlock_and_wait(map, 0); + vm_map_lock(map); + if (last_timestamp + 1 != map->timestamp) { + if (!vm_map_lookup_entry(map, + saved_start, &current)) + current = current->next; + else + vm_map_clip_start(map, current, + saved_start); + if (current->start < end) + vm_map_clip_end(map, current, + end); + goto restart; + } + } + if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0) + vm_fault_copy_entry(map, map, current, current, + NULL); + } /* * When restricting access, update the physical map. Worry @@ -2080,7 +2108,6 @@ vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end, #undef MASK } vm_map_simplify_entry(map, current); - current = current->next; } vm_map_unlock(map); return (KERN_SUCCESS);