diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f7c0d2d..9cbe61f 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1396,13 +1396,15 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 	    PAGE_SIZE);
 }
 
+void vm_check_stack_unmap(vm_offset_t sva, int count);
+void pmap_qremove_stack(vm_offset_t sva, int count);
 /*
  * This routine tears out page mappings from the
  * kernel -- it is meant only for temporary mappings.
  * Note: SMP coherent.  Uses a ranged shootdown IPI.
  */
 void
-pmap_qremove(vm_offset_t sva, int count)
+pmap_qremove_stack(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
 
@@ -1414,6 +1416,14 @@ pmap_qremove(vm_offset_t sva, int count)
 	pmap_invalidate_range(kernel_pmap, sva, va);
 }
 
+void
+pmap_qremove(vm_offset_t sva, int count)
+{
+
+	vm_check_stack_unmap(sva, count);
+	pmap_qremove_stack(sva, count);
+}
+
 /***************************************************
  * Page table page management routines.....
  ***************************************************/
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index c552cb7..0b455e8 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -349,6 +349,30 @@ SYSCTL_INT(_vm, OID_AUTO, kstacks, CTLFLAG_RD, &kstacks, 0,
 #define KSTACK_MAX_PAGES 32
 #endif
 
+static struct stack_va {
+	vm_offset_t va;
+	int count;
+} stack_vas[2048];
+
+void vm_check_stack_unmap(vm_offset_t sva, int count);
+void pmap_qremove_stack(vm_offset_t sva, int count);
+
+void
+vm_check_stack_unmap(vm_offset_t sva, int count)
+{
+	struct stack_va *v;
+	int i;
+
+	for (i = 0; i < sizeof(stack_vas) / sizeof(struct stack_va); i++) {
+		v = &stack_vas[i];
+		if (v->va == 0)
+			continue;
+		if ((sva >= v->va && sva < v->va + v->count * PAGE_SIZE) ||
+		    (sva <= v->va && sva + count * PAGE_SIZE > v->va))
+			panic("pmap_qremove stack");
+	}
+}
+
 /*
  * Create the kernel stack (including pcb for i386) for a new thread.
  * This routine directly affects the fork perf for a process and
@@ -411,7 +435,7 @@ vm_thread_new(struct thread *td, int pages)
 
 	atomic_add_int(&kstacks, 1);
 	if (KSTACK_GUARD_PAGES != 0) {
-		pmap_qremove(ks, KSTACK_GUARD_PAGES);
+		pmap_qremove_stack(ks, KSTACK_GUARD_PAGES);
 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
 	}
 	td->td_kstack_obj = ksobj;
@@ -436,6 +460,21 @@ vm_thread_new(struct thread *td, int pages)
 		m->valid = VM_PAGE_BITS_ALL;
 	}
 	VM_OBJECT_UNLOCK(ksobj);
+	mtx_lock(&kstack_cache_mtx);
+	for (i = 0; i < sizeof(stack_vas) / sizeof(struct stack_va); i++) {
+		struct stack_va *v;
+
+		v = &stack_vas[i];
+		if (v->va == 0) {
+			v->va = ks;
+			v->count = pages;
+			break;
+		}
+	}
+	mtx_unlock(&kstack_cache_mtx);
+	if (i == sizeof(stack_vas) / sizeof(struct stack_va))
+		panic("stack_vas too small");
+
 	pmap_qenter(ks, ma, pages);
 	return (1);
 }
@@ -447,7 +486,21 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
 	int i;
 
 	atomic_add_int(&kstacks, -1);
-	pmap_qremove(ks, pages);
+	pmap_qremove_stack(ks, pages);
+	mtx_lock(&kstack_cache_mtx);
+	for (i = 0; i < sizeof(stack_vas) / sizeof(struct stack_va); i++) {
+		struct stack_va *v;
+
+		v = &stack_vas[i];
+		if (v->va == ks) {
+			v->va = 0;
+			break;
+		}
+	}
+	mtx_unlock(&kstack_cache_mtx);
+	if (i == sizeof(stack_vas) / sizeof(struct stack_va))
+		panic("stack prot not found");
+
 	VM_OBJECT_LOCK(ksobj);
 	for (i = 0; i < pages; i++) {
 		m = vm_page_lookup(ksobj, i);
@@ -536,7 +589,7 @@ vm_thread_swapout(struct thread *td)
 	cpu_thread_swapout(td);
 	pages = td->td_kstack_pages;
 	ksobj = td->td_kstack_obj;
-	pmap_qremove(td->td_kstack, pages);
+	pmap_qremove_stack(td->td_kstack, pages);
 	VM_OBJECT_LOCK(ksobj);
 	for (i = 0; i < pages; i++) {
 		m = vm_page_lookup(ksobj, i);
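The idea of the patch: every kernel stack VA range is recorded in stack_vas[] when vm_thread_new() maps it and cleared again in vm_thread_stack_dispose(), both under kstack_cache_mtx, while the legitimate stack unmappings are routed through pmap_qremove_stack(). Any remaining pmap_qremove() caller therefore passes through vm_check_stack_unmap() and panics if its range touches a live kernel stack. The listing below is a minimal userspace sketch of that overlap test only, not kernel code; PAGE_SIZE, the table size, and the addresses in main() are made-up values for illustration.

/*
 * Userspace sketch of the overlap check added in vm_check_stack_unmap().
 * In the kernel the table is filled by vm_thread_new() and cleared by
 * vm_thread_stack_dispose() under kstack_cache_mtx; here it is set up
 * by hand in main().
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096
typedef unsigned long vm_offset_t;

static struct stack_va {
	vm_offset_t va;
	int count;
} stack_vas[8];

/* Return 1 if [sva, sva + count pages) overlaps any registered stack. */
static int
check_stack_unmap(vm_offset_t sva, int count)
{
	struct stack_va *v;
	size_t i;

	for (i = 0; i < sizeof(stack_vas) / sizeof(struct stack_va); i++) {
		v = &stack_vas[i];
		if (v->va == 0)
			continue;
		if ((sva >= v->va && sva < v->va + v->count * PAGE_SIZE) ||
		    (sva <= v->va && sva + count * PAGE_SIZE > v->va))
			return (1);	/* the kernel version panics here */
	}
	return (0);
}

int
main(void)
{
	/* Pretend one four-page stack is mapped at 0x10000. */
	stack_vas[0].va = 0x10000;
	stack_vas[0].count = 4;

	assert(check_stack_unmap(0x10000, 1) == 1);	/* inside the stack */
	assert(check_stack_unmap(0xf000, 2) == 1);	/* straddles the start */
	assert(check_stack_unmap(0x8000, 1) == 0);	/* below, no overlap */
	assert(check_stack_unmap(0x14000, 1) == 0);	/* just past the end */
	printf("overlap checks behave as expected\n");
	return (0);
}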