diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
index 1a907cc..6bd5799 100644
--- a/sys/arm/arm/vm_machdep.c
+++ b/sys/arm/arm/vm_machdep.c
@@ -119,9 +119,6 @@ cpu_fork(register struct thread *td1, register struct proc *p2,
 #ifdef __XSCALE__
 #ifndef CPU_XSCALE_CORE3
 	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
-	if (td2->td_altkstack)
-		pmap_use_minicache(td2->td_altkstack, td2->td_altkstack_pages *
-		    PAGE_SIZE);
 #endif
 #endif
 	td2->td_pcb = pcb2;
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 4e2eaa9..9cda110 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$");
 
 #include "opt_kdtrace.h"
 #include "opt_ktrace.h"
+#include "opt_kstack_pages.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -276,25 +277,28 @@ norfproc_fail:
 
 	mem_charged = 0;
 	vm2 = NULL;
+	if (pages == 0)
+		pages = KSTACK_PAGES;
 	/* Allocate new proc. */
 	newproc = uma_zalloc(proc_zone, M_WAITOK);
-	if (TAILQ_EMPTY(&newproc->p_threads)) {
-		td2 = thread_alloc();
+	if (TAILQ_EMPTY(&newproc->p_threads))
+		td2 = NULL;
+	else
+		td2 = FIRST_THREAD_IN_PROC(newproc);
+	if (td2 && pages != td2->td_kstack_pages) {
+		thread_unlink(td2);
+		thread_free(td2);
+		td2 = NULL;
+	}
+	if (!td2) {
+		td2 = thread_alloc(pages);
 		if (td2 == NULL) {
 			error = ENOMEM;
 			goto fail1;
 		}
 		proc_linkup(newproc, td2);
-	} else
-		td2 = FIRST_THREAD_IN_PROC(newproc);
-
-	/* Allocate and switch to an alternate kstack if specified. */
-	if (pages != 0) {
-		if (!vm_thread_new_altkstack(td2, pages)) {
-			error = ENOMEM;
-			goto fail1;
-		}
 	}
+
 	if ((flags & RFMEM) == 0) {
 		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
 		if (vm2 == NULL) {
diff --git a/sys/kern/kern_kthread.c b/sys/kern/kern_kthread.c
index 1092832..3c5248e 100644
--- a/sys/kern/kern_kthread.c
+++ b/sys/kern/kern_kthread.c
@@ -256,7 +256,7 @@ kthread_add(void (*func)(void *), void *arg, struct proc *p,
 	}
 
 	/* Initialize our new td */
-	newtd = thread_alloc();
+	newtd = thread_alloc(pages);
 	if (newtd == NULL)
 		return (ENOMEM);
 
@@ -282,9 +282,6 @@ kthread_add(void (*func)(void *), void *arg, struct proc *p,
 	newtd->td_pflags |= TDP_KTHREAD;
 	newtd->td_ucred = crhold(p->p_ucred);
 
-	/* Allocate and switch to an alternate kstack if specified. */
-	if (pages != 0)
-		vm_thread_new_altkstack(newtd, pages);
 
 	/* this code almost the same as create_thread() in kern_thr.c */
 	PROC_LOCK(p);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index cdbc012..e012a3e 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -203,14 +203,6 @@ proc_dtor(void *mem, int size, void *arg)
 #endif
 		/* Free all OSD associated to this thread. */
 		osd_thread_exit(td);
-
-		/* Dispose of an alternate kstack, if it exists.
-		 * XXX What if there are more than one thread in the proc?
-		 *     The first thread in the proc is special and not
-		 *     freed, so you gotta do this here.
-		 */
-		if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
-			vm_thread_dispose_altkstack(td);
 	}
 	EVENTHANDLER_INVOKE(process_dtor, p);
 	if (p->p_ksi != NULL)
@@ -767,8 +759,6 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
 		FOREACH_THREAD_IN_PROC(p, td0) {
 			if (!TD_IS_SWAPPED(td0))
 				kp->ki_rssize += td0->td_kstack_pages;
-			if (td0->td_altkstack_obj != NULL)
-				kp->ki_rssize += td0->td_altkstack_pages;
 		}
 		kp->ki_swrss = vm->vm_swrss;
 		kp->ki_tsize = vm->vm_tsize;
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index c478c63..630069b 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -176,7 +176,7 @@ create_thread(struct thread *td, mcontext_t *ctx,
 	}
 
 	/* Initialize our td */
-	newtd = thread_alloc();
+	newtd = thread_alloc(0);
 	if (newtd == NULL)
 		return (ENOMEM);
 
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index d47bd8c..f23fc84 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -283,7 +283,7 @@ thread_reap(void)
  * Allocate a thread.
  */
 struct thread *
-thread_alloc(void)
+thread_alloc(int pages)
 {
 	struct thread *td;
 
@@ -291,7 +291,7 @@
 
 	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
 	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
-	if (!vm_thread_new(td, 0)) {
+	if (!vm_thread_new(td, pages)) {
 		uma_zfree(thread_zone, td);
 		return (NULL);
 	}
@@ -312,8 +312,6 @@ thread_free(struct thread *td)
 	cpuset_rel(td->td_cpuset);
 	td->td_cpuset = NULL;
 	cpu_thread_free(td);
-	if (td->td_altkstack != 0)
-		vm_thread_dispose_altkstack(td);
 	if (td->td_kstack != 0)
 		vm_thread_dispose(td);
 	uma_zfree(thread_zone, td);
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index b65db62..084c098 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -267,9 +267,6 @@ struct thread {
 	struct vm_object *td_kstack_obj;/* (a) Kstack object. */
 	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */
 	int		td_kstack_pages; /* (a) Size of the kstack. */
-	struct vm_object *td_altkstack_obj;/* (a) Alternate kstack object. */
-	vm_offset_t	td_altkstack;	/* (a) Kernel VA of alternate kstack. */
-	int		td_altkstack_pages; /* (a) Size of alternate kstack. */
 	volatile u_int	td_critnest;	/* (k*) Critical section nest level. */
 	struct mdthread td_md;		/* (k) Any machine-dependent fields. */
 	struct td_sched	*td_sched;	/* (*) Scheduler-specific data. */
@@ -850,7 +847,7 @@ void	cpu_thread_exit(struct thread *);
 void	cpu_thread_free(struct thread *);
 void	cpu_thread_swapin(struct thread *);
 void	cpu_thread_swapout(struct thread *);
-struct	thread *thread_alloc(void);
+struct	thread *thread_alloc(int pages);
 void	thread_exit(void) __dead2;
 void	thread_free(struct thread *td);
 void	thread_link(struct thread *td, struct proc *p);
diff --git a/sys/vm/vm_extern.h b/sys/vm/vm_extern.h
index 53f7694..65b6c8e 100644
--- a/sys/vm/vm_extern.h
+++ b/sys/vm/vm_extern.h
@@ -80,9 +80,7 @@ int vm_fault_quick(caddr_t v, int prot);
 struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
 void vm_imgact_unmap_page(struct sf_buf *sf);
 void vm_thread_dispose(struct thread *td);
-void vm_thread_dispose_altkstack(struct thread *td);
 int vm_thread_new(struct thread *td, int pages);
-int vm_thread_new_altkstack(struct thread *td, int pages);
 void vm_thread_swapin(struct thread *td);
 void vm_thread_swapout(struct thread *td);
 #endif /* _KERNEL */
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index 9e43a3f..4863d94 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -468,37 +468,6 @@ vm_thread_swapin(struct thread *td)
 }
 
 /*
- * Set up a variable-sized alternate kstack.
- */
-int
-vm_thread_new_altkstack(struct thread *td, int pages)
-{
-
-	td->td_altkstack = td->td_kstack;
-	td->td_altkstack_obj = td->td_kstack_obj;
-	td->td_altkstack_pages = td->td_kstack_pages;
-
-	return (vm_thread_new(td, pages));
-}
-
-/*
- * Restore the original kstack.
- */
-void
-vm_thread_dispose_altkstack(struct thread *td)
-{
-
-	vm_thread_dispose(td);
-
-	td->td_kstack = td->td_altkstack;
-	td->td_kstack_obj = td->td_altkstack_obj;
-	td->td_kstack_pages = td->td_altkstack_pages;
-	td->td_altkstack = 0;
-	td->td_altkstack_obj = NULL;
-	td->td_altkstack_pages = 0;
-}
-
-/*
  * Implement fork's actions on an address space.
  * Here we arrange for the address space to be copied or referenced,
  * allocate a user struct (pcb and kernel stack), then call the
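
For illustration only (this sketch is not part of the diff): with the alternate-kstack machinery gone, a subsystem that wants a larger kernel stack asks for it at thread-creation time, since kthread_add() now forwards its "pages" argument straight to thread_alloc() rather than grafting a second stack on afterwards. The example_loop()/example_init() names and the 4-page size below are hypothetical.

/* Hypothetical usage sketch; not part of the commit. */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>

static void
example_loop(void *arg)
{

	for (;;)
		pause("example", hz);	/* hypothetical periodic work */
}

static void
example_init(void *dummy __unused)
{
	struct thread *newtd;
	int error;

	/*
	 * kthread_add() passes "pages" to thread_alloc(), so the thread is
	 * created with a 4-page kstack up front; passing 0 would select the
	 * default KSTACK_PAGES.
	 */
	error = kthread_add(example_loop, NULL, NULL, &newtd, 0,
	    4 /* kstack pages */, "example");
	if (error != 0)
		printf("example: kthread_add failed: %d\n", error);
}
SYSINIT(example, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, example_init, NULL);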