? alpha/conf/ASHBURTON ? i386/conf/ACPI ? i386/conf/OVERCEE ? i386/conf/elfdump ? i386/conf/elfdump.tar ? i386/conf/foo.env ? i386/conf/foo.hints ? isa/eisaid ? isa/eisaid.c ? isa/pnpid ? isa/pnpid.c ? modules/syscons/matrix Index: alpha/alpha/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/vm_machdep.c,v retrieving revision 1.60 diff -u -r1.60 vm_machdep.c --- alpha/alpha/vm_machdep.c 2001/09/08 22:18:58 1.60 +++ alpha/alpha/vm_machdep.c 2001/09/10 04:22:38 @@ -241,40 +241,14 @@ cpu_exit(p) register struct proc *p; { - alpha_fpstate_drop(p); - - PROC_LOCK(p); - mtx_lock_spin(&sched_lock); - while (mtx_owned(&Giant)) - mtx_unlock_flags(&Giant, MTX_NOSWITCH); - - /* - * We have to wait until after releasing all locks before - * changing p_stat. If we block on a mutex then we will be - * back at SRUN when we resume and our parent will never - * harvest us. - */ - p->p_stat = SZOMB; - wakeup(p->p_pptr); - PROC_UNLOCK_NOSWITCH(p); - - cnt.v_swtch++; - cpu_switch(); - panic("cpu_exit"); + alpha_fpstate_drop(p); } void cpu_wait(p) struct proc *p; { - GIANT_REQUIRED; - - /* drop per-process resources */ - pmap_dispose_proc(p); - - /* and clean-out the vmspace */ - vmspace_free(p->p_vmspace); } /* Index: i386/i386/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/vm_machdep.c,v retrieving revision 1.170 diff -u -r1.170 vm_machdep.c --- i386/i386/vm_machdep.c 2001/09/08 22:18:58 1.170 +++ i386/i386/vm_machdep.c 2001/09/10 04:22:39 @@ -268,38 +268,12 @@ reset_dbregs(); pcb->pcb_flags &= ~PCB_DBREGS; } - PROC_LOCK(p); - mtx_lock_spin(&sched_lock); - while (mtx_owned(&Giant)) - mtx_unlock_flags(&Giant, MTX_NOSWITCH); - - /* - * We have to wait until after releasing all locks before - * changing p_stat. If we block on a mutex then we will be - * back at SRUN when we resume and our parent will never - * harvest us. 
- */ - p->p_stat = SZOMB; - - wakeup(p->p_pptr); - PROC_UNLOCK_NOSWITCH(p); - - cnt.v_swtch++; - cpu_throw(); - panic("cpu_exit"); } void cpu_wait(p) struct proc *p; { - GIANT_REQUIRED; - - /* drop per-process resources */ - pmap_dispose_proc(p); - - /* and clean-out the vmspace */ - vmspace_free(p->p_vmspace); } /* Index: ia64/ia64/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/vm_machdep.c,v retrieving revision 1.26 diff -u -r1.26 vm_machdep.c --- ia64/ia64/vm_machdep.c 2001/09/08 22:18:58 1.26 +++ ia64/ia64/vm_machdep.c 2001/09/10 04:22:39 @@ -284,40 +284,23 @@ cpu_exit(p) register struct proc *p; { - ia64_fpstate_drop(p); - - PROC_LOCK(p); - mtx_lock_spin(&sched_lock); - while (mtx_owned(&Giant)) - mtx_unlock_flags(&Giant, MTX_NOSWITCH); - - /* - * We have to wait until after releasing all locks before - * changing p_stat. If we block on a mutex then we will be - * back at SRUN when we resume and our parent will never - * harvest us. 
- */ - p->p_stat = SZOMB; - wakeup(p->p_pptr); - PROC_UNLOCK_NOSWITCH(p); - - cnt.v_swtch++; - cpu_switch(); - panic("cpu_exit"); + ia64_fpstate_drop(p); } void cpu_wait(p) struct proc *p; { - GIANT_REQUIRED; +} - /* drop per-process resources */ - pmap_dispose_proc(p); +/* Temporary helper */ +void +cpu_throw(void) +{ - /* and clean-out the vmspace */ - vmspace_free(p->p_vmspace); + cpu_switch(); + panic("cpu_throw() didn't"); } /* Index: kern/kern_exit.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_exit.c,v retrieving revision 1.135 diff -u -r1.135 kern_exit.c --- kern/kern_exit.c 2001/09/01 04:37:34 1.135 +++ kern/kern_exit.c 2001/09/10 04:22:39 @@ -54,6 +54,7 @@ #include #include #include +#include #include #include #include @@ -67,6 +68,7 @@ #include #include +#include #include #include #include @@ -380,13 +382,30 @@ /* * Finally, call machine-dependent code to release the remaining * resources including address space, the kernel stack and pcb. - * The address space is released by "vmspace_free(p->p_vmspace)"; - * This is machine-dependent, as we may have to change stacks - * or ensure that the current one isn't reallocated before we - * finish. cpu_exit will end with a call to cpu_switch(), finishing - * our execution (pun intended). + * The address space is released by "vmspace_free(p->p_vmspace)" + * in vm_waitproc(); */ cpu_exit(p); + + PROC_LOCK(p); + mtx_lock_spin(&sched_lock); + while (mtx_owned(&Giant)) + mtx_unlock_flags(&Giant, MTX_NOSWITCH); + + /* + * We have to wait until after releasing all locks before + * changing p_stat. If we block on a mutex then we will be + * back at SRUN when we resume and our parent will never + * harvest us. 
+ */ + p->p_stat = SZOMB; + + wakeup(p->p_pptr); + PROC_UNLOCK_NOSWITCH(p); + + cnt.v_swtch++; + cpu_throw(); + panic("exit1"); } #ifdef COMPAT_43 @@ -571,11 +590,11 @@ } /* - * Give machine-dependent layer a chance + * Give vm and machine-dependent layer a chance * to free anything that cpu_exit couldn't * release while still running in process context. */ - cpu_wait(p); + vm_waitproc(p); mtx_destroy(&p->p_mtx); zfree(proc_zone, p); nprocs--; Index: kern/kern_fork.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_fork.c,v retrieving revision 1.118 diff -u -r1.118 kern_fork.c --- kern/kern_fork.c 2001/09/01 03:04:31 1.118 +++ kern/kern_fork.c 2001/09/10 04:22:40 @@ -235,7 +235,7 @@ * certain parts of a process from itself. */ if ((flags & RFPROC) == 0) { - vm_fork(p1, 0, flags); + vm_forkproc(p1, 0, flags); /* * Close all file descriptors. @@ -412,7 +412,7 @@ /* * Duplicate sub-structures as needed. * Increase reference counts on shared objects. - * The p_stats and p_sigacts substructs are set in vm_fork. + * The p_stats and p_sigacts substructs are set in vm_forkproc. */ p2->p_flag = 0; mtx_lock_spin(&sched_lock); @@ -461,7 +461,7 @@ PROC_LOCK(p1); bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig)); p2->p_procsig->ps_refcnt = 1; - p2->p_sigacts = NULL; /* finished in vm_fork() */ + p2->p_sigacts = NULL; /* finished in vm_forkproc() */ } if (flags & RFLINUXTHPN) p2->p_sigparent = SIGUSR1; @@ -573,7 +573,7 @@ * Finish creating the child process. It will return via a different * execution path later. 
(ie: directly into user mode) */ - vm_fork(p1, p2, flags); + vm_forkproc(p1, p2, flags); if (flags == (RFFDG | RFPROC)) { cnt.v_forks++; Index: powerpc/powerpc/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/powerpc/powerpc/vm_machdep.c,v retrieving revision 1.59 diff -u -r1.59 vm_machdep.c --- powerpc/powerpc/vm_machdep.c 2001/09/08 22:18:58 1.59 +++ powerpc/powerpc/vm_machdep.c 2001/09/10 04:22:41 @@ -159,38 +159,21 @@ cpu_exit(p) register struct proc *p; { - PROC_LOCK(p); - mtx_lock_spin(&sched_lock); - while (mtx_owned(&Giant)) - mtx_unlock_flags(&Giant, MTX_NOSWITCH); - - /* - * We have to wait until after releasing all locks before - * changing p_stat. If we block on a mutex then we will be - * back at SRUN when we resume and our parent will never - * harvest us. - */ - p->p_stat = SZOMB; - - wakeup(p->p_pptr); - PROC_UNLOCK_NOSWITCH(p); - - cnt.v_swtch++; - cpu_switch(); - panic("cpu_exit"); } void cpu_wait(p) struct proc *p; { - GIANT_REQUIRED; - - /* drop per-process resources */ - pmap_dispose_proc(p); +} - /* and clean-out the vmspace */ - vmspace_free(p->p_vmspace); +/* Temporary helper */ +void +cpu_throw(void) +{ + + cpu_switch(); + panic("cpu_throw() didn't"); } /* Index: sparc64/sparc64/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/sparc64/sparc64/vm_machdep.c,v retrieving revision 1.8 diff -u -r1.8 vm_machdep.c --- sparc64/sparc64/vm_machdep.c 2001/09/03 23:15:54 1.8 +++ sparc64/sparc64/vm_machdep.c 2001/09/10 04:22:41 @@ -63,30 +63,9 @@ #include #include -/* XXX: it seems that all that is in here should really be MI... */ void cpu_exit(struct proc *p) { - - PROC_LOCK(p); - mtx_lock_spin(&sched_lock); - while (mtx_owned(&Giant)) - mtx_unlock_flags(&Giant, MTX_NOSWITCH); - - /* - * We have to wait until after releasing all locks before - * changing p_stat. 
If we block on a mutex then we will be - * back at SRUN when we resume and our parent will never - * harvest us. - */ - p->p_stat = SZOMB; - - wakeup(p->p_pptr); - PROC_UNLOCK_NOSWITCH(p); - - cnt.v_swtch++; - cpu_throw(); - panic("cpu_exit"); } /* @@ -178,13 +157,6 @@ void cpu_wait(struct proc *p) { - GIANT_REQUIRED; - - /* drop per-process resources */ - pmap_dispose_proc(p); - - /* and clean-out the vmspace */ - vmspace_free(p->p_vmspace); } void Index: sys/proc.h =================================================================== RCS file: /home/ncvs/src/sys/sys/proc.h,v retrieving revision 1.178 diff -u -r1.178 proc.h --- sys/proc.h 2001/09/05 01:22:14 1.178 +++ sys/proc.h 2001/09/10 04:22:41 @@ -501,7 +501,7 @@ void userret __P((struct proc *, struct trapframe *, u_int)); void maybe_resched __P((struct proc *)); -void cpu_exit __P((struct proc *)) __dead2; +void cpu_exit __P((struct proc *)); void exit1 __P((struct proc *, int)) __dead2; void cpu_fork __P((struct proc *, struct proc *, int)); void cpu_set_fork_handler __P((struct proc *, void (*)(void *), void *)); Index: vm/vm_extern.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_extern.h,v retrieving revision 1.47 diff -u -r1.47 vm_extern.h --- vm/vm_extern.h 2000/03/13 10:47:24 1.47 +++ vm/vm_extern.h 2001/09/10 04:22:41 @@ -80,7 +80,8 @@ void vm_fault_unwire __P((vm_map_t, vm_offset_t, vm_offset_t)); int vm_fault_wire __P((vm_map_t, vm_offset_t, vm_offset_t)); int vm_fault_user_wire __P((vm_map_t, vm_offset_t, vm_offset_t)); -void vm_fork __P((struct proc *, struct proc *, int)); +void vm_forkproc __P((struct proc *, struct proc *, int)); +void vm_waitproc __P((struct proc *)); int vm_mmap __P((vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, void *, vm_ooffset_t)); vm_offset_t vm_page_alloc_contig __P((vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t)); void vm_set_page_size __P((void)); Index: vm/vm_glue.c 
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_glue.c,v
retrieving revision 1.117
diff -u -r1.117 vm_glue.c
--- vm/vm_glue.c	2001/07/04 19:00:12	1.117
+++ vm/vm_glue.c	2001/09/10 04:22:41
@@ -209,7 +209,7 @@
  * to user mode to avoid stack copying and relocation problems.
  */
 void
-vm_fork(p1, p2, flags)
+vm_forkproc(p1, p2, flags)
 	struct proc *p1, *p2;
 	int flags;
 {
@@ -283,6 +283,22 @@
 	 * and make the child ready to run.
 	 */
 	cpu_fork(p1, p2, flags);
+}
+
+/*
+ * Called after process has been wait(2)'ed upon and is being reaped.
+ * The idea is to reclaim resources that we could not reclaim while
+ * the process was still executing.
+ */
+void
+vm_waitproc(p)
+	struct proc *p;
+{
+
+	GIANT_REQUIRED;
+	cpu_wait(p);
+	pmap_dispose_proc(p);		/* drop per-process resources */
+	vmspace_free(p->p_vmspace);	/* and clean-out the vmspace */
 }
 
 /*