Index: kern/kern_proc.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_proc.c,v retrieving revision 1.197 diff -u -p -r1.197 kern_proc.c --- kern/kern_proc.c 16 Oct 2003 08:39:15 -0000 1.197 +++ kern/kern_proc.c 26 Jan 2004 23:54:14 -0000 @@ -41,6 +41,7 @@ __FBSDID("$FreeBSD: src/sys/kern/kern_pr #include "opt_kstack_pages.h" #include +#include #include #include #include @@ -83,13 +84,47 @@ static void proc_dtor(void *mem, int siz static void proc_init(void *mem, int size); static void proc_fini(void *mem, int size); +/* + * pid to proc lookup is done by indexing the pid_table array. + * Since pid numbers are only allocated when an empty slot + * has been found, there is no need to search any lists ever. + * (an orphaned pgrp will lock the slot, a session will lock + * the pgrp with the same number.) + * If the table is too small it is reallocated with twice the + * previous size and the entries 'unzipped' into the two halves. + * A linked list of free entries is passed through the pt_proc + * field of 'free' items - set odd to be an invalid ptr. + */ + +struct pid_table { + struct proc* pt_proc; + struct pgrp* pt_pgrp; +}; +#if 1/* strongly typed cast - should be a noop */ +static __inline intptr_t p2u(struct proc *p) { return (intptr_t)p; }; +#else +#define p2u(p) ((intptr_t)p) +#endif +#define P_VALID(p) (!(p2u(p) & 1)) +#define P_NEXT(p) (p2u(p) >> 1) +#define P_FREE(pid) ((struct proc *)((pid) << 1 | 1)) + +static struct pid_table *pid_table; +#define INITIAL_PID_TABLE_SIZE (1 << 5) + +static uint pid_tbl_mask = (INITIAL_PID_TABLE_SIZE) - 1;/* table size 2^n */ +static uint pid_alloc_lim;/* max we allocate before growing table */ +static uint pid_alloc_cnt = 0; +/* links through free slots - never empty! 
*/ +static uint next_free_pt, last_free_pt; +static uint next_free_pt_highid; +static pid_t pid_max = PID_MAX;/* largest value we allocate */ + +static int randompid = 0; + /* * Other process lists */ -struct pidhashhead *pidhashtbl; -u_long pidhash; -struct pgrphashhead *pgrphashtbl; -u_long pgrphash; struct proclist allproc; struct proclist zombproc; struct sx allproc_lock; @@ -109,26 +144,251 @@ SYSCTL_INT(_kern, OID_AUTO, uarea_pages, CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE); /* - * Initialize global process hashing structures. + * Initialize global process mapping structures. */ void procinit() { - + int i; sx_init(&allproc_lock, "allproc"); sx_init(&proctree_lock, "proctree"); mtx_init(&pargs_ref_lock, "struct pargs.ref", NULL, MTX_DEF); mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF); + + MALLOC(pid_table, struct pid_table *, + INITIAL_PID_TABLE_SIZE * sizeof *pid_table, M_PROC, M_WAITOK); + +#define LINK_EMPTY ((PID_MAX + INITIAL_PID_TABLE_SIZE) & ~(INITIAL_PID_TABLE_SIZE - 1)) + /* Set free list running through table... Preset 'use count' to -1 so we allocate pid 1 next. */ + for (i = 0; i <= pid_tbl_mask; i++) { + pid_table[i].pt_proc = P_FREE(LINK_EMPTY + i + 1); + pid_table[i].pt_pgrp = 0; + } + /* slot 0 is just grabbed */ + next_free_pt = 1; + next_free_pt_highid = 10; + pid_table[0].pt_proc = &proc0; + pid_table[0].pt_pgrp = &pgrp0; + /* Need to fix last entry. 
*/ + last_free_pt = pid_tbl_mask; + pid_table[last_free_pt].pt_proc = P_FREE(LINK_EMPTY); + /* point at which we grow table - to avoid reusing pids too often */ + pid_alloc_lim = pid_tbl_mask - 1; +#undef LINK_EMPTY + LIST_INIT(&allproc); LIST_INIT(&zombproc); - pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash); - pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash); proc_zone = uma_zcreate("PROC", sched_sizeof_proc(), proc_ctor, proc_dtor, proc_init, proc_fini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); uihashinit(); } +static void +expand_pid_table(int *fpt) +{ + uint32_t pt_size = pid_tbl_mask + 1; + struct pid_table *n_pt, *new_pt; + struct proc *proc; + struct pgrp *pgrp; + int i; + pid_t pid; + + new_pt = malloc(pt_size * 2 * sizeof *new_pt, M_PROC, M_WAITOK); + + sx_xlock(&allproc_lock); + if (pt_size != pid_tbl_mask + 1) { + /* Another process beat us to it... */ + sx_xunlock(&allproc_lock); + FREE(new_pt, M_PROC); + return; + } + + /* + * Copy entries from old table into new one. + * If 'pid' is 'odd' we need to place in the upper half, + * even pid's to the lower half. + * Free items stay in the low half so we don't have to + * fixup the reference to them. + * We stuff free items on the front of the freelist + * because we can't write to unmodified entries. + * Processing the table backwards maintains a semblance + * of issuing pid numbers that increase with time. 
+ */ + i = pt_size - 1; + n_pt = new_pt + i; + for (; ; i--, n_pt--) { + proc = pid_table[i].pt_proc; + pgrp = pid_table[i].pt_pgrp; + if (!P_VALID(proc)) { + /* Up 'use count' so that link is valid */ + pid = (P_NEXT(proc) + pt_size) & ~pt_size; + proc = P_FREE(pid); + if (pgrp) + pid = pgrp->pg_id; + } else + pid = proc->p_pid; + + /* Save entry in appropriate half of table */ + n_pt[pid & pt_size].pt_proc = proc; + n_pt[pid & pt_size].pt_pgrp = pgrp; + + /* Put other piece on start of free list */ + pid = (pid ^ pt_size) & ~pid_tbl_mask; + n_pt[pid & pt_size].pt_proc = + P_FREE((pid & ~pt_size) | *fpt); + n_pt[pid & pt_size].pt_pgrp = 0; + *fpt = i | (pid & pt_size); + if (i == 0) + break; + } + + /* Switch tables */ + n_pt = pid_table; + pid_table = new_pt; + pid_tbl_mask = pt_size * 2 - 1; + + /* + * pid_max starts as PID_MAX (= 30000), once we have 16384 + * allocated pids we need it to be larger! + */ + if (pid_tbl_mask > PID_MAX) { + pid_max = pid_tbl_mask * 2 + 1; + pid_alloc_lim |= pid_alloc_lim << 1; + } else + pid_alloc_lim <<= 1; /* doubles number of free slots... */ + + sx_xunlock(&allproc_lock); + FREE(n_pt, M_PROC); +} + + +/* + * Allocate a free proc structure. This method is + * called from fork. + * Expand the mapping table if needed. + */ +struct proc * +proc_alloc(struct thread *td, int flags) +{ + struct proc *p, *p1 = td->td_proc; + int nxt = 0; + pid_t pid = 0; + struct pid_table *pt = NULL; + int highpid = 0; + int ok; + uid_t uid; + + p = uma_zalloc(proc_zone, M_WAITOK); + p->p_state = PRS_NEW; /* protect against others */ + + /* allocate next free pid */ + for (;;expand_pid_table(highpid ? 
&next_free_pt_highid : + &next_free_pt)) { + highpid = 0; + if (pid_alloc_cnt >= pid_alloc_lim) + /* ensure pids cycle through 2000+ values */ + continue; + sx_xlock(&allproc_lock); + if ((flags & RFHIGHPID) && next_free_pt < 10) { + highpid = 1; + pt = &pid_table[next_free_pt_highid]; + nxt = P_NEXT(pt->pt_proc); + if (nxt & pid_tbl_mask) { + if (!P_VALID(pid_table[9].pt_proc)) + pid_table[9].pt_proc = + P_FREE(P_NEXT(pid_table[9].pt_proc) + | (nxt & pid_tbl_mask)); + break; + } + } else { + pt = &pid_table[next_free_pt]; + nxt = P_NEXT(pt->pt_proc); + if (nxt & pid_tbl_mask) + break; + } + /* Table full - expand (NB last entry not used....) */ + sx_xunlock(&allproc_lock); + } + /* + * Handle this now, so that we don't have to grab the allproc lock + * again later in fork1(). + */ + /* + * Although process entries are dynamically created, we still keep + * a global limit on the maximum number we will create. Don't allow + * a nonprivileged user to use the last ten processes; don't let root + * exceed the limit. The variable nprocs is the current number of + * processes, maxproc is the limit. + */ + + /* pid is 'saved use count' + 'size' + entry */ + pid = (nxt & ~pid_tbl_mask) + pid_tbl_mask + 1 + + (highpid ? next_free_pt_highid : next_free_pt); + uid = td->td_ucred->cr_ruid; + if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) + goto bad; + PROC_LOCK(p1); + ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, + (uid != 0) ? 
p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0); + PROC_UNLOCK(p1); + if (!ok) + goto bad; + + if (randompid) + pid += (arc4random() % randompid) * (pid_tbl_mask + 1); + if ((uint)pid > (uint)pid_max) + pid &= pid_tbl_mask; + p->p_pid = pid; + if (highpid) + next_free_pt_highid = nxt & pid_tbl_mask; + else + next_free_pt = nxt & pid_tbl_mask; + + /* Grab table slot */ + pt->pt_proc = p; + pid_alloc_cnt++; + + LIST_INSERT_HEAD(&allproc, p, p_list); + nprocs++; + sx_xunlock(&allproc_lock); + + return p; +bad: + uma_zfree(proc_zone, p); + sx_xunlock(&allproc_lock); + return (NULL); +} + +/* + * Free last resources of a process - called from exit1 (in kern_exit.c) + */ +void +proc_free(struct proc *p) +{ + pid_t pid = p->p_pid; + struct pid_table *pt; + + sx_xlock(&allproc_lock); + + pt = &pid_table[pid & pid_tbl_mask]; + /* save pid use count in slot */ + pt->pt_proc = P_FREE(pid & ~pid_tbl_mask); + + if (pt->pt_pgrp == NULL) { + /* link last freed entry onto ours */ + pid &= pid_tbl_mask; + pt = &pid_table[last_free_pt]; + pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pid); + last_free_pt = pid; + pid_alloc_cnt--; + } + + nprocs--; + sx_xunlock(&allproc_lock); + uma_zfree(proc_zone, p); +} /* * Prepare a proc for use. */ @@ -253,11 +513,12 @@ pfind(pid) register struct proc *p; sx_slock(&allproc_lock); - LIST_FOREACH(p, PIDHASH(pid), p_hash) - if (p->p_pid == pid) { - PROC_LOCK(p); - break; - } + p = pid_table[pid & pid_tbl_mask].pt_proc; + /* Only allow live processes to be found by pid. 
*/ + if (!P_VALID(p) || p->p_pid != pid || p->p_state == PRS_ZOMBIE) + p = NULL; + else + PROC_LOCK(p); sx_sunlock(&allproc_lock); return (p); } @@ -272,15 +533,21 @@ pgfind(pgid) { register struct pgrp *pgrp; - sx_assert(&proctree_lock, SX_LOCKED); + sx_slock(&allproc_lock); + pgrp = pid_table[pgid & pid_tbl_mask].pt_pgrp; - LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash) { - if (pgrp->pg_id == pgid) { - PGRP_LOCK(pgrp); - return (pgrp); - } - } - return (NULL); + /* + * Can't look up a pgrp that only exists because the session + * hasn't died yet (traditional) + */ + if (pgrp == NULL || pgrp->pg_id != pgid + || LIST_EMPTY(&pgrp->pg_members)) + pgrp = NULL; + else + PGRP_LOCK(pgrp); + /* XXX MP - need to have a reference count... */ + sx_sunlock(&allproc_lock); + return pgrp; } /* @@ -345,7 +612,7 @@ enterpgrp(p, pgid, pgrp, sess) * As we have an exclusive lock of proctree_lock, * this should not deadlock. */ - LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash); + pid_table[pgid & pid_tbl_mask].pt_pgrp = pgrp; pgrp->pg_jobc = 0; SLIST_INIT(&pgrp->pg_sigiolst); PGRP_UNLOCK(pgrp); @@ -444,6 +711,31 @@ leavepgrp(p) } /* + * remove the pg from the PIDTable + */ +static void +pgunlink(pid_t pg_id) +{ + struct pgrp *pgrp; + struct pid_table *pt; + + sx_assert(&proctree_lock, SX_XLOCKED); + pt = &pid_table[pg_id & pid_tbl_mask]; + pgrp = pt->pt_pgrp; + pt->pt_pgrp = 0; + + if (!P_VALID(pt->pt_proc)) { + /* orphaned pgrp, put slot onto free list */ + pg_id &= pid_tbl_mask; + pt = &pid_table[last_free_pt]; + pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pg_id); + last_free_pt = pg_id; + pid_alloc_cnt--; + } + +} + +/* * delete a process group */ static void @@ -466,18 +758,25 @@ pgdelete(pgrp) if (pgrp->pg_session->s_ttyp != NULL && pgrp->pg_session->s_ttyp->t_pgrp == pgrp) pgrp->pg_session->s_ttyp->t_pgrp = NULL; - LIST_REMOVE(pgrp, pg_hash); + savesess = pgrp->pg_session; SESS_LOCK(savesess); savesess->s_count--; SESS_UNLOCK(savesess); PGRP_UNLOCK(pgrp); + if (savesess->s_count == 
0) { mtx_destroy(&savesess->s_mtx); FREE(pgrp->pg_session, M_SESSION); + pgunlink(pgrp->pg_id); } - mtx_destroy(&pgrp->pg_mtx); - FREE(pgrp, M_PGRP); + else { + if (savesess->s_sid != pgrp->pg_id) + pgunlink(pgrp->pg_id); + } + + mtx_destroy(&pgrp->pg_mtx); + FREE(pgrp, M_PGRP); } static void @@ -497,6 +796,24 @@ pgadjustjobc(pgrp, entering) PGRP_UNLOCK(pgrp); } +/* + * Delete session - called from SESSRELE when s_count becomes zero. + */ +void +sessdelete(struct session *ss) +{ + /* + * We keep the pgrp with the same id as the session in + * order to stop a process being given the same pid. + * Since the pgrp holds a reference to the session, it + * must be a 'zombie' pgrp by now. + */ + + pgunlink(ss->s_sid); + + FREE(ss, M_SESSION); +} + /* * Adjust pgrp jobc counters when specified process changes process group. * We count the number of processes in each process group that "qualify" @@ -582,32 +899,45 @@ orphanpg(pg) #include "opt_ddb.h" #ifdef DDB #include - DB_SHOW_COMMAND(pgrpdump, pgrpdump) { - register struct pgrp *pgrp; - register struct proc *p; - register int i; - - for (i = 0; i <= pgrphash; i++) { - if (!LIST_EMPTY(&pgrphashtbl[i])) { - printf("\tindx %d\n", i); - LIST_FOREACH(pgrp, &pgrphashtbl[i], pg_hash) { - printf( - "\tpgrp %p, pgid %ld, sess %p, sesscnt %d, mem %p\n", - (void *)pgrp, (long)pgrp->pg_id, - (void *)pgrp->pg_session, - pgrp->pg_session->s_count, - (void *)LIST_FIRST(&pgrp->pg_members)); - LIST_FOREACH(p, &pgrp->pg_members, p_pglist) { - printf("\t\tpid %ld addr %p pgrp %p\n", - (long)p->p_pid, (void *)p, - (void *)p->p_pgrp); - } - } - } - } + struct pid_table *pt; + struct proc *p; + struct pgrp *pgrp; + int id; + + printf("pid table %p size %x, next %x, last %x\n", + pid_table, pid_tbl_mask+1, + next_free_pt, last_free_pt); + for (pt = pid_table, id = 0; id <= pid_tbl_mask; id++, pt++) { + p = pt->pt_proc; + if (!P_VALID(p) && !pt->pt_pgrp) + continue; + db_printf(" id %x: ", id); + if (P_VALID(p)) + db_printf("proc %p id %d 
(0x%x) %s\n", + p, p->p_pid, p->p_pid, p->p_comm); + else + db_printf("next %x use %x\n", + P_NEXT(p) & pid_tbl_mask, + P_NEXT(p) & ~pid_tbl_mask); + if ((pgrp = pt->pt_pgrp)) { + db_printf("\tsession %p, sid %d, count %d, login %s\n", + pgrp->pg_session, pgrp->pg_session->s_sid, + pgrp->pg_session->s_count, + pgrp->pg_session->s_login); + db_printf("\tpgrp %p, pg_id %d, pg_jobc %d, members %p\n", + pgrp, pgrp->pg_id, pgrp->pg_jobc, + pgrp->pg_members.lh_first); + for (p = pgrp->pg_members.lh_first; p != 0; + p = p->p_pglist.le_next) { + db_printf("\t\tpid %d addr %p pgrp %p %s\n", + p->p_pid, p, p->p_pgrp, p->p_comm); + } + } + } } + #endif /* DDB */ void fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp); @@ -821,21 +1151,21 @@ fill_kinfo_thread(struct thread *td, str } /* - * Locate a zombie process by number - */ + * * Locate a zombie by number + * */ struct proc * zpfind(pid_t pid) { struct proc *p; sx_slock(&allproc_lock); - LIST_FOREACH(p, &zombproc, p_list) - if (p->p_pid == pid) { - PROC_LOCK(p); - break; - } + p = pid_table[pid & pid_tbl_mask].pt_proc; + if (!P_VALID(p) || p->p_pid != pid || p->p_state != PRS_ZOMBIE) + p = NULL; + else + PROC_LOCK(p); sx_sunlock(&allproc_lock); - return (p); + return (p); } #define KERN_PROC_ZOMBMASK 0x3 @@ -1187,3 +1517,33 @@ SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, SYSCTL_NODE(_kern_proc, KERN_PROC_SV_NAME, sv_name, CTLFLAG_RD, sysctl_kern_proc_sv_name, "Process syscall vector name (ABI type)"); + +/* + * Random component to lastpid generation. We mix in a random factor to make + * it a little harder to predict. We sanity check the modulus value to avoid + * doing it in critical paths. Don't let it be too small or we pointlessly + * waste randomness entropy, and don't let it be impossibly large. Using a + * modulus that is too big causes a LOT more process table scans and slows + * down fork processing as the pidchecked caching is defeated. 
+ */ + +static int +sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) +{ + int error, pid; + + sysctl_wire_old_buffer(req, sizeof(int)); + pid = randompid; + error = sysctl_handle_int(oidp, &pid, 0, req); + if (error == 0 && req->newptr != NULL) { + if (pid < 0 || pid > 100) /* out of range */ + pid = 100; + else if (pid < 2) /* NOP */ + pid = 0; + randompid = pid; + } + return (error); +} + +SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW, + 0, 0, sysctl_kern_randompid, "I", "Random PID modulus"); Index: kern/kern_exit.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_exit.c,v retrieving revision 1.219 diff -u -p -r1.219 kern_exit.c --- kern/kern_exit.c 21 Jan 2004 17:10:47 -0000 1.219 +++ kern/kern_exit.c 26 Jan 2004 23:54:14 -0000 @@ -396,7 +396,6 @@ exit1(struct thread *td, int rv) sx_xlock(&allproc_lock); LIST_REMOVE(p, p_list); LIST_INSERT_HEAD(&zombproc, p, p_list); - LIST_REMOVE(p, p_hash); sx_xunlock(&allproc_lock); sx_xlock(&proctree_lock); @@ -702,10 +701,7 @@ loop: #endif KASSERT(FIRST_THREAD_IN_PROC(p), ("wait1: no residual thread!")); - uma_zfree(proc_zone, p); - sx_xlock(&allproc_lock); - nprocs--; - sx_xunlock(&allproc_lock); + proc_free(p); mtx_unlock(&Giant); return (0); } Index: kern/kern_fork.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_fork.c,v retrieving revision 1.212 diff -u -p -r1.212 kern_fork.c --- kern/kern_fork.c 25 Jan 2004 18:42:18 -0000 1.212 +++ kern/kern_fork.c 26 Jan 2004 23:55:22 -0000 @@ -73,7 +73,6 @@ __FBSDID("$FreeBSD: src/sys/kern/kern_fo #include #include #include -#include #include #include @@ -153,41 +152,6 @@ int lastpid = 0; SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0, "Last used PID"); -/* - * Random component to lastpid generation. We mix in a random factor to make - * it a little harder to predict. We sanity check the modulus value to avoid - * doing it in critical paths. 
Don't let it be too small or we pointlessly - * waste randomness entropy, and don't let it be impossibly large. Using a - * modulus that is too big causes a LOT more process table scans and slows - * down fork processing as the pidchecked caching is defeated. - */ -static int randompid = 0; - -static int -sysctl_kern_randompid(SYSCTL_HANDLER_ARGS) -{ - int error, pid; - - sysctl_wire_old_buffer(req, sizeof(int)); - sx_xlock(&allproc_lock); - pid = randompid; - error = sysctl_handle_int(oidp, &pid, 0, req); - if (error == 0 && req->newptr != NULL) { - if (pid < 0 || pid > PID_MAX - 100) /* out of range */ - pid = PID_MAX - 100; - else if (pid < 2) /* NOP */ - pid = 0; - else if (pid < 100) /* Make it reasonable */ - pid = 100; - randompid = pid; - } - sx_xunlock(&allproc_lock); - return (error); -} - -SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW, - 0, 0, sysctl_kern_randompid, "I", "Random PID modulus"); - int fork1(td, flags, pages, procp) struct thread *td; @@ -196,10 +160,9 @@ fork1(td, flags, pages, procp) struct proc **procp; { struct proc *p1, *p2, *pptr; - uid_t uid; + uid_t uid = td->td_ucred->cr_ruid; struct proc *newproc; - int ok, trypid; - static int curfail, pidchecked = 0; + static int curfail; static struct timeval lastfail; struct filedesc *fd; struct filedesc_to_leader *fdtol; @@ -283,124 +246,21 @@ fork1(td, flags, pages, procp) } /* Allocate new proc. */ - newproc = uma_zalloc(proc_zone, M_WAITOK); -#ifdef MAC - mac_init_proc(newproc); -#endif - - /* - * Although process entries are dynamically created, we still keep - * a global limit on the maximum number we will create. Don't allow - * a nonprivileged user to use the last ten processes; don't let root - * exceed the limit. The variable nprocs is the current number of - * processes, maxproc is the limit. 
- */ - sx_xlock(&allproc_lock); - uid = td->td_ucred->cr_ruid; - if ((nprocs >= maxproc - 10 && uid != 0) || nprocs >= maxproc) { - error = EAGAIN; - goto fail; - } - - /* - * Increment the count of procs running with this uid. Don't allow - * a nonprivileged user to exceed their current limit. - */ - PROC_LOCK(p1); - ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, - (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0); - PROC_UNLOCK(p1); - if (!ok) { + newproc = proc_alloc(td, flags); + if (!newproc) { error = EAGAIN; goto fail; } +#ifdef MAC + mac_init_proc(newproc); +#endif - /* - * Increment the nprocs resource before blocking can occur. There - * are hard-limits as to the number of processes that can run. - */ - nprocs++; - - /* - * Find an unused process ID. We remember a range of unused IDs - * ready to use (from lastpid+1 through pidchecked-1). - * - * If RFHIGHPID is set (used during system boot), do not allocate - * low-numbered pids. - */ - trypid = lastpid + 1; - if (flags & RFHIGHPID) { - if (trypid < 10) - trypid = 10; - } else { - if (randompid) - trypid += arc4random() % randompid; - } -retry: - /* - * If the process ID prototype has wrapped around, - * restart somewhat above 0, as the low-numbered procs - * tend to include daemons that don't exit. - */ - if (trypid >= PID_MAX) { - trypid = trypid % PID_MAX; - if (trypid < 100) - trypid += 100; - pidchecked = 0; - } - if (trypid >= pidchecked) { - int doingzomb = 0; - - pidchecked = PID_MAX; - /* - * Scan the active and zombie procs to check whether this pid - * is in use. Remember the lowest pid that's greater - * than trypid, so we can avoid checking for a while. 
- */ - p2 = LIST_FIRST(&allproc); -again: - for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) { - PROC_LOCK(p2); - while (p2->p_pid == trypid || - p2->p_pgrp->pg_id == trypid || - p2->p_session->s_sid == trypid) { - trypid++; - if (trypid >= pidchecked) { - PROC_UNLOCK(p2); - goto retry; - } - } - if (p2->p_pid > trypid && pidchecked > p2->p_pid) - pidchecked = p2->p_pid; - if (p2->p_pgrp->pg_id > trypid && - pidchecked > p2->p_pgrp->pg_id) - pidchecked = p2->p_pgrp->pg_id; - if (p2->p_session->s_sid > trypid && - pidchecked > p2->p_session->s_sid) - pidchecked = p2->p_session->s_sid; - PROC_UNLOCK(p2); - } - if (!doingzomb) { - doingzomb = 1; - p2 = LIST_FIRST(&zombproc); - goto again; - } - } - + p2 = newproc; /* * RFHIGHPID does not mess with the lastpid counter during boot. */ - if (flags & RFHIGHPID) - pidchecked = 0; - else - lastpid = trypid; - - p2 = newproc; - p2->p_state = PRS_NEW; /* protect against others */ - p2->p_pid = trypid; - LIST_INSERT_HEAD(&allproc, p2, p_list); - LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash); - sx_xunlock(&allproc_lock); + if (!(flags & RFHIGHPID)) + lastpid = p2->p_pid; /* * Malloc things while we don't hold any locks. @@ -736,11 +596,9 @@ fail: if (ppsratecheck(&lastfail, &curfail, 1)) printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n", uid); - sx_xunlock(&allproc_lock); #ifdef MAC mac_destroy_proc(newproc); #endif - uma_zfree(proc_zone, newproc); if (p1->p_flag & P_SA) { PROC_LOCK(p1); thread_single_end(); Index: kern/init_main.c =================================================================== RCS file: /home/ncvs/src/sys/kern/init_main.c,v retrieving revision 1.240 diff -u -p -r1.240 init_main.c --- kern/init_main.c 16 Jan 2004 20:29:23 -0000 1.240 +++ kern/init_main.c 26 Jan 2004 23:54:14 -0000 @@ -87,7 +87,7 @@ void mi_startup(void); /* Should be e /* Components of the first process -- never freed. 
*/ static struct session session0; -static struct pgrp pgrp0; +struct pgrp pgrp0; struct proc proc0; struct thread thread0; struct kse kse0; @@ -346,10 +346,8 @@ proc0_init(void *dummy __unused) * Create process 0 (the swapper). */ LIST_INSERT_HEAD(&allproc, p, p_list); - LIST_INSERT_HEAD(PIDHASH(0), p, p_hash); mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK); p->p_pgrp = &pgrp0; - LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist); Index: sys/proc.h =================================================================== RCS file: /home/ncvs/src/sys/sys/proc.h,v retrieving revision 1.365 diff -u -p -r1.365 proc.h --- sys/proc.h 25 Jan 2004 03:54:51 -0000 1.365 +++ sys/proc.h 26 Jan 2004 23:55:54 -0000 @@ -92,7 +92,6 @@ struct session { * (c) const until freeing */ struct pgrp { - LIST_ENTRY(pgrp) pg_hash; /* (e) Hash chain. */ LIST_HEAD(, proc) pg_members; /* (m + e) Pointer to pgrp members. */ struct session *pg_session; /* (c) Pointer to session. */ struct sigiolst pg_sigiolst; /* (m) List of sigio sources. */ @@ -715,7 +714,7 @@ MALLOC_DECLARE(M_ZOMBIE); #define SESSHOLD(s) ((s)->s_count++) #define SESSRELE(s) { \ if (--(s)->s_count == 0) \ - FREE(s, M_SESSION); \ + sessdelete(s); \ } #define STOPEVENT(p, e, v) do { \ @@ -787,18 +786,11 @@ MALLOC_DECLARE(M_ZOMBIE); #define PARGS_LOCK(p) mtx_lock(&pargs_ref_lock) #define PARGS_UNLOCK(p) mtx_unlock(&pargs_ref_lock) -#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) -extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; -extern u_long pidhash; - -#define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) -extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; -extern u_long pgrphash; - extern struct sx allproc_lock; extern struct sx proctree_lock; extern struct mtx pargs_ref_lock; extern struct mtx ppeers_lock; +extern struct pgrp pgrp0; /* Process group for swapper. */ extern struct proc proc0; /* Process slot for swapper. 
*/ extern struct thread thread0; /* Primary thread in proc0. */ extern struct ksegrp ksegrp0; /* Primary ksegrp in proc0. */ @@ -825,7 +817,8 @@ extern int lastpid; struct proc *pfind(pid_t); /* Find process by id. */ struct pgrp *pgfind(pid_t); /* Find process group by id. */ -struct proc *zpfind(pid_t); /* Find zombie process by id. */ +struct proc *zpfind(pid_t); /* Find zombie process by id. */ + void adjustrunqueue(struct thread *, int newpri); void ast(struct trapframe *framep); @@ -841,6 +834,9 @@ void fork_exit(void (*)(void *, struct t void fork_return(struct thread *, struct trapframe *); int inferior(struct proc *p); int leavepgrp(struct proc *p); +void sessdelete(struct session *); +struct proc *proc_alloc(struct thread *td, int flags); +void proc_free(struct proc *p); void mi_switch(int flags); /* Flags for mi_switch(). */ #define SW_VOL 0x0001 /* Voluntary switch. */