Index: sys/vm/vm_glue.c
===================================================================
--- sys/vm/vm_glue.c	(revision 263931)
+++ sys/vm/vm_glue.c	(working copy)
@@ -777,6 +777,57 @@ kick_proc0(void)
 	wakeup(&proc0);
 }
 
+/*
+ * Allocate one wired, unmanaged page and map it at *kva (kernel, r/w via
+ * pmap_qenter) and *uva (userland, read-only) in the given map.
+ * Returns 0 on success, 1 on failure.
+ */
+int
+vm_create_shchan(vm_map_t map, vm_offset_t *kva, vm_offset_t *uva)
+{
+	vm_offset_t local_kva, local_uva;
+	vm_page_t m;
+
+	local_kva = kva_alloc(PAGE_SIZE);
+	if (local_kva == 0)
+		return (1);
+	local_uva = vm_map_min(map);
+	if (vm_map_find(map, NULL, 0, &local_uva, PAGE_SIZE, 0,
+	    VMFS_NO_SPACE, VM_PROT_READ, VM_PROT_READ, 0) != KERN_SUCCESS) {
+		kva_free(local_kva, PAGE_SIZE);
+		return (1);
+	}
+
+	do {
+		m = vm_page_alloc(NULL, 0, VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+		if (m == NULL)
+			VM_WAIT;
+	} while (m == NULL);
+
+	pmap_qenter(local_kva, &m, 1);
+	pmap_enter(vm_map_pmap(map), local_uva, VM_PROT_READ, m, VM_PROT_READ,
+	    TRUE);
+
+	*kva = local_kva;
+	*uva = local_uva;
+	return (0);
+}
+
+/*
+ * Tear down both mappings created by vm_create_shchan() and free the
+ * backing page and the kernel VA range.
+ */
+void
+vm_destroy_shchan(vm_map_t map, vm_offset_t kva, vm_offset_t uva)
+{
+	vm_page_t m;
+
+	m = PHYS_TO_VM_PAGE(vtophys(kva));
+
+	pmap_remove(vm_map_pmap(map), uva, uva + PAGE_SIZE);
+	pmap_qremove(kva, 1);
+
+	vm_page_lock(m);
+	vm_page_unwire(m, 0);
+	vm_page_free(m);
+	vm_page_unlock(m);
+
+	if (vm_map_remove(map, uva, uva + PAGE_SIZE) != KERN_SUCCESS)
+		panic("vm_destroy_shchan: invalid return value");
+	kva_free(kva, PAGE_SIZE);
+}
+
 #ifndef NO_SWAPPING
 /*
Index: sys/vm/vm_extern.h
===================================================================
--- sys/vm/vm_extern.h	(revision 263931)
+++ sys/vm/vm_extern.h	(working copy)
@@ -72,6 +72,8 @@ void kmeminit(void);
 void swapout_procs(int);
 int kernacc(void *, int, int);
 int useracc(void *, int, int);
+int vm_create_shchan(vm_map_t, vm_offset_t *, vm_offset_t *);
+void vm_destroy_shchan(vm_map_t, vm_offset_t, vm_offset_t);
 int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
 void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t,
     vm_ooffset_t *);
Index: sys/sys/thr.h
===================================================================
--- sys/sys/thr.h	(revision 263931)
+++ sys/sys/thr.h	(working copy)
@@ -55,6 +55,8 @@ struct thr_param {
 	long	*parent_tid;		/* parent accesses the new TID here. */
 	int	flags;			/* thread flags. */
 	struct rtprio	*rtp;		/* Real-time scheduling priority */
+	enum state_thread **child_chan;	/* out: child's shared state slot */
+	enum state_thread **parent_chan; /* out: parent's shared state slot */
 	void	*spare[3];		/* TODO: cpu affinity mask etc. */
 };
Index: sys/sys/proc.h
===================================================================
--- sys/sys/proc.h	(revision 263931)
+++ sys/sys/proc.h	(working copy)
@@ -197,6 +197,28 @@ struct rusage_ext {
 };
 
 /*
+ * Thread run-state values, also exported read-only to userland through
+ * the shared channel pages.  TDS_INVALID marks a free channel slot.
+ */
+enum state_thread {
+	TDS_INACTIVE = 0x0,
+	TDS_INHIBITED,
+	TDS_CAN_RUN,
+	TDS_RUNQ,
+	TDS_RUNNING,
+	TDS_INVALID
+};
+
+/*
+ * One page of per-thread state slots, double-mapped: sh_kern is the
+ * kernel (writable) mapping, sh_user the userland (read-only) mapping.
+ */
+struct shchan {
+	SLIST_ENTRY(shchan) sh_iter;
+	enum state_thread *sh_kern;
+	enum state_thread *sh_user;
+	u_int	sh_free_slots;
+};
+
+/*
  * Kernel runnable context (thread).
  * This is what is put to sleep and reactivated.
  * Thread context.  Processes may have multiple threads.
@@ -275,6 +297,7 @@ struct thread {
 	u_int		td_vp_reserv;	/* (k) Count of reserved vnodes. */
 	int		td_no_sleeping;	/* (k) Sleeping disabled count. */
 	int		td_dom_rr_idx;	/* (k) RR Numa domain selection. */
+	enum state_thread *td_sh_state;	/* (t) Shared channel thread state */
 #define	td_endzero td_sigmask
 
 /* Copied during fork1() or create_thread(). */
@@ -293,13 +316,8 @@ struct thread {
 	 * or already have been set in the allocator, constructor, etc.
 	 */
 	struct pcb	*td_pcb;	/* (k) Kernel VA of pcb and kstack. */
-	enum {
-		TDS_INACTIVE = 0x0,
-		TDS_INHIBITED,
-		TDS_CAN_RUN,
-		TDS_RUNQ,
-		TDS_RUNNING
-	} td_state;			/* (t) thread state */
+	struct shchan	*td_sh_chan;	/* (t) Channel owning td_sh_state */
+	enum state_thread td_state;	/* (t) thread state */
 	union {
 		register_t	tdu_retval[2];
 		off_t		tdu_off;
@@ -457,12 +475,17 @@ do {									\
 #define	TD_SET_INHIB(td, inhib) do {			\
 	(td)->td_state = TDS_INHIBITED;			\
 	(td)->td_inhibitors |= (inhib);			\
+	if ((td)->td_sh_state != NULL)			\
+		*(td)->td_sh_state = TDS_INHIBITED;	\
 } while (0)
 
-#define	TD_CLR_INHIB(td, inhib) do {			\
-	if (((td)->td_inhibitors & (inhib)) &&		\
-	    (((td)->td_inhibitors &= ~(inhib)) == 0))	\
-		(td)->td_state = TDS_CAN_RUN;		\
+#define	TD_CLR_INHIB(td, inhib) do {			\
+	if (((td)->td_inhibitors & (inhib)) &&		\
+	    (((td)->td_inhibitors &= ~(inhib)) == 0)) {	\
+		(td)->td_state = TDS_CAN_RUN;		\
+		if ((td)->td_sh_state != NULL)		\
+			*(td)->td_sh_state = TDS_CAN_RUN; \
+	}						\
 } while (0)
 
 #define	TD_SET_SLEEPING(td)	TD_SET_INHIB((td), TDI_SLEEPING)
@@ -478,9 +501,21 @@ do {									\
 #define	TD_CLR_SUSPENDED(td)	TD_CLR_INHIB((td), TDI_SUSPENDED)
 #define	TD_CLR_IWAIT(td)	TD_CLR_INHIB((td), TDI_IWAIT)
 
-#define	TD_SET_RUNNING(td)	(td)->td_state = TDS_RUNNING
-#define	TD_SET_RUNQ(td)		(td)->td_state = TDS_RUNQ
-#define	TD_SET_CAN_RUN(td)	(td)->td_state = TDS_CAN_RUN
+#define	TD_SET_RUNNING(td) do {				\
+	(td)->td_state = TDS_RUNNING;			\
+	if ((td)->td_sh_state != NULL)			\
+		*(td)->td_sh_state = TDS_RUNNING;	\
+} while (0)
+#define	TD_SET_RUNQ(td) do {				\
+	(td)->td_state = TDS_RUNQ;			\
+	if ((td)->td_sh_state != NULL)			\
+		*(td)->td_sh_state = TDS_RUNQ;		\
+} while (0)
+#define	TD_SET_CAN_RUN(td) do {				\
+	(td)->td_state = TDS_CAN_RUN;			\
+	if ((td)->td_sh_state != NULL)			\
+		*(td)->td_sh_state = TDS_CAN_RUN;	\
+} while (0)
 
 /*
  * Process structure.
@@ -488,6 +523,7 @@ do {									\
 struct proc {
 	LIST_ENTRY(proc) p_list;	/* (d) List of all processes. */
 	TAILQ_HEAD(, thread) p_threads;	/* (c) all threads. */
+	SLIST_HEAD(, shchan) p_shchans;	/* Shared state channel pages. */
 	struct mtx p_slock;		/* process spin lock */
 	struct ucred *p_ucred;		/* (c) Process owner's identity. */
 	struct filedesc *p_fd;		/* (b) Open files. */
@@ -823,6 +859,7 @@ extern int lastpid;
 extern int nprocs, maxproc;	/* Current and max number of procs. */
 extern int maxprocperuid;	/* Max procs per uid. */
 extern u_long ps_arg_cache_limit;
+extern u_long maxshchansperproc; /* Max shared channel size per proc. */
 
 LIST_HEAD(proclist, proc);
 TAILQ_HEAD(procqueue, proc);
@@ -884,6 +921,11 @@ int	proc_getargv(struct thread *td, struct proc *p
 int	proc_getauxv(struct thread *td, struct proc *p, struct sbuf *sb);
 int	proc_getenvv(struct thread *td, struct proc *p, struct sbuf *sb);
 void	procinit(void);
+int	proc_alloc_shchan(struct proc *p, struct shchan **retch,
+	    enum state_thread **kent, enum state_thread **uent);
+void	proc_reclaim_shchans(struct proc *p);
+void	proc_free_shchan(struct proc *p, struct shchan *chan,
+	    enum state_thread *kent);
 void	proc_linkup0(struct proc *p, struct thread *td);
 void	proc_linkup(struct proc *p, struct thread *td);
 void	proc_reap(struct thread *td, struct proc *p, int *status, int options);
Index: sys/sys/resource.h
===================================================================
--- sys/sys/resource.h	(revision 263931)
+++ sys/sys/resource.h	(working copy)
@@ -104,8 +104,9 @@ struct __wrusage {
 #define	RLIMIT_NPTS	11		/* pseudo-terminals */
 #define	RLIMIT_SWAP	12		/* swap used */
 #define	RLIMIT_KQUEUES	13		/* kqueues allocated */
+#define	RLIMIT_SHCHANS	14		/* per-proc shared chans size */
 
-#define	RLIM_NLIMITS	14		/* number of resource limits */
+#define	RLIM_NLIMITS	15		/* number of resource limits */
 
 #define	RLIM_INFINITY	((rlim_t)(((uint64_t)1 << 63) - 1))
 /* XXX Missing: RLIM_SAVED_MAX, RLIM_SAVED_CUR */
Index: sys/conf/options
===================================================================
--- sys/conf/options	(revision 263931)
+++ sys/conf/options	(working copy)
@@ -591,6 +591,7 @@ VM_KMEM_SIZE_SCALE	opt_vm.h
 VM_KMEM_SIZE_MAX	opt_vm.h
 VM_NRESERVLEVEL		opt_vm.h
 VM_LEVEL_0_ORDER	opt_vm.h
+VM_SHARED_CHANS		opt_vm.h
 NO_SWAPPING		opt_vm.h
 MALLOC_MAKE_FAILURES	opt_vm.h
 MALLOC_PROFILE		opt_vm.h
Index: sys/kern/kern_thread.c
===================================================================
--- sys/kern/kern_thread.c	(revision 263931)
+++ sys/kern/kern_thread.c	(working copy)
@@ -58,12 +58,17 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
 #include <vm/uma.h>
 #include <sys/eventhandler.h>
+
+#define	PAGE_NUM_THRSTATE	(PAGE_SIZE / sizeof (enum state_thread))
+
 SDT_PROVIDER_DECLARE(proc);
 SDT_PROBE_DEFINE(proc, , , lwp__exit);
+static MALLOC_DEFINE(M_SHCHAN, "shchan", "shared channels");
 
 /*
  * thread related storage.
@@ -242,6 +247,7 @@ void
 proc_linkup0(struct proc *p, struct thread *td)
 {
 	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
+	SLIST_INIT(&p->p_shchans);
 	proc_linkup(p, td);
 }
 
@@ -261,6 +267,128 @@ proc_linkup(struct proc *p, struct thread *td)
 }
 
 /*
+ * Find (or create) a shared channel page with a free slot, claim one
+ * slot and return the channel plus the kernel and user addresses of
+ * the slot.  Returns 0 on success, ENOMEM on failure.
+ */
+int
+proc_alloc_shchan(struct proc *p, struct shchan **retch,
+    enum state_thread **kent, enum state_thread **uent)
+{
+	rlim_t maxshchansize;
+	enum state_thread *newkva, *newuva;
+	struct shchan *chan, *newchan;
+	u_int i, npages;
+
+	npages = 0;
+	PROC_LOCK(p);
+	SLIST_FOREACH(chan, &p->p_shchans, sh_iter) {
+		npages++;
+		if (chan->sh_free_slots != 0)
+			break;
+	}
+	if (chan == NULL) {
+		maxshchansize = lim_cur(p, RLIMIT_SHCHANS);
+		PROC_UNLOCK(p);
+		if ((npages + 1) * PAGE_SIZE > maxshchansize)
+			return (ENOMEM);
+		newchan = NULL;
+
+		if (vm_create_shchan(&p->p_vmspace->vm_map,
+		    (vm_offset_t *)&newkva, (vm_offset_t *)&newuva) != 0)
+			return (ENOMEM);
+		newchan = malloc(sizeof(*newchan), M_SHCHAN, M_WAITOK);
+		newchan->sh_kern = newkva;
+		newchan->sh_user = newuva;
+		newchan->sh_free_slots = PAGE_NUM_THRSTATE;
+		for (i = 0; i < PAGE_NUM_THRSTATE; i++)
+			newkva[i] = TDS_INVALID;
+
+		PROC_LOCK(p);
+		SLIST_FOREACH(chan, &p->p_shchans, sh_iter)
+			if (chan->sh_free_slots != 0)
+				break;
+		if (chan != NULL) {
+			/*
+			 * New space has been made available while allocating
+			 * the new shared channel page.
+			 * Free the newly created page and reclaim the
+			 * just freed slot.
+			 */
+			vm_destroy_shchan(&p->p_vmspace->vm_map,
+			    (vm_offset_t)newchan->sh_kern,
+			    (vm_offset_t)newchan->sh_user);
+			free(newchan, M_SHCHAN);
+		} else {
+			SLIST_INSERT_HEAD(&p->p_shchans, newchan, sh_iter);
+			chan = newchan;
+		}
+	}
+	KASSERT(chan != NULL && chan->sh_free_slots != 0,
+	    ("proc_alloc_shchan: invalid NULL shared channel"));
+
+	for (i = 0; i < PAGE_NUM_THRSTATE; i++) {
+		if (chan->sh_kern[i] > TDS_INVALID)
+			panic("proc_alloc_shchan: invalid page %p content %p",
+			    chan->sh_kern, &chan->sh_kern[i]);
+		if (chan->sh_kern[i] == TDS_INVALID)
+			break;
+	}
+	if (i == PAGE_NUM_THRSTATE)
+		panic("proc_alloc_shchan: no valid state found");
+
+	/* Use the same value as thread_ctor(). */
+	chan->sh_kern[i] = TDS_INACTIVE;
+	chan->sh_free_slots--;
+	PROC_UNLOCK(p);
+
+	*retch = chan;
+	*kent = chan->sh_kern + i;
+	*uent = chan->sh_user + i;
+	return (0);
+}
+
+/*
+ * Destroy every shared channel of the process.  Called when the
+ * process is known to be (or is becoming) single-threaded.
+ */
+void
+proc_reclaim_shchans(struct proc *p)
+{
+	struct shchan *tmpchan;
+
+	PROC_LOCK_ASSERT(p, MA_OWNED);
+
+	while (!SLIST_EMPTY(&p->p_shchans)) {
+		tmpchan = SLIST_FIRST(&p->p_shchans);
+		SLIST_REMOVE_HEAD(&p->p_shchans, sh_iter);
+		vm_destroy_shchan(&p->p_vmspace->vm_map,
+		    (vm_offset_t)tmpchan->sh_kern,
+		    (vm_offset_t)tmpchan->sh_user);
+		free(tmpchan, M_SHCHAN);
+	}
+}
+
+/*
+ * Release one slot of a shared channel; destroy the channel once all
+ * of its slots are free again.
+ */
+void
+proc_free_shchan(struct proc *p, struct shchan *chan, enum state_thread *kent)
+{
+	u_int i;
+
+	PROC_LOCK_ASSERT(p, MA_OWNED);
+
+	/* Pointer subtraction already yields an element index. */
+	i = kent - chan->sh_kern;
+	KASSERT(kent == &chan->sh_kern[i] && chan->sh_kern[i] != TDS_INVALID,
+	    ("proc_free_shchan: invalid index retrieval %u", i));
+
+	chan->sh_kern[i] = TDS_INVALID;
+	chan->sh_free_slots++;
+	if (chan->sh_free_slots < PAGE_NUM_THRSTATE)
+		return;
+	KASSERT(chan->sh_free_slots == PAGE_NUM_THRSTATE,
+	    ("proc_free_shchan: invalid number of free slots"));
+
+	SLIST_REMOVE(&p->p_shchans, chan, shchan, sh_iter);
+
+	vm_destroy_shchan(&p->p_vmspace->vm_map, (vm_offset_t)chan->sh_kern,
+	    (vm_offset_t)chan->sh_user);
+	free(chan, M_SHCHAN);
+}
+
+/*
  * Initialize global thread allocation resources.
  */
 void
@@ -722,8 +850,15 @@ stopme:
 		p->p_singlethread = NULL;
 		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
 		thread_unthread(td);
-	}
-	PROC_SUNLOCK(p);
+		PROC_SUNLOCK(p);
+
+		/*
+		 * As long as the process is single-threaded, reclaim
+		 * all the available shared channels.
+		 */
+		proc_reclaim_shchans(p);
+	} else
+		PROC_SUNLOCK(p);
 	return (0);
 }
 
@@ -810,6 +945,12 @@ thread_suspend_check(int return_instead)
 	 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
 	 */
 	if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
+		/*
+		 * The shared channels should be torn down now.
+		 * However it is responsibility of the thread
+		 * requesting single-threading to do so when it is
+		 * actually safe.
+		 */
 		PROC_UNLOCK(p);
 		tidhash_remove(td);
 		PROC_LOCK(p);
Index: sys/kern/kern_thr.c
===================================================================
--- sys/kern/kern_thr.c	(revision 263931)
+++ sys/kern/kern_thr.c	(working copy)
@@ -92,7 +92,9 @@ static int create_thread(struct thread *td, mconte
 			char *stack_base, size_t stack_size,
 			char *tls_base,
 			long *child_tid, long *parent_tid,
-			int flags, struct rtprio *rtp);
+			int flags, struct rtprio *rtp,
+			enum state_thread **child_chan,
+			enum state_thread **parent_chan);
 
 /*
  * System call interface.
@@ -108,7 +110,7 @@ sys_thr_create(struct thread *td, struct thr_creat
 		return (error);
 
 	error = create_thread(td, &ctx.uc_mcontext, NULL, NULL,
-	    NULL, 0, NULL, uap->id, NULL, uap->flags, NULL);
+	    NULL, 0, NULL, uap->id, NULL, uap->flags, NULL, NULL, NULL);
 	return (error);
 }
 
@@ -143,7 +145,7 @@ kern_thr_new(struct thread *td, struct thr_param *
 	error = create_thread(td, NULL, param->start_func, param->arg,
 		param->stack_base, param->stack_size, param->tls_base,
 		param->child_tid, param->parent_tid, param->flags,
-		rtpp);
+		rtpp, param->child_chan, param->parent_chan);
 	return (error);
 }
 
@@ -153,12 +155,15 @@ create_thread(struct thread *td, mcontext_t *ctx,
 	    char *stack_base, size_t stack_size,
 	    char *tls_base,
 	    long *child_tid, long *parent_tid,
-	    int flags, struct rtprio *rtp)
+	    int flags, struct rtprio *rtp, enum state_thread **child_chan,
+	    enum state_thread **parent_chan)
 {
 	stack_t stack;
 	struct thread *newtd;
+	struct shchan *local_shchan;
+	enum state_thread *kern_shchanp, *user_shchanp;
 	struct proc *p;
-	int error;
+	int error, numthreads, ret_pchan;
 
 	p = td->td_proc;
 
@@ -250,7 +255,59 @@ create_thread(struct thread *td, mcontext_t *ctx,
 		}
 	}
 
+	if (child_chan != NULL && proc_alloc_shchan(p, &local_shchan,
+	    &kern_shchanp, &user_shchanp) == 0) {
+		/* Lockless, the thread is not linked anywhere. */
+		newtd->td_sh_state = kern_shchanp;
+		newtd->td_sh_chan = local_shchan;
+		/* copyout(9) copies kernel source to user destination. */
+		if (copyout(&user_shchanp, child_chan,
+		    sizeof(enum state_thread *)) != 0) {
+			PROC_LOCK(p);
+			proc_free_shchan(p, local_shchan, kern_shchanp);
+			PROC_UNLOCK(p);
+			newtd->td_sh_state = NULL;
+			newtd->td_sh_chan = NULL;
+		}
+	}
+
+	/*
+	 * If there is just one single thread it means that no other
+	 * threads can be added in the meanwhile, as curthread is dealing
+	 * with current thr_new().
+	 * There is no race, then, about allocating also a shared channel
+	 * for the single curthread.
+	 * It is only important to care about the race where a
+	 * multi-threaded process is made single-thread while PROC_LOCK()
+	 * is dropped.
+	 */
+	ret_pchan = ENOMEM;
 	PROC_LOCK(td->td_proc);
+	do {
+		numthreads = td->td_proc->p_numthreads;
+		PROC_UNLOCK(td->td_proc);
+
+		if (parent_chan != NULL && numthreads == 1) {
+			ret_pchan = proc_alloc_shchan(p, &local_shchan,
+			    &kern_shchanp, &user_shchanp);
+			if (ret_pchan == 0) {
+				/*
+				 * Lock for consistency as, right now,
+				 * the process is still single-threaded
+				 * and the only thread is executing
+				 * sys_thr_new().
+				 */
+				thread_lock(td);
+				td->td_sh_state = kern_shchanp;
+				td->td_sh_chan = local_shchan;
+				thread_unlock(td);
+			}
+		}
+
+		PROC_LOCK(td->td_proc);
+		if (numthreads == 1 && td->td_proc->p_numthreads > 1)
+			panic("sys_thr_new: unexpected threading of curproc");
+	} while (numthreads > 1 && td->td_proc->p_numthreads == 1);
+
 	td->td_proc->p_flag |= P_HADTHREADS;
 	thread_link(newtd, p);
 	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
@@ -262,6 +319,20 @@ create_thread(struct thread *td, mcontext_t *ctx,
 	newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
 	PROC_UNLOCK(p);
 
+	/* copyout(9): kernel source first, user destination second. */
+	if (parent_chan != NULL && ret_pchan == 0 && copyout(&user_shchanp,
+	    parent_chan, sizeof(enum state_thread *)) != 0) {
+		/* See locking comment above. */
+		thread_lock(td);
+		kern_shchanp = td->td_sh_state;
+		local_shchan = td->td_sh_chan;
+		td->td_sh_state = NULL;
+		td->td_sh_chan = NULL;
+		thread_unlock(td);
+		PROC_LOCK(p);
+		proc_free_shchan(p, local_shchan, kern_shchanp);
+		PROC_UNLOCK(p);
+	}
+
 	tidhash_add(newtd);
 
 	thread_lock(newtd);
@@ -304,6 +375,9 @@ sys_thr_exit(struct thread *td, struct thr_exit_ar
     /* long *state */
 {
 	struct proc *p;
+	struct thread *td2;
+	struct shchan *tmpchan;
+	enum state_thread *tmpstate;
 
 	p = td->td_proc;
 
@@ -326,11 +400,70 @@ sys_thr_exit(struct thread *td, struct thr_exit_ar
 		LIST_REMOVE(td, td_hash);
 		rw_wunlock(&tidhash_lock);
 		tdsigcleanup(td);
+		if (p->p_numthreads == 2) {
+			if (!SLIST_EMPTY(&p->p_shchans)) {
+				/*
+				 * After this syscall the thread is going to be
+				 * single-threaded.  Reclaim all the allocated
+				 * shared channels.
+				 */
+				td2 = TAILQ_FIRST(&p->p_threads);
+				if (td2 == td)
+					td2 = TAILQ_NEXT(td2, td_plist);
+				thread_lock(td);
+				td->td_sh_state = NULL;
+				td->td_sh_chan = NULL;
+				thread_unlock(td);
+				thread_lock(td2);
+				td2->td_sh_state = NULL;
+				td2->td_sh_chan = NULL;
+				thread_unlock(td2);
+				proc_reclaim_shchans(p);
+			} else {
+#ifdef INVARIANTS
+				td2 = TAILQ_FIRST(&p->p_threads);
+				if (td2 == td)
+					td2 = TAILQ_NEXT(td2, td_plist);
+
+				/* See comment below for locking. */
+				thread_lock(td);
+				KASSERT(td->td_sh_state == NULL &&
+				    td->td_sh_chan == NULL,
+				    ("sys_thr_exit: unexpected td_sh_chan"));
+				thread_unlock(td);
+				thread_lock(td2);
+				KASSERT(td2->td_sh_state == NULL &&
+				    td2->td_sh_chan == NULL,
+				    ("sys_thr_exit: unexpected td_sh_chan"));
+				thread_unlock(td2);
+#endif
+			}
+		} else {
+			/*
+			 * Lock held for consistency, should be not necessary
+			 * as it cannot transition from not-present to
+			 * present.
+			 */
+			thread_lock(td);
+			if (td->td_sh_state != NULL) {
+				KASSERT(td->td_sh_chan != NULL,
+				    ("sys_thr_exit: invalid td_sh_chan"));
+				tmpchan = td->td_sh_chan;
+				tmpstate = td->td_sh_state;
+				td->td_sh_state = NULL;
+				td->td_sh_chan = NULL;
+				thread_unlock(td);
+				proc_free_shchan(p, tmpchan, tmpstate);
+			} else
+				thread_unlock(td);
+		}
 		PROC_SLOCK(p);
 		thread_stopped(p);
 		thread_exit();
 		/* NOTREACHED */
 	}
+	KASSERT(SLIST_EMPTY(&p->p_shchans),
+	    ("sys_thr_exit: shared channels present with single thread"));
 	PROC_UNLOCK(p);
 	rw_wunlock(&tidhash_lock);
 	return (0);
Index: sys/kern/init_main.c
===================================================================
--- sys/kern/init_main.c	(revision 263931)
+++ sys/kern/init_main.c	(working copy)
@@ -549,6 +549,8 @@ proc0_init(void *dummy __unused)
 	p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = pageablemem;
 	p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = pageablemem / 3;
 	p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_max = pageablemem;
+	p->p_limit->pl_rlimit[RLIMIT_SHCHANS].rlim_cur =
+	    p->p_limit->pl_rlimit[RLIMIT_SHCHANS].rlim_max = maxshchansperproc;
 	p->p_cpulimit = RLIM_INFINITY;
 
 	/* Initialize resource accounting structures. */
Index: sys/kern/kern_proc.c
===================================================================
--- sys/kern/kern_proc.c	(revision 263931)
+++ sys/kern/kern_proc.c	(working copy)
@@ -227,6 +227,7 @@ proc_init(void *mem, int size, int flags)
 	cv_init(&p->p_pwait, "ppwait");
 	cv_init(&p->p_dbgwait, "dbgwait");
 	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
+	SLIST_INIT(&p->p_shchans);
 	EVENTHANDLER_INVOKE(process_init, p);
 	p->p_stats = pstats_alloc();
 	SDT_PROBE(proc, kernel, init, return, p, size, flags, 0, 0);
Index: sys/kern/kern_exit.c
===================================================================
--- sys/kern/kern_exit.c	(revision 263931)
+++ sys/kern/kern_exit.c	(working copy)
@@ -855,6 +855,8 @@ proc_reap(struct thread *td, struct proc *p, int *
 #endif
 	KASSERT(FIRST_THREAD_IN_PROC(p),
 	    ("proc_reap: no residual thread!"));
+	KASSERT(SLIST_EMPTY(&p->p_shchans),
+	    ("proc_reap: shared channels present when destroying proc"));
 	uma_zfree(proc_zone, p);
 	sx_xlock(&allproc_lock);
 	nprocs--;
Index: sys/kern/subr_param.c
===================================================================
--- sys/kern/subr_param.c	(revision 263931)
+++ sys/kern/subr_param.c	(working copy)
@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
 #include "opt_param.h"
 #include "opt_msgbuf.h"
 #include "opt_maxusers.h"
+#include "opt_vm.h"
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -78,6 +79,7 @@ __FBSDID("$FreeBSD$");
 #ifndef MAXFILES
 #define	MAXFILES (maxproc * 2)
 #endif
+#define	MAXSHCHANSPERPROC	(100 * PAGE_SIZE)
 
 static int sysctl_kern_vm_guest(SYSCTL_HANDLER_ARGS);
 
@@ -106,6 +108,7 @@ u_long maxdsiz;			/* max data size */
 u_long	dflssiz;		/* initial stack size limit */
 u_long	maxssiz;		/* max stack size */
 u_long	sgrowsiz;		/* amount to grow stack */
+u_long	maxshchansperproc;	/* max shared channel size per proc */
 
 SYSCTL_INT(_kern, OID_AUTO, hz, CTLFLAG_RDTUN, &hz, 0,
     "Number of clock ticks per second");
@@ -324,6 +327,16 @@ init_param2(long physpages)
 	if (maxfiles > (physpages / 4))
 		maxfiles = physpages / 4;
 	maxfilesperproc = (maxfiles / 10) * 9;
+
+	/*
+	 * Default per-process cap on shared channel memory; 0 disables
+	 * the feature when VM_SHARED_CHANS is not configured.
+	 */
+#ifdef VM_SHARED_CHANS
+	maxshchansperproc = MAXSHCHANSPERPROC;
+	TUNABLE_ULONG_FETCH("kern.maxshchansperproc", &maxshchansperproc);
+#else
+	maxshchansperproc = 0;
+#endif
 
 	/*
 	 * Cannot be changed after boot.
Index: sys/kern/kern_fork.c
===================================================================
--- sys/kern/kern_fork.c	(revision 263931)
+++ sys/kern/kern_fork.c	(working copy)
@@ -926,6 +926,8 @@ fail:
 #endif
 	racct_proc_exit(newproc);
 fail1:
+	KASSERT(SLIST_EMPTY(&newproc->p_shchans),
+	    ("fork1: shared channels present when destroying proc"));
 	if (vm2 != NULL)
 		vmspace_free(vm2);
 	uma_zfree(proc_zone, newproc);
Index: sys/kern/kern_resource.c
===================================================================
--- sys/kern/kern_resource.c	(revision 263931)
+++ sys/kern/kern_resource.c	(working copy)
@@ -747,6 +747,12 @@ kern_proc_setrlimit(struct thread *td, struct proc
 		if (limp->rlim_max < 1)
 			limp->rlim_max = 1;
 		break;
+	case RLIMIT_SHCHANS:
+		if (limp->rlim_cur > maxshchansperproc)
+			limp->rlim_cur = maxshchansperproc;
+		if (limp->rlim_max > maxshchansperproc)
+			limp->rlim_max = maxshchansperproc;
+		break;
 	}
 	if (p->p_sysent->sv_fixlimit != NULL)
 		p->p_sysent->sv_fixlimit(limp, which);
Index: usr.bin/limits/limits.c
===================================================================
--- usr.bin/limits/limits.c	(revision 263931)
+++ usr.bin/limits/limits.c	(working copy)
@@ -619,6 +619,7 @@ resource_num(int which, int ch, const char *str)
 	case RLIMIT_SBSIZE:
 	case RLIMIT_VMEM:
 	case RLIMIT_SWAP:
+	case RLIMIT_SHCHANS:
 		errno = 0;
 		res = 0;
 		while (*s) {
Index: usr.bin/procstat/procstat_rlimit.c
===================================================================
--- usr.bin/procstat/procstat_rlimit.c	(revision 263931)
+++ usr.bin/procstat/procstat_rlimit.c	(working copy)
@@ -46,7 +46,7 @@
 static struct {
 	const char *name;
 	const char *suffix;
-} rlimit_param[14] = {
+} rlimit_param[15] = {
 	{"cputime",          "sec"},
 	{"filesize",         "B  "},
 	{"datasize",         "B  "},
@@ -61,9 +61,10 @@ static struct {
 	{"pseudo-terminals", "   "},
 	{"swapuse",          "B  "},
 	{"kqueues",          "   "},
+	{"maxshchanssize",   "B  "},
 };
 
-#if RLIM_NLIMITS > 14
+#if RLIM_NLIMITS > 15
 #error "Resource limits have grown. Add new entries to rlimit_param[]."
 #endif
Index: lib/libthr/thread/thr_create.c
===================================================================
--- lib/libthr/thread/thr_create.c	(revision 263931)
+++ lib/libthr/thread/thr_create.c	(working copy)
@@ -55,7 +55,7 @@ _pthread_create(pthread_t * thread, const pthread_
 	struct thr_param param;
 	struct sched_param sched_param;
 	struct rtprio rtp;
-	int ret = 0, locked, create_suspended;
+	int ret = 0, locked, create_suspended, expect_pchan, wasthreaded;
 	sigset_t set, oset;
 	cpuset_t *cpusetp = NULL;
 	int cpusetsize = 0;
@@ -66,10 +66,24 @@ _pthread_create(pthread_t * thread, const pthread_
 	/*
 	 * Tell libc and others now they need lock to protect their data.
 	 */
-	if (_thr_isthreaded() == 0 && _thr_setthreaded(1))
+	wasthreaded = _thr_isthreaded();
+	if (wasthreaded == 0 && _thr_setthreaded(1))
 		return (EAGAIN);
 
 	curthread = _get_curthread();
+	expect_pchan = 0;
+	if (_thread_active_threads == 1) {
+		if (wasthreaded != 0) {
+			/*
+			 * If curthread is respawning again after having been
+			 * multithreaded once the sh_chan_state pointer could
+			 * still reference the old shared channel.  Clean it up.
+			 */
+			curthread->sh_chan_state = NULL;
+		} else if (curthread->sh_chan_state != NULL)
+			PANIC("Thread has already a shared channel state");
+		expect_pchan = 1;
+	}
 	if ((new_thread = _thr_alloc(curthread)) == NULL)
 		return (EAGAIN);
 
@@ -167,6 +181,11 @@ _pthread_create(pthread_t * thread, const pthread_
 		    &sched_param, &rtp);
 		param.rtp = &rtp;
 	}
+	param.child_chan = &new_thread->sh_chan_state;
+	if (expect_pchan != 0)
+		param.parent_chan = &curthread->sh_chan_state;
+	else
+		param.parent_chan = NULL;
 
 	/* Schedule the new thread. */
 	if (create_suspended) {
Index: lib/libthr/thread/thr_exit.c
===================================================================
--- lib/libthr/thread/thr_exit.c	(revision 263931)
+++ lib/libthr/thread/thr_exit.c	(working copy)
@@ -269,8 +269,11 @@ exit_thread(void)
 		_thread_cleanupspecific();
 	}
 
-	if (!_thr_isthreaded())
+	if (!_thr_isthreaded()) {
+		if (curthread->sh_chan_state != NULL)
+			PANIC("Unexpected shared channel state still up");
 		exit(0);
+	}
 
 	if (atomic_fetchadd_int(&_thread_active_threads, -1) == 1) {
 		exit(0);
@@ -282,6 +285,7 @@ exit_thread(void)
 
 	THR_LOCK(curthread);
 	curthread->state = PS_DEAD;
+	curthread->sh_chan_state = NULL;
 	if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
 		curthread->cycle++;
 		_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
Index: lib/libthr/thread/thr_private.h
===================================================================
--- lib/libthr/thread/thr_private.h	(revision 263931)
+++ lib/libthr/thread/thr_private.h	(working copy)
@@ -41,6 +41,7 @@
 #include <sys/types.h>
 #include <sys/time.h>
 #include <sys/cdefs.h>
+#include <sys/proc.h>
 #include <sys/queue.h>
 #include <machine/atomic.h>
 #include <errno.h>
@@ -518,6 +519,9 @@ struct pthread {
 	/* Referenced mutex. */
 	struct pthread_mutex *mutex_obj;
 
+	/* Thread state shared channel. */
+	enum state_thread *sh_chan_state;
+
 	/* Thread will sleep. */
 	int	will_sleep;