diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c index d7c7dcc..9d061fa 100644 --- a/sys/kern/imgact_elf.c +++ b/sys/kern/imgact_elf.c @@ -482,7 +482,9 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr, if (curthread->td_proc != p) panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */ - tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK); + tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_FAILOK); + if (tempdata == NULL) + return (ENOMEM); nd = &tempdata->nd; attr = &tempdata->attr; imgp = &tempdata->image_params; @@ -791,7 +793,11 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) VOP_UNLOCK(imgp->vp, 0, td); if (brand_info->emul_path != NULL && brand_info->emul_path[0] != '\0') { - path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); + path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK | M_FAILOK); + if (path == NULL) { + vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); + return (ENOMEM); + } snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path, interp); error = __elfN(load_file)(imgp->proc, path, &addr, @@ -814,7 +820,9 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp) /* * Construct auxargs table (used by the fixup routine) */ - elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK); + elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK | M_FAILOK); + if (elf_auxargs == NULL) + return (ENOMEM); elf_auxargs->execfd = -1; elf_auxargs->phdr = proghdr; elf_auxargs->phent = hdr->e_phentsize; @@ -928,7 +936,7 @@ __elfN(coredump)(td, vp, limit) * Allocate memory for building the header, fill it up, * and write it out. */ - hdr = malloc(hdrsize, M_TEMP, M_WAITOK); + hdr = malloc(hdrsize, M_TEMP, M_WAITOK | M_FAILOK); if (hdr == NULL) { return (EINVAL); } diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 5ecbed8..10fce2e 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -460,7 +460,7 @@ proc0_init(void *dummy __unused) siginit(&proc0); /* Create the file descriptor table. 
*/ - p->p_fd = fdinit(NULL); + p->p_fd = fdinit(NULL, fdprealloc()); p->p_fdtol = NULL; /* Create the limits structures. */ diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c index 941a4e5..bf4cd7e 100644 --- a/sys/kern/kern_descrip.c +++ b/sys/kern/kern_descrip.c @@ -1414,16 +1414,29 @@ falloc(struct thread *td, struct file **resultfp, int *resultfd) return (0); } +void * +fdprealloc() +{ + return (malloc(sizeof(struct filedesc0), M_FILEDESC, + M_WAITOK | M_ZERO | M_FAILOK)); +} + +void +fdprealloc_free(void *prealloc) +{ + free(prealloc, M_FILEDESC); +} + /* * Build a new filedesc structure from another. * Copy the current, root, and jail root vnode references. */ struct filedesc * -fdinit(struct filedesc *fdp) +fdinit(struct filedesc *fdp, void *prealloc) { struct filedesc0 *newfdp; - newfdp = malloc(sizeof *newfdp, M_FILEDESC, M_WAITOK | M_ZERO); + newfdp = (struct filedesc0 *)prealloc; FILEDESC_LOCK_INIT(&newfdp->fd_fd); if (fdp != NULL) { FILEDESC_XLOCK(fdp); @@ -1496,7 +1509,7 @@ fdshare(struct filedesc *fdp) * Unshare a filedesc structure, if necessary by making a copy */ void -fdunshare(struct proc *p, struct thread *td) +fdunshare(struct proc *p, struct thread *td, void **prealloc) { FILEDESC_XLOCK(p->p_fd); @@ -1504,7 +1517,8 @@ fdunshare(struct proc *p, struct thread *td) struct filedesc *tmp; FILEDESC_XUNLOCK(p->p_fd); - tmp = fdcopy(p->p_fd); + tmp = fdcopy(p->p_fd, *prealloc); + *prealloc = NULL; fdfree(td); p->p_fd = tmp; } else @@ -1516,7 +1530,7 @@ fdunshare(struct proc *p, struct thread *td) * this is to ease callers, not catch errors. 
*/ struct filedesc * -fdcopy(struct filedesc *fdp) +fdcopy(struct filedesc *fdp, void *prealloc) { struct filedesc *newfdp; int i; @@ -1525,7 +1539,7 @@ fdcopy(struct filedesc *fdp) if (fdp == NULL) return (NULL); - newfdp = fdinit(fdp); + newfdp = fdinit(fdp, prealloc); FILEDESC_SLOCK(fdp); while (fdp->fd_lastfile >= newfdp->fd_nfiles) { FILEDESC_SUNLOCK(fdp); diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c index 4613939..786b72c 100644 --- a/sys/kern/kern_exec.c +++ b/sys/kern/kern_exec.c @@ -288,12 +288,13 @@ do_execve(td, args, mac_p) struct vattr attr; int (*img_first)(struct image_params *); struct pargs *oldargs = NULL, *newargs = NULL; - struct sigacts *oldsigacts, *newsigacts; + struct sigacts *oldsigacts, *newsigacts = NULL; #ifdef KTRACE struct vnode *tracevp = NULL; struct ucred *tracecred = NULL; #endif struct vnode *textvp = NULL; + void *fdpmem = NULL; int credential_changing; int vfslocked; int textset; @@ -470,23 +471,44 @@ interpret: else suword(--stack_base, imgp->args->argc); + i = imgp->args->begin_envv - imgp->args->begin_argv; + /* Cache arguments if they fit inside our allowance */ + if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { + newargs = pargs_alloc(i); + if (newargs == NULL) { + error = ENOMEM; + goto exec_fail_dealloc; + } + bcopy(imgp->args->begin_argv, newargs->ar_args, i); + } + + PROC_LOCK(p); + if (sigacts_shared(p->p_sigacts)) { + PROC_UNLOCK(p); + newsigacts = sigacts_alloc(); + if (newsigacts == NULL) { + error = ENOMEM; + goto exec_fail_dealloc; + } + } else + PROC_UNLOCK(p); + /* * For security and other reasons, the file descriptor table cannot * be shared after an exec. */ - fdunshare(p, td); + fdpmem = fdprealloc(); + if (fdpmem == NULL) { + error = ENOMEM; + goto exec_fail_dealloc; + } + fdunshare(p, td, &fdpmem); /* * Malloc things before we need locks. 
*/ newcred = crget(); euip = uifind(attr.va_uid); - i = imgp->args->begin_envv - imgp->args->begin_argv; - /* Cache arguments if they fit inside our allowance */ - if (ps_arg_cache_limit >= i + sizeof(struct pargs)) { - newargs = pargs_alloc(i); - bcopy(imgp->args->begin_argv, newargs->ar_args, i); - } /* close files on exec */ VOP_UNLOCK(imgp->vp, 0, td); @@ -506,10 +528,10 @@ interpret: if (sigacts_shared(p->p_sigacts)) { oldsigacts = p->p_sigacts; PROC_UNLOCK(p); - newsigacts = sigacts_alloc(); sigacts_copy(newsigacts, oldsigacts); PROC_LOCK(p); p->p_sigacts = newsigacts; + newsigacts = NULL; } else oldsigacts = NULL; @@ -749,8 +771,6 @@ done1: vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td); if (oldargs != NULL) pargs_drop(oldargs); - if (newargs != NULL) - pargs_drop(newargs); if (oldsigacts != NULL) sigacts_free(oldsigacts); @@ -759,6 +779,15 @@ exec_fail_dealloc: /* * free various allocated resources */ + if (newargs != NULL) + pargs_drop(newargs); + + if (fdpmem != NULL) + fdprealloc_free(fdpmem); + + if (newsigacts != NULL) + sigacts_free(newsigacts); + if (imgp->firstpage != NULL) exec_unmap_first_page(imgp); diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index eb042c0..33c7911 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -196,6 +196,7 @@ fork1(td, flags, pages, procp) struct thread *td2; struct sigacts *newsigacts; struct vmspace *vm2; + void *fdpmem1, *fdpmem2; int error; /* Can't copy and clear. 
*/ @@ -219,6 +220,24 @@ fork1(td, flags, pages, procp) PROC_UNLOCK(p1); } + if (flags & RFCFDG) { + fdpmem1 = fdprealloc(); + if (fdpmem1 == NULL) { + error = ENOMEM; + goto norfproc_fail; + } + } else + fdpmem1 = NULL; + + if (flags & RFFDG) { + fdpmem2 = fdprealloc(); + if (fdpmem2 == NULL) { + error = ENOMEM; + goto norfproc_fail; + } + } else + fdpmem2 = NULL; + error = vm_forkproc(td, NULL, NULL, NULL, flags); if (error) goto norfproc_fail; @@ -228,7 +247,8 @@ fork1(td, flags, pages, procp) */ if (flags & RFCFDG) { struct filedesc *fdtmp; - fdtmp = fdinit(td->td_proc->p_fd); + fdtmp = fdinit(td->td_proc->p_fd, fdpmem1); + fdpmem1 = NULL; fdfree(td); p1->p_fd = fdtmp; } @@ -237,9 +257,13 @@ fork1(td, flags, pages, procp) * Unshare file descriptors (from parent). */ if (flags & RFFDG) - fdunshare(p1, td); + fdunshare(p1, td, &fdpmem2); norfproc_fail: + if (fdpmem1 != NULL) + fdprealloc_free(fdpmem1); + if (fdpmem2 != NULL) + fdprealloc_free(fdpmem2); if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) && (flags & (RFCFDG | RFFDG))) { PROC_LOCK(p1); @@ -256,13 +280,38 @@ norfproc_fail: * however it proved un-needed and caused problems */ + /* + * Malloc things while we don't hold any locks. + */ + if (flags & RFSIGSHARE) + newsigacts = NULL; + else { + newsigacts = sigacts_alloc(); + if (newsigacts == NULL) { + error = ENOMEM; + goto fail1; + } + } + if (flags & (RFCFDG | RFFDG)) { + fdpmem1 = fdprealloc(); + if (fdpmem1 == NULL) { + error = ENOMEM; + goto fail2; + } + } else + fdpmem1 = NULL; + /* Allocate new proc. 
*/ - newproc = uma_zalloc(proc_zone, M_WAITOK); + newproc = uma_zalloc(proc_zone, M_WAITOK | M_FAILOK); + if (newproc == NULL) { + error = ENOMEM; + goto fail2; + } if (TAILQ_EMPTY(&newproc->p_threads)) { td2 = thread_alloc(); if (td2 == NULL) { error = ENOMEM; - goto fail1; + goto fail3; } proc_linkup(newproc, td2); sched_newproc(newproc, td2); @@ -273,14 +322,14 @@ norfproc_fail: if (pages != 0) { if (!vm_thread_new_altkstack(td2, pages)) { error = ENOMEM; - goto fail1; + goto fail3; } } if ((flags & RFMEM) == 0) { vm2 = vmspace_fork(p1->p_vmspace); if (vm2 == NULL) { error = ENOMEM; - goto fail1; + goto fail3; } } else vm2 = NULL; @@ -438,21 +487,15 @@ again: PROC_UNLOCK(p2); /* - * Malloc things while we don't hold any locks. - */ - if (flags & RFSIGSHARE) - newsigacts = NULL; - else - newsigacts = sigacts_alloc(); - - /* * Copy filedesc. */ if (flags & RFCFDG) { - fd = fdinit(p1->p_fd); + fd = fdinit(p1->p_fd, fdpmem1); + fdpmem1 = NULL; fdtol = NULL; } else if (flags & RFFDG) { - fd = fdcopy(p1->p_fd); + fd = fdcopy(p1->p_fd, fdpmem1); + fdpmem1 = NULL; fdtol = NULL; } else { fd = fdshare(p1->p_fd); @@ -739,8 +782,19 @@ fail: #ifdef MAC mac_proc_destroy(newproc); #endif -fail1: +fail3: uma_zfree(proc_zone, newproc); +fail2: + if (fdpmem1 != NULL) + fdprealloc_free(fdpmem1); +fail1: + if (newsigacts != NULL) + sigacts_free(newsigacts); + if (p1->p_flag & P_HADTHREADS) { + PROC_LOCK(p1); + thread_single_end(); + PROC_UNLOCK(p1); + } pause("fork", hz / 2); return (error); } diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c index 3d89243..eb48669 100644 --- a/sys/kern/kern_malloc.c +++ b/sys/kern/kern_malloc.c @@ -365,7 +365,8 @@ malloc(unsigned long size, struct malloc_type *mtp, int flags) malloc_type_allocated(mtp, va == NULL ? 
0 : size); } if (flags & M_WAITOK) - KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); + KASSERT((flags & M_FAILOK) || va != NULL, + ("malloc(M_WAITOK) returned NULL")); else if (va == NULL) t_malloc_fail = time_uptime; #ifdef DIAGNOSTIC @@ -737,7 +738,9 @@ restart: mtx_unlock(&malloc_mtx); buflen = sizeof(mtsh) + count * (sizeof(mth) + sizeof(struct malloc_type_stats) * MAXCPU) + 1; - buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); + buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO | M_FAILOK); + if (buffer == NULL) + return (ENOMEM); mtx_lock(&malloc_mtx); if (count < kmemcount) { free(buffer, M_TEMP); diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index db70abe..7dcff28 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -1120,7 +1120,9 @@ pargs_alloc(int len) struct pargs *pa; MALLOC(pa, struct pargs *, sizeof(struct pargs) + len, M_PARGS, - M_WAITOK); + M_WAITOK | M_FAILOK); + if (pa == NULL) + return (pa); refcount_init(&pa->ar_ref, 1); pa->ar_length = len; return (pa); @@ -1196,6 +1198,8 @@ sysctl_kern_proc_args(SYSCTL_HANDLER_ARGS) if (req->newlen + sizeof(struct pargs) > ps_arg_cache_limit) return (ENOMEM); newpa = pargs_alloc(req->newlen); + if (newpa == NULL) + return (ENOMEM); error = SYSCTL_IN(req, newpa->ar_args, req->newlen); if (error != 0) { pargs_free(newpa); diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c index 9b7d755..a09a688 100644 --- a/sys/kern/kern_sig.c +++ b/sys/kern/kern_sig.c @@ -3274,7 +3274,10 @@ sigacts_alloc(void) { struct sigacts *ps; - ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO); + ps = malloc(sizeof(struct sigacts), M_SUBPROC, M_WAITOK | M_ZERO | + M_FAILOK); + if (ps == NULL) + return (NULL); ps->ps_refcnt = 1; mtx_init(&ps->ps_mtx, "sigacts", NULL, MTX_DEF); return (ps); diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 2454b94..2cf0193 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -114,23 +114,6 @@ thread_ctor(void *mem, int 
size, void *arg, int flags) struct thread *td; td = (struct thread *)mem; - td->td_state = TDS_INACTIVE; - td->td_oncpu = NOCPU; - - td->td_tid = alloc_unr(tid_unrhdr); - td->td_syscalls = 0; - - /* - * Note that td_critnest begins life as 1 because the thread is not - * running and is thereby implicitly waiting to be on the receiving - * end of a context switch. - */ - td->td_critnest = 1; - -#ifdef AUDIT - audit_thread_alloc(td); -#endif - umtx_thread_alloc(td); return (0); } @@ -140,35 +123,6 @@ thread_ctor(void *mem, int size, void *arg, int flags) static void thread_dtor(void *mem, int size, void *arg) { - struct thread *td; - - td = (struct thread *)mem; - -#ifdef INVARIANTS - /* Verify that this thread is in a safe state to free. */ - switch (td->td_state) { - case TDS_INHIBITED: - case TDS_RUNNING: - case TDS_CAN_RUN: - case TDS_RUNQ: - /* - * We must never unlink a thread that is in one of - * these states, because it is currently active. - */ - panic("bad state for thread unlinking"); - /* NOTREACHED */ - case TDS_INACTIVE: - break; - default: - panic("bad thread state"); - /* NOTREACHED */ - } -#endif -#ifdef AUDIT - audit_thread_free(td); -#endif - free_unr(tid_unrhdr, td->td_tid); - sched_newthread(td); } /* @@ -180,12 +134,6 @@ thread_init(void *mem, int size, int flags) struct thread *td; td = (struct thread *)mem; - - td->td_sleepqueue = sleepq_alloc(); - td->td_turnstile = turnstile_alloc(); - td->td_sched = (struct td_sched *)&td[1]; - sched_newthread(td); - umtx_thread_init(td); td->td_kstack = 0; return (0); } @@ -199,9 +147,11 @@ thread_fini(void *mem, int size) struct thread *td; td = (struct thread *)mem; - turnstile_free(td->td_turnstile); - sleepq_free(td->td_sleepqueue); - umtx_thread_fini(td); + if (td->td_turnstile) { + turnstile_free(td->td_turnstile); + sleepq_free(td->td_sleepqueue); + umtx_thread_fini(td); + } } /* @@ -250,6 +200,7 @@ threadinit(void) thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(), thread_ctor, 
thread_dtor, thread_init, thread_fini, 16 - 1, 0); +/* uma_zone_set_max(thread_zone, XXX); */ #ifdef KSE kseinit(); /* set up kse specific stuff e.g. upcall zone*/ #endif @@ -318,14 +269,59 @@ thread_alloc(void) thread_reap(); /* check if any zombies to get */ - td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK); - KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack")); + td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK | M_FAILOK); + if (td == NULL) + return (NULL); + KASSERT(td->td_kstack == 0, ("thread_alloc got initialized thread")); if (!vm_thread_new(td, 0)) { uma_zfree(thread_zone, td); return (NULL); } cpu_thread_setup(td); + if (td->td_sleepqueue == NULL) { + td->td_sleepqueue = sleepq_alloc(); + if (td->td_sleepqueue == NULL) + goto fail1; + } + if (td->td_turnstile == NULL) { + td->td_turnstile = turnstile_alloc(); + if (td->td_turnstile == NULL) + goto fail2; + } + td->td_sched = (struct td_sched *)&td[1]; + if (!umtx_thread_init(td)) + goto fail3; + sched_newthread(td); + + td->td_state = TDS_INACTIVE; + td->td_oncpu = NOCPU; + + td->td_tid = alloc_unr(tid_unrhdr); + td->td_syscalls = 0; + + /* + * Note that td_critnest begins life as 1 because the thread is not + * running and is thereby implicitly waiting to be on the receiving + * end of a context switch. + */ + td->td_critnest = 1; + +#ifdef AUDIT + audit_thread_alloc(td); +#endif + umtx_thread_alloc(td); + return (td); +fail3: + turnstile_free(td->td_turnstile); + td->td_turnstile = NULL; +fail2: + sleepq_free(td->td_sleepqueue); + td->td_sleepqueue = NULL; +fail1: + vm_thread_dispose(td); + uma_zfree(thread_zone, td); + return (NULL); } @@ -337,6 +330,31 @@ thread_free(struct thread *td) { cpu_thread_clean(td); +#ifdef INVARIANTS + /* Verify that this thread is in a safe state to free. */ + switch (td->td_state) { + case TDS_INHIBITED: + case TDS_RUNNING: + case TDS_CAN_RUN: + case TDS_RUNQ: + /* + * We must never unlink a thread that is in one of + * these states, because it is currently active. 
+ */ + panic("bad state for thread unlinking"); + /* NOTREACHED */ + case TDS_INACTIVE: + break; + default: + panic("bad thread state"); + /* NOTREACHED */ + } +#endif +#ifdef AUDIT + audit_thread_free(td); +#endif + free_unr(tid_unrhdr, td->td_tid); + sched_newthread(td); if (td->td_altkstack != 0) vm_thread_dispose_altkstack(td); if (td->td_kstack != 0) diff --git a/sys/kern/kern_umtx.c b/sys/kern/kern_umtx.c index 6229cd9..007d420 100644 --- a/sys/kern/kern_umtx.c +++ b/sys/kern/kern_umtx.c @@ -245,7 +245,10 @@ umtxq_alloc(void) { struct umtx_q *uq; - uq = malloc(sizeof(struct umtx_q), M_UMTX, M_WAITOK | M_ZERO); + uq = malloc(sizeof(struct umtx_q), M_UMTX, + M_WAITOK | M_FAILOK | M_ZERO); + if (uq == NULL) + return (NULL); TAILQ_INIT(&uq->uq_pi_contested); uq->uq_inherited_pri = PRI_MAX; return (uq); @@ -2639,11 +2640,13 @@ freebsd32_umtx_op(struct thread *td, struct freebsd32_umtx_op_args *uap) } #endif -void +int umtx_thread_init(struct thread *td) { - td->td_umtxq = umtxq_alloc(); + if ((td->td_umtxq = umtxq_alloc()) == NULL) + return (0); td->td_umtxq->uq_thread = td; + return (1); } void diff --git a/sys/kern/subr_sleepqueue.c b/sys/kern/subr_sleepqueue.c index 8f0ddcf..3b532da 100644 --- a/sys/kern/subr_sleepqueue.c +++ b/sys/kern/subr_sleepqueue.c @@ -207,7 +207,7 @@ struct sleepqueue * sleepq_alloc(void) { - return (uma_zalloc(sleepq_zone, M_WAITOK)); + return (uma_zalloc(sleepq_zone, M_WAITOK | M_FAILOK)); } /* diff --git a/sys/kern/subr_turnstile.c b/sys/kern/subr_turnstile.c index 9dd08b8..9788ded 100644 --- a/sys/kern/subr_turnstile.c +++ b/sys/kern/subr_turnstile.c @@ -511,7 +511,7 @@ struct turnstile * turnstile_alloc(void) { - return (uma_zalloc(turnstile_zone, M_WAITOK)); + return (uma_zalloc(turnstile_zone, M_WAITOK | M_FAILOK)); } /* diff --git a/sys/sys/filedesc.h b/sys/sys/filedesc.h index 48db9d8..41e2092 100644 --- a/sys/sys/filedesc.h +++ b/sys/sys/filedesc.h @@ -117,10 +117,12 @@ int fdavail(struct thread *td, int n); int fdcheckstd(struct thread *td); void fdclose(struct 
filedesc *fdp, struct file *fp, int idx, struct thread *td); void fdcloseexec(struct thread *td); -struct filedesc *fdcopy(struct filedesc *fdp); -void fdunshare(struct proc *p, struct thread *td); +struct filedesc *fdcopy(struct filedesc *fdp, void *fdpmem); +void fdunshare(struct proc *p, struct thread *td, void **prealloc); void fdfree(struct thread *td); -struct filedesc *fdinit(struct filedesc *fdp); +struct filedesc *fdinit(struct filedesc *fdp, void *prealloc); +void * fdprealloc(void); +void fdprealloc_free(void *prealloc); struct filedesc *fdshare(struct filedesc *fdp); struct filedesc_to_leader * filedesc_to_leader_alloc(struct filedesc_to_leader *old, diff --git a/sys/sys/malloc.h b/sys/sys/malloc.h index 05e1381..21a7cf8 100644 --- a/sys/sys/malloc.h +++ b/sys/sys/malloc.h @@ -47,6 +47,8 @@ */ #define M_NOWAIT 0x0001 /* do not block */ #define M_WAITOK 0x0002 /* ok to block */ +#define M_FAILOK 0x0004 /* despite M_WAITOK, fail when + no addr space */ #define M_ZERO 0x0100 /* bzero the allocation */ #define M_NOVM 0x0200 /* don't ask VM for pages */ #define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */ diff --git a/sys/sys/umtx.h b/sys/sys/umtx.h index c4f6029..75a24d8 100644 --- a/sys/sys/umtx.h +++ b/sys/sys/umtx.h @@ -171,7 +171,7 @@ struct umtx_q *umtxq_alloc(void); void umtxq_free(struct umtx_q *); int kern_umtx_wake(struct thread *td, void *uaddr, int n_wake); void umtx_pi_adjust(struct thread *td, u_char oldpri); -void umtx_thread_init(struct thread *td); +int umtx_thread_init(struct thread *td); void umtx_thread_fini(struct thread *td); void umtx_thread_alloc(struct thread *td); void umtx_thread_exit(struct thread *td); diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c index eee5cee..93ee89d 100644 --- a/sys/vm/uma_core.c +++ b/sys/vm/uma_core.c @@ -2870,7 +2870,9 @@ restart: buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) * (mp_maxid + 1)) + 1; - buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO); + buffer = 
malloc(buflen, M_TEMP, M_WAITOK | M_ZERO | M_FAILOK); + if (buffer == NULL) + return (ENOMEM); mtx_lock(&uma_mtx); i = 0; diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c index 116ae9b..a08444b 100644 --- a/sys/vm/vm_kern.c +++ b/sys/vm/vm_kern.c @@ -302,8 +302,12 @@ kmem_malloc(map, size, flags) vm_map_lock(map); if (vm_map_findspace(map, vm_map_min(map), size, &addr)) { vm_map_unlock(map); - panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated", - (long)size, (long)map->size); + if (flags & M_FAILOK) + return (0); + else + panic("kmem_malloc(%ld): kmem_map too " + "small: %ld total allocated", + (long)size, (long)map->size); } } else { return (0);