diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 154dbf8..208187f 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1323,8 +1323,18 @@ err_exit:
 int
 exec_alloc_args(struct image_args *args)
 {
+	void *buf;
 
-	args->buf = (char *)kmap_alloc_wait(exec_map, PATH_MAX + ARG_MAX);
+	critical_enter();
+	if ((buf = PCPU_GET(execargs)) != NULL) {
+		args->buf = buf;
+		PCPU_SET(execargs, NULL);
+		critical_exit();
+	} else {
+		critical_exit();
+		args->buf = (char *)kmap_alloc_wait(exec_map,
+		    PATH_MAX + ARG_MAX);
+	}
 	return (args->buf != NULL ? 0 : ENOMEM);
 }
 
@@ -1333,8 +1343,15 @@ exec_free_args(struct image_args *args)
 {
 
 	if (args->buf != NULL) {
-		kmap_free_wakeup(exec_map, (vm_offset_t)args->buf,
-		    PATH_MAX + ARG_MAX);
+		critical_enter();
+		if (PCPU_GET(execargs) == NULL) {
+			PCPU_SET(execargs, args->buf);
+			critical_exit();
+		} else {
+			critical_exit();
+			kmap_free_wakeup(exec_map, (vm_offset_t)args->buf,
+			    PATH_MAX + ARG_MAX);
+		}
 		args->buf = NULL;
 	}
 	if (args->fname_buf != NULL) {
diff --git a/sys/sys/pcpu.h b/sys/sys/pcpu.h
index d6d1b3d..a9c79b4 100644
--- a/sys/sys/pcpu.h
+++ b/sys/sys/pcpu.h
@@ -166,6 +166,7 @@ struct pcpu {
 	int		pc_domain;		/* Memory domain. */
 	struct rm_queue	pc_rm_queue;		/* rmlock list of trackers */
 	uintptr_t	pc_dynamic;		/* Dynamic per-cpu data area */
+	void		*pc_execargs;		/* cached exec arg buffer */
 
 	/*
 	 * Keep MD fields last, so that CPU-specific variations on a
diff --git a/sys/vm/vm_init.c b/sys/vm/vm_init.c
index e664585..10ffdda 100644
--- a/sys/vm/vm_init.c
+++ b/sys/vm/vm_init.c
@@ -91,8 +91,8 @@ __FBSDID("$FreeBSD$");
 
 long physmem;
 
-static int exec_map_entries = 16;
-SYSCTL_INT(_vm, OID_AUTO, exec_map_entries, CTLFLAG_RDTUN, &exec_map_entries, 0,
+static int exec_map_entries;
+SYSCTL_INT(_vm, OID_AUTO, exec_map_entries, CTLFLAG_RD, &exec_map_entries, 0,
     "Maximum number of simultaneous execs");
 
 /*
@@ -261,8 +261,11 @@ again:
 		panic("Clean map calculation incorrect");
 
 	/*
-	 * Allocate the pageable submaps.
+	 * Allocate the pageable submaps.  We may cache a buffer of size
+	 * PATH_MAX+ARG_MAX per CPU, so we need to allocate space for at
+	 * least ncpu+1 buffers to avoid deadlock.
 	 */
+	exec_map_entries = mp_ncpus * 2;
 	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 	    exec_map_entries * round_page(PATH_MAX + ARG_MAX), FALSE);
 	pipe_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, maxpipekva,
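
Notes on the change: each CPU gets a single-slot cache (pc_execargs) holding
the most recently freed exec argument buffer.  exec_free_args() parks the
buffer in the slot instead of returning it to exec_map, and the next
exec_alloc_args() on that CPU reclaims it without touching the map.  The
critical_enter()/critical_exit() pair prevents preemption and migration while
the per-CPU slot is read and updated, so no lock is needed.  Because every
CPU may pin one buffer indefinitely, exec_map must back at least ncpus+1
buffers for an exec to always make progress; the patch sizes it at
mp_ncpus * 2.

The sketch below is a hypothetical userspace analogue of the same single-slot
cache, with a per-thread slot standing in for the per-CPU one (buf_alloc,
buf_free, and BUF_SIZE are illustrative names, not part of the patch):

	#include <stdlib.h>

	#define	BUF_SIZE	4096	/* stand-in for PATH_MAX + ARG_MAX */

	/* One cached buffer per thread, mirroring pc_execargs. */
	static _Thread_local void *cached_buf;

	/* Fast path: reuse the cached buffer; else fall back to malloc(). */
	static void *
	buf_alloc(void)
	{
		void *buf;

		if ((buf = cached_buf) != NULL) {
			cached_buf = NULL;	/* take ownership of the slot */
			return (buf);
		}
		return (malloc(BUF_SIZE));
	}

	/* Park the buffer in the empty slot, or release it if the slot is full. */
	static void
	buf_free(void *buf)
	{
		if (cached_buf == NULL)
			cached_buf = buf;
		else
			free(buf);
	}

Because the slot holds at most one buffer, no CPU can hoard more than one
PATH_MAX + ARG_MAX allocation, which is what makes the ncpu+1 sizing bound in
the vm_init.c comment sufficient.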