diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index 64ba74d..24709b0 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -181,10 +181,15 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RWTUN,
 SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RWTUN,
     &shm_allow_removed, 0,
     "Enable/Disable attachment to attached segments marked for removal");
-SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD,
-    NULL, 0, sysctl_shmsegs, "",
+SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD |
+    CTLFLAG_MPSAFE, NULL, 0, sysctl_shmsegs, "",
     "Current number of shared memory segments allocated");
 
+static struct sx sysvshmsx;
+#define	SYSVSHM_LOCK()		sx_xlock(&sysvshmsx)
+#define	SYSVSHM_UNLOCK()	sx_xunlock(&sysvshmsx)
+#define	SYSVSHM_ASSERT_LOCKED()	sx_assert(&sysvshmsx, SA_LOCKED)
+
 static int
 shm_find_segment_by_key(key)
 	key_t key;
@@ -237,7 +242,7 @@ shm_deallocate_segment(shmseg)
 {
 	vm_size_t size;
 
-	GIANT_REQUIRED;
+	SYSVSHM_ASSERT_LOCKED();
 
 	vm_object_deallocate(shmseg->object);
 	shmseg->object = NULL;
@@ -261,7 +266,7 @@ shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
 	int segnum, result;
 	vm_size_t size;
 
-	GIANT_REQUIRED;
+	SYSVSHM_ASSERT_LOCKED();
 
 	segnum = IPCID_TO_IX(shmmap_s->shmid);
 	shmseg = &shmsegs[segnum];
@@ -299,7 +304,7 @@ sys_shmdt(td, uap)
 	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
 		return (ENOSYS);
 
-	mtx_lock(&Giant);
+	SYSVSHM_LOCK();
 	shmmap_s = p->p_vmspace->vm_shm;
 	if (shmmap_s == NULL) {
 		error = EINVAL;
@@ -323,7 +328,7 @@ sys_shmdt(td, uap)
 #endif
 	error = shm_delete_mapping(p->p_vmspace, shmmap_s);
 done2:
-	mtx_unlock(&Giant);
+	SYSVSHM_UNLOCK();
 	return (error);
 }
 
@@ -353,27 +358,15 @@ kern_shmat(td, shmid, shmaddr, shmflg)
 	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
 		return (ENOSYS);
 
-	mtx_lock(&Giant);
+	SYSVSHM_LOCK();
 	shmmap_s = p->p_vmspace->vm_shm;
 	if (shmmap_s == NULL) {
 		shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state),
 		    M_SHM, M_WAITOK);
-
-		/*
-		 * If malloc() above sleeps, the Giant lock is
-		 * temporarily dropped, which allows another thread to
-		 * allocate shmmap_state and set vm_shm.  Recheck
-		 * vm_shm and free the new shmmap_state if another one
-		 * is already allocated.
-		 */
-		if (p->p_vmspace->vm_shm != NULL) {
-			free(shmmap_s, M_SHM);
-			shmmap_s = p->p_vmspace->vm_shm;
-		} else {
-			for (i = 0; i < shminfo.shmseg; i++)
-				shmmap_s[i].shmid = -1;
-			p->p_vmspace->vm_shm = shmmap_s;
-		}
+		for (i = 0; i < shminfo.shmseg; i++)
+			shmmap_s[i].shmid = -1;
+		KASSERT(p->p_vmspace->vm_shm == NULL, ("raced"));
+		p->p_vmspace->vm_shm = shmmap_s;
 	}
 	shmseg = shm_find_segment_by_shmid(shmid);
 	if (shmseg == NULL) {
@@ -439,7 +432,7 @@ kern_shmat(td, shmid, shmaddr, shmflg)
 	shmseg->u.shm_nattch++;
 	td->td_retval[0] = attach_va;
 done2:
-	mtx_unlock(&Giant);
+	SYSVSHM_UNLOCK();
 	return (error);
 }
 
@@ -465,7 +458,7 @@ kern_shmctl(td, shmid, cmd, buf, bufsz)
 	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
 		return (ENOSYS);
 
-	mtx_lock(&Giant);
+	SYSVSHM_LOCK();
 	switch (cmd) {
 	/*
 	 * It is possible that kern_shmctl is being called from the Linux ABI
@@ -557,7 +550,7 @@ kern_shmctl(td, shmid, cmd, buf, bufsz)
 		break;
 	}
 done2:
-	mtx_unlock(&Giant);
+	SYSVSHM_UNLOCK();
 	return (error);
 }
 
@@ -622,6 +615,7 @@ shmget_existing(td, uap, mode, segnum)
 	struct shmid_kernel *shmseg;
 	int error;
 
+	SYSVSHM_ASSERT_LOCKED();
 	shmseg = &shmsegs[segnum];
 	if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
 		/*
@@ -630,7 +624,8 @@ shmget_existing(td, uap, mode, segnum)
 		 * allocation failed or it was freed).
 		 */
 		shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
-		error = tsleep(shmseg, PLOCK | PCATCH, "shmget", 0);
+		error = sx_sleep(shmseg, &sysvshmsx, PLOCK | PCATCH,
+		    "shmget", 0);
 		if (error)
 			return (error);
 		return (EAGAIN);
@@ -660,7 +655,7 @@ shmget_allocate_segment(td, uap, mode)
 	struct shmid_kernel *shmseg;
 	vm_object_t shm_object;
 
-	GIANT_REQUIRED;
+	SYSVSHM_ASSERT_LOCKED();
 
 	if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
 		return (EINVAL);
@@ -770,7 +765,7 @@ sys_shmget(td, uap)
 	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
 		return (ENOSYS);
 
-	mtx_lock(&Giant);
+	SYSVSHM_LOCK();
 	mode = uap->shmflg & ACCESSPERMS;
 	if (uap->key != IPC_PRIVATE) {
 	again:
@@ -788,7 +783,7 @@ sys_shmget(td, uap)
 	}
 	error = shmget_allocate_segment(td, uap, mode);
 done2:
-	mtx_unlock(&Giant);
+	SYSVSHM_UNLOCK();
 	return (error);
 }
 
@@ -800,15 +795,16 @@ shmfork_myhook(p1, p2)
 	size_t size;
 	int i;
 
-	mtx_lock(&Giant);
+	SYSVSHM_LOCK();
 	size = shminfo.shmseg * sizeof(struct shmmap_state);
 	shmmap_s = malloc(size, M_SHM, M_WAITOK);
 	bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
 	p2->p_vmspace->vm_shm = shmmap_s;
-	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
+	for (i = 0; i < shminfo.shmseg; i++, shmmap_s++) {
 		if (shmmap_s->shmid != -1)
 			shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
-	mtx_unlock(&Giant);
+	}
+	SYSVSHM_UNLOCK();
 }
 
 static void
@@ -817,16 +813,16 @@ shmexit_myhook(struct vmspace *vm)
 	struct shmmap_state *base, *shm;
 	int i;
 
+	SYSVSHM_LOCK();
 	if ((base = vm->vm_shm) != NULL) {
 		vm->vm_shm = NULL;
-		mtx_lock(&Giant);
 		for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
 			if (shm->shmid != -1)
 				shm_delete_mapping(vm, shm);
 		}
-		mtx_unlock(&Giant);
 		free(base, M_SHM);
 	}
+	SYSVSHM_UNLOCK();
 }
 
 static void
@@ -919,6 +915,7 @@ shminit()
 	shm_last_free = 0;
 	shm_nused = 0;
 	shm_committed = 0;
+	sx_init(&sysvshmsx, "sysvshmsx");
 	shmexit_hook = &shmexit_myhook;
 	shmfork_hook = &shmfork_myhook;
 
@@ -961,14 +958,19 @@ shmunload()
 	free(shmsegs, M_SHM);
 	shmexit_hook = NULL;
 	shmfork_hook = NULL;
+	sx_destroy(&sysvshmsx);
 	return (0);
 }
 
 static int
 sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
 {
+	int error;
 
-	return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
+	SYSVSHM_LOCK();
+	error = SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0]));
+	SYSVSHM_UNLOCK();
+	return (error);
 }
 
 #if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
@@ -1000,7 +1002,7 @@ oshmctl(struct thread *td, struct oshmctl_args *uap)
 	if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
 		return (ENOSYS);
 
-	mtx_lock(&Giant);
+	SYSVSHM_LOCK();
 	shmseg = shm_find_segment_by_shmid(uap->shmid);
 	if (shmseg == NULL) {
 		error = EINVAL;
@@ -1034,7 +1036,7 @@ oshmctl(struct thread *td, struct oshmctl_args *uap)
 		break;
 	}
 done2:
-	mtx_unlock(&Giant);
+	SYSVSHM_UNLOCK();
 	return (error);
 #else
 	return (EINVAL);
@@ -1066,9 +1068,9 @@ sys_shmsys(td, uap)
 	if (uap->which < 0 ||
 	    uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
 		return (EINVAL);
-	mtx_lock(&Giant);
+	SYSVSHM_LOCK();
 	error = (*shmcalls[uap->which])(td, &uap->a2);
-	mtx_unlock(&Giant);
+	SYSVSHM_UNLOCK();
 	return (error);
 }
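
Note on the locking pattern (an illustrative sketch, not part of the patch): the patch replaces Giant with a private sx(9) lock rather than a mutex because these code paths may sleep while holding the lock, e.g. malloc(M_WAITOK) in kern_shmat() and shmfork_myhook(), and because shmget_existing() must wait for an in-flight segment allocation while atomically releasing the lock, which is exactly what the tsleep() -> sx_sleep() conversion provides. The sketch below shows the same pattern in isolation, using only the real sx(9) API; all mysubsys_* names are hypothetical.

/*
 * Illustrative sketch only: the sx(9) pattern the diff adopts.
 * All mysubsys_* identifiers are invented for this example.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx mysubsys_sx;
static int mysubsys_busy;	/* example state guarded by the lock */

static void
mysubsys_init(void)
{

	sx_init(&mysubsys_sx, "mysubsys");
}

static int
mysubsys_wait_idle(void)
{
	int error;

	sx_xlock(&mysubsys_sx);
	while (mysubsys_busy) {
		/*
		 * As in the tsleep() -> sx_sleep() conversion in
		 * shmget_existing(): the sleep drops mysubsys_sx and
		 * reacquires it before returning, so no wakeup is
		 * lost and Giant is not needed.
		 */
		error = sx_sleep(&mysubsys_busy, &mysubsys_sx,
		    PLOCK | PCATCH, "myidle", 0);
		if (error != 0) {
			sx_xunlock(&mysubsys_sx);
			return (error);
		}
	}
	sx_assert(&mysubsys_sx, SA_XLOCKED);	/* what callees would assert */
	sx_xunlock(&mysubsys_sx);
	return (0);
}

static void
mysubsys_done(void)
{

	sx_xlock(&mysubsys_sx);
	mysubsys_busy = 0;
	wakeup(&mysubsys_busy);
	sx_xunlock(&mysubsys_sx);
}

static void
mysubsys_fini(void)
{

	sx_destroy(&mysubsys_sx);
}

This also explains the KASSERT in kern_shmat(): under Giant, malloc(M_WAITOK) could sleep and temporarily drop the lock, letting another thread install vm_shm first, hence the old recheck-and-free dance. An sx lock stays held across the sleep, so the race can no longer occur and is asserted away instead.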