diff --git a/lib/libc/gen/Symbol-atomic64e.map b/lib/libc/gen/Symbol-atomic64e.map
new file mode 100644
index 00000000000..61f7d99709e
--- /dev/null
+++ b/lib/libc/gen/Symbol-atomic64e.map
@@ -0,0 +1,17 @@
+/*
+ * $FreeBSD$
+ */
+
+FBSD_1.6 {
+	atomic_add_64;
+	atomic_clear_64;
+	atomic_cmpset_64;
+	atomic_fcmpset_64;
+	atomic_fetchadd_64;
+	atomic_load_64;
+	atomic_readandclear_64;
+	atomic_set_64;
+	atomic_store_64;
+	atomic_subtract_64;
+	atomic_swap_64;
+};
diff --git a/lib/libc/powerpc/gen/Makefile.common b/lib/libc/powerpc/gen/Makefile.common
index 4ba72799a5c..7374286d05d 100644
--- a/lib/libc/powerpc/gen/Makefile.common
+++ b/lib/libc/powerpc/gen/Makefile.common
@@ -4,3 +4,13 @@
 
 SRCS += _ctx_start.S eabi.S infinity.c ldexp.c makecontext.c \
 	signalcontext.c syncicache.c _set_tp.c trivial-getcontextx.c
+
+SRCS+=	subr_atomic64.c
+
+# Copy kern/subr_atomic64.c to the libc object directory.
+subr_atomic64.c: ${SRCTOP}/sys/kern/subr_atomic64.c
+	cat ${.ALLSRC} > ${.TARGET}
+
+CLEANFILES+=	subr_atomic64.c
+
+SYM_MAPS+=	${LIBC_SRCTOP}/gen/Symbol-atomic64e.map
diff --git a/share/mk/bsd.cpu.mk b/share/mk/bsd.cpu.mk
index b5d0453c2ed..ffcb665a076 100644
--- a/share/mk/bsd.cpu.mk
+++ b/share/mk/bsd.cpu.mk
@@ -138,6 +138,9 @@ _CPUCFLAGS = -mcpu=${CPUTYPE} -mno-powerpc64
 .  endif
 . elif ${MACHINE_ARCH} == "powerpc64"
 _CPUCFLAGS = -mcpu=${CPUTYPE}
+. elif ${MACHINE_ARCH} == "powerpcspe"
+# Equivalent gcc flags: -Wa,-me500 -mspe=yes -mabi=spe -mfloat-gprs=double -mcpu=8548
+_CPUCFLAGS = -mcpu=8548 -mllvm -mattr=+spe
 . elif ${MACHINE_CPUARCH} == "mips"
 # mips[1234], mips32, mips64, and all later releases need to have mips
 # preserved (releases later than r2 require external toolchain)
diff --git a/sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c b/sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c
index 89cd1db4674..5e13c4a5d7a 100644
--- a/sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c
+++ b/sys/cddl/compat/opensolaris/kern/opensolaris_atomic.c
@@ -55,6 +55,7 @@ atomic_init(void)
 }
 #endif
 
+#ifndef __powerpc__
 void
 atomic_add_64(volatile uint64_t *target, int64_t delta)
 {
@@ -72,6 +73,7 @@ atomic_dec_64(volatile uint64_t *target)
 	*target -= 1;
 	mtx_unlock(&atomic_mtx);
 }
+#endif
 
 uint64_t
 atomic_swap_64(volatile uint64_t *a, uint64_t value)
diff --git a/sys/conf/Makefile.powerpc b/sys/conf/Makefile.powerpc
index 2f28a9ff732..e250a4fa1e6 100644
--- a/sys/conf/Makefile.powerpc
+++ b/sys/conf/Makefile.powerpc
@@ -37,7 +37,7 @@ INCLUDES+=	-I$S/contrib/libfdt
 
 .if "${MACHINE_ARCH}" == "powerpcspe"
 # Force __SPE__, since the builtin will be removed later with -mno-spe
-CFLAGS+= -mabi=spe -D__SPE__
+CFLAGS+= -D__SPE__
 .endif
 CFLAGS+= -msoft-float
 CFLAGS.gcc+= -Wa,-many
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index 013fe0fea33..fbdc8f59b52 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -76,6 +76,7 @@ dev/uart/uart_cpu_powerpc.c	optional	uart
 dev/usb/controller/ehci_fsl.c	optional	ehci mpc85xx
 dev/vt/hw/ofwfb/ofwfb.c		optional	vt aim
 kern/kern_clocksource.c		standard
+kern/subr_atomic64.c		optional	powerpc | powerpcspe
 kern/subr_dummy_vdso_tc.c	standard
 kern/syscalls.c			optional	ktr
 kern/subr_sfbuf.c		standard
diff --git a/sys/kern/subr_atomic64.c b/sys/kern/subr_atomic64.c
new file mode 100644
index 00000000000..8f7982afd23
--- /dev/null
+++ b/sys/kern/subr_atomic64.c
@@ -0,0 +1,296 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Justin Hibbits
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+
+#ifdef _KERNEL
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#endif
+
+#include <machine/atomic.h>
+#include <machine/sysarch.h>
+
+enum {
+	ATOMIC64_ADD,
+	ATOMIC64_CLEAR,
+	ATOMIC64_CMPSET,
+	ATOMIC64_FCMPSET,
+	ATOMIC64_FETCHADD,
+	ATOMIC64_LOAD,
+	ATOMIC64_SET,
+	ATOMIC64_SUBTRACT,
+	ATOMIC64_STORE,
+	ATOMIC64_SWAP
+};
+
+struct atomic64_sysarch_args {
+	u_int64_t *p;
+	u_int64_t v;
+	u_int64_t arg2;
+	int type;
+	u_int64_t rval;
+};
+
+#ifdef _KERNEL
+#define	A64_POOL_SIZE	MAXCPU
+/* Estimated size of a cacheline */
+#define	CACHE_ALIGN	32
+
+/* Pick a pool mutex by hashing the physical address of the target. */
+#define	GET_MUTEX(p) \
+    (&a64_mtx_pool[(pmap_kextract((vm_offset_t)p) / CACHE_ALIGN) % (A64_POOL_SIZE)])
+
+#define	LOCK_A64() \
+    struct mtx *_amtx = GET_MUTEX(p); \
+    if (smp_started) mtx_lock(_amtx)
+
+#define	UNLOCK_A64()	if (smp_started) mtx_unlock(_amtx)
+
+#define	ATOMIC64_EMU_UN(op, rt, block, ret) \
+    rt \
+    atomic_##op##_64(volatile u_int64_t *p) { \
+	u_int64_t tmp __unused; \
+	LOCK_A64(); \
+	block; \
+	UNLOCK_A64(); \
+	ret; } struct hack
+
+#define	ATOMIC64_EMU_BIN(op, rt, block, ret) \
+    rt \
+    atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) { \
+	u_int64_t tmp __unused; \
+	LOCK_A64(); \
+	block; \
+	UNLOCK_A64(); \
+	ret; } struct hack
+
+static struct mtx a64_mtx_pool[A64_POOL_SIZE];
+
+ATOMIC64_EMU_BIN(add, void, (*p = *p + v), return);
+ATOMIC64_EMU_BIN(clear, void, *p &= ~v, return);
+ATOMIC64_EMU_BIN(fetchadd, u_int64_t, (*p = *p + v, v = *p - v), return (v));
+ATOMIC64_EMU_UN(load, u_int64_t, (tmp = *p), return (tmp));
+ATOMIC64_EMU_UN(readandclear, u_int64_t, (tmp = *p, *p = 0), return (tmp));
+ATOMIC64_EMU_BIN(set, void, *p |= v, return);
+ATOMIC64_EMU_BIN(subtract, void, (*p = *p - v), return);
+ATOMIC64_EMU_BIN(store, void, *p = v, return);
+ATOMIC64_EMU_BIN(swap, u_int64_t, (tmp = *p, *p = v, v = tmp), return (v));
+
+int
+atomic_cmpset_64(volatile u_int64_t *p, u_int64_t old, u_int64_t new)
+{
+	u_int64_t tmp;
+
+	LOCK_A64();
+	tmp = *p;
+	if (tmp == old)
+		*p = new;
+	UNLOCK_A64();
+
+	return (tmp == old);
+}
+
+int
+atomic_fcmpset_64(volatile u_int64_t *p, u_int64_t *old, u_int64_t new)
+{
+	u_int64_t tmp, tmp_old;
+
+	LOCK_A64();
+	tmp = *p;
+	tmp_old = *old;
+	if (tmp == tmp_old)
+		*p = new;
+	else
+		*old = tmp;
+	UNLOCK_A64();
+
+	return (tmp == tmp_old);
+}
+
+static void
+atomic64_mtxinit(void)
+{
+	int i;
+
+	for (i = 0; i < A64_POOL_SIZE; i++)
+		mtx_init(&a64_mtx_pool[i], "atomic64 mutex", NULL, MTX_DEF);
+}
+
+SYSINIT(atomic64_mtxinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, atomic64_mtxinit, NULL);
+
+/* Serializes all userland emulation requests against each other. */
+struct mtx atomic64_user_mtx;
+MTX_SYSINIT(atomic64_user, &atomic64_user_mtx, "atomic64 user mutex", MTX_DEF);
+
+int
+sysarch_atomic64(void *args)
+{
+	struct atomic64_sysarch_args a64_args;
+	u_int64_t p;
+	int err;
+
+	err = copyin(args, &a64_args, sizeof(a64_args));
+	if (err != 0)
+		return (err);
+
+	mtx_lock(&atomic64_user_mtx);
+	err = copyin(a64_args.p, &p, sizeof(p));
+	if (err != 0) {
+		mtx_unlock(&atomic64_user_mtx);
+		goto err;
+	}
+	switch (a64_args.type) {
+	case ATOMIC64_ADD:
+		atomic_add_64(&p, a64_args.v);
+		break;
+	case ATOMIC64_CLEAR:
+		atomic_clear_64(&p, a64_args.v);
+		break;
+	case ATOMIC64_CMPSET:
+		a64_args.rval =
+		    atomic_cmpset_64(&p, a64_args.v, a64_args.arg2);
+		break;
+	case ATOMIC64_FCMPSET:
+		a64_args.rval =
+		    atomic_fcmpset_64(&p, &a64_args.v, a64_args.arg2);
+		break;
+	case ATOMIC64_FETCHADD:
+		a64_args.rval = atomic_fetchadd_64(&p, a64_args.v);
+		break;
+	case ATOMIC64_LOAD:
+		a64_args.rval = p;
+		break;
+	case ATOMIC64_SET:
+		atomic_set_64(&p, a64_args.v);
+		break;
+	case ATOMIC64_SUBTRACT:
+		atomic_subtract_64(&p, a64_args.v);
+		break;
+	case ATOMIC64_STORE:
+		p = a64_args.v;
+		break;
+	case ATOMIC64_SWAP:
+		a64_args.rval = p;
+		p = a64_args.v;
+		break;
+	}
+
+	/* Write the (possibly updated) value back to user space. */
+	if (a64_args.type != ATOMIC64_LOAD)
+		err = copyout(&p, a64_args.p, sizeof(p));
+	mtx_unlock(&atomic64_user_mtx);
+
+	if (err == 0)
+		err = copyout(&a64_args, args, sizeof(a64_args));
+err:
+	return (err);
+}
+
+#else /* !_KERNEL */
+
+#define	ATOMIC64_EMU_BIN_V(op, op_enum) \
+	void \
+	atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) { \
+		struct atomic64_sysarch_args args; \
+		args.p = __DEVOLATILE(u_int64_t *, p); \
+		args.v = v; \
+		args.type = ATOMIC64_##op_enum; \
+		sysarch(ATOMIC64_SYSARCH, &args); \
+		return; } struct hack
+
+#define	ATOMIC64_EMU_BIN(op, rtype, op_enum) \
+	rtype \
+	atomic_##op##_64(volatile u_int64_t *p, u_int64_t v) { \
+		struct atomic64_sysarch_args args; \
+		args.p = __DEVOLATILE(u_int64_t *, p); \
+		args.v = v; \
+		args.type = ATOMIC64_##op_enum; \
+		sysarch(ATOMIC64_SYSARCH, &args); \
+		return (args.rval); } struct hack
+
+ATOMIC64_EMU_BIN_V(add, ADD);
+ATOMIC64_EMU_BIN_V(clear, CLEAR);
+ATOMIC64_EMU_BIN_V(set, SET);
+ATOMIC64_EMU_BIN_V(subtract, SUBTRACT);
+ATOMIC64_EMU_BIN_V(store, STORE);
+
+ATOMIC64_EMU_BIN(fetchadd, u_int64_t, FETCHADD);
+ATOMIC64_EMU_BIN(swap, u_int64_t, SWAP);
+
+u_int64_t
+atomic_load_64(volatile u_int64_t *p)
+{
+	struct atomic64_sysarch_args args;
+
+	args.p = __DEVOLATILE(u_int64_t *, p);
+	args.type = ATOMIC64_LOAD;
+	sysarch(ATOMIC64_SYSARCH, &args);
+	return (args.rval);
+}
+
+u_int64_t
+atomic_readandclear_64(volatile u_int64_t *p)
+{
+
+	/* No dedicated sysarch op; swapping in zero is equivalent. */
+	return (atomic_swap_64(p, 0));
+}
+
+int
+atomic_cmpset_64(volatile u_int64_t *dst, u_int64_t old, u_int64_t new)
+{
+	struct atomic64_sysarch_args args;
+
+	args.p = __DEVOLATILE(u_int64_t *, dst);
+	args.v = old;
+	args.arg2 = new;
+	args.type = ATOMIC64_CMPSET;
+	sysarch(ATOMIC64_SYSARCH, &args);
+	return (args.rval);
+}
+
+int
+atomic_fcmpset_64(volatile u_int64_t *dst, u_int64_t *old, u_int64_t new)
+{
+	struct atomic64_sysarch_args args;
+
+	args.p = __DEVOLATILE(u_int64_t *, dst);
+	args.v = *old;
+	args.arg2 = new;
+	args.type = ATOMIC64_FCMPSET;
+	sysarch(ATOMIC64_SYSARCH, &args);
+	*old = args.v;
+	return (args.rval);
+}
+#endif /* _KERNEL */
diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index 710a50f5aba..eca6b59efbf 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -40,6 +40,10 @@
 
 #include <sys/atomic_common.h>
 
+#ifndef __powerpc64__
+#include <sys/_atomic64e.h>
+#endif
+
 /*
  * The __ATOMIC_REL/ACQ() macros provide memory barriers only in conjunction
  * with the atomic lXarx/stXcx. sequences below. They are not exposed outside
diff --git a/sys/powerpc/include/sysarch.h b/sys/powerpc/include/sysarch.h
index a18451e9e09..49b394b4137 100644
--- a/sys/powerpc/include/sysarch.h
+++ b/sys/powerpc/include/sysarch.h
@@ -42,4 +42,6 @@ int sysarch(int, void *);
 __END_DECLS
 #endif
 
+#define	ATOMIC64_SYSARCH	-1
+
 #endif /* !_MACHINE_SYSARCH_H_ */
diff --git a/sys/powerpc/powerpc/sys_machdep.c b/sys/powerpc/powerpc/sys_machdep.c
index 9138074ed7c..ab5527ad69e 100644
--- a/sys/powerpc/powerpc/sys_machdep.c
+++ b/sys/powerpc/powerpc/sys_machdep.c
@@ -40,6 +40,11 @@
 int
 freebsd32_sysarch(struct thread *td, struct freebsd32_sysarch_args *uap)
 {
+	switch (uap->op) {
+	case ATOMIC64_SYSARCH:
+		return (sysarch_atomic64(uap->parms));
+	}
+
 	return (EINVAL);
 }
 #endif
@@ -48,6 +53,12 @@
 int
 sysarch(struct thread *td, struct sysarch_args *uap)
 {
+#ifndef __powerpc64__
+	switch (uap->op) {
+	case ATOMIC64_SYSARCH:
+		return (sysarch_atomic64(uap->parms));
+	}
+#endif
 	return (EINVAL);
 }
 
diff --git a/sys/sys/_atomic64e.h b/sys/sys/_atomic64e.h
new file mode 100644
index 00000000000..e195113ad2d
--- /dev/null
+++ b/sys/sys/_atomic64e.h
@@ -0,0 +1,79 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2019 Justin Hibbits
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _SYS_ATOMIC64E_H_
+#define	_SYS_ATOMIC64E_H_
+
+#ifndef _MACHINE_ATOMIC_H_
+#error "This should not be included directly.  Include <machine/atomic.h>"
+#endif
+
+/* Emulated versions of 64-bit atomic operations. */
+
+void	atomic_add_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_add_acq_64	atomic_add_64
+#define	atomic_add_rel_64	atomic_add_64
+
+int	atomic_cmpset_64(volatile u_int64_t *, u_int64_t, u_int64_t);
+#define	atomic_cmpset_acq_64	atomic_cmpset_64
+#define	atomic_cmpset_rel_64	atomic_cmpset_64
+
+void	atomic_clear_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_clear_acq_64	atomic_clear_64
+#define	atomic_clear_rel_64	atomic_clear_64
+
+int	atomic_fcmpset_64(volatile u_int64_t *, u_int64_t *, u_int64_t);
+#define	atomic_fcmpset_acq_64	atomic_fcmpset_64
+#define	atomic_fcmpset_rel_64	atomic_fcmpset_64
+
+u_int64_t	atomic_fetchadd_64(volatile u_int64_t *, u_int64_t);
+
+u_int64_t	atomic_load_64(volatile u_int64_t *);
+#define	atomic_load_acq_64	atomic_load_64
+
+u_int64_t	atomic_readandclear_64(volatile u_int64_t *);
+
+void	atomic_set_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_set_acq_64	atomic_set_64
+#define	atomic_set_rel_64	atomic_set_64
+
+void	atomic_subtract_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_subtract_acq_64	atomic_subtract_64
+#define	atomic_subtract_rel_64	atomic_subtract_64
+
+void	atomic_store_64(volatile u_int64_t *, u_int64_t);
+#define	atomic_store_rel_64	atomic_store_64
+
+u_int64_t	atomic_swap_64(volatile u_int64_t *, u_int64_t);
+
+#ifdef _KERNEL
+int	sysarch_atomic64(void *);
+#endif
+
+#endif /* _SYS_ATOMIC64E_H_ */
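---

Usage sketch (illustrative, not part of the patch): the fragment below shows how 32-bit powerpc userland code would exercise the emulated 64-bit atomics exported above. The atomic_*_64 entry points and the ATOMIC64_SYSARCH operation come from this patch; the test program itself (file name, values, assertions) is hypothetical. Each call packs its operands into a struct atomic64_sysarch_args and traps into the kernel via sysarch(ATOMIC64_SYSARCH, &args), where sysarch_atomic64() performs the operation under atomic64_user_mtx, so concurrent userland callers are serialized.

	/* a64_test.c (hypothetical): smoke test for the emulated 64-bit atomics. */
	#include <sys/types.h>
	#include <machine/atomic.h>

	#include <assert.h>

	int
	main(void)
	{
		volatile u_int64_t v = 0;
		u_int64_t old;

		atomic_store_64(&v, 1ULL << 40);	/* a value wider than 32 bits */
		atomic_add_64(&v, 5);
		old = atomic_fetchadd_64(&v, 3);	/* returns the pre-add value */
		assert(old == (1ULL << 40) + 5);

		old = (1ULL << 40) + 8;
		if (atomic_cmpset_64(&v, old, 0))	/* compare-and-set to zero */
			assert(atomic_load_64(&v) == 0);
		return (0);
	}

In-kernel users take a cheaper path: the same source file, built with _KERNEL defined, implements the operations directly under a small pool of mutexes selected by hashing the physical address of the target word (GET_MUTEX()), so unrelated 64-bit variables do not contend on a single global lock.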