Index: lib/libc/arm/aeabi/Makefile.inc
===================================================================
--- lib/libc/arm/aeabi/Makefile.inc	(revision 253396)
+++ lib/libc/arm/aeabi/Makefile.inc	(working copy)
@@ -5,7 +5,9 @@
 SRCS+=	aeabi_atexit.c \
 	aeabi_double.c \
 	aeabi_float.c \
-	aeabi_unwind_cpp.c
+	aeabi_unwind_cpp.c \
+	aeabi_vfp_double.S \
+	aeabi_vfp_float.S
 
 # Add the aeabi_mem* functions. While they live in compiler-rt they call into
 # libc. This causes issues when other parts of libc call these functions.
Index: lib/libc/arm/aeabi/Symbol.map
===================================================================
--- lib/libc/arm/aeabi/Symbol.map	(revision 253396)
+++ lib/libc/arm/aeabi/Symbol.map	(working copy)
@@ -44,4 +44,6 @@
 	__aeabi_i2d;
 
 	__aeabi_i2f;
+
+	_libc_arm_fpu_present;
 };
Index: lib/libc/arm/aeabi/aeabi_double.c
===================================================================
--- lib/libc/arm/aeabi/aeabi_double.c	(revision 253396)
+++ lib/libc/arm/aeabi/aeabi_double.c	(working copy)
@@ -32,70 +32,39 @@
 #include "milieu.h"
 #include "softfloat.h"
 
-flag __unorddf2(float64, float64);
+#include "aeabi_vfp.h"
 
-int __aeabi_dcmpeq(float64 a, float64 b)
-{
-	return float64_eq(a, b);
-}
+extern int _libc_arm_fpu_present;
 
-int __aeabi_dcmplt(float64 a, float64 b)
-{
-	return float64_lt(a, b);
-}
+flag __unorddf2(float64, float64);
+int __aeabi_dcmpeq_softfp(float64, float64);
+int __aeabi_dcmplt_softfp(float64, float64);
+int __aeabi_dcmple_softfp(float64, float64);
+int __aeabi_dcmpge_softfp(float64, float64);
+int __aeabi_dcmpgt_softfp(float64, float64);
+int __aeabi_dcmpun_softfp(float64, float64);
+int __aeabi_d2iz_softfp(float64);
+float32 __aeabi_d2f_softfp(float64);
+float64 __aeabi_i2d_softfp(int);
+float64 __aeabi_dadd_softfp(float64, float64);
+float64 __aeabi_ddiv_softfp(float64, float64);
+float64 __aeabi_dmul_softfp(float64, float64);
+float64 __aeabi_dsub_softfp(float64, float64);
 
-int __aeabi_dcmple(float64 a, float64 b)
-{
-	return float64_le(a, b);
-}
-
-int __aeabi_dcmpge(float64 a, float64 b)
-{
-	return float64_le(b, a);
-}
+int AEABI_FUNC2(dcmpeq, float64, float64_eq)
+int AEABI_FUNC2(dcmplt, float64, float64_lt)
+int AEABI_FUNC2(dcmple, float64, float64_le)
+int AEABI_FUNC2_REV(dcmpge, float64, float64_le)
+int AEABI_FUNC2_REV(dcmpgt, float64, float64_lt)
+int AEABI_FUNC2(dcmpun, float64, __unorddf2)
 
-int __aeabi_dcmpgt(float64 a, float64 b)
-{
-	return float64_lt(b, a);
-}
+int AEABI_FUNC(d2iz, float64, float64_to_int32_round_to_zero)
+float32 AEABI_FUNC(d2f, float64, float64_to_float32)
+float64 AEABI_FUNC(i2d, int, int32_to_float64)
 
-int __aeabi_dcmpun(float64 a, float64 b)
-{
-	return __unorddf2(a, b);
-}
+float64 AEABI_FUNC2(dadd, float64, float64_add)
+float64 AEABI_FUNC2(ddiv, float64, float64_div)
+float64 AEABI_FUNC2(dmul, float64, float64_mul)
+float64 AEABI_FUNC2(dsub, float64, float64_sub)
 
-int __aeabi_d2iz(float64 a)
-{
-	return float64_to_int32_round_to_zero(a);
-}
-
-float32 __aeabi_d2f(float64 a)
-{
-	return float64_to_float32(a);
-}
-
-float64 __aeabi_i2d(int a)
-{
-	return int32_to_float64(a);
-}
-
-float64 __aeabi_dadd(float64 a, float64 b)
-{
-	return float64_add(a, b);
-}
-
-float64 __aeabi_ddiv(float64 a, float64 b)
-{
-	return float64_div(a, b);
-}
-
-float64 __aeabi_dmul(float64 a, float64 b)
-{
-	return float64_mul(a, b);
-}
-
-float64 __aeabi_dsub(float64 a, float64 b)
-{
-	return float64_sub(a, b);
-}
-
Index: lib/libc/arm/aeabi/aeabi_float.c
===================================================================
--- lib/libc/arm/aeabi/aeabi_float.c	(revision 253396)
+++ lib/libc/arm/aeabi/aeabi_float.c	(working copy)
@@ -32,70 +32,39 @@
 #include "milieu.h"
 #include "softfloat.h"
 
-flag __unordsf2(float32, float32);
+#include "aeabi_vfp.h"
 
-int __aeabi_fcmpeq(float32 a, float32 b)
-{
-	return float32_eq(a, b);
-}
+extern int _libc_arm_fpu_present;
 
-int __aeabi_fcmplt(float32 a, float32 b)
-{
-	return float32_lt(a, b);
-}
+flag __unordsf2(float32, float32);
+int __aeabi_fcmpeq_softfp(float32, float32);
+int __aeabi_fcmplt_softfp(float32, float32);
+int __aeabi_fcmple_softfp(float32, float32);
+int __aeabi_fcmpge_softfp(float32, float32);
+int __aeabi_fcmpgt_softfp(float32, float32);
+int __aeabi_fcmpun_softfp(float32, float32);
+int __aeabi_f2iz_softfp(float32);
+float64 __aeabi_f2d_softfp(float32);
+float32 __aeabi_i2f_softfp(int);
+float32 __aeabi_fadd_softfp(float32, float32);
+float32 __aeabi_fdiv_softfp(float32, float32);
+float32 __aeabi_fmul_softfp(float32, float32);
+float32 __aeabi_fsub_softfp(float32, float32);
 
-int __aeabi_fcmple(float32 a, float32 b)
-{
-	return float32_le(a, b);
-}
-
-int __aeabi_fcmpge(float32 a, float32 b)
-{
-	return float32_le(b, a);
-}
+int AEABI_FUNC2(fcmpeq, float32, float32_eq)
+int AEABI_FUNC2(fcmplt, float32, float32_lt)
+int AEABI_FUNC2(fcmple, float32, float32_le)
+int AEABI_FUNC2_REV(fcmpge, float32, float32_le)
+int AEABI_FUNC2_REV(fcmpgt, float32, float32_lt)
+int AEABI_FUNC2(fcmpun, float32, __unordsf2)
 
-int __aeabi_fcmpgt(float32 a, float32 b)
-{
-	return float32_lt(b, a);
-}
+int AEABI_FUNC(f2iz, float32, float32_to_int32_round_to_zero)
+float64 AEABI_FUNC(f2d, float32, float32_to_float64)
+float32 AEABI_FUNC(i2f, int, int32_to_float32)
 
-int __aeabi_fcmpun(float32 a, float32 b)
-{
-	return __unordsf2(a, b);
-}
+float32 AEABI_FUNC2(fadd, float32, float32_add)
+float32 AEABI_FUNC2(fdiv, float32, float32_div)
+float32 AEABI_FUNC2(fmul, float32, float32_mul)
+float32 AEABI_FUNC2(fsub, float32, float32_sub)
 
-int __aeabi_f2iz(float32 a)
-{
-	return float32_to_int32_round_to_zero(a);
-}
-
-float32 __aeabi_f2d(float32 a)
-{
-	return float32_to_float64(a);
-}
-
-float32 __aeabi_i2f(int a)
-{
-	return int32_to_float32(a);
-}
-
-float32 __aeabi_fadd(float32 a, float32 b)
-{
-	return float32_add(a, b);
-}
-
-float32 __aeabi_fdiv(float32 a, float32 b)
-{
-	return float32_div(a, b);
-}
-
-float32 __aeabi_fmul(float32 a, float32 b)
-{
-	return float32_mul(a, b);
-}
-
-float32 __aeabi_fsub(float32 a, float32 b)
-{
-	return float32_sub(a, b);
-}
-
Index: lib/libc/arm/aeabi/aeabi_vfp.h
===================================================================
--- lib/libc/arm/aeabi/aeabi_vfp.h	(revision 0)
+++ lib/libc/arm/aeabi/aeabi_vfp.h	(working copy)
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef AEABI_VFP_H
+#define AEABI_VFP_H
+
+/*
+ * ASM helper macros. These allow the functions to be changed when
+ * building for a hard-float version of the ABI.
+ */
+
+/* Define a standard name for the function */
+#define AEABI_ENTRY(x)  ENTRY(__aeabi_ ## x ## _softfp)
+#define AEABI_END(x)    END(__aeabi_ ## x ## _softfp)
+
+/*
+ * These should be used when a function either takes or returns a floating
+ * point value. They will load the data from the ARM registers into the VFP
+ * register(s), or from the VFP register(s) back into the ARM registers.
+ */
+#ifdef __ARMEB__
+#define LOAD_DREG(vreg, reg0, reg1)   vmov vreg, reg1, reg0
+#define UNLOAD_DREG(reg0, reg1, vreg) vmov reg1, reg0, vreg
+#else
+#define LOAD_DREG(vreg, reg0, reg1)   vmov vreg, reg0, reg1
+#define UNLOAD_DREG(reg0, reg1, vreg) vmov reg0, reg1, vreg
+#endif
+
+#define LOAD_SREGS(vreg0, vreg1, reg0, reg1) vmov vreg0, vreg1, reg0, reg1
+#define LOAD_SREG(vreg, reg)                 vmov vreg, reg
+#define UNLOAD_SREG(reg, vreg)               vmov reg, vreg
+
+/*
+ * C Helper macros
+ */
+
+/*
+ * Generate a function that will either call into the VFP implementation,
+ * or the soft float version for a given __aeabi_* helper. The function
+ * will take a single argument of the type given by in_type.
+ */
+#define AEABI_FUNC(name, in_type, soft_func)                    \
+__aeabi_ ## name(in_type a)                                     \
+{                                                               \
+        if (_libc_arm_fpu_present)                              \
+                return __aeabi_ ## name ## _softfp(a);          \
+        else                                                    \
+                return soft_func (a);                           \
+}
+
+/* As above, but takes two arguments of the same type */
+#define AEABI_FUNC2(name, in_type, soft_func)                   \
+__aeabi_ ## name(in_type a, in_type b)                          \
+{                                                               \
+        if (_libc_arm_fpu_present)                              \
+                return __aeabi_ ## name ## _softfp(a, b);       \
+        else                                                    \
+                return soft_func (a, b);                        \
+}
+
+/* As above, but with the soft float arguments reversed */
+#define AEABI_FUNC2_REV(name, in_type, soft_func)               \
+__aeabi_ ## name(in_type a, in_type b)                          \
+{                                                               \
+        if (_libc_arm_fpu_present)                              \
+                return __aeabi_ ## name ## _softfp(a, b);       \
+        else                                                    \
+                return soft_func (b, a);                        \
+}
+
+
+#endif
Index: lib/libc/arm/aeabi/aeabi_vfp_double.S
===================================================================
--- lib/libc/arm/aeabi/aeabi_vfp_double.S	(revision 0)
+++ lib/libc/arm/aeabi/aeabi_vfp_double.S	(working copy)
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include "aeabi_vfp.h"
+
+.fpu	vfp
+.syntax	unified
+
+/* int __aeabi_dcmpeq(double, double) */
+AEABI_ENTRY(dcmpeq)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vcmp.f64 d0, d1
+        vmrs     APSR_nzcv, fpscr
+        movne    r0, #0
+        moveq    r0, #1
+        RET
+AEABI_END(dcmpeq)
+
+/* int __aeabi_dcmplt(double, double) */
+AEABI_ENTRY(dcmplt)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vcmp.f64 d0, d1
+        vmrs     APSR_nzcv, fpscr
+        movcs    r0, #0
+        movlt    r0, #1
+        RET
+AEABI_END(dcmplt)
+
+/* int __aeabi_dcmple(double, double) */
+AEABI_ENTRY(dcmple)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vcmp.f64 d0, d1
+        vmrs     APSR_nzcv, fpscr
+        movhi    r0, #0
+        movls    r0, #1
+        RET
+AEABI_END(dcmple)
+
+/* int __aeabi_dcmpge(double, double) */
+AEABI_ENTRY(dcmpge)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vcmp.f64 d0, d1
+        vmrs     APSR_nzcv, fpscr
+        movlt    r0, #0
+        movge    r0, #1
+        RET
+AEABI_END(dcmpge)
+
+/* int __aeabi_dcmpgt(double, double) */
+AEABI_ENTRY(dcmpgt)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vcmp.f64 d0, d1
+        vmrs     APSR_nzcv, fpscr
+        movle    r0, #0
+        movgt    r0, #1
+        RET
+AEABI_END(dcmpgt)
+
+/* int __aeabi_dcmpun(double, double) */
+AEABI_ENTRY(dcmpun)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vcmp.f64 d0, d1
+        vmrs     APSR_nzcv, fpscr
+        movvc    r0, #0
+        movvs    r0, #1
+        RET
+AEABI_END(dcmpun)
+
+/* int __aeabi_d2iz(double) */
+AEABI_ENTRY(d2iz)
+        LOAD_DREG(d0, r0, r1)
+#if 0
+        /*
+         * This should be the correct instruction, but binutils incorrectly
+         * encodes it as the version that uses the FPSCR to determine the
+         * rounding. When binutils is fixed we can use this again.
+         */
+        vcvt.s32.f64 s0, d0
+#else
+        ftosizd s0, d0
+#endif
+        vmov    r0, s0
+        RET
+AEABI_END(d2iz)
+
+/* float __aeabi_d2f(double) */
+AEABI_ENTRY(d2f)
+        LOAD_DREG(d0, r0, r1)
+        vcvt.f32.f64 s0, d0
+        UNLOAD_SREG(r0, s0)
+        RET
+AEABI_END(d2f)
+
+/* double __aeabi_i2d(int) */
+AEABI_ENTRY(i2d)
+        vmov    s0, r0
+        vcvt.f64.s32 d0, s0
+        UNLOAD_DREG(r0, r1, d0)
+        RET
+AEABI_END(i2d)
+
+/* double __aeabi_dadd(double, double) */
+AEABI_ENTRY(dadd)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vadd.f64 d0, d0, d1
+        UNLOAD_DREG(r0, r1, d0)
+        RET
+AEABI_END(dadd)
+
+/* double __aeabi_ddiv(double, double) */
+AEABI_ENTRY(ddiv)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vdiv.f64 d0, d0, d1
+        UNLOAD_DREG(r0, r1, d0)
+        RET
+AEABI_END(ddiv)
+
+/* double __aeabi_dmul(double, double) */
+AEABI_ENTRY(dmul)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vmul.f64 d0, d0, d1
+        UNLOAD_DREG(r0, r1, d0)
+        RET
+AEABI_END(dmul)
+
+/* double __aeabi_dsub(double, double) */
+AEABI_ENTRY(dsub)
+        LOAD_DREG(d0, r0, r1)
+        LOAD_DREG(d1, r2, r3)
+        vsub.f64 d0, d0, d1
+        UNLOAD_DREG(r0, r1, d0)
+        RET
+AEABI_END(dsub)
+
Index: lib/libc/arm/aeabi/aeabi_vfp_float.S
===================================================================
--- lib/libc/arm/aeabi/aeabi_vfp_float.S	(revision 0)
+++ lib/libc/arm/aeabi/aeabi_vfp_float.S	(working copy)
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2013 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include "aeabi_vfp.h"
+
+.fpu	vfp
+.syntax	unified
+
+/* int __aeabi_fcmpeq(float, float) */
+AEABI_ENTRY(fcmpeq)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vcmp.f32 s0, s1
+        vmrs     APSR_nzcv, fpscr
+        movne    r0, #0
+        moveq    r0, #1
+        RET
+AEABI_END(fcmpeq)
+
+/* int __aeabi_fcmplt(float, float) */
+AEABI_ENTRY(fcmplt)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vcmp.f32 s0, s1
+        vmrs     APSR_nzcv, fpscr
+        movcs    r0, #0
+        movlt    r0, #1
+        RET
+AEABI_END(fcmplt)
+
+/* int __aeabi_fcmple(float, float) */
+AEABI_ENTRY(fcmple)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vcmp.f32 s0, s1
+        vmrs     APSR_nzcv, fpscr
+        movhi    r0, #0
+        movls    r0, #1
+        RET
+AEABI_END(fcmple)
+
+/* int __aeabi_fcmpge(float, float) */
+AEABI_ENTRY(fcmpge)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vcmp.f32 s0, s1
+        vmrs     APSR_nzcv, fpscr
+        movlt    r0, #0
+        movge    r0, #1
+        RET
+AEABI_END(fcmpge)
+
+/* int __aeabi_fcmpgt(float, float) */
+AEABI_ENTRY(fcmpgt)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vcmp.f32 s0, s1
+        vmrs     APSR_nzcv, fpscr
+        movle    r0, #0
+        movgt    r0, #1
+        RET
+AEABI_END(fcmpgt)
+
+/* int __aeabi_fcmpun(float, float) */
+AEABI_ENTRY(fcmpun)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vcmp.f32 s0, s1
+        vmrs     APSR_nzcv, fpscr
+        movvc    r0, #0
+        movvs    r0, #1
+        RET
+AEABI_END(fcmpun)
+
+/* int __aeabi_f2iz(float) */
+AEABI_ENTRY(f2iz)
+        LOAD_SREG(s0, r0)
+#if 0
+        /*
+         * This should be the correct instruction, but binutils incorrectly
+         * encodes it as the version that uses the FPSCR to determine the
+         * rounding. When binutils is fixed we can use this again.
+         */
+        vcvt.s32.f32 s0, s0
+#else
+        ftosizs s0, s0
+#endif
+        vmov    r0, s0
+        RET
+AEABI_END(f2iz)
+
+/* double __aeabi_f2d(float) */
+AEABI_ENTRY(f2d)
+        LOAD_SREG(s0, r0)
+        vcvt.f64.f32 d0, s0
+        UNLOAD_DREG(r0, r1, d0)
+        RET
+AEABI_END(f2d)
+
+/* float __aeabi_i2f(int) */
+AEABI_ENTRY(i2f)
+        vmov    s0, r0
+        vcvt.f32.s32 s0, s0
+        UNLOAD_SREG(r0, s0)
+        RET
+AEABI_END(i2f)
+
+/* float __aeabi_fadd(float, float) */
+AEABI_ENTRY(fadd)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vadd.f32 s0, s0, s1
+        UNLOAD_SREG(r0, s0)
+        RET
+AEABI_END(fadd)
+
+/* float __aeabi_fdiv(float, float) */
+AEABI_ENTRY(fdiv)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vdiv.f32 s0, s0, s1
+        UNLOAD_SREG(r0, s0)
+        RET
+AEABI_END(fdiv)
+
+/* float __aeabi_fmul(float, float) */
+AEABI_ENTRY(fmul)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vmul.f32 s0, s0, s1
+        UNLOAD_SREG(r0, s0)
+        RET
+AEABI_END(fmul)
+
+/* float __aeabi_fsub(float, float) */
+AEABI_ENTRY(fsub)
+        LOAD_SREGS(s0, s1, r0, r1)
+        vsub.f32 s0, s0, s1
+        UNLOAD_SREG(r0, s0)
+        RET
+AEABI_END(fsub)
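
A few notes on the mechanics of the change, for readers of the patch.

The AEABI_FUNC/AEABI_FUNC2 invocations in aeabi_double.c and aeabi_float.c become ordinary function definitions once the preprocessor has run; the type written in front of the macro becomes the return type of the generated wrapper. As a rough sketch of the expansion (illustrative only, the authoritative text is the aeabi_vfp.h hunk above), the dadd line turns into approximately:

float64
__aeabi_dadd(float64 a, float64 b)
{
	if (_libc_arm_fpu_present)
		return __aeabi_dadd_softfp(a, b);	/* VFP code in aeabi_vfp_double.S */
	else
		return float64_add(a, b);		/* softfloat fallback */
}

AEABI_FUNC2_REV differs only in passing (b, a) to the softfloat routine, which is how __aeabi_dcmpge and __aeabi_dcmpgt can be built from float64_le and float64_lt: a >= b holds exactly when b <= a, and a > b exactly when b < a.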
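
The diff only consumes _libc_arm_fpu_present (and exports it via Symbol.map); it does not show how the flag is initialised. Purely as a sketch of the intent, and not code from this patch, an initialiser along these lines would do the job. The constructor name and the use of the hw.floatingpoint sysctl are assumptions made for the example:

#include <sys/types.h>
#include <sys/sysctl.h>

int _libc_arm_fpu_present;			/* read by the __aeabi_* wrappers */

static __attribute__((constructor)) void
_libc_arm_fpu_probe(void)			/* hypothetical; not part of this diff */
{
	int hasfp = 0;
	size_t len = sizeof(hasfp);

	if (sysctlbyname("hw.floatingpoint", &hasfp, &len, NULL, 0) == 0 &&
	    hasfp != 0)
		_libc_arm_fpu_present = 1;	/* safe to take the VFP paths */
}

The only hard requirement is that the flag is set before the first floating-point helper runs and never changes afterwards, so the wrappers can branch on it without any locking.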
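
The __ARMEB__ special case in LOAD_DREG/UNLOAD_DREG exists because vmov dN, rA, rB always takes the low half of the double in the first core register, while the two words of a soft-float double argument arrive in r0/r1 in memory order, which differs between little- and big-endian. A small check of that word order (plain C, only to illustrate the layout the macros compensate for):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	double d = 1.0;			/* bit pattern 0x3ff0000000000000 */
	uint32_t w[2];

	memcpy(w, &d, sizeof(w));	/* w[0], w[1] are the words in memory order */
	printf("w[0]=0x%08x w[1]=0x%08x\n", (unsigned)w[0], (unsigned)w[1]);
	/* little-endian: 0x00000000 then 0x3ff00000; big-endian: the reverse */
	return (0);
}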
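
Finally, d2iz/f2iz go out of their way (including the temporary ftosizd/ftosizs spelling) to get the round-towards-zero form of VCVT, because C's float-to-int conversion must truncate regardless of the rounding mode currently set in the FPSCR; the VCVTR form would honour the FPSCR instead. A minimal program that exercises the required behaviour — on an ARM EABI soft-float build the casts below end up calling __aeabi_d2iz:

#include <stdio.h>

int
main(void)
{
	/* casts to int must truncate towards zero, never round to nearest */
	printf("%d %d %d %d\n", (int)1.9, (int)-1.9, (int)2.5, (int)-2.5);
	/* expected output: 1 -1 2 -2 */
	return (0);
}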