Note: you may remove the following files: sys/amd64/amd64/bpf_jit_machdep.c sys/amd64/amd64/bpf_jit_machdep.h sys/i386/i386/bpf_jit_machdep.c sys/i386/i386/bpf_jit_machdep.h sys/net/bpf_jitter.c sys/net/bpf_jitter.h Index: UPDATING =================================================================== --- UPDATING (revision 243911) +++ UPDATING (working copy) @@ -26,6 +26,11 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 10.x IS SLOW disable the most expensive debugging functionality run "ln -s 'abort:false,junk:false' /etc/malloc.conf".) +20121205: + The BPF_JITTER kernel option has been removed. Its functionality + has been replaced by the BPFJIT option, which is now included in the + GENERIC kernels on the amd64, i386, pc98, and powerpc architectures. + 20121201: With the addition of auditdistd(8), a new auditdistd user is now depended on during installworld. "mergemaster -p" can be used to add Index: sys/amd64/conf/GENERIC =================================================================== --- sys/amd64/conf/GENERIC (revision 243911) +++ sys/amd64/conf/GENERIC (working copy) @@ -306,6 +306,7 @@ device firmware # firmware assist module # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter +options BPFJIT # enable BPF just-in-time compiler # USB support options USB_DEBUG # enable debug msgs Index: sys/amd64/conf/NOTES =================================================================== --- sys/amd64/conf/NOTES (revision 243911) +++ sys/amd64/conf/NOTES (working copy) @@ -86,10 +86,6 @@ cpu HAMMER # aka K8, aka Opteron & Athlon64 options DEVICE_POLLING -# BPF_JITTER adds support for BPF just-in-time compiler. - -options BPF_JITTER - # OpenFabrics Enterprise Distribution (Infiniband).
options OFED options OFED_DEBUG_INIT Index: sys/conf/files =================================================================== --- sys/conf/files (revision 243911) +++ sys/conf/files (working copy) @@ -513,6 +513,7 @@ contrib/ngatm/netnatm/sig/sig_unimsgcpy.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" contrib/ngatm/netnatm/sig/sig_verify.c optional ngatm_uni \ compile-with "${NORMAL_C} -I$S/contrib/ngatm" +contrib/sljit/sljitLir.c optional bpfjit crypto/blowfish/bf_ecb.c optional ipsec crypto/blowfish/bf_skey.c optional crypto | ipsec crypto/camellia/camellia.c optional crypto | ipsec @@ -2858,9 +2859,9 @@ libkern/strtouq.c standard libkern/strvalid.c standard net/bpf.c standard net/bpf_buffer.c optional bpf -net/bpf_jitter.c optional bpf_jitter -net/bpf_filter.c optional bpf | netgraph_bpf +net/bpf_filter.c optional bpf | bpfjit | netgraph_bpf net/bpf_zerocopy.c optional bpf +net/bpfjit.c optional bpfjit net/bridgestp.c optional bridge | if_bridge net/flowtable.c optional flowtable inet | flowtable inet6 net/ieee8023ad_lacp.c optional lagg Index: sys/conf/files.amd64 =================================================================== --- sys/conf/files.amd64 (revision 243911) +++ sys/conf/files.amd64 (working copy) @@ -98,7 +98,6 @@ amd64/amd64/amd64_mem.c optional mem amd64/amd64/atomic.c standard amd64/amd64/autoconf.c standard amd64/amd64/bios.c standard -amd64/amd64/bpf_jit_machdep.c optional bpf_jitter amd64/amd64/cpu_switch.S standard amd64/amd64/db_disasm.c optional ddb amd64/amd64/db_interface.c optional ddb Index: sys/conf/files.i386 =================================================================== --- sys/conf/files.i386 (revision 243911) +++ sys/conf/files.i386 (working copy) @@ -403,7 +403,6 @@ i386/i386/atomic.c standard \ i386/i386/autoconf.c standard i386/i386/bios.c optional native i386/i386/bioscall.s optional native -i386/i386/bpf_jit_machdep.c optional bpf_jitter i386/i386/db_disasm.c optional ddb i386/i386/db_interface.c optional ddb i386/i386/db_trace.c optional ddb Index: sys/conf/files.pc98 =================================================================== --- sys/conf/files.pc98 (revision 243911) +++ sys/conf/files.pc98 (working copy) @@ -134,7 +134,6 @@ i386/i386/atomic.c standard \ i386/i386/autoconf.c standard i386/i386/bios.c standard i386/i386/bioscall.s standard -i386/i386/bpf_jit_machdep.c optional bpf_jitter i386/i386/db_disasm.c optional ddb i386/i386/db_interface.c optional ddb i386/i386/db_trace.c optional ddb Index: sys/conf/options =================================================================== --- sys/conf/options (revision 243911) +++ sys/conf/options (working copy) @@ -894,3 +894,6 @@ RACCT opt_global.h # Resource Limits RCTL opt_global.h + +# BPF just-in-time compiler +BPFJIT opt_bpf.h Index: sys/conf/options.amd64 =================================================================== --- sys/conf/options.amd64 (revision 243911) +++ sys/conf/options.amd64 (working copy) @@ -61,9 +61,6 @@ DEV_ATPIC opt_atpic.h # Debugging KDTRACE_FRAME opt_kdtrace.h -# BPF just-in-time compiler -BPF_JITTER opt_bpf.h - XENHVM opt_global.h # options for the Intel C600 SAS driver (isci) Index: sys/conf/options.i386 =================================================================== --- sys/conf/options.i386 (revision 243911) +++ sys/conf/options.i386 (working copy) @@ -114,9 +114,6 @@ ASR_COMPAT opt_asr.h # Debugging NPX_DEBUG opt_npx.h -# BPF just-in-time compiler -BPF_JITTER opt_bpf.h - NATIVE opt_global.h XEN opt_global.h XENHVM opt_global.h Index:
sys/conf/options.pc98 =================================================================== --- sys/conf/options.pc98 (revision 243911) +++ sys/conf/options.pc98 (working copy) @@ -97,6 +97,3 @@ DEV_NPX opt_npx.h # Debugging NPX_DEBUG opt_npx.h AGP_DEBUG opt_agp.h - -# BPF just-in-time compiler -BPF_JITTER opt_bpf.h Index: sys/contrib/sljit/sljitConfig.h =================================================================== --- sys/contrib/sljit/sljitConfig.h (revision 0) +++ sys/contrib/sljit/sljitConfig.h (working copy) @@ -0,0 +1,133 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _SLJIT_CONFIG_H_ +#define _SLJIT_CONFIG_H_ + +/* --------------------------------------------------------------------- */ +/* Custom defines */ +/* --------------------------------------------------------------------- */ + +/* Put your custom defines here. This empty section will never change + which helps maintaining patches (with diff / patch utilities). */ + +#ifdef _KERNEL +#include <sys/param.h> +#include <sys/malloc.h> + +#define SLJIT_CALL +#define SLJIT_CONFIG_AUTO 1 +#define SLJIT_DEBUG 0 +#define SLJIT_EXECUTABLE_ALLOCATOR 0 +#define SLJIT_STD_MACROS_DEFINED 1 +#define SLJIT_SINGLE_THREADED 1 +#define SLJIT_UTIL_STACK 0 +#define SLJIT_VERBOSE 0 + +#define SLJIT_FREE(ptr) free(ptr, M_TEMP) +#define SLJIT_FREE_EXEC(ptr) free(ptr, M_TEMP) +#define SLJIT_MALLOC(size) malloc(size, M_TEMP, M_NOWAIT) +#define SLJIT_MALLOC_EXEC(size) malloc(size, M_TEMP, M_NOWAIT) +#define SLJIT_MEMMOVE(dest, src, len) bcopy(src, dest, len) +#define SLJIT_ZEROMEM(dest, len) bzero(dest, len) +#endif + +/* --------------------------------------------------------------------- */ +/* Architecture */ +/* --------------------------------------------------------------------- */ + +/* Architecture selection.
*/ +/* #define SLJIT_CONFIG_X86_32 1 */ +/* #define SLJIT_CONFIG_X86_64 1 */ +/* #define SLJIT_CONFIG_ARM_V5 1 */ +/* #define SLJIT_CONFIG_ARM_V7 1 */ +/* #define SLJIT_CONFIG_ARM_THUMB2 1 */ +/* #define SLJIT_CONFIG_PPC_32 1 */ +/* #define SLJIT_CONFIG_PPC_64 1 */ +/* #define SLJIT_CONFIG_MIPS_32 1 */ +/* #define SLJIT_CONFIG_SPARC_32 1 */ + +/* #define SLJIT_CONFIG_AUTO 1 */ +/* #define SLJIT_CONFIG_UNSUPPORTED 1 */ + +/* --------------------------------------------------------------------- */ +/* Utilities */ +/* --------------------------------------------------------------------- */ + +/* Useful for thread-safe compiling of global functions. */ +#ifndef SLJIT_UTIL_GLOBAL_LOCK +/* Enabled by default */ +#define SLJIT_UTIL_GLOBAL_LOCK 1 +#endif + +/* Implements a stack like data structure (by using mmap / VirtualAlloc). */ +#ifndef SLJIT_UTIL_STACK +/* Enabled by default */ +#define SLJIT_UTIL_STACK 1 +#endif + +/* Single threaded application. Does not require any locks. */ +#ifndef SLJIT_SINGLE_THREADED +/* Disabled by default. */ +#define SLJIT_SINGLE_THREADED 0 +#endif + +/* --------------------------------------------------------------------- */ +/* Configuration */ +/* --------------------------------------------------------------------- */ + +/* If SLJIT_STD_MACROS_DEFINED is not defined, the application should + define SLJIT_MALLOC, SLJIT_FREE, SLJIT_MEMMOVE, and NULL. */ +#ifndef SLJIT_STD_MACROS_DEFINED +/* Disabled by default. */ +#define SLJIT_STD_MACROS_DEFINED 0 +#endif + +/* Executable code allocation: + If SLJIT_EXECUTABLE_ALLOCATOR is not defined, the application should + define both SLJIT_MALLOC_EXEC and SLJIT_FREE_EXEC. */ +#ifndef SLJIT_EXECUTABLE_ALLOCATOR +/* Enabled by default. */ +#define SLJIT_EXECUTABLE_ALLOCATOR 1 +#endif + +/* Debug checks (assertions, etc.). */ +#ifndef SLJIT_DEBUG +/* Enabled by default */ +#define SLJIT_DEBUG 1 +#endif + +/* Verbose operations */ +#ifndef SLJIT_VERBOSE +/* Enabled by default */ +#define SLJIT_VERBOSE 1 +#endif + +/* See the beginning of sljitConfigInternal.h */ + +#endif Property changes on: sys/contrib/sljit/sljitConfig.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitConfigInternal.h =================================================================== --- sys/contrib/sljit/sljitConfigInternal.h (revision 0) +++ sys/contrib/sljit/sljitConfigInternal.h (working copy) @@ -0,0 +1,484 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SLJIT_CONFIG_INTERNAL_H_ +#define _SLJIT_CONFIG_INTERNAL_H_ + +/* + SLJIT defines the following macros depending on the target architecture: + + Feature detection (boolean) macros: + SLJIT_32BIT_ARCHITECTURE : 32 bit architecture + SLJIT_64BIT_ARCHITECTURE : 64 bit architecture + SLJIT_WORD_SHIFT : the shift required to apply when accessing a sljit_sw/sljit_uw array by index + SLJIT_DOUBLE_SHIFT : the shift required to apply when accessing a double array by index + SLJIT_LITTLE_ENDIAN : little endian architecture + SLJIT_BIG_ENDIAN : big endian architecture + SLJIT_UNALIGNED : allows unaligned memory accesses for non-fpu operations (only!) + SLJIT_INDIRECT_CALL : see SLJIT_FUNC_OFFSET() for more information + SLJIT_RETURN_ADDRESS_OFFSET : a return instruction always adds this offset to the return address + + Types and useful macros: + sljit_sb, sljit_ub : signed and unsigned 8 bit byte + sljit_sh, sljit_uh : signed and unsigned 16 bit half-word (short) type + sljit_si, sljit_ui : signed and unsigned 32 bit integer type + sljit_sw, sljit_uw : signed and unsigned machine word, enough to store a pointer + sljit_p : unsigned pointer value (usually the same as sljit_uw, but + some 64 bit ABIs may use 32 bit pointers) + sljit_s : single precision floating point value + sljit_d : double precision floating point value + SLJIT_CALL : C calling convention define for both calling JIT from C and C callbacks for JIT + SLJIT_W(number) : defining 64 bit constants on 64 bit architectures (compiler independent helper) +*/ + +#if !((defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ + || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ + || (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \ + || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ + || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ + || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ + || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ + || (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ + || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ + || (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \ + || (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED)) +#error "An architecture must be selected" +#endif + +/* Sanity check.
*/ +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ + + (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ + + (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) \ + + (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ + + (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ + + (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ + + (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ + + (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) \ + + (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) \ + + (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) \ + + (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) >= 2 +#error "Multiple architectures are selected" +#endif + +/* Auto select option (requires compiler support) */ +#if (defined SLJIT_CONFIG_AUTO && SLJIT_CONFIG_AUTO) + +#ifndef _WIN32 + +#if defined(__i386__) || defined(__i386) +#define SLJIT_CONFIG_X86_32 1 +#elif defined(__x86_64__) +#define SLJIT_CONFIG_X86_64 1 +#elif defined(__arm__) || defined(__ARM__) +#ifdef __thumb2__ +#define SLJIT_CONFIG_ARM_THUMB2 1 +#elif defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) +#define SLJIT_CONFIG_ARM_V7 1 +#else +#define SLJIT_CONFIG_ARM_V5 1 +#endif +#elif defined(__ppc64__) || defined(__powerpc64__) || defined(_ARCH_PPC64) || (defined(_POWER) && defined(__64BIT__)) +#define SLJIT_CONFIG_PPC_64 1 +#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC) || defined(_ARCH_PWR) || defined(_ARCH_PWR2) || defined(_POWER) +#define SLJIT_CONFIG_PPC_32 1 +#elif defined(__mips__) +#define SLJIT_CONFIG_MIPS_32 1 +#elif defined(__sparc__) || defined(__sparc) +#define SLJIT_CONFIG_SPARC_32 1 +#else +/* Unsupported architecture */ +#define SLJIT_CONFIG_UNSUPPORTED 1 +#endif + +#else /* !_WIN32 */ + +#if defined(_M_X64) || defined(__x86_64__) +#define SLJIT_CONFIG_X86_64 1 +#elif defined(_ARM_) +#define SLJIT_CONFIG_ARM_V5 1 +#else +#define SLJIT_CONFIG_X86_32 1 +#endif + +#endif /* !WIN32 */ +#endif /* SLJIT_CONFIG_AUTO */ + +#if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) +#undef SLJIT_EXECUTABLE_ALLOCATOR +#endif + +#if !(defined SLJIT_STD_MACROS_DEFINED && SLJIT_STD_MACROS_DEFINED) + +/* These libraries are needed for the macros below. */ +#include <stdlib.h> +#include <string.h> + +#endif /* STD_MACROS_DEFINED */ + +/* General macros: + Note: SLJIT is designed to be independent from them as possible. + + In release mode (SLJIT_DEBUG is not defined) only the following macros are needed: +*/ + +#ifndef SLJIT_MALLOC +#define SLJIT_MALLOC(size) malloc(size) +#endif + +#ifndef SLJIT_FREE +#define SLJIT_FREE(ptr) free(ptr) +#endif + +#ifndef SLJIT_MEMMOVE +#define SLJIT_MEMMOVE(dest, src, len) memmove(dest, src, len) +#endif + +#ifndef SLJIT_ZEROMEM +#define SLJIT_ZEROMEM(dest, len) memset(dest, 0, len) +#endif + +#if !defined(SLJIT_LIKELY) && !defined(SLJIT_UNLIKELY) + +#if defined(__GNUC__) && (__GNUC__ >= 3) +#define SLJIT_LIKELY(x) __builtin_expect((x), 1) +#define SLJIT_UNLIKELY(x) __builtin_expect((x), 0) +#else +#define SLJIT_LIKELY(x) (x) +#define SLJIT_UNLIKELY(x) (x) +#endif + +#endif /* !defined(SLJIT_LIKELY) && !defined(SLJIT_UNLIKELY) */ + +#ifndef SLJIT_INLINE +/* Inline functions. */ +#define SLJIT_INLINE __inline +#endif + +#ifndef SLJIT_CONST +/* Const variables. */ +#define SLJIT_CONST const +#endif + +#ifndef SLJIT_UNUSED_ARG +/* Unused arguments. */ +#define SLJIT_UNUSED_ARG(arg) (void)arg +#endif + +#if (defined SLJIT_CONFIG_STATIC && SLJIT_CONFIG_STATIC) +/* Static ABI functions.
For all-in-one programs. */ + +#if defined(__GNUC__) +/* Disable unused warnings in gcc. */ +#define SLJIT_API_FUNC_ATTRIBUTE static __attribute__((unused)) +#else +#define SLJIT_API_FUNC_ATTRIBUTE static +#endif + +#else +#define SLJIT_API_FUNC_ATTRIBUTE +#endif /* (defined SLJIT_CONFIG_STATIC && SLJIT_CONFIG_STATIC) */ + +#ifndef SLJIT_CACHE_FLUSH + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + +/* Not required to implement on archs with unified caches. */ +#define SLJIT_CACHE_FLUSH(from, to) + +#elif defined __APPLE__ + +/* Supported by all macs since Mac OS 10.5. + However, it does not work on non-jailbroken iOS devices, + although the compilation is successful. */ + +#define SLJIT_CACHE_FLUSH(from, to) \ + sys_icache_invalidate((char*)(from), (char*)(to) - (char*)(from)) + +#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + +/* The __clear_cache() implementation of GCC is a dummy function on PowerPC. */ +#define SLJIT_CACHE_FLUSH(from, to) \ + ppc_cache_flush((from), (to)) + +#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + +/* The __clear_cache() implementation of GCC is a dummy function on Sparc. */ +#define SLJIT_CACHE_FLUSH(from, to) \ + sparc_cache_flush((from), (to)) + +#else + +/* Calls __ARM_NR_cacheflush on ARM-Linux. */ +#define SLJIT_CACHE_FLUSH(from, to) \ + __clear_cache((char*)(from), (char*)(to)) + +#endif + +#endif /* !SLJIT_CACHE_FLUSH */ + +/* 8 bit byte type. */ +typedef unsigned char sljit_ub; +typedef signed char sljit_sb; + +/* 16 bit half-word type. */ +typedef unsigned short int sljit_uh; +typedef signed short int sljit_sh; + +/* 32 bit integer type. */ +typedef unsigned int sljit_ui; +typedef signed int sljit_si; + +/* Machine word type. Can encapsulate a pointer. + 32 bit for 32 bit machines. + 64 bit for 64 bit machines. */ +#if (defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) +/* Just to have something. */ +#define SLJIT_WORD_SHIFT 0 +typedef unsigned long int sljit_uw; +typedef long int sljit_sw; +#elif !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && !(defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define SLJIT_32BIT_ARCHITECTURE 1 +#define SLJIT_WORD_SHIFT 2 +typedef unsigned int sljit_uw; +typedef int sljit_sw; +#else +#define SLJIT_64BIT_ARCHITECTURE 1 +#define SLJIT_WORD_SHIFT 3 +#ifdef _WIN32 +typedef unsigned __int64 sljit_uw; +typedef __int64 sljit_sw; +#else +typedef unsigned long int sljit_uw; +typedef long int sljit_sw; +#endif +#endif + +typedef sljit_uw sljit_p; + +/* Floating point types. */ +typedef float sljit_s; +typedef double sljit_d; + +/* Shift for pointer sized data. */ +#define SLJIT_POINTER_SHIFT SLJIT_WORD_SHIFT + +/* Shift for double precision sized data. */ +#define SLJIT_DOUBLE_SHIFT 3 + +#ifndef SLJIT_W + +/* Defining long constants. */ +#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) +#define SLJIT_W(w) (w##ll) +#else +#define SLJIT_W(w) (w) +#endif + +#endif /* !SLJIT_W */ + +#ifndef SLJIT_CALL + +/* ABI (Application Binary Interface) types. */ +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + +#if defined(__GNUC__) + +#define SLJIT_CALL __attribute__ ((fastcall)) +#define SLJIT_X86_32_FASTCALL 1 + +#elif defined(_MSC_VER) + +#define SLJIT_CALL __fastcall +#define SLJIT_X86_32_FASTCALL 1 + +#elif defined(__BORLANDC__) + +#define SLJIT_CALL __msfastcall +#define SLJIT_X86_32_FASTCALL 1 + +#else /* Unknown compiler. 
*/ + +/* The cdecl attribute is the default. */ +#define SLJIT_CALL + +#endif + +#else /* Non x86-32 architectures. */ + +#define SLJIT_CALL + +#endif /* SLJIT_CONFIG_X86_32 */ + +#endif /* !SLJIT_CALL */ + +#if !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) + +/* These macros are useful for the application. */ +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ + || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) \ + || (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#define SLJIT_BIG_ENDIAN 1 + +#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + +#ifdef __MIPSEL__ +#define SLJIT_LITTLE_ENDIAN 1 +#else +#define SLJIT_BIG_ENDIAN 1 +#endif + +#else +#define SLJIT_LITTLE_ENDIAN 1 +#endif + +#endif /* !defined(SLJIT_BIG_ENDIAN) && !defined(SLJIT_LITTLE_ENDIAN) */ + +/* Sanity check. */ +#if (defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) +#error "Exactly one endianness must be selected" +#endif + +#if !(defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN) && !(defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN) +#error "Exactly one endianness must be selected" +#endif + +#ifndef SLJIT_INDIRECT_CALL +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32 && defined _AIX) +/* It seems certain ppc compilers use an indirect addressing for functions + which makes things complicated. */ +#define SLJIT_INDIRECT_CALL 1 +#endif +#endif /* SLJIT_INDIRECT_CALL */ + +#ifndef SLJIT_RETURN_ADDRESS_OFFSET +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#define SLJIT_RETURN_ADDRESS_OFFSET 8 +#else +#define SLJIT_RETURN_ADDRESS_OFFSET 0 +#endif +#endif /* SLJIT_RETURN_ADDRESS_OFFSET */ + +#ifndef SLJIT_SSE2 + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +/* Turn on SSE2 support on x86. */ +#define SLJIT_SSE2 1 + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +/* Auto detect SSE2 support using CPUID. + On 64 bit x86 cpus, sse2 must be present. */ +#define SLJIT_DETECT_SSE2 1 +#endif + +#endif /* (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) */ + +#endif /* !SLJIT_SSE2 */ + +#ifndef SLJIT_UNALIGNED + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) \ + || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) \ + || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) \ + || (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) \ + || (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) \ + || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define SLJIT_UNALIGNED 1 +#endif + +#endif /* !SLJIT_UNALIGNED */ + +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) +SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size); +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr); +#define SLJIT_MALLOC_EXEC(size) sljit_malloc_exec(size) +#define SLJIT_FREE_EXEC(ptr) sljit_free_exec(ptr) +#endif + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) +#include <stdio.h> +#endif + +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + +#if !defined(SLJIT_ASSERT) || !defined(SLJIT_ASSERT_STOP) + +/* SLJIT_HALT_PROCESS must halt the process. */ +#ifndef SLJIT_HALT_PROCESS +#include <stdlib.h> + +#define SLJIT_HALT_PROCESS() \ + abort(); +#endif /* !SLJIT_HALT_PROCESS */ + +#include <stdio.h> + +#endif /* !SLJIT_ASSERT || !SLJIT_ASSERT_STOP */ + +/* Feel free to redefine these two macros.
*/ +#ifndef SLJIT_ASSERT + +#define SLJIT_ASSERT(x) \ + do { \ + if (SLJIT_UNLIKELY(!(x))) { \ + printf("Assertion failed at " __FILE__ ":%d\n", __LINE__); \ + SLJIT_HALT_PROCESS(); \ + } \ + } while (0) + +#endif /* !SLJIT_ASSERT */ + +#ifndef SLJIT_ASSERT_STOP + +#define SLJIT_ASSERT_STOP() \ + do { \ + printf("Should never been reached " __FILE__ ":%d\n", __LINE__); \ + SLJIT_HALT_PROCESS(); \ + } while (0) + +#endif /* !SLJIT_ASSERT_STOP */ + +#else /* (defined SLJIT_DEBUG && SLJIT_DEBUG) */ + +/* Forcing empty, but valid statements. */ +#undef SLJIT_ASSERT +#undef SLJIT_ASSERT_STOP + +#define SLJIT_ASSERT(x) \ + do { } while (0) +#define SLJIT_ASSERT_STOP() \ + do { } while (0) + +#endif /* (defined SLJIT_DEBUG && SLJIT_DEBUG) */ + +#ifndef SLJIT_COMPILE_ASSERT + +/* Should be improved eventually. */ +#define SLJIT_COMPILE_ASSERT(x, description) \ + SLJIT_ASSERT(x) + +#endif /* !SLJIT_COMPILE_ASSERT */ + +#endif Property changes on: sys/contrib/sljit/sljitConfigInternal.h ___________________________________________________________________ Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: sys/contrib/sljit/sljitExecAllocator.c =================================================================== --- sys/contrib/sljit/sljitExecAllocator.c (revision 0) +++ sys/contrib/sljit/sljitExecAllocator.c (working copy) @@ -0,0 +1,289 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + This file contains a simple executable memory allocator + + It is assumed, that executable code blocks are usually medium (or sometimes + large) memory blocks, and the allocator is not too frequently called (less + optimized than other allocators). Thus, using it as a generic allocator is + not suggested. + + How does it work: + Memory is allocated in continuous memory areas called chunks by alloc_chunk() + Chunk format: + [ block ][ block ] ... [ block ][ block terminator ] + + All blocks and the block terminator is started with block_header. 
The block + header contains the size of the previous and the next block. These sizes + can also contain special values. + Block size: + 0 - The block is a free_block, with a different size member. + 1 - The block is a block terminator. + n - The block is used at the moment, and the value contains its size. + Previous block size: + 0 - This is the first block of the memory chunk. + n - The size of the previous block. + + Using these size values we can go forward or backward on the block chain. + The unused blocks are stored in a chain list pointed by free_blocks. This + list is useful if we need to find a suitable memory area when the allocator + is called. + + When a block is freed, the new free block is connected to its adjacent free + blocks if possible. + + [ free block ][ used block ][ free block ] + and "used block" is freed, the three blocks are connected together: + [ one big free block ] +*/ + +/* --------------------------------------------------------------------- */ +/* System (OS) functions */ +/* --------------------------------------------------------------------- */ + +/* 64 KByte. */ +#define CHUNK_SIZE 0x10000 + +/* + alloc_chunk / free_chunk : + * allocate executable system memory chunks + * the size is always divisible by CHUNK_SIZE + allocator_grab_lock / allocator_release_lock : + * make the allocator thread safe + * can be empty if the OS (or the application) does not support threading + * only the allocator requires this lock, sljit is fully thread safe + as it only uses local variables +*/ + +#ifdef _WIN32 + +static SLJIT_INLINE void* alloc_chunk(sljit_uw size) +{ + return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE); +} + +static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size) +{ + SLJIT_UNUSED_ARG(size); + VirtualFree(chunk, 0, MEM_RELEASE); +} + +#else + +static SLJIT_INLINE void* alloc_chunk(sljit_uw size) +{ + void* retval; + +#ifdef MAP_ANON + retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0); +#else + if (dev_zero < 0) { + if (open_dev_zero()) + return NULL; + } + retval = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE, dev_zero, 0); +#endif + + return (retval != MAP_FAILED) ? 
retval : NULL; +} + +static SLJIT_INLINE void free_chunk(void* chunk, sljit_uw size) +{ + munmap(chunk, size); +} + +#endif + +/* --------------------------------------------------------------------- */ +/* Common functions */ +/* --------------------------------------------------------------------- */ + +#define CHUNK_MASK (~(CHUNK_SIZE - 1)) + +struct block_header { + sljit_uw size; + sljit_uw prev_size; +}; + +struct free_block { + struct block_header header; + struct free_block *next; + struct free_block *prev; + sljit_uw size; +}; + +#define AS_BLOCK_HEADER(base, offset) \ + ((struct block_header*)(((sljit_ub*)base) + offset)) +#define AS_FREE_BLOCK(base, offset) \ + ((struct free_block*)(((sljit_ub*)base) + offset)) +#define MEM_START(base) ((void*)(((sljit_ub*)base) + sizeof(struct block_header))) +#define ALIGN_SIZE(size) (((size) + sizeof(struct block_header) + 7) & ~7) + +static struct free_block* free_blocks; +static sljit_uw allocated_size; +static sljit_uw total_size; + +static SLJIT_INLINE void sljit_insert_free_block(struct free_block *free_block, sljit_uw size) +{ + free_block->header.size = 0; + free_block->size = size; + + free_block->next = free_blocks; + free_block->prev = 0; + if (free_blocks) + free_blocks->prev = free_block; + free_blocks = free_block; +} + +static SLJIT_INLINE void sljit_remove_free_block(struct free_block *free_block) +{ + if (free_block->next) + free_block->next->prev = free_block->prev; + + if (free_block->prev) + free_block->prev->next = free_block->next; + else { + SLJIT_ASSERT(free_blocks == free_block); + free_blocks = free_block->next; + } +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_malloc_exec(sljit_uw size) +{ + struct block_header *header; + struct block_header *next_header; + struct free_block *free_block; + sljit_uw chunk_size; + + allocator_grab_lock(); + if (size < sizeof(struct free_block)) + size = sizeof(struct free_block); + size = ALIGN_SIZE(size); + + free_block = free_blocks; + while (free_block) { + if (free_block->size >= size) { + chunk_size = free_block->size; + if (chunk_size > size + 64) { + /* We just cut a block from the end of the free block. */ + chunk_size -= size; + free_block->size = chunk_size; + header = AS_BLOCK_HEADER(free_block, chunk_size); + header->prev_size = chunk_size; + AS_BLOCK_HEADER(header, size)->prev_size = size; + } + else { + sljit_remove_free_block(free_block); + header = (struct block_header*)free_block; + size = chunk_size; + } + allocated_size += size; + header->size = size; + allocator_release_lock(); + return MEM_START(header); + } + free_block = free_block->next; + } + + chunk_size = (size + sizeof(struct block_header) + CHUNK_SIZE - 1) & CHUNK_MASK; + header = (struct block_header*)alloc_chunk(chunk_size); + if (!header) { + allocator_release_lock(); + return NULL; + } + + chunk_size -= sizeof(struct block_header); + total_size += chunk_size; + + header->prev_size = 0; + if (chunk_size > size + 64) { + /* Cut the allocated space into a free and a used block. */ + allocated_size += size; + header->size = size; + chunk_size -= size; + + free_block = AS_FREE_BLOCK(header, size); + free_block->header.prev_size = size; + sljit_insert_free_block(free_block, chunk_size); + next_header = AS_BLOCK_HEADER(free_block, chunk_size); + } + else { + /* All space belongs to this allocation. 
*/ + allocated_size += chunk_size; + header->size = chunk_size; + next_header = AS_BLOCK_HEADER(header, chunk_size); + } + next_header->size = 1; + next_header->prev_size = chunk_size; + allocator_release_lock(); + return MEM_START(header); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_exec(void* ptr) +{ + struct block_header *header; + struct free_block* free_block; + + allocator_grab_lock(); + header = AS_BLOCK_HEADER(ptr, -(sljit_sw)sizeof(struct block_header)); + allocated_size -= header->size; + + /* Connecting free blocks together if possible. */ + + /* If header->prev_size == 0, free_block will equal to header. + In this case, free_block->header.size will be > 0. */ + free_block = AS_FREE_BLOCK(header, -(sljit_sw)header->prev_size); + if (SLJIT_UNLIKELY(!free_block->header.size)) { + free_block->size += header->size; + header = AS_BLOCK_HEADER(free_block, free_block->size); + header->prev_size = free_block->size; + } + else { + free_block = (struct free_block*)header; + sljit_insert_free_block(free_block, header->size); + } + + header = AS_BLOCK_HEADER(free_block, free_block->size); + if (SLJIT_UNLIKELY(!header->size)) { + free_block->size += ((struct free_block*)header)->size; + sljit_remove_free_block((struct free_block*)header); + header = AS_BLOCK_HEADER(free_block, free_block->size); + header->prev_size = free_block->size; + } + + /* The whole chunk is free. */ + if (SLJIT_UNLIKELY(!free_block->header.prev_size && header->size == 1)) { + /* If this block is freed, we still have (allocated_size / 2) free space. */ + if (total_size - free_block->size > (allocated_size * 3 / 2)) { + total_size -= free_block->size; + sljit_remove_free_block(free_block); + free_chunk(free_block, free_block->size + sizeof(struct block_header)); + } + } + + allocator_release_lock(); +} Property changes on: sys/contrib/sljit/sljitExecAllocator.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Index: sys/contrib/sljit/sljitLir.c =================================================================== --- sys/contrib/sljit/sljitLir.c (revision 0) +++ sys/contrib/sljit/sljitLir.c (working copy) @@ -0,0 +1,1766 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "sljitLir.h" + +#define CHECK_ERROR() \ + do { \ + if (SLJIT_UNLIKELY(compiler->error)) \ + return compiler->error; \ + } while (0) + +#define CHECK_ERROR_PTR() \ + do { \ + if (SLJIT_UNLIKELY(compiler->error)) \ + return NULL; \ + } while (0) + +#define CHECK_ERROR_VOID() \ + do { \ + if (SLJIT_UNLIKELY(compiler->error)) \ + return; \ + } while (0) + +#define FAIL_IF(expr) \ + do { \ + if (SLJIT_UNLIKELY(expr)) \ + return compiler->error; \ + } while (0) + +#define PTR_FAIL_IF(expr) \ + do { \ + if (SLJIT_UNLIKELY(expr)) \ + return NULL; \ + } while (0) + +#define FAIL_IF_NULL(ptr) \ + do { \ + if (SLJIT_UNLIKELY(!(ptr))) { \ + compiler->error = SLJIT_ERR_ALLOC_FAILED; \ + return SLJIT_ERR_ALLOC_FAILED; \ + } \ + } while (0) + +#define PTR_FAIL_IF_NULL(ptr) \ + do { \ + if (SLJIT_UNLIKELY(!(ptr))) { \ + compiler->error = SLJIT_ERR_ALLOC_FAILED; \ + return NULL; \ + } \ + } while (0) + +#define PTR_FAIL_WITH_EXEC_IF(ptr) \ + do { \ + if (SLJIT_UNLIKELY(!(ptr))) { \ + compiler->error = SLJIT_ERR_EX_ALLOC_FAILED; \ + return NULL; \ + } \ + } while (0) + +#if !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) + +#define GET_OPCODE(op) \ + ((op) & ~(SLJIT_INT_OP | SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS)) + +#define GET_FLAGS(op) \ + ((op) & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C)) + +#define GET_ALL_FLAGS(op) \ + ((op) & (SLJIT_INT_OP | SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS)) + +#define TYPE_CAST_NEEDED(op) \ + (((op) >= SLJIT_MOV_UB && (op) <= SLJIT_MOV_SH) || ((op) >= SLJIT_MOVU_UB && (op) <= SLJIT_MOVU_SH)) + +#define BUF_SIZE 4096 + +#if (defined SLJIT_32BIT_ARCHITECTURE && SLJIT_32BIT_ARCHITECTURE) +#define ABUF_SIZE 2048 +#else +#define ABUF_SIZE 4096 +#endif + +/* Jump flags. */ +#define JUMP_LABEL 0x1 +#define JUMP_ADDR 0x2 +/* SLJIT_REWRITABLE_JUMP is 0x1000. 
*/ + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +# define PATCH_MB 0x4 +# define PATCH_MW 0x8 +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +# define PATCH_MD 0x10 +#endif +#endif + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) +# define IS_BL 0x4 +# define PATCH_B 0x8 +#endif + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) +# define CPOOL_SIZE 512 +#endif + +#if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) +# define IS_COND 0x04 +# define IS_BL 0x08 + /* cannot be encoded as branch */ +# define B_TYPE0 0x00 + /* conditional + imm8 */ +# define B_TYPE1 0x10 + /* conditional + imm20 */ +# define B_TYPE2 0x20 + /* IT + imm24 */ +# define B_TYPE3 0x30 + /* imm11 */ +# define B_TYPE4 0x40 + /* imm24 */ +# define B_TYPE5 0x50 + /* BL + imm24 */ +# define BL_TYPE6 0x60 + /* 0xf00 cc code for branches */ +#endif + +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +# define UNCOND_B 0x04 +# define PATCH_B 0x08 +# define ABSOLUTE_B 0x10 +#endif + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# define IS_MOVABLE 0x04 +# define IS_JAL 0x08 +# define IS_BIT26_COND 0x10 +# define IS_BIT16_COND 0x20 + +# define IS_COND (IS_BIT26_COND | IS_BIT16_COND) + +# define PATCH_B 0x40 +# define PATCH_J 0x80 + + /* instruction types */ +# define MOVABLE_INS 0 + /* 1 - 31 last destination register */ + /* no destination (i.e: store) */ +# define UNMOVABLE_INS 32 + /* FPU status register */ +# define FCSR_FCC 33 +#endif + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +# define IS_MOVABLE 0x04 +# define IS_COND 0x08 +# define IS_CALL 0x10 + +# define PATCH_B 0x20 +# define PATCH_CALL 0x40 + + /* instruction types */ +# define MOVABLE_INS 0 + /* 1 - 31 last destination register */ + /* no destination (i.e: store) */ +# define UNMOVABLE_INS 32 + +# define DST_INS_MASK 0xff + + /* ICC_SET is the same as SET_FLAGS. 
*/ +# define ICC_IS_SET (1 << 23) +# define FCC_IS_SET (1 << 24) +#endif + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +#define SLJIT_HAS_VARIABLE_LOCALS_OFFSET 1 +#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) +#define FIXED_LOCALS_OFFSET (3 * sizeof(sljit_sw)) +#endif +#endif + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 +#ifdef _WIN64 +#define FIXED_LOCALS_OFFSET ((4 + 2) * sizeof(sljit_sw)) +#else +#define FIXED_LOCALS_OFFSET (sizeof(sljit_sw)) +#endif +#endif + +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) +#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) +#define FIXED_LOCALS_OFFSET ((6 + 8) * sizeof(sljit_sw)) +#else +#define FIXED_LOCALS_OFFSET (2 * sizeof(sljit_sw)) +#endif +#endif + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 +#define FIXED_LOCALS_OFFSET ((6 + 8) * sizeof(sljit_sw)) +#endif + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 +#define FIXED_LOCALS_OFFSET (4 * sizeof(sljit_sw)) +#endif + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#define SLJIT_HAS_FIXED_LOCALS_OFFSET 1 +#define FIXED_LOCALS_OFFSET (23 * sizeof(sljit_sw)) +#endif + +#if (defined SLJIT_HAS_VARIABLE_LOCALS_OFFSET && SLJIT_HAS_VARIABLE_LOCALS_OFFSET) + +#define ADJUST_LOCAL_OFFSET(p, i) \ + if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ + (i) += compiler->locals_offset; + +#elif (defined SLJIT_HAS_FIXED_LOCALS_OFFSET && SLJIT_HAS_FIXED_LOCALS_OFFSET) + +#define ADJUST_LOCAL_OFFSET(p, i) \ + if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ + (i) += FIXED_LOCALS_OFFSET; + +#else + +#define ADJUST_LOCAL_OFFSET(p, i) + +#endif + +#endif /* !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) */ + +/* Utils can still be used even if SLJIT_CONFIG_UNSUPPORTED is set. */ +#include "sljitUtils.c" + +#if !(defined SLJIT_CONFIG_UNSUPPORTED && SLJIT_CONFIG_UNSUPPORTED) + +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) +#include "sljitExecAllocator.c" +#endif + +#if (defined SLJIT_SSE2_AUTO && SLJIT_SSE2_AUTO) && !(defined SLJIT_SSE2 && SLJIT_SSE2) +#error SLJIT_SSE2_AUTO cannot be enabled without SLJIT_SSE2 +#endif + +/* --------------------------------------------------------------------- */ +/* Public functions */ +/* --------------------------------------------------------------------- */ + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || ((defined SLJIT_SSE2 && SLJIT_SSE2) && ((defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64))) +#define SLJIT_NEEDS_COMPILER_INIT 1 +static sljit_si compiler_initialized = 0; +/* A thread safe initialization. 
*/ +static void init_compiler(void); +#endif + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void) +{ + struct sljit_compiler *compiler = (struct sljit_compiler*)SLJIT_MALLOC(sizeof(struct sljit_compiler)); + if (!compiler) + return NULL; + SLJIT_ZEROMEM(compiler, sizeof(struct sljit_compiler)); + + SLJIT_COMPILE_ASSERT( + sizeof(sljit_sb) == 1 && sizeof(sljit_ub) == 1 + && sizeof(sljit_sh) == 2 && sizeof(sljit_uh) == 2 + && sizeof(sljit_si) == 4 && sizeof(sljit_ui) == 4 + && (sizeof(sljit_p) == 4 || sizeof(sljit_p) == 8) + && sizeof(sljit_p) <= sizeof(sljit_sw) + && (sizeof(sljit_sw) == 4 || sizeof(sljit_sw) == 8) + && (sizeof(sljit_uw) == 4 || sizeof(sljit_uw) == 8), + invalid_integer_types); + SLJIT_COMPILE_ASSERT(SLJIT_INT_OP == SLJIT_SINGLE_OP, + int_op_and_single_op_must_be_the_same); + SLJIT_COMPILE_ASSERT(SLJIT_REWRITABLE_JUMP != SLJIT_SINGLE_OP, + rewritable_jump_and_single_op_must_not_be_the_same); + + /* Only the non-zero members must be set. */ + compiler->error = SLJIT_SUCCESS; + + compiler->buf = (struct sljit_memory_fragment*)SLJIT_MALLOC(BUF_SIZE); + compiler->abuf = (struct sljit_memory_fragment*)SLJIT_MALLOC(ABUF_SIZE); + + if (!compiler->buf || !compiler->abuf) { + if (compiler->buf) + SLJIT_FREE(compiler->buf); + if (compiler->abuf) + SLJIT_FREE(compiler->abuf); + SLJIT_FREE(compiler); + return NULL; + } + + compiler->buf->next = NULL; + compiler->buf->used_size = 0; + compiler->abuf->next = NULL; + compiler->abuf->used_size = 0; + + compiler->scratches = -1; + compiler->saveds = -1; + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + compiler->args = -1; +#endif + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + compiler->cpool = (sljit_uw*)SLJIT_MALLOC(CPOOL_SIZE * sizeof(sljit_uw) + CPOOL_SIZE * sizeof(sljit_ub)); + if (!compiler->cpool) { + SLJIT_FREE(compiler->buf); + SLJIT_FREE(compiler->abuf); + SLJIT_FREE(compiler); + return NULL; + } + compiler->cpool_unique = (sljit_ub*)(compiler->cpool + CPOOL_SIZE); + compiler->cpool_diff = 0xffffffff; +#endif + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + compiler->delay_slot = UNMOVABLE_INS; +#endif + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + compiler->delay_slot = UNMOVABLE_INS; +#endif + +#if (defined SLJIT_NEEDS_COMPILER_INIT && SLJIT_NEEDS_COMPILER_INIT) + if (!compiler_initialized) { + init_compiler(); + compiler_initialized = 1; + } +#endif + + return compiler; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + struct sljit_memory_fragment *curr; + + buf = compiler->buf; + while (buf) { + curr = buf; + buf = buf->next; + SLJIT_FREE(curr); + } + + buf = compiler->abuf; + while (buf) { + curr = buf; + buf = buf->next; + SLJIT_FREE(curr); + } + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + SLJIT_FREE(compiler->cpool); +#endif + SLJIT_FREE(compiler); +} + +#if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2) +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code) +{ + /* Remove thumb mode flag. */ + SLJIT_FREE_EXEC((void*)((sljit_uw)code & ~0x1)); +} +#elif (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code) +{ + /* Resolve indirection. 
*/ + code = (void*)(*(sljit_uw*)code); + SLJIT_FREE_EXEC(code); +} +#else +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code) +{ + SLJIT_FREE_EXEC(code); +} +#endif + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sljit_label* label) +{ + if (SLJIT_LIKELY(!!jump) && SLJIT_LIKELY(!!label)) { + jump->flags &= ~JUMP_ADDR; + jump->flags |= JUMP_LABEL; + jump->u.label = label; + } +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_target(struct sljit_jump *jump, sljit_uw target) +{ + if (SLJIT_LIKELY(!!jump)) { + SLJIT_ASSERT(jump->flags & SLJIT_REWRITABLE_JUMP); + + jump->flags &= ~JUMP_LABEL; + jump->flags |= JUMP_ADDR; + jump->u.target = target; + } +} + +/* --------------------------------------------------------------------- */ +/* Private functions */ +/* --------------------------------------------------------------------- */ + +static void* ensure_buf(struct sljit_compiler *compiler, sljit_uw size) +{ + sljit_ub *ret; + struct sljit_memory_fragment *new_frag; + + SLJIT_ASSERT(size <= 256); + if (compiler->buf->used_size + size <= (BUF_SIZE - (sljit_uw)SLJIT_OFFSETOF(struct sljit_memory_fragment, memory))) { + ret = compiler->buf->memory + compiler->buf->used_size; + compiler->buf->used_size += size; + return ret; + } + new_frag = (struct sljit_memory_fragment*)SLJIT_MALLOC(BUF_SIZE); + PTR_FAIL_IF_NULL(new_frag); + new_frag->next = compiler->buf; + compiler->buf = new_frag; + new_frag->used_size = size; + return new_frag->memory; +} + +static void* ensure_abuf(struct sljit_compiler *compiler, sljit_uw size) +{ + sljit_ub *ret; + struct sljit_memory_fragment *new_frag; + + SLJIT_ASSERT(size <= 256); + if (compiler->abuf->used_size + size <= (ABUF_SIZE - (sljit_uw)SLJIT_OFFSETOF(struct sljit_memory_fragment, memory))) { + ret = compiler->abuf->memory + compiler->abuf->used_size; + compiler->abuf->used_size += size; + return ret; + } + new_frag = (struct sljit_memory_fragment*)SLJIT_MALLOC(ABUF_SIZE); + PTR_FAIL_IF_NULL(new_frag); + new_frag->next = compiler->abuf; + compiler->abuf = new_frag; + new_frag->used_size = size; + return new_frag->memory; +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_si size) +{ + CHECK_ERROR_PTR(); + +#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE) + if (size <= 0 || size > 128) + return NULL; + size = (size + 7) & ~7; +#else + if (size <= 0 || size > 64) + return NULL; + size = (size + 3) & ~3; +#endif + return ensure_abuf(compiler, size); +} + +static SLJIT_INLINE void reverse_buf(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf = compiler->buf; + struct sljit_memory_fragment *prev = NULL; + struct sljit_memory_fragment *tmp; + + do { + tmp = buf->next; + buf->next = prev; + prev = buf; + buf = tmp; + } while (buf != NULL); + + compiler->buf = prev; +} + +static SLJIT_INLINE void set_label(struct sljit_label *label, struct sljit_compiler *compiler) +{ + label->next = NULL; + label->size = compiler->size; + if (compiler->last_label) + compiler->last_label->next = label; + else + compiler->labels = label; + compiler->last_label = label; +} + +static SLJIT_INLINE void set_jump(struct sljit_jump *jump, struct sljit_compiler *compiler, sljit_si flags) +{ + jump->next = NULL; + jump->flags = flags; + if (compiler->last_jump) + compiler->last_jump->next = jump; + else + compiler->jumps = jump; + compiler->last_jump = jump; +} + +static SLJIT_INLINE void set_const(struct sljit_const *const_, struct sljit_compiler *compiler) +{ + const_->next = 
NULL; + const_->addr = compiler->size; + if (compiler->last_const) + compiler->last_const->next = const_; + else + compiler->consts = const_; + compiler->last_const = const_; +} + +#define ADDRESSING_DEPENDS_ON(exp, reg) \ + (((exp) & SLJIT_MEM) && (((exp) & 0xf) == reg || (((exp) >> 4) & 0xf) == reg)) + +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) +#define FUNCTION_CHECK_OP() \ + SLJIT_ASSERT(!GET_FLAGS(op) || !(op & SLJIT_KEEP_FLAGS)); \ + switch (GET_OPCODE(op)) { \ + case SLJIT_NOT: \ + case SLJIT_CLZ: \ + case SLJIT_AND: \ + case SLJIT_OR: \ + case SLJIT_XOR: \ + case SLJIT_SHL: \ + case SLJIT_LSHR: \ + case SLJIT_ASHR: \ + SLJIT_ASSERT(!(op & (SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C))); \ + break; \ + case SLJIT_NEG: \ + SLJIT_ASSERT(!(op & (SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_C))); \ + break; \ + case SLJIT_MUL: \ + SLJIT_ASSERT(!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_C))); \ + break; \ + case SLJIT_CMPD: \ + SLJIT_ASSERT(!(op & (SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \ + SLJIT_ASSERT((op & (SLJIT_SET_E | SLJIT_SET_S))); \ + break; \ + case SLJIT_ADD: \ + SLJIT_ASSERT(!(op & (SLJIT_SET_S | SLJIT_SET_U))); \ + break; \ + case SLJIT_SUB: \ + break; \ + case SLJIT_ADDC: \ + case SLJIT_SUBC: \ + SLJIT_ASSERT(!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O))); \ + break; \ + case SLJIT_BREAKPOINT: \ + case SLJIT_NOP: \ + case SLJIT_UMUL: \ + case SLJIT_SMUL: \ + case SLJIT_MOV: \ + case SLJIT_MOV_P: \ + case SLJIT_MOVU: \ + case SLJIT_MOVU_P: \ + /* Nothing allowed */ \ + SLJIT_ASSERT(!(op & (SLJIT_INT_OP | SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \ + break; \ + default: \ + /* Only SLJIT_INT_OP or SLJIT_SINGLE_OP is allowed. */ \ + SLJIT_ASSERT(!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C | SLJIT_KEEP_FLAGS))); \ + break; \ + } + +#define FUNCTION_CHECK_IS_REG(r) \ + ((r) == SLJIT_UNUSED || \ + ((r) >= SLJIT_SCRATCH_REG1 && (r) <= SLJIT_SCRATCH_REG1 - 1 + compiler->scratches) || \ + ((r) >= SLJIT_SAVED_REG1 && (r) <= SLJIT_SAVED_REG1 - 1 + compiler->saveds)) + +#define FUNCTION_CHECK_SRC(p, i) \ + SLJIT_ASSERT(compiler->scratches != -1 && compiler->saveds != -1); \ + if (FUNCTION_CHECK_IS_REG(p)) \ + SLJIT_ASSERT((i) == 0 && (p) != SLJIT_UNUSED); \ + else if ((p) == SLJIT_IMM) \ + ; \ + else if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ + SLJIT_ASSERT((i) >= 0 && (i) < compiler->logical_local_size); \ + else if ((p) & SLJIT_MEM) { \ + SLJIT_ASSERT(FUNCTION_CHECK_IS_REG((p) & 0xf)); \ + if ((p) & 0xf0) { \ + SLJIT_ASSERT(FUNCTION_CHECK_IS_REG(((p) >> 4) & 0xf)); \ + SLJIT_ASSERT(!((i) & ~0x3)); \ + } \ + SLJIT_ASSERT(((p) >> 9) == 0); \ + } \ + else \ + SLJIT_ASSERT_STOP(); + +#define FUNCTION_CHECK_DST(p, i) \ + SLJIT_ASSERT(compiler->scratches != -1 && compiler->saveds != -1); \ + if (FUNCTION_CHECK_IS_REG(p)) \ + SLJIT_ASSERT((i) == 0); \ + else if ((p) == (SLJIT_MEM1(SLJIT_LOCALS_REG))) \ + SLJIT_ASSERT((i) >= 0 && (i) < compiler->logical_local_size); \ + else if ((p) & SLJIT_MEM) { \ + SLJIT_ASSERT(FUNCTION_CHECK_IS_REG((p) & 0xf)); \ + if ((p) & 0xf0) { \ + SLJIT_ASSERT(FUNCTION_CHECK_IS_REG(((p) >> 4) & 0xf)); \ + SLJIT_ASSERT(!((i) & ~0x3)); \ + } \ + SLJIT_ASSERT(((p) >> 9) == 0); \ + } \ + else \ + SLJIT_ASSERT_STOP(); + +#define FUNCTION_FCHECK(p, i) \ + if ((p) >= SLJIT_FLOAT_REG1 && (p) <= SLJIT_FLOAT_REG6) \ + SLJIT_ASSERT(i == 0); \ + else if ((p) & SLJIT_MEM) { \ + SLJIT_ASSERT(FUNCTION_CHECK_IS_REG((p) & 0xf)); \ + if ((p) 
& 0xf0) { \ + SLJIT_ASSERT(FUNCTION_CHECK_IS_REG(((p) >> 4) & 0xf)); \ + SLJIT_ASSERT(((p) & 0xf0) != (SLJIT_LOCALS_REG << 4) && !(i & ~0x3)); \ + } else \ + SLJIT_ASSERT((((p) >> 4) & 0xf) == 0); \ + SLJIT_ASSERT(((p) >> 9) == 0); \ + } \ + else \ + SLJIT_ASSERT_STOP(); + +#define FUNCTION_CHECK_OP1() \ + if (GET_OPCODE(op) >= SLJIT_MOVU && GET_OPCODE(op) <= SLJIT_MOVU_P) { \ + SLJIT_ASSERT(!(src & SLJIT_MEM) || (src & 0xf) != SLJIT_LOCALS_REG); \ + SLJIT_ASSERT(!(dst & SLJIT_MEM) || (dst & 0xf) != SLJIT_LOCALS_REG); \ + if ((src & SLJIT_MEM) && (src & 0xf)) \ + SLJIT_ASSERT((dst & 0xf) != (src & 0xf) && ((dst >> 4) & 0xf) != (src & 0xf)); \ + } + +#endif + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + +SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *compiler, FILE* verbose) +{ + compiler->verbose = verbose; +} + +static char* reg_names[] = { + (char*)"", (char*)"t1", (char*)"t2", (char*)"t3", + (char*)"te1", (char*)"te2", (char*)"s1", (char*)"s2", + (char*)"s3", (char*)"se1", (char*)"se2", (char*)"lcr" +}; + +static char* freg_names[] = { + (char*)"", (char*)"float_r1", (char*)"float_r2", (char*)"float_r3", + (char*)"float_r4", (char*)"float_r5", (char*)"float_r6" +}; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#ifdef _WIN64 +# define SLJIT_PRINT_D "I64" +#else +# define SLJIT_PRINT_D "l" +#endif +#else +# define SLJIT_PRINT_D "" +#endif + +#define sljit_verbose_param(p, i) \ + if ((p) & SLJIT_IMM) \ + fprintf(compiler->verbose, "#%" SLJIT_PRINT_D "d", (i)); \ + else if ((p) & SLJIT_MEM) { \ + if ((p) & 0xf) { \ + if (i) { \ + if (((p) >> 4) & 0xf) \ + fprintf(compiler->verbose, "[%s + %s * %d]", reg_names[(p) & 0xF], reg_names[((p) >> 4)& 0xF], 1 << (i)); \ + else \ + fprintf(compiler->verbose, "[%s + #%" SLJIT_PRINT_D "d]", reg_names[(p) & 0xF], (i)); \ + } \ + else { \ + if (((p) >> 4) & 0xf) \ + fprintf(compiler->verbose, "[%s + %s]", reg_names[(p) & 0xF], reg_names[((p) >> 4)& 0xF]); \ + else \ + fprintf(compiler->verbose, "[%s]", reg_names[(p) & 0xF]); \ + } \ + } \ + else \ + fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); \ + } else \ + fprintf(compiler->verbose, "%s", reg_names[p]); +#define sljit_verbose_fparam(p, i) \ + if ((p) & SLJIT_MEM) { \ + if ((p) & 0xf) { \ + if (i) { \ + if (((p) >> 4) & 0xf) \ + fprintf(compiler->verbose, "[%s + %s * %d]", reg_names[(p) & 0xF], reg_names[((p) >> 4)& 0xF], 1 << (i)); \ + else \ + fprintf(compiler->verbose, "[%s + #%" SLJIT_PRINT_D "d]", reg_names[(p) & 0xF], (i)); \ + } \ + else { \ + if (((p) >> 4) & 0xF) \ + fprintf(compiler->verbose, "[%s + %s]", reg_names[(p) & 0xF], reg_names[((p) >> 4)& 0xF]); \ + else \ + fprintf(compiler->verbose, "[%s]", reg_names[(p) & 0xF]); \ + } \ + } \ + else \ + fprintf(compiler->verbose, "[#%" SLJIT_PRINT_D "d]", (i)); \ + } else \ + fprintf(compiler->verbose, "%s", freg_names[p]); + +static SLJIT_CONST char* op_names[] = { + /* op0 */ + (char*)"breakpoint", (char*)"nop", + (char*)"umul", (char*)"smul", (char*)"udiv", (char*)"sdiv", + /* op1 */ + (char*)"mov", (char*)"mov.ub", (char*)"mov.sb", (char*)"mov.uh", + (char*)"mov.sh", (char*)"mov.ui", (char*)"mov.si", (char*)"mov.p", + (char*)"movu", (char*)"movu.ub", (char*)"movu.sb", (char*)"movu.uh", + (char*)"movu.sh", (char*)"movu.ui", (char*)"movu.si", (char*)"movu.p", + (char*)"not", (char*)"neg", (char*)"clz", + /* op2 */ + (char*)"add", (char*)"addc", (char*)"sub", (char*)"subc", + (char*)"mul", (char*)"and", (char*)"or", (char*)"xor", + 
(char*)"shl", (char*)"lshr", (char*)"ashr", + /* fop1 */ + (char*)"cmp", (char*)"mov", (char*)"neg", (char*)"abs", + /* fop2 */ + (char*)"add", (char*)"sub", (char*)"mul", (char*)"div" +}; + +static char* jump_names[] = { + (char*)"c_equal", (char*)"c_not_equal", + (char*)"c_less", (char*)"c_greater_equal", + (char*)"c_greater", (char*)"c_less_equal", + (char*)"c_sig_less", (char*)"c_sig_greater_equal", + (char*)"c_sig_greater", (char*)"c_sig_less_equal", + (char*)"c_overflow", (char*)"c_not_overflow", + (char*)"c_mul_overflow", (char*)"c_mul_not_overflow", + (char*)"c_float_equal", (char*)"c_float_not_equal", + (char*)"c_float_less", (char*)"c_float_greater_equal", + (char*)"c_float_greater", (char*)"c_float_less_equal", + (char*)"c_float_unordered", (char*)"c_float_ordered", + (char*)"jump", (char*)"fast_call", + (char*)"call0", (char*)"call1", (char*)"call2", (char*)"call3" +}; + +#endif + +/* --------------------------------------------------------------------- */ +/* Arch dependent */ +/* --------------------------------------------------------------------- */ + +static SLJIT_INLINE void check_sljit_generate_code(struct sljit_compiler *compiler) +{ +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + struct sljit_jump *jump; +#endif + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + + SLJIT_ASSERT(compiler->size > 0); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + jump = compiler->jumps; + while (jump) { + /* All jumps have target. */ + SLJIT_ASSERT(jump->flags & (JUMP_LABEL | JUMP_ADDR)); + jump = jump->next; + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(scratches); + SLJIT_UNUSED_ARG(saveds); + SLJIT_UNUSED_ARG(local_size); + + SLJIT_ASSERT(args >= 0 && args <= 3); + SLJIT_ASSERT(scratches >= 0 && scratches <= SLJIT_NO_TMP_REGISTERS); + SLJIT_ASSERT(saveds >= 0 && saveds <= SLJIT_NO_GEN_REGISTERS); + SLJIT_ASSERT(args <= saveds); + SLJIT_ASSERT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) + fprintf(compiler->verbose, " enter args=%d scratches=%d saveds=%d local_size=%d\n", args, scratches, saveds, local_size); +#endif +} + +static SLJIT_INLINE void check_sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + /* If debug and verbose are disabled, all arguments are unused. 
*/ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(scratches); + SLJIT_UNUSED_ARG(saveds); + SLJIT_UNUSED_ARG(local_size); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + return; + } +#endif + + SLJIT_ASSERT(args >= 0 && args <= 3); + SLJIT_ASSERT(scratches >= 0 && scratches <= SLJIT_NO_TMP_REGISTERS); + SLJIT_ASSERT(saveds >= 0 && saveds <= SLJIT_NO_GEN_REGISTERS); + SLJIT_ASSERT(args <= saveds); + SLJIT_ASSERT(local_size >= 0 && local_size <= SLJIT_MAX_LOCAL_SIZE); +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) + fprintf(compiler->verbose, " set_context args=%d scratches=%d saveds=%d local_size=%d\n", args, scratches, saveds, local_size); +#endif +} + +static SLJIT_INLINE void check_sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (op != SLJIT_UNUSED) { + SLJIT_ASSERT(op >= SLJIT_MOV && op <= SLJIT_MOV_P); + FUNCTION_CHECK_SRC(src, srcw); + } + else + SLJIT_ASSERT(src == 0 && srcw == 0); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + if (op == SLJIT_UNUSED) + fprintf(compiler->verbose, " return\n"); + else { + fprintf(compiler->verbose, " return %s ", op_names[op]); + sljit_verbose_param(src, srcw); + fprintf(compiler->verbose, "\n"); + } + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_DST(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " fast_enter "); + sljit_verbose_param(dst, dstw); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_SRC(src, srcw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " fast_return "); + sljit_verbose_param(src, srcw); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + + SLJIT_ASSERT((op >= SLJIT_BREAKPOINT && op <= SLJIT_SMUL) + || ((op & ~SLJIT_INT_OP) >= SLJIT_UDIV && (op & ~SLJIT_INT_OP) <= SLJIT_SDIV)); +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) + fprintf(compiler->verbose, " %s%s\n", !(op & SLJIT_INT_OP) ? 
"" : "i", op_names[GET_OPCODE(op)]); +#endif +} + +static SLJIT_INLINE void check_sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + return; + } +#endif + + SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_MOV && GET_OPCODE(op) <= SLJIT_CLZ); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_OP(); + FUNCTION_CHECK_SRC(src, srcw); + FUNCTION_CHECK_DST(dst, dstw); + FUNCTION_CHECK_OP1(); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", !(op & SLJIT_INT_OP) ? "" : "i", op_names[GET_OPCODE(op)], + !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_S) ? "" : ".s", !(op & SLJIT_SET_U) ? "" : ".u", + !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k"); + sljit_verbose_param(dst, dstw); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(src, srcw); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + return; + } +#endif + + SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_ADD && GET_OPCODE(op) <= SLJIT_ASHR); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_OP(); + FUNCTION_CHECK_SRC(src1, src1w); + FUNCTION_CHECK_SRC(src2, src2w); + FUNCTION_CHECK_DST(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s%s%s%s%s%s%s ", !(op & SLJIT_INT_OP) ? "" : "i", op_names[GET_OPCODE(op)], + !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_S) ? "" : ".s", !(op & SLJIT_SET_U) ? "" : ".u", + !(op & SLJIT_SET_O) ? "" : ".o", !(op & SLJIT_SET_C) ? "" : ".c", !(op & SLJIT_KEEP_FLAGS) ? 
"" : ".k"); + sljit_verbose_param(dst, dstw); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(src1, src1w); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(src2, src2w); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_get_register_index(sljit_si reg) +{ + SLJIT_UNUSED_ARG(reg); + SLJIT_ASSERT(reg > 0 && reg <= SLJIT_NO_REGISTERS); +} + +static SLJIT_INLINE void check_sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(instruction); + SLJIT_UNUSED_ARG(size); + SLJIT_ASSERT(instruction); +} + +static SLJIT_INLINE void check_sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + return; + } +#endif + + SLJIT_ASSERT(sljit_is_fpu_available()); + SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_CMPD && GET_OPCODE(op) <= SLJIT_ABSD); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_OP(); + FUNCTION_FCHECK(src, srcw); + FUNCTION_FCHECK(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s%s%s ", op_names[GET_OPCODE(op)], (op & SLJIT_SINGLE_OP) ? "s" : "d", + !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_SET_S) ? "" : ".s"); + sljit_verbose_fparam(dst, dstw); + fprintf(compiler->verbose, ", "); + sljit_verbose_fparam(src, srcw); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + + SLJIT_ASSERT(sljit_is_fpu_available()); + SLJIT_ASSERT(GET_OPCODE(op) >= SLJIT_ADDD && GET_OPCODE(op) <= SLJIT_DIVD); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_OP(); + FUNCTION_FCHECK(src1, src1w); + FUNCTION_FCHECK(src2, src2w); + FUNCTION_FCHECK(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %s%s ", op_names[GET_OPCODE(op)], (op & SLJIT_SINGLE_OP) ? "s" : "d"); + sljit_verbose_fparam(dst, dstw); + fprintf(compiler->verbose, ", "); + sljit_verbose_fparam(src1, src1w); + fprintf(compiler->verbose, ", "); + sljit_verbose_fparam(src2, src2w); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_label(struct sljit_compiler *compiler) +{ + /* If debug and verbose are disabled, all arguments are unused. 
*/ + SLJIT_UNUSED_ARG(compiler); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) + fprintf(compiler->verbose, "label:\n"); +#endif +} + +static SLJIT_INLINE void check_sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + return; + } +#endif + + SLJIT_ASSERT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP))); + SLJIT_ASSERT((type & 0xff) >= SLJIT_C_EQUAL && (type & 0xff) <= SLJIT_CALL3); +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) + fprintf(compiler->verbose, " jump%s<%s>\n", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]); +#endif +} + +static SLJIT_INLINE void check_sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + + SLJIT_ASSERT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_INT_OP))); + SLJIT_ASSERT((type & 0xff) >= SLJIT_C_EQUAL && (type & 0xff) <= SLJIT_C_SIG_LESS_EQUAL); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_SRC(src1, src1w); + FUNCTION_CHECK_SRC(src2, src2w); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %scmp%s<%s> ", !(type & SLJIT_INT_OP) ? "" : "i", !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]); + sljit_verbose_param(src1, src1w); + fprintf(compiler->verbose, ", "); + sljit_verbose_param(src2, src2w); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + + SLJIT_ASSERT(sljit_is_fpu_available()); + SLJIT_ASSERT(!(type & ~(0xff | SLJIT_REWRITABLE_JUMP | SLJIT_SINGLE_OP))); + SLJIT_ASSERT((type & 0xff) >= SLJIT_C_FLOAT_EQUAL && (type & 0xff) <= SLJIT_C_FLOAT_ORDERED); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_FCHECK(src1, src1w); + FUNCTION_FCHECK(src2, src2w); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " %scmp%s<%s> ", (type & SLJIT_SINGLE_OP) ? "s" : "d", + !(type & SLJIT_REWRITABLE_JUMP) ? "" : ".r", jump_names[type & 0xff]); + sljit_verbose_fparam(src1, src1w); + fprintf(compiler->verbose, ", "); + sljit_verbose_fparam(src2, src2w); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + /* If debug and verbose are disabled, all arguments are unused. 
*/ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (SLJIT_UNLIKELY(compiler->skip_checks)) { + compiler->skip_checks = 0; + return; + } +#endif + + SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL3); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_SRC(src, srcw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " ijump<%s> ", jump_names[type]); + sljit_verbose_param(src, srcw); + fprintf(compiler->verbose, "\n"); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + /* If debug and verbose are disabled, all arguments are unused. */ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_UNUSED_ARG(type); + + SLJIT_ASSERT(type >= SLJIT_C_EQUAL && type < SLJIT_JUMP); + SLJIT_ASSERT(op == SLJIT_MOV || GET_OPCODE(op) == SLJIT_MOV_UI || GET_OPCODE(op) == SLJIT_MOV_SI + || (GET_OPCODE(op) >= SLJIT_AND && GET_OPCODE(op) <= SLJIT_XOR)); + SLJIT_ASSERT((op & (SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O | SLJIT_SET_C)) == 0); + SLJIT_ASSERT((op & (SLJIT_SET_E | SLJIT_KEEP_FLAGS)) != (SLJIT_SET_E | SLJIT_KEEP_FLAGS)); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + if (GET_OPCODE(op) < SLJIT_ADD) { + SLJIT_ASSERT(src == SLJIT_UNUSED && srcw == 0); + } else { + SLJIT_ASSERT(src == dst && srcw == dstw); + } + FUNCTION_CHECK_DST(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " op_flags<%s%s%s%s> ", !(op & SLJIT_INT_OP) ? "" : "i", + op_names[GET_OPCODE(op)], !(op & SLJIT_SET_E) ? "" : ".e", !(op & SLJIT_KEEP_FLAGS) ? "" : ".k"); + sljit_verbose_param(dst, dstw); + if (src != SLJIT_UNUSED) { + fprintf(compiler->verbose, ", "); + sljit_verbose_param(src, srcw); + } + fprintf(compiler->verbose, ", <%s>\n", jump_names[type]); + } +#endif +} + +static SLJIT_INLINE void check_sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(offset); + +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + FUNCTION_CHECK_DST(dst, dstw); +#endif +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) + if (SLJIT_UNLIKELY(!!compiler->verbose)) { + fprintf(compiler->verbose, " local_base "); + sljit_verbose_param(dst, dstw); + fprintf(compiler->verbose, ", #%" SLJIT_PRINT_D "d\n", offset); + } +#endif +} + +static SLJIT_INLINE void check_sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +{ + /* If debug and verbose are disabled, all arguments are unused. 
*/
+	SLJIT_UNUSED_ARG(compiler);
+	SLJIT_UNUSED_ARG(dst);
+	SLJIT_UNUSED_ARG(dstw);
+	SLJIT_UNUSED_ARG(init_value);
+
+#if (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	FUNCTION_CHECK_DST(dst, dstw);
+#endif
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+	if (SLJIT_UNLIKELY(!!compiler->verbose)) {
+		fprintf(compiler->verbose, " const ");
+		sljit_verbose_param(dst, dstw);
+		fprintf(compiler->verbose, ", #%" SLJIT_PRINT_D "d\n", init_value);
+	}
+#endif
+}
+
+static SLJIT_INLINE sljit_si emit_mov_before_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw)
+{
+	/* Return if we don't need to do anything. */
+	if (op == SLJIT_UNUSED)
+		return SLJIT_SUCCESS;
+
+#if (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
+	/* At the moment the pointer size is always equal to sljit_sw. May be changed in the future. */
+	if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_P))
+		return SLJIT_SUCCESS;
+#else
+	if (src == SLJIT_RETURN_REG && (op == SLJIT_MOV || op == SLJIT_MOV_UI || op == SLJIT_MOV_SI || op == SLJIT_MOV_P))
+		return SLJIT_SUCCESS;
+#endif
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	compiler->skip_checks = 1;
+#endif
+	return sljit_emit_op1(compiler, op, SLJIT_RETURN_REG, 0, src, srcw);
+}
+
+/* CPU description section */
+
+#if (defined SLJIT_32BIT_ARCHITECTURE && SLJIT_32BIT_ARCHITECTURE)
+#define SLJIT_CPUINFO_PART1 " 32bit ("
+#elif (defined SLJIT_64BIT_ARCHITECTURE && SLJIT_64BIT_ARCHITECTURE)
+#define SLJIT_CPUINFO_PART1 " 64bit ("
+#else
+#error "Internal error: CPU type info missing"
+#endif
+
+#if (defined SLJIT_LITTLE_ENDIAN && SLJIT_LITTLE_ENDIAN)
+#define SLJIT_CPUINFO_PART2 "little endian + "
+#elif (defined SLJIT_BIG_ENDIAN && SLJIT_BIG_ENDIAN)
+#define SLJIT_CPUINFO_PART2 "big endian + "
+#else
+#error "Internal error: CPU type info missing"
+#endif
+
+#if (defined SLJIT_UNALIGNED && SLJIT_UNALIGNED)
+#define SLJIT_CPUINFO_PART3 "unaligned)"
+#else
+#define SLJIT_CPUINFO_PART3 "aligned)"
+#endif
+
+#define SLJIT_CPUINFO SLJIT_CPUINFO_PART1 SLJIT_CPUINFO_PART2 SLJIT_CPUINFO_PART3
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+#	include "sljitNativeX86_common.c"
+#elif (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+#	include "sljitNativeX86_common.c"
+#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+#	include "sljitNativeARM_v5.c"
+#elif (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+#	include "sljitNativeARM_v5.c"
+#elif (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
+#	include "sljitNativeARM_Thumb2.c"
+#elif (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32)
+#	include "sljitNativePPC_common.c"
+#elif (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+#	include "sljitNativePPC_common.c"
+#elif (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+#	include "sljitNativeMIPS_common.c"
+#elif (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
+#	include "sljitNativeSPARC_common.c"
+#endif
+
+#if !(defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type,
+	sljit_si src1, sljit_sw src1w,
+	sljit_si src2, sljit_sw src2w)
+{
+	/* Default compare for most architectures.
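One note for reviewers on the function below: when the first operand is an immediate, sljit_emit_cmp exchanges the two operands and mirrors the condition, so the emitted comparison keeps its meaning. A standalone sketch of the mirroring rule (illustrative only, not part of the patch; the enum and function names are hypothetical):

	enum cond { EQUAL, NOT_EQUAL, LESS, GREATER_EQUAL, GREATER, LESS_EQUAL };

	static enum cond
	mirror(enum cond c)
	{
		/* Exchanging the operands of a comparison mirrors the
		   condition: "imm < reg" is the same test as "reg > imm".
		   Equality tests are symmetric and stay unchanged. */
		switch (c) {
		case LESS:		return (GREATER);
		case GREATER_EQUAL:	return (LESS_EQUAL);
		case GREATER:		return (LESS);
		case LESS_EQUAL:	return (GREATER_EQUAL);
		default:		return (c);
		}
	}

The switch in the function below applies the same mapping, once for the unsigned and once for the signed (SLJIT_C_SIG_*) conditions.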
*/
+	sljit_si flags, tmp_src, condition;
+	sljit_sw tmp_srcw;
+
+	CHECK_ERROR_PTR();
+	check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w);
+
+	condition = type & 0xff;
+	if (SLJIT_UNLIKELY((src1 & SLJIT_IMM) && !(src2 & SLJIT_IMM))) {
+		/* Immediate is preferred as second argument by most architectures. */
+		switch (condition) {
+		case SLJIT_C_LESS:
+			condition = SLJIT_C_GREATER;
+			break;
+		case SLJIT_C_GREATER_EQUAL:
+			condition = SLJIT_C_LESS_EQUAL;
+			break;
+		case SLJIT_C_GREATER:
+			condition = SLJIT_C_LESS;
+			break;
+		case SLJIT_C_LESS_EQUAL:
+			condition = SLJIT_C_GREATER_EQUAL;
+			break;
+		case SLJIT_C_SIG_LESS:
+			condition = SLJIT_C_SIG_GREATER;
+			break;
+		case SLJIT_C_SIG_GREATER_EQUAL:
+			condition = SLJIT_C_SIG_LESS_EQUAL;
+			break;
+		case SLJIT_C_SIG_GREATER:
+			condition = SLJIT_C_SIG_LESS;
+			break;
+		case SLJIT_C_SIG_LESS_EQUAL:
+			condition = SLJIT_C_SIG_GREATER_EQUAL;
+			break;
+		}
+		type = condition | (type & (SLJIT_INT_OP | SLJIT_REWRITABLE_JUMP));
+		tmp_src = src1;
+		src1 = src2;
+		src2 = tmp_src;
+		tmp_srcw = src1w;
+		src1w = src2w;
+		src2w = tmp_srcw;
+	}
+
+	if (condition <= SLJIT_C_NOT_ZERO)
+		flags = SLJIT_SET_E;
+	else if (condition <= SLJIT_C_LESS_EQUAL)
+		flags = SLJIT_SET_U;
+	else
+		flags = SLJIT_SET_S;
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	compiler->skip_checks = 1;
+#endif
+	PTR_FAIL_IF(sljit_emit_op2(compiler, SLJIT_SUB | flags | (type & SLJIT_INT_OP),
+		SLJIT_UNUSED, 0, src1, src1w, src2, src2w));
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	compiler->skip_checks = 1;
+#endif
+	return sljit_emit_jump(compiler, condition | (type & SLJIT_REWRITABLE_JUMP));
+}
+
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type,
+	sljit_si src1, sljit_sw src1w,
+	sljit_si src2, sljit_sw src2w)
+{
+	sljit_si flags, condition;
+
+	check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w);
+
+	condition = type & 0xff;
+	flags = (condition <= SLJIT_C_FLOAT_NOT_EQUAL) ? SLJIT_SET_E : SLJIT_SET_S;
+	if (type & SLJIT_SINGLE_OP)
+		flags |= SLJIT_SINGLE_OP;
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	compiler->skip_checks = 1;
+#endif
+	sljit_emit_fop1(compiler, SLJIT_CMPD | flags, src1, src1w, src2, src2w);
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	compiler->skip_checks = 1;
+#endif
+	return sljit_emit_jump(compiler, condition | (type & SLJIT_REWRITABLE_JUMP));
+}
+
+#endif
+
+#if !(defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) && !(defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset)
+{
+	CHECK_ERROR();
+	check_sljit_get_local_base(compiler, dst, dstw, offset);
+
+	ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_LOCALS_REG), offset);
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	compiler->skip_checks = 1;
+#endif
+	if (offset != 0)
+		return sljit_emit_op2(compiler, SLJIT_ADD | SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_LOCALS_REG, 0, SLJIT_IMM, offset);
+	return sljit_emit_op1(compiler, SLJIT_MOV, dst, dstw, SLJIT_LOCALS_REG, 0);
+}
+
+#endif
+
+#else /* SLJIT_CONFIG_UNSUPPORTED */
+
+/* Empty function bodies for those machines that are not (yet) supported.
*/ + +SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +{ + return "unsupported"; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void) +{ + SLJIT_ASSERT_STOP(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_ASSERT_STOP(); +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_si size) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(size); + SLJIT_ASSERT_STOP(); + return NULL; +} + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) +SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *compiler, FILE* verbose) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(verbose); + SLJIT_ASSERT_STOP(); +} +#endif + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_ASSERT_STOP(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code) +{ + SLJIT_UNUSED_ARG(code); + SLJIT_ASSERT_STOP(); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(scratches); + SLJIT_UNUSED_ARG(saveds); + SLJIT_UNUSED_ARG(local_size); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(args); + SLJIT_UNUSED_ARG(scratches); + SLJIT_UNUSED_ARG(saveds); + SLJIT_UNUSED_ARG(local_size); + SLJIT_ASSERT_STOP(); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src1); + 
SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +{ + SLJIT_ASSERT_STOP(); + return reg; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(instruction); + SLJIT_UNUSED_ARG(size); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ + SLJIT_ASSERT_STOP(); + return 0; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_ASSERT_STOP(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_ASSERT_STOP(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + SLJIT_ASSERT_STOP(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(src1); + SLJIT_UNUSED_ARG(src1w); + SLJIT_UNUSED_ARG(src2); + SLJIT_UNUSED_ARG(src2w); + SLJIT_ASSERT_STOP(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_label(struct sljit_jump *jump, struct sljit_label* label) +{ + SLJIT_UNUSED_ARG(jump); + SLJIT_UNUSED_ARG(label); + SLJIT_ASSERT_STOP(); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_target(struct sljit_jump *jump, sljit_uw target) +{ + SLJIT_UNUSED_ARG(jump); + SLJIT_UNUSED_ARG(target); + SLJIT_ASSERT_STOP(); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(type); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(op); + SLJIT_UNUSED_ARG(dst); 
+ SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(src); + SLJIT_UNUSED_ARG(srcw); + SLJIT_UNUSED_ARG(type); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(offset); + SLJIT_ASSERT_STOP(); + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw initval) +{ + SLJIT_UNUSED_ARG(compiler); + SLJIT_UNUSED_ARG(dst); + SLJIT_UNUSED_ARG(dstw); + SLJIT_UNUSED_ARG(initval); + SLJIT_ASSERT_STOP(); + return NULL; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ + SLJIT_UNUSED_ARG(addr); + SLJIT_UNUSED_ARG(new_addr); + SLJIT_ASSERT_STOP(); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + SLJIT_UNUSED_ARG(addr); + SLJIT_UNUSED_ARG(new_constant); + SLJIT_ASSERT_STOP(); +} + +#endif Property changes on: sys/contrib/sljit/sljitLir.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitLir.h =================================================================== --- sys/contrib/sljit/sljitLir.h (revision 0) +++ sys/contrib/sljit/sljitLir.h (working copy) @@ -0,0 +1,985 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _SLJIT_LIR_H_ +#define _SLJIT_LIR_H_ + +/* + ------------------------------------------------------------------------ + Stack-Less JIT compiler for multiple architectures (x86, ARM, PowerPC) + ------------------------------------------------------------------------ + + Short description + Advantages: + - The execution can be continued from any LIR instruction. 
In other
+ words, it is possible to jump to any label from anywhere, even from
+ a code fragment that is compiled later, provided both compiled code
+ fragments share the same context. See sljit_emit_enter for more details
+ - Supports self-modifying code: target of (conditional) jump and call
+ instructions and some constant values can be dynamically modified
+ during runtime
+ - although doing so frequently is not recommended
+ - can be used for inline caching: save an important value once
+ in the instruction stream
+ - since this feature limits the optimization possibilities, a
+ special flag must be passed at compile time when these
+ instructions are emitted
+ - A fixed stack space can be allocated for local variables
+ - The compiler is thread-safe
+ - The compiler is highly configurable through preprocessor macros.
+ You can disable unneeded features (multithreading in single
+ threaded applications), and you can use your own system functions
+ (including memory allocators). See sljitConfig.h
+ Disadvantages:
+ - No automatic register allocation, and temporary results are
+ not stored on the stack. (hence the name)
+ - Limited number of registers (only 6+4 integer registers, max 3+2
+ scratch, max 3+2 saved and 6 floating point registers)
+ In practice:
+ - This approach is very effective for interpreters
+ - One of the saved registers typically points to a stack interface
+ - It can jump to any exception handler anytime (even if it belongs
+ to another function)
+ - Hot paths can be modified during runtime reflecting the changes
+ of the fastest execution path of the dynamic language
+ - SLJIT supports complex memory addressing modes
+ - mainly position and context independent code (except in some cases)
+
+ For valgrind users:
+ - pass the --smc-check=all argument to valgrind, since JIT is a "self-modifying code"
+*/
+
+#if !(defined SLJIT_NO_DEFAULT_CONFIG && SLJIT_NO_DEFAULT_CONFIG)
+#include "sljitConfig.h"
+#endif
+
+/* The following header file defines useful macros for fine-tuning
+sljit-based code generators. They are listed in the beginning
+of sljitConfigInternal.h */
+
+#include "sljitConfigInternal.h"
+
+/* --------------------------------------------------------------------- */
+/* Error codes */
+/* --------------------------------------------------------------------- */
+
+/* Indicates no error. */
+#define SLJIT_SUCCESS 0
+/* After the call of sljit_generate_code(), the error code of the compiler
+ is set to this value to avoid future sljit calls (in debug mode at least).
+ The compiler should be freed after sljit_generate_code(). */
+#define SLJIT_ERR_COMPILED 1
+/* Cannot allocate non-executable memory. */
+#define SLJIT_ERR_ALLOC_FAILED 2
+/* Cannot allocate executable memory.
+ Only for sljit_generate_code() */
+#define SLJIT_ERR_EX_ALLOC_FAILED 3
+/* Return value for the SLJIT_CONFIG_UNSUPPORTED empty architecture. */
+#define SLJIT_ERR_UNSUPPORTED 4
+
+/* --------------------------------------------------------------------- */
+/* Registers */
+/* --------------------------------------------------------------------- */
+
+#define SLJIT_UNUSED 0
+
+/* Scratch (temporary) registers which may not preserve their values
+ across function calls. */
+#define SLJIT_SCRATCH_REG1 1
+#define SLJIT_SCRATCH_REG2 2
+#define SLJIT_SCRATCH_REG3 3
+/* Note: extra registers cannot be used for memory addressing. */
+/* Note: on x86-32, these registers are emulated (using stack
+ loads & stores).
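To make the scratch/saved split described here concrete, a hypothetical fragment follows (illustrative only, not part of the patch); it assumes a compiler "c" already created with sljit_create_compiler() and builds a function that adds its first two arguments:

	/* Arguments arrive in saved registers (see sljit_emit_enter);
	   the sum is built in SLJIT_SCRATCH_REG1, which by convention
	   is also the return register. */
	sljit_emit_enter(c, /* args */ 2, /* scratches */ 1, /* saveds */ 2, /* local_size */ 0);
	sljit_emit_op2(c, SLJIT_ADD, SLJIT_SCRATCH_REG1, 0,
	    SLJIT_SAVED_REG1, 0, SLJIT_SAVED_REG2, 0);
	sljit_emit_return(c, SLJIT_MOV, SLJIT_SCRATCH_REG1, 0);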
*/
+#define SLJIT_TEMPORARY_EREG1 4
+#define SLJIT_TEMPORARY_EREG2 5
+
+/* Saved registers which preserve their values across function calls. */
+#define SLJIT_SAVED_REG1 6
+#define SLJIT_SAVED_REG2 7
+#define SLJIT_SAVED_REG3 8
+/* Note: extra registers cannot be used for memory addressing. */
+/* Note: on x86-32, these registers are emulated (using stack
+ loads & stores). */
+#define SLJIT_SAVED_EREG1 9
+#define SLJIT_SAVED_EREG2 10
+
+/* Read-only register (cannot be the destination of an operation).
+ Only the SLJIT_MEM1(SLJIT_LOCALS_REG) addressing mode is allowed since
+ several ABIs have certain limitations about the stack layout. However,
+ sljit_get_local_base() can be used to obtain the offset of a value
+ on the stack. */
+#define SLJIT_LOCALS_REG 11
+
+/* Number of registers. */
+#define SLJIT_NO_TMP_REGISTERS 5
+#define SLJIT_NO_GEN_REGISTERS 5
+#define SLJIT_NO_REGISTERS 11
+
+/* Return with machine word. */
+
+#define SLJIT_RETURN_REG SLJIT_SCRATCH_REG1
+
+/* x86 prefers specific registers for special purposes. In case of shift
+ by register it supports only SLJIT_SCRATCH_REG3 for the shift argument
+ (which is the src2 argument of sljit_emit_op2). If another register is
+ used, sljit must exchange data between registers, which causes a minor
+ slowdown. Other architectures have no such limitation. */
+
+#define SLJIT_PREF_SHIFT_REG SLJIT_SCRATCH_REG3
+
+/* --------------------------------------------------------------------- */
+/* Floating point registers */
+/* --------------------------------------------------------------------- */
+
+/* Note: SLJIT_UNUSED as destination is not valid for floating point
+ operations, since they cannot be used for setting flags. */
+
+/* Floating point operations are performed on double or
+ single precision values. */
+
+#define SLJIT_FLOAT_REG1 1
+#define SLJIT_FLOAT_REG2 2
+#define SLJIT_FLOAT_REG3 3
+#define SLJIT_FLOAT_REG4 4
+#define SLJIT_FLOAT_REG5 5
+#define SLJIT_FLOAT_REG6 6
+
+/* --------------------------------------------------------------------- */
+/* Main structures and functions */
+/* --------------------------------------------------------------------- */
+
+struct sljit_memory_fragment {
+	struct sljit_memory_fragment *next;
+	sljit_uw used_size;
+	/* Must be aligned to sljit_sw. */
+	sljit_ub memory[1];
+};
+
+struct sljit_label {
+	struct sljit_label *next;
+	sljit_uw addr;
+	/* The maximum size difference. */
+	sljit_uw size;
+};
+
+struct sljit_jump {
+	struct sljit_jump *next;
+	sljit_uw addr;
+	sljit_sw flags;
+	union {
+		sljit_uw target;
+		struct sljit_label* label;
+	} u;
+};
+
+struct sljit_const {
+	struct sljit_const *next;
+	sljit_uw addr;
+};
+
+struct sljit_compiler {
+	sljit_si error;
+
+	struct sljit_label *labels;
+	struct sljit_jump *jumps;
+	struct sljit_const *consts;
+	struct sljit_label *last_label;
+	struct sljit_jump *last_jump;
+	struct sljit_const *last_const;
+
+	struct sljit_memory_fragment *buf;
+	struct sljit_memory_fragment *abuf;
+
+	/* Used scratch registers. */
+	sljit_si scratches;
+	/* Used saved registers. */
+	sljit_si saveds;
+	/* Local stack size. */
+	sljit_si local_size;
+	/* Code size. */
+	sljit_uw size;
+	/* For statistical purposes.
*/
+	sljit_uw executable_size;
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+	sljit_si args;
+	sljit_si locals_offset;
+	sljit_si scratches_start;
+	sljit_si saveds_start;
+#endif
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	sljit_si mode32;
+#endif
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	sljit_si flags_saved;
+#endif
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+	/* Constant pool handling. */
+	sljit_uw *cpool;
+	sljit_ub *cpool_unique;
+	sljit_uw cpool_diff;
+	sljit_uw cpool_fill;
+	/* Other members. */
+	/* Contains pointer, "ldr pc, [...]" pairs. */
+	sljit_uw patches;
+#endif
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) || (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+	/* Temporary fields. */
+	sljit_uw shift_imm;
+	sljit_si cache_arg;
+	sljit_sw cache_argw;
+#endif
+
+#if (defined SLJIT_CONFIG_ARM_THUMB2 && SLJIT_CONFIG_ARM_THUMB2)
+	sljit_si cache_arg;
+	sljit_sw cache_argw;
+#endif
+
+#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) || (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64)
+	sljit_sw imm;
+	sljit_si cache_arg;
+	sljit_sw cache_argw;
+#endif
+
+#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32)
+	sljit_si delay_slot;
+	sljit_si cache_arg;
+	sljit_sw cache_argw;
+#endif
+
+#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32)
+	sljit_si delay_slot;
+	sljit_si cache_arg;
+	sljit_sw cache_argw;
+#endif
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+	FILE* verbose;
+#endif
+
+#if (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	/* Local size passed to the functions. */
+	sljit_si logical_local_size;
+#endif
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG)
+	sljit_si skip_checks;
+#endif
+};
+
+/* --------------------------------------------------------------------- */
+/* Main functions */
+/* --------------------------------------------------------------------- */
+
+/* Creates an sljit compiler.
+ Returns NULL on failure. */
+SLJIT_API_FUNC_ATTRIBUTE struct sljit_compiler* sljit_create_compiler(void);
+
+/* Free everything except the compiled machine code. */
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_compiler(struct sljit_compiler *compiler);
+
+/* Returns the current error code. If an error has occurred, future sljit
+ calls which use the same compiler argument return early with the same
+ error code. Thus there is no need to check the error after every
+ call; it is enough to do it before the code is compiled. Removing
+ these checks increases the performance of the compiling process. */
+static SLJIT_INLINE sljit_si sljit_get_compiler_error(struct sljit_compiler *compiler) { return compiler->error; }
+
+/*
+ Allocate a small amount of memory. The size must be <= 64 bytes on 32 bit,
+ and <= 128 bytes on 64 bit architectures. The memory area is owned by the
+ compiler, and freed by sljit_free_compiler. The returned pointer is
+ sizeof(sljit_sw) aligned. Excellent for allocating small blocks during
+ compilation, with no need to worry about freeing them. The size is
+ enough to contain at most 16 pointers. If the size is outside of this range,
+ the function will return NULL. However, this return value does not
+ indicate that there is no more memory (it does not set the current error code
+ of the compiler to out-of-memory status).
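Pulling the pieces above together, a minimal hypothetical usage sketch (illustrative only, not part of the patch; the function name is invented): because errors latch in the compiler, one check before code generation suffices.

	#include "sljitLir.h"

	/* Build a function that returns the constant 42. */
	static void *
	build_const_fn(void)
	{
		struct sljit_compiler *c = sljit_create_compiler();
		void *code = NULL;

		if (c == NULL)
			return (NULL);
		sljit_emit_enter(c, /* args */ 0, /* scratches */ 1, /* saveds */ 0, /* local_size */ 0);
		sljit_emit_return(c, SLJIT_MOV, SLJIT_IMM, 42);
		/* One check is enough: every earlier failure is latched. */
		if (sljit_get_compiler_error(c) == SLJIT_SUCCESS)
			code = sljit_generate_code(c);
		sljit_free_compiler(c);	/* The generated code itself stays valid. */
		return (code);
	}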
+*/
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_alloc_memory(struct sljit_compiler *compiler, sljit_si size);
+
+#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE)
+/* Passing NULL disables verbose. */
+SLJIT_API_FUNC_ATTRIBUTE void sljit_compiler_verbose(struct sljit_compiler *compiler, FILE* verbose);
+#endif
+
+SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler);
+SLJIT_API_FUNC_ATTRIBUTE void sljit_free_code(void* code);
+
+/*
+ After the machine code generation is finished, we can retrieve the allocated
+ executable memory size, although this area may not be fully filled with
+ instructions depending on some optimizations. This function is useful only
+ for statistical purposes.
+
+ Before a successful code generation, this function returns 0.
+*/
+static SLJIT_INLINE sljit_uw sljit_get_generated_code_size(struct sljit_compiler *compiler) { return compiler->executable_size; }
+
+/* Instruction generation. Each function returns an error code. If there is no
+ error, it returns SLJIT_SUCCESS. */
+
+/*
+ The executable code is basically a function call from the viewpoint of
+ the C language. The function calls must obey the ABI (Application
+ Binary Interface) of the platform, which specifies the purpose of machine
+ registers and stack handling, among other things. The sljit_emit_enter
+ function emits the necessary instructions for setting up a new context
+ for the executable code and moves function arguments to the saved
+ registers. The number of arguments is specified in the "args"
+ parameter and the first argument goes to SLJIT_SAVED_REG1, the second
+ goes to SLJIT_SAVED_REG2 and so on. The numbers of scratch and
+ saved registers are passed in the "scratches" and "saveds" arguments
+ respectively. Since the saved registers contain the arguments,
+ "args" must be less than or equal to "saveds". The sljit_emit_enter
+ is also capable of allocating stack space for local variables. The
+ "local_size" argument contains the size in bytes of this local area
+ and its starting address is stored in SLJIT_LOCALS_REG. However,
+ SLJIT_LOCALS_REG is not necessarily the machine stack pointer.
+ The memory bytes between SLJIT_LOCALS_REG (inclusive) and
+ SLJIT_LOCALS_REG + local_size (exclusive) can be modified freely
+ until the function returns. The stack space is uninitialized.
+
+ Note: every call of sljit_emit_enter and sljit_set_context
+ overwrites the previous context. */
+
+#define SLJIT_MAX_LOCAL_SIZE 65536
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler,
+	sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size);
+
+/* The machine code has a context (which contains the local stack space size,
+ number of used registers, etc.) which is initialized by sljit_emit_enter. Several
+ functions (like sljit_emit_return) require this context to be able to generate
+ the appropriate code. However, some code fragments (like inline cache) may have
+ no normal entry point so their context is unknown to the compiler. Using the
+ function below we can specify their context.
+
+ Note: every call of sljit_emit_enter and sljit_set_context overwrites
+ the previous context. */
+
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler,
+	sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size);
+
+/* Return from machine code. The op argument can be SLJIT_UNUSED, which means the
+ function does not return anything, or any opcode between SLJIT_MOV and
+ SLJIT_MOV_P (see sljit_emit_op1).
As for src and srcw, they must be 0 if op
+ is SLJIT_UNUSED; otherwise see the description of source and
+ destination arguments below. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op,
+	sljit_si src, sljit_sw srcw);
+
+/* Fast calling mechanism for utility functions (see SLJIT_FAST_CALL). All registers and
+ even the stack frame are passed to the callee. The return address is preserved in
+ dst/dstw by sljit_emit_fast_enter (the type of the value stored by this function
+ is sljit_p), and sljit_emit_fast_return can use this as a return value later. */
+
+/* Note: only for sljit-specific, non-ABI-compliant calls. Fast, since only a few machine
+ instructions are needed. Excellent for small utility functions, where saving registers
+ and setting up a new stack frame would cost too much performance. However, it is still
+ possible to return to the address of the caller (or anywhere else). */
+
+/* Note: flags are not changed (unlike sljit_emit_enter / sljit_emit_return). */
+
+/* Note: although sljit_emit_fast_return could be replaced by an ijump, it is not recommended,
+ since many architectures do clever branch prediction on call / return instruction pairs. */
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw);
+SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw);
+
+/*
+ Source and destination values for arithmetical instructions
+ imm - a simple immediate value (cannot be used as a destination)
+ reg - any of the registers (immediate argument must be 0)
+ [imm] - absolute immediate memory address
+ [reg+imm] - indirect memory address
+ [reg+(reg<<imm)] - indirect indexed memory address (shift must be between 0 and 3)
+ [...]
+static SLJIT_INLINE sljit_uw sljit_get_label_addr(struct sljit_label *label) { return label->addr; }
+static SLJIT_INLINE sljit_uw sljit_get_jump_addr(struct sljit_jump *jump) { return jump->addr; }
+static SLJIT_INLINE sljit_uw sljit_get_const_addr(struct sljit_const *const_) { return const_->addr; }
+
+/* Only the address is required to rewrite the code. */
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr);
+SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant);
+
+/* --------------------------------------------------------------------- */
+/* Miscellaneous utility functions */
+/* --------------------------------------------------------------------- */
+
+#define SLJIT_MAJOR_VERSION 0
+#define SLJIT_MINOR_VERSION 90
+
+/* Get the human-readable name of the platform. Can be useful on platforms
+ like ARM, where ARM and Thumb2 functions can be mixed, and
+ it is useful to know the type of the code generator. */
+SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void);
+
+/* Portable helper function to get the offset of a member. */
+#define SLJIT_OFFSETOF(base, member) ((sljit_sw)(&((base*)0x10)->member) - 0x10)
+
+#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK)
+/* This global lock is useful to compile common functions. */
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void);
+SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void);
+#endif
+
+#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK)
+
+/* The sljit_stack is a utility feature of sljit, which allocates a
+ writable memory region between base (inclusive) and limit (exclusive).
+ Both base and limit are pointers, and base is always <= limit.
+ This feature uses the "address space reserve" feature
+ of modern operating systems.
Basically we don't need to allocate a + huge memory block in one step for the worst case, we can start with + a smaller chunk and extend it later. Since the address space is + reserved, the data never copied to other regions, thus it is safe + to store pointers here. */ + +/* Note: The base field is aligned to PAGE_SIZE bytes (usually 4k or more). + Note: stack growing should not happen in small steps: 4k, 16k or even + bigger growth is better. + Note: this structure may not be supported by all operating systems. + Some kind of fallback mechanism is suggested when SLJIT_UTIL_STACK + is not defined. */ + +struct sljit_stack { + /* User data, anything can be stored here. + Starting with the same value as base. */ + sljit_uw top; + /* These members are read only. */ + sljit_uw base; + sljit_uw limit; + sljit_uw max_limit; +}; + +/* Returns NULL if unsuccessful. + Note: limit and max_limit contains the size for stack allocation + Note: the top field is initialized to base. */ +SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit); +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack); + +/* Can be used to increase (allocate) or decrease (free) the memory area. + Returns with a non-zero value if unsuccessful. If new_limit is greater than + max_limit, it will fail. It is very easy to implement a stack data structure, + since the growth ratio can be added to the current limit, and sljit_stack_resize + will do all the necessary checks. The fields of the stack are not changed if + sljit_stack_resize fails. */ +SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit); + +#endif /* (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) */ + +#if !(defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) + +/* Get the entry address of a given function. */ +#define SLJIT_FUNC_OFFSET(func_name) ((sljit_sw)func_name) + +#else /* !(defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) */ + +/* All JIT related code should be placed in the same context (library, binary, etc.). */ + +#define SLJIT_FUNC_OFFSET(func_name) (*(sljit_sw*)(void*)func_name) + +/* For powerpc64, the function pointers point to a context descriptor. */ +struct sljit_function_context { + sljit_sw addr; + sljit_sw r2; + sljit_sw r11; +}; + +/* Fill the context arguments using the addr and the function. + If func_ptr is NULL, it will not be set to the address of context + If addr is NULL, the function address also comes from the func pointer. */ +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_sw addr, void* func); + +#endif /* !(defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) */ + +#endif /* _SLJIT_LIR_H_ */ Property changes on: sys/contrib/sljit/sljitLir.h ___________________________________________________________________ Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: sys/contrib/sljit/sljitNativeARM_Thumb2.c =================================================================== --- sys/contrib/sljit/sljitNativeARM_Thumb2.c (revision 0) +++ sys/contrib/sljit/sljitNativeARM_Thumb2.c (working copy) @@ -0,0 +1,2008 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void)
+{
+	return "ARM-Thumb2" SLJIT_CPUINFO;
+}
+
+/* Length of an instruction word. */
+typedef sljit_ui sljit_ins;
+
+/* Last register + 1. */
+#define TMP_REG1 (SLJIT_NO_REGISTERS + 1)
+#define TMP_REG2 (SLJIT_NO_REGISTERS + 2)
+#define TMP_REG3 (SLJIT_NO_REGISTERS + 3)
+#define TMP_PC (SLJIT_NO_REGISTERS + 4)
+
+#define TMP_FREG1 (0)
+#define TMP_FREG2 (SLJIT_FLOAT_REG6 + 1)
+
+/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
+static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = {
+	0, 0, 1, 2, 12, 5, 6, 7, 8, 10, 11, 13, 3, 4, 14, 15
+};
+
+#define COPY_BITS(src, from, to, bits) \
+	((from >= to ? (src >> (from - to)) : (src << (to - from))) & (((1 << bits) - 1) << to))
+
+/* Thumb16 encodings. */
+#define RD3(rd) (reg_map[rd])
+#define RN3(rn) (reg_map[rn] << 3)
+#define RM3(rm) (reg_map[rm] << 6)
+#define RDN3(rdn) (reg_map[rdn] << 8)
+#define IMM3(imm) (imm << 6)
+#define IMM8(imm) (imm)
+
+/* Thumb16 helpers. */
+#define SET_REGS44(rd, rn) \
+	((reg_map[rn] << 3) | (reg_map[rd] & 0x7) | ((reg_map[rd] & 0x8) << 4))
+#define IS_2_LO_REGS(reg1, reg2) \
+	(reg_map[reg1] <= 7 && reg_map[reg2] <= 7)
+#define IS_3_LO_REGS(reg1, reg2, reg3) \
+	(reg_map[reg1] <= 7 && reg_map[reg2] <= 7 && reg_map[reg3] <= 7)
+
+/* Thumb32 encodings. */
+#define RD4(rd) (reg_map[rd] << 8)
+#define RN4(rn) (reg_map[rn] << 16)
+#define RM4(rm) (reg_map[rm])
+#define RT4(rt) (reg_map[rt] << 12)
+#define DD4(dd) ((dd) << 12)
+#define DN4(dn) ((dn) << 16)
+#define DM4(dm) (dm)
+#define IMM5(imm) \
+	(COPY_BITS(imm, 2, 12, 3) | ((imm & 0x3) << 6))
+#define IMM12(imm) \
+	(COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff))
+
+/* --------------------------------------------------------------------- */
+/* Instruction forms */
+/* --------------------------------------------------------------------- */
+
+/* dot '.' changed to _
+ I immediate form (possibly followed by number of immediate bits).
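For readers new to these encodings, here is a standalone sketch (illustrative only, not part of the patch) of how COPY_BITS scatters a Thumb2 12-bit immediate into the i:imm3:imm8 instruction fields; the two macro definitions are copied from this file:

	#include <assert.h>

	#define COPY_BITS(src, from, to, bits) \
		((from >= to ? (src >> (from - to)) : (src << (to - from))) & (((1 << bits) - 1) << to))
	#define IMM12(imm) \
		(COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff))

	int
	main(void)
	{
		/* For 0xABC: bit 11 lands in bit 26 (i), bits 10:8 land in
		   bits 14:12 (imm3), and bits 7:0 stay in place as imm8. */
		assert(IMM12(0xABCu) == 0x040020BCu);
		return (0);
	}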
*/ +#define ADCI 0xf1400000 +#define ADCS 0x4140 +#define ADC_W 0xeb400000 +#define ADD 0x4400 +#define ADDS 0x1800 +#define ADDSI3 0x1c00 +#define ADDSI8 0x3000 +#define ADD_W 0xeb000000 +#define ADDWI 0xf2000000 +#define ADD_SP 0xb000 +#define ADD_W 0xeb000000 +#define ADD_WI 0xf1000000 +#define ANDI 0xf0000000 +#define ANDS 0x4000 +#define AND_W 0xea000000 +#define ASRS 0x4100 +#define ASRSI 0x1000 +#define ASR_W 0xfa40f000 +#define ASR_WI 0xea4f0020 +#define BICI 0xf0200000 +#define BKPT 0xbe00 +#define BLX 0x4780 +#define BX 0x4700 +#define CLZ 0xfab0f080 +#define CMPI 0x2800 +#define CMP_W 0xebb00f00 +#define EORI 0xf0800000 +#define EORS 0x4040 +#define EOR_W 0xea800000 +#define IT 0xbf00 +#define LSLS 0x4080 +#define LSLSI 0x0000 +#define LSL_W 0xfa00f000 +#define LSL_WI 0xea4f0000 +#define LSRS 0x40c0 +#define LSRSI 0x0800 +#define LSR_W 0xfa20f000 +#define LSR_WI 0xea4f0010 +#define MOV 0x4600 +#define MOVS 0x0000 +#define MOVSI 0x2000 +#define MOVT 0xf2c00000 +#define MOVW 0xf2400000 +#define MOV_W 0xea4f0000 +#define MOV_WI 0xf04f0000 +#define MUL 0xfb00f000 +#define MVNS 0x43c0 +#define MVN_W 0xea6f0000 +#define MVN_WI 0xf06f0000 +#define NOP 0xbf00 +#define ORNI 0xf0600000 +#define ORRI 0xf0400000 +#define ORRS 0x4300 +#define ORR_W 0xea400000 +#define POP 0xbd00 +#define POP_W 0xe8bd0000 +#define PUSH 0xb500 +#define PUSH_W 0xe92d0000 +#define RSB_WI 0xf1c00000 +#define RSBSI 0x4240 +#define SBCI 0xf1600000 +#define SBCS 0x4180 +#define SBC_W 0xeb600000 +#define SMULL 0xfb800000 +#define STR_SP 0x9000 +#define SUBS 0x1a00 +#define SUBSI3 0x1e00 +#define SUBSI8 0x3800 +#define SUB_W 0xeba00000 +#define SUBWI 0xf2a00000 +#define SUB_SP 0xb080 +#define SUB_WI 0xf1a00000 +#define SXTB 0xb240 +#define SXTB_W 0xfa4ff080 +#define SXTH 0xb200 +#define SXTH_W 0xfa0ff080 +#define TST 0x4200 +#define UMULL 0xfba00000 +#define UXTB 0xb2c0 +#define UXTB_W 0xfa5ff080 +#define UXTH 0xb280 +#define UXTH_W 0xfa1ff080 +#define VABS_F32 0xeeb00ac0 +#define VADD_F32 0xee300a00 +#define VCMP_F32 0xeeb40a40 +#define VDIV_F32 0xee800a00 +#define VMOV_F32 0xeeb00a40 +#define VMRS 0xeef1fa10 +#define VMUL_F32 0xee200a00 +#define VNEG_F32 0xeeb10a40 +#define VSTR_F32 0xed000a00 +#define VSUB_F32 0xee300a40 + +static sljit_si push_inst16(struct sljit_compiler *compiler, sljit_ins inst) +{ + sljit_uh *ptr; + SLJIT_ASSERT(!(inst & 0xffff0000)); + + ptr = (sljit_uh*)ensure_buf(compiler, sizeof(sljit_uh)); + FAIL_IF(!ptr); + *ptr = inst; + compiler->size++; + return SLJIT_SUCCESS; +} + +static sljit_si push_inst32(struct sljit_compiler *compiler, sljit_ins inst) +{ + sljit_uh *ptr = (sljit_uh*)ensure_buf(compiler, sizeof(sljit_ins)); + FAIL_IF(!ptr); + *ptr++ = inst >> 16; + *ptr = inst; + compiler->size += 2; + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si emit_imm32_const(struct sljit_compiler *compiler, sljit_si dst, sljit_uw imm) +{ + FAIL_IF(push_inst32(compiler, MOVW | RD4(dst) | + COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff))); + return push_inst32(compiler, MOVT | RD4(dst) | + COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16)); +} + +static SLJIT_INLINE void modify_imm32_const(sljit_uh* inst, sljit_uw new_imm) +{ + sljit_si dst = inst[1] & 0x0f00; + SLJIT_ASSERT(((inst[0] & 0xfbf0) == (MOVW >> 16)) && ((inst[2] & 0xfbf0) == (MOVT >> 16)) && dst == (inst[3] & 0x0f00)); + inst[0] = (MOVW >> 16) | COPY_BITS(new_imm, 12, 0, 4) | COPY_BITS(new_imm, 11, 10, 1); + inst[1] 
= dst | COPY_BITS(new_imm, 8, 12, 3) | (new_imm & 0xff); + inst[2] = (MOVT >> 16) | COPY_BITS(new_imm, 12 + 16, 0, 4) | COPY_BITS(new_imm, 11 + 16, 10, 1); + inst[3] = dst | COPY_BITS(new_imm, 8 + 16, 12, 3) | ((new_imm & 0xff0000) >> 16); +} + +static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uh *code_ptr, sljit_uh *code) +{ + sljit_sw diff; + + if (jump->flags & SLJIT_REWRITABLE_JUMP) + return 0; + + if (jump->flags & JUMP_ADDR) { + /* Branch to ARM code is not optimized yet. */ + if (!(jump->u.target & 0x1)) + return 0; + diff = ((sljit_sw)jump->u.target - (sljit_sw)(code_ptr + 2)) >> 1; + } + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)(code_ptr + 2)) >> 1; + } + + if (jump->flags & IS_COND) { + SLJIT_ASSERT(!(jump->flags & IS_BL)); + if (diff <= 127 && diff >= -128) { + jump->flags |= B_TYPE1; + return 5; + } + if (diff <= 524287 && diff >= -524288) { + jump->flags |= B_TYPE2; + return 4; + } + /* +1 comes from the prefix IT instruction. */ + diff--; + if (diff <= 8388607 && diff >= -8388608) { + jump->flags |= B_TYPE3; + return 3; + } + } + else if (jump->flags & IS_BL) { + if (diff <= 8388607 && diff >= -8388608) { + jump->flags |= BL_TYPE6; + return 3; + } + } + else { + if (diff <= 1023 && diff >= -1024) { + jump->flags |= B_TYPE4; + return 4; + } + if (diff <= 8388607 && diff >= -8388608) { + jump->flags |= B_TYPE5; + return 3; + } + } + + return 0; +} + +static SLJIT_INLINE void inline_set_jump_addr(sljit_uw addr, sljit_uw new_addr, sljit_si flush) +{ + sljit_uh* inst = (sljit_uh*)addr; + modify_imm32_const(inst, new_addr); + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 3); + } +} + +static SLJIT_INLINE void set_jump_instruction(struct sljit_jump *jump) +{ + sljit_si type = (jump->flags >> 4) & 0xf; + sljit_sw diff; + sljit_uh *jump_inst; + sljit_si s, j1, j2; + + if (SLJIT_UNLIKELY(type == 0)) { + inline_set_jump_addr(jump->addr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); + return; + } + + if (jump->flags & JUMP_ADDR) { + SLJIT_ASSERT(jump->u.target & 0x1); + diff = ((sljit_sw)jump->u.target - (sljit_sw)(jump->addr + 4)) >> 1; + } + else + diff = ((sljit_sw)(jump->u.label->addr) - (sljit_sw)(jump->addr + 4)) >> 1; + jump_inst = (sljit_uh*)jump->addr; + + switch (type) { + case 1: + /* Encoding T1 of 'B' instruction */ + SLJIT_ASSERT(diff <= 127 && diff >= -128 && (jump->flags & IS_COND)); + jump_inst[0] = 0xd000 | (jump->flags & 0xf00) | (diff & 0xff); + return; + case 2: + /* Encoding T3 of 'B' instruction */ + SLJIT_ASSERT(diff <= 524287 && diff >= -524288 && (jump->flags & IS_COND)); + jump_inst[0] = 0xf000 | COPY_BITS(jump->flags, 8, 6, 4) | COPY_BITS(diff, 11, 0, 6) | COPY_BITS(diff, 19, 10, 1); + jump_inst[1] = 0x8000 | COPY_BITS(diff, 17, 13, 1) | COPY_BITS(diff, 18, 11, 1) | (diff & 0x7ff); + return; + case 3: + SLJIT_ASSERT(jump->flags & IS_COND); + *jump_inst++ = IT | ((jump->flags >> 4) & 0xf0) | 0x8; + diff--; + type = 5; + break; + case 4: + /* Encoding T2 of 'B' instruction */ + SLJIT_ASSERT(diff <= 1023 && diff >= -1024 && !(jump->flags & IS_COND)); + jump_inst[0] = 0xe000 | (diff & 0x7ff); + return; + } + + SLJIT_ASSERT(diff <= 8388607 && diff >= -8388608); + + /* Really complex instruction form for branches. 
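+ The s, j1 and j2 bits below fold the top bits of the halfword offset into the split Thumb2 B/BL immediate fields.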
*/ + s = (diff >> 23) & 0x1; + j1 = (~(diff >> 21) ^ s) & 0x1; + j2 = (~(diff >> 22) ^ s) & 0x1; + jump_inst[0] = 0xf000 | (s << 10) | COPY_BITS(diff, 11, 0, 10); + jump_inst[1] = (j1 << 13) | (j2 << 11) | (diff & 0x7ff); + + /* The others have a common form. */ + if (type == 5) /* Encoding T4 of 'B' instruction */ + jump_inst[1] |= 0x9000; + else if (type == 6) /* Encoding T1 of 'BL' instruction */ + jump_inst[1] |= 0xd000; + else + SLJIT_ASSERT_STOP(); +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_uh *code; + sljit_uh *code_ptr; + sljit_uh *buf_ptr; + sljit_uh *buf_end; + sljit_uw half_count; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + + CHECK_ERROR_PTR(); + check_sljit_generate_code(compiler); + reverse_buf(compiler); + + code = (sljit_uh*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_uh)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + half_count = 0; + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + + do { + buf_ptr = (sljit_uh*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 1); + do { + *code_ptr = *buf_ptr++; + /* These structures are ordered by their address. */ + SLJIT_ASSERT(!label || label->size >= half_count); + SLJIT_ASSERT(!jump || jump->addr >= half_count); + SLJIT_ASSERT(!const_ || const_->addr >= half_count); + if (label && label->size == half_count) { + label->addr = ((sljit_uw)code_ptr) | 0x1; + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == half_count) { + jump->addr = (sljit_uw)code_ptr - ((jump->flags & IS_COND) ? 10 : 8); + code_ptr -= detect_jump_type(jump, code_ptr, code); + jump = jump->next; + } + if (const_ && const_->addr == half_count) { + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + code_ptr ++; + half_count ++; + } while (buf_ptr < buf_end); + + buf = buf->next; + } while (buf); + + if (label && label->size == half_count) { + label->addr = ((sljit_uw)code_ptr) | 0x1; + label->size = code_ptr - code; + label = label->next; + } + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size); + + jump = compiler->jumps; + while (jump) { + set_jump_instruction(jump); + jump = jump->next; + } + + SLJIT_CACHE_FLUSH(code, code_ptr); + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_size = compiler->size * sizeof(sljit_uh); + /* Set thumb mode flag. */ + return (void*)((sljit_uw)code | 0x1); +} + +#define INVALID_IMM 0x80000000 +static sljit_uw get_imm(sljit_uw imm) +{ + /* Thumb immediate form. */ + sljit_si counter; + + if (imm <= 0xff) + return imm; + + if ((imm & 0xffff) == (imm >> 16)) { + /* Some special cases. */ + if (!(imm & 0xff00)) + return (1 << 12) | (imm & 0xff); + if (!(imm & 0xff)) + return (2 << 12) | ((imm >> 8) & 0xff); + if ((imm & 0xff00) == ((imm & 0xff) << 8)) + return (3 << 12) | (imm & 0xff); + } + + /* Assembly optimization: count leading zeroes? */ + counter = 8; + if (!(imm & 0xffff0000)) { + counter += 16; + imm <<= 16; + } + if (!(imm & 0xff000000)) { + counter += 8; + imm <<= 8; + } + if (!(imm & 0xf0000000)) { + counter += 4; + imm <<= 4; + } + if (!(imm & 0xc0000000)) { + counter += 2; + imm <<= 2; + } + if (!(imm & 0x80000000)) { + counter += 1; + imm <<= 1; + } + /* Since imm >= 128, this must be true. 
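+ counter is 8 plus the number of left shifts needed to bring the highest set bit to bit 31; e.g. imm == 0x0001fe00 (0xff << 9) ends up as 0xff000000 with counter == 23.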
*/ + SLJIT_ASSERT(counter <= 31); + + if (imm & 0x00ffffff) + return INVALID_IMM; /* Cannot be encoded. */ + + return ((imm >> 24) & 0x7f) | COPY_BITS(counter, 4, 26, 1) | COPY_BITS(counter, 1, 12, 3) | COPY_BITS(counter, 0, 7, 1); +} + +static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si dst, sljit_uw imm) +{ + sljit_uw tmp; + + if (imm >= 0x10000) { + tmp = get_imm(imm); + if (tmp != INVALID_IMM) + return push_inst32(compiler, MOV_WI | RD4(dst) | tmp); + tmp = get_imm(~imm); + if (tmp != INVALID_IMM) + return push_inst32(compiler, MVN_WI | RD4(dst) | tmp); + } + + /* Set the low 16 bits; clear the high 16 bits. */ + FAIL_IF(push_inst32(compiler, MOVW | RD4(dst) | + COPY_BITS(imm, 12, 16, 4) | COPY_BITS(imm, 11, 26, 1) | COPY_BITS(imm, 8, 12, 3) | (imm & 0xff))); + + /* Set the high 16 bits if needed. */ + if (imm >= 0x10000) + return push_inst32(compiler, MOVT | RD4(dst) | + COPY_BITS(imm, 12 + 16, 16, 4) | COPY_BITS(imm, 11 + 16, 26, 1) | COPY_BITS(imm, 8 + 16, 12, 3) | ((imm & 0xff0000) >> 16)); + return SLJIT_SUCCESS; +} + +#define ARG1_IMM 0x0010000 +#define ARG2_IMM 0x0020000 +#define KEEP_FLAGS 0x0040000 +#define SET_MULOV 0x0080000 +/* SET_FLAGS must be 0x100000 as it is also the value of the S bit (can be used for optimization). */ +#define SET_FLAGS 0x0100000 +#define UNUSED_RETURN 0x0200000 +#define SLOW_DEST 0x0400000 +#define SLOW_SRC1 0x0800000 +#define SLOW_SRC2 0x1000000 + +static sljit_si emit_op_imm(struct sljit_compiler *compiler, sljit_si flags, sljit_si dst, sljit_uw arg1, sljit_uw arg2) +{ + /* dst must be register, TMP_REG1 + arg1 must be register, TMP_REG1, imm + arg2 must be register, TMP_REG2, imm */ + sljit_si reg; + sljit_uw imm, negated_imm; + + if (SLJIT_UNLIKELY((flags & (ARG1_IMM | ARG2_IMM)) == (ARG1_IMM | ARG2_IMM))) { + /* Both are immediates. */ + flags &= ~ARG1_IMM; + FAIL_IF(load_immediate(compiler, TMP_REG1, arg1)); + arg1 = TMP_REG1; + } + + if (flags & (ARG1_IMM | ARG2_IMM)) { + reg = (flags & ARG2_IMM) ? arg1 : arg2; + imm = (flags & ARG2_IMM) ? arg2 : arg1; + + switch (flags & 0xffff) { + case SLJIT_MOV: + SLJIT_ASSERT(!(flags & SET_FLAGS) && (flags & ARG2_IMM) && arg1 == TMP_REG1); + return load_immediate(compiler, dst, imm); + case SLJIT_NOT: + if (!(flags & SET_FLAGS)) + return load_immediate(compiler, dst, ~imm); + /* Since the flags should be set, we just fall back to the register mode. + Although we could do some clever things here, "NOT IMM" is not worth the effort. */ + break; + case SLJIT_CLZ: + /* No form with immediate operand.
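+ The value is loaded into a temporary register after the switch instead.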
*/ + break; + case SLJIT_ADD: + negated_imm = (sljit_uw)-(sljit_sw)imm; + if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(reg, dst)) { + if (imm <= 0x7) + return push_inst16(compiler, ADDSI3 | IMM3(imm) | RD3(dst) | RN3(reg)); + if (negated_imm <= 0x7) + return push_inst16(compiler, SUBSI3 | IMM3(negated_imm) | RD3(dst) | RN3(reg)); + if (reg == dst) { + if (imm <= 0xff) + return push_inst16(compiler, ADDSI8 | IMM8(imm) | RDN3(dst)); + if (negated_imm <= 0xff) + return push_inst16(compiler, SUBSI8 | IMM8(negated_imm) | RDN3(dst)); + } + } + if (!(flags & SET_FLAGS)) { + if (imm <= 0xfff) + return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(imm)); + if (negated_imm <= 0xfff) + return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(negated_imm)); + } + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, ADD_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + break; + case SLJIT_ADDC: + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, ADCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + break; + case SLJIT_SUB: + if (flags & ARG2_IMM) { + negated_imm = (sljit_uw)-(sljit_sw)imm; + if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(reg, dst)) { + if (imm <= 0x7) + return push_inst16(compiler, SUBSI3 | IMM3(imm) | RD3(dst) | RN3(reg)); + if (negated_imm <= 0x7) + return push_inst16(compiler, ADDSI3 | IMM3(negated_imm) | RD3(dst) | RN3(reg)); + if (reg == dst) { + if (imm <= 0xff) + return push_inst16(compiler, SUBSI8 | IMM8(imm) | RDN3(dst)); + if (negated_imm <= 0xff) + return push_inst16(compiler, ADDSI8 | IMM8(negated_imm) | RDN3(dst)); + } + if (imm <= 0xff && (flags & UNUSED_RETURN)) + return push_inst16(compiler, CMPI | IMM8(imm) | RDN3(reg)); + } + if (!(flags & SET_FLAGS)) { + if (imm <= 0xfff) + return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(imm)); + if (negated_imm <= 0xfff) + return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(negated_imm)); + } + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, SUB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + } + else { + if (!(flags & KEEP_FLAGS) && imm == 0 && IS_2_LO_REGS(reg, dst)) + return push_inst16(compiler, RSBSI | RD3(dst) | RN3(reg)); + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, RSB_WI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + } + break; + case SLJIT_SUBC: + if (flags & ARG2_IMM) { + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, SBCI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + } + break; + case SLJIT_MUL: + /* No form with immediate operand. */ + break; + case SLJIT_AND: + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, ANDI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + imm = get_imm(~((flags & ARG2_IMM) ? arg2 : arg1)); + if (imm != INVALID_IMM) + return push_inst32(compiler, BICI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + break; + case SLJIT_OR: + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, ORRI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + imm = get_imm(~((flags & ARG2_IMM) ? 
arg2 : arg1)); + if (imm != INVALID_IMM) + return push_inst32(compiler, ORNI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + break; + case SLJIT_XOR: + imm = get_imm(imm); + if (imm != INVALID_IMM) + return push_inst32(compiler, EORI | (flags & SET_FLAGS) | RD4(dst) | RN4(reg) | imm); + break; + case SLJIT_SHL: + if (flags & ARG2_IMM) { + imm &= 0x1f; + if (imm == 0) { + if (!(flags & SET_FLAGS)) + return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); + if (IS_2_LO_REGS(dst, reg)) + return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); + return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); + } + if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, reg)) + return push_inst16(compiler, LSLSI | RD3(dst) | RN3(reg) | (imm << 6)); + return push_inst32(compiler, LSL_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); + } + break; + case SLJIT_LSHR: + if (flags & ARG2_IMM) { + imm &= 0x1f; + if (imm == 0) { + if (!(flags & SET_FLAGS)) + return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); + if (IS_2_LO_REGS(dst, reg)) + return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); + return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); + } + if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, reg)) + return push_inst16(compiler, LSRSI | RD3(dst) | RN3(reg) | (imm << 6)); + return push_inst32(compiler, LSR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); + } + break; + case SLJIT_ASHR: + if (flags & ARG2_IMM) { + imm &= 0x1f; + if (imm == 0) { + if (!(flags & SET_FLAGS)) + return push_inst16(compiler, MOV | SET_REGS44(dst, reg)); + if (IS_2_LO_REGS(dst, reg)) + return push_inst16(compiler, MOVS | RD3(dst) | RN3(reg)); + return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(dst) | RM4(reg)); + } + if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, reg)) + return push_inst16(compiler, ASRSI | RD3(dst) | RN3(reg) | (imm << 6)); + return push_inst32(compiler, ASR_WI | (flags & SET_FLAGS) | RD4(dst) | RM4(reg) | IMM5(imm)); + } + break; + default: + SLJIT_ASSERT_STOP(); + break; + } + + if (flags & ARG2_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG2, arg2)); + arg2 = TMP_REG2; + } + else { + FAIL_IF(load_immediate(compiler, TMP_REG1, arg1)); + arg1 = TMP_REG1; + } + } + + /* Both arguments are registers. 
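+ Any immediate argument was materialized into TMP_REG1 or TMP_REG2 above.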
*/ + switch (flags & 0xffff) { + case SLJIT_MOV: + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: + case SLJIT_MOV_P: + case SLJIT_MOVU: + case SLJIT_MOVU_UI: + case SLJIT_MOVU_SI: + case SLJIT_MOVU_P: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + return push_inst16(compiler, MOV | SET_REGS44(dst, arg2)); + case SLJIT_MOV_UB: + case SLJIT_MOVU_UB: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if (IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, UXTB | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, UXTB_W | RD4(dst) | RM4(arg2)); + case SLJIT_MOV_SB: + case SLJIT_MOVU_SB: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if (IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, SXTB | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, SXTB_W | RD4(dst) | RM4(arg2)); + case SLJIT_MOV_UH: + case SLJIT_MOVU_UH: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if (IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, UXTH | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, UXTH_W | RD4(dst) | RM4(arg2)); + case SLJIT_MOV_SH: + case SLJIT_MOVU_SH: + SLJIT_ASSERT(!(flags & SET_FLAGS) && arg1 == TMP_REG1); + if (IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, SXTH | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, SXTH_W | RD4(dst) | RM4(arg2)); + case SLJIT_NOT: + SLJIT_ASSERT(arg1 == TMP_REG1); + if (!(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, MVNS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, MVN_W | (flags & SET_FLAGS) | RD4(dst) | RM4(arg2)); + case SLJIT_CLZ: + SLJIT_ASSERT(arg1 == TMP_REG1); + FAIL_IF(push_inst32(compiler, CLZ | RN4(arg2) | RD4(dst) | RM4(arg2))); + if (flags & SET_FLAGS) { + if (reg_map[dst] <= 7) + return push_inst16(compiler, CMPI | RDN3(dst)); + return push_inst32(compiler, ADD_WI | SET_FLAGS | RN4(dst) | RD4(dst)); + } + return SLJIT_SUCCESS; + case SLJIT_ADD: + if (!(flags & KEEP_FLAGS) && IS_3_LO_REGS(dst, arg1, arg2)) + return push_inst16(compiler, ADDS | RD3(dst) | RN3(arg1) | RM3(arg2)); + if (dst == arg1 && !(flags & SET_FLAGS)) + return push_inst16(compiler, ADD | SET_REGS44(dst, arg2)); + return push_inst32(compiler, ADD_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_ADDC: + if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, ADCS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, ADC_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_SUB: + if (!(flags & KEEP_FLAGS) && IS_3_LO_REGS(dst, arg1, arg2)) + return push_inst16(compiler, SUBS | RD3(dst) | RN3(arg1) | RM3(arg2)); + return push_inst32(compiler, SUB_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_SUBC: + if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, SBCS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, SBC_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_MUL: + if (!(flags & SET_FLAGS)) + return push_inst32(compiler, MUL | RD4(dst) | RN4(arg1) | RM4(arg2)); + SLJIT_ASSERT(reg_map[TMP_REG2] <= 7 && dst != TMP_REG2); + FAIL_IF(push_inst32(compiler, SMULL | RT4(dst) | RD4(TMP_REG2) | RN4(arg1) | RM4(arg2))); + /* cmp TMP_REG2, dst asr #31. 
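+ Checks for signed overflow: the high word of the 64 bit product must match the sign extension of the low word.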
*/ + return push_inst32(compiler, CMP_W | RN4(TMP_REG2) | 0x70e0 | RM4(dst)); + case SLJIT_AND: + if (!(flags & KEEP_FLAGS)) { + if (dst == arg1 && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, ANDS | RD3(dst) | RN3(arg2)); + if ((flags & UNUSED_RETURN) && IS_2_LO_REGS(arg1, arg2)) + return push_inst16(compiler, TST | RD3(arg1) | RN3(arg2)); + } + return push_inst32(compiler, AND_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_OR: + if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, ORRS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, ORR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_XOR: + if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, EORS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, EOR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_SHL: + if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, LSLS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, LSL_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_LSHR: + if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, LSRS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, LSR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + case SLJIT_ASHR: + if (dst == arg1 && !(flags & KEEP_FLAGS) && IS_2_LO_REGS(dst, arg2)) + return push_inst16(compiler, ASRS | RD3(dst) | RN3(arg2)); + return push_inst32(compiler, ASR_W | (flags & SET_FLAGS) | RD4(dst) | RN4(arg1) | RM4(arg2)); + } + + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; +} + +#define STORE 0x01 +#define SIGNED 0x02 + +#define WORD_SIZE 0x00 +#define BYTE_SIZE 0x04 +#define HALF_SIZE 0x08 + +#define UPDATE 0x10 +#define ARG_TEST 0x20 + +#define IS_WORD_SIZE(flags) (!(flags & (BYTE_SIZE | HALF_SIZE))) +#define OFFSET_CHECK(imm, shift) (!(argw & ~(imm << shift))) + +/* + 1st letter: + w = word + b = byte + h = half + + 2nd letter: + s = signed + u = unsigned + + 3rd letter: + l = load + s = store +*/ + +static SLJIT_CONST sljit_uw sljit_mem16[12] = { +/* w u l */ 0x5800 /* ldr */, +/* w u s */ 0x5000 /* str */, +/* w s l */ 0x5800 /* ldr */, +/* w s s */ 0x5000 /* str */, + +/* b u l */ 0x5c00 /* ldrb */, +/* b u s */ 0x5400 /* strb */, +/* b s l */ 0x5600 /* ldrsb */, +/* b s s */ 0x5400 /* strb */, + +/* h u l */ 0x5a00 /* ldrh */, +/* h u s */ 0x5200 /* strh */, +/* h s l */ 0x5e00 /* ldrsh */, +/* h s s */ 0x5200 /* strh */, +}; + +static SLJIT_CONST sljit_uw sljit_mem16_imm5[12] = { +/* w u l */ 0x6800 /* ldr imm5 */, +/* w u s */ 0x6000 /* str imm5 */, +/* w s l */ 0x6800 /* ldr imm5 */, +/* w s s */ 0x6000 /* str imm5 */, + +/* b u l */ 0x7800 /* ldrb imm5 */, +/* b u s */ 0x7000 /* strb imm5 */, +/* b s l */ 0x0000 /* not allowed */, +/* b s s */ 0x7000 /* strb imm5 */, + +/* h u l */ 0x8800 /* ldrh imm5 */, +/* h u s */ 0x8000 /* strh imm5 */, +/* h s l */ 0x0000 /* not allowed */, +/* h s s */ 0x8000 /* strh imm5 */, +}; + +#define MEM_IMM8 0xc00 +#define MEM_IMM12 0x800000 +static SLJIT_CONST sljit_uw sljit_mem32[12] = { +/* w u l */ 0xf8500000 /* ldr.w */, +/* w u s */ 0xf8400000 /* str.w */, +/* w s l */ 0xf8500000 /* ldr.w */, +/* w s s */ 0xf8400000 /* str.w */, + +/* b u l */ 0xf8100000 /* ldrb.w */, +/* b u s */ 0xf8000000 /* strb.w */, +/* b s l */ 0xf9100000 /* ldrsb.w */, +/* b s s */ 0xf8000000 /* strb.w */, + +/* h u l */ 0xf8300000 
/* ldrh.w */, +/* h u s */ 0xf8200000 /* strh.w */, +/* h s l */ 0xf9300000 /* ldrsh.w */, +/* h s s */ 0xf8200000 /* strh.w */, +}; + +/* Helper function. Dst should be reg + value, using at most 1 instruction; the flags are not set. */ +static sljit_si emit_set_delta(struct sljit_compiler *compiler, sljit_si dst, sljit_si reg, sljit_sw value) +{ + if (value >= 0) { + if (value <= 0xfff) + return push_inst32(compiler, ADDWI | RD4(dst) | RN4(reg) | IMM12(value)); + value = get_imm(value); + if (value != INVALID_IMM) + return push_inst32(compiler, ADD_WI | RD4(dst) | RN4(reg) | value); + } + else { + value = -value; + if (value <= 0xfff) + return push_inst32(compiler, SUBWI | RD4(dst) | RN4(reg) | IMM12(value)); + value = get_imm(value); + if (value != INVALID_IMM) + return push_inst32(compiler, SUB_WI | RD4(dst) | RN4(reg) | value); + } + return SLJIT_ERR_UNSUPPORTED; +} + +/* Can perform an operation using at most 1 instruction. */ +static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +{ + sljit_si tmp; + + SLJIT_ASSERT(arg & SLJIT_MEM); + + if (SLJIT_UNLIKELY(flags & UPDATE)) { + if ((arg & 0xf) && !(arg & 0xf0) && argw <= 0xff && argw >= -0xff) { + flags &= ~UPDATE; + arg &= 0xf; + if (SLJIT_UNLIKELY(flags & ARG_TEST)) + return 1; + + if (argw >= 0) + argw |= 0x200; + else { + argw = -argw; + } + SLJIT_ASSERT(argw >= 0 && (argw & 0xff) <= 0xff); + FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | 0x100 | argw)); + return -1; + } + return (flags & ARG_TEST) ? SLJIT_SUCCESS : 0; + } + + if (SLJIT_UNLIKELY(arg & 0xf0)) { + argw &= 0x3; + tmp = (arg >> 4) & 0xf; + arg &= 0xf; + if (SLJIT_UNLIKELY(flags & ARG_TEST)) + return 1; + + if (!argw && IS_3_LO_REGS(reg, arg, tmp)) + FAIL_IF(push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(tmp))); + else + FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp) | (argw << 4))); + return -1; + } + + if (!(arg & 0xf) || argw > 0xfff || argw < -0xff) + return (flags & ARG_TEST) ? SLJIT_SUCCESS : 0; + + if (SLJIT_UNLIKELY(flags & ARG_TEST)) + return 1; + + arg &= 0xf; + if (IS_2_LO_REGS(reg, arg) && sljit_mem16_imm5[flags]) { + tmp = 3; + if (IS_WORD_SIZE(flags)) { + if (OFFSET_CHECK(0x1f, 2)) + tmp = 2; + } + else if (flags & BYTE_SIZE) { + if (OFFSET_CHECK(0x1f, 0)) + tmp = 0; + } + else { + SLJIT_ASSERT(flags & HALF_SIZE); + if (OFFSET_CHECK(0x1f, 1)) + tmp = 1; + } + + if (tmp != 3) { + FAIL_IF(push_inst16(compiler, sljit_mem16_imm5[flags] | RD3(reg) | RN3(arg) | (argw << (6 - tmp)))); + return -1; + } + } + + /* SP based immediate. */ + if (SLJIT_UNLIKELY(arg == SLJIT_LOCALS_REG) && OFFSET_CHECK(0xff, 2) && IS_WORD_SIZE(flags) && reg_map[reg] <= 7) { + FAIL_IF(push_inst16(compiler, STR_SP | ((flags & STORE) ? 0 : 0x800) | RDN3(reg) | (argw >> 2))); + return -1; + } + + if (argw >= 0) + FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg) | argw)); + else + FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(arg) | -argw)); + return -1; +} + +/* see getput_arg below. + Note: can_cache is called only for binary operators. Those + operators always use word arguments without write back. */ +static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + /* Simple operation except for updates.
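+ Caching the base in TMP_REG3 only pays off when the next argument is a memory access whose offset stays within the 12 bit immediate range.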
*/ + if ((arg & 0xf0) || !(next_arg & SLJIT_MEM)) + return 0; + + if (!(arg & 0xf)) { + if ((sljit_uw)(argw - next_argw) <= 0xfff || (sljit_uw)(next_argw - argw) <= 0xfff) + return 1; + return 0; + } + + if (argw == next_argw) + return 1; + + if (arg == next_arg && ((sljit_uw)(argw - next_argw) <= 0xfff || (sljit_uw)(next_argw - argw) <= 0xfff)) + return 1; + + return 0; +} + +/* Emit the necessary instructions. See can_cache above. */ +static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + sljit_si tmp_r; + sljit_sw tmp; + + SLJIT_ASSERT(arg & SLJIT_MEM); + if (!(next_arg & SLJIT_MEM)) { + next_arg = 0; + next_argw = 0; + } + + tmp_r = (flags & STORE) ? TMP_REG3 : reg; + + if (SLJIT_UNLIKELY(flags & UPDATE)) { + flags &= ~UPDATE; + /* Update only applies if a base register exists. */ + if (arg & 0xf) { + /* There is no caching here. */ + tmp = (arg & 0xf0) >> 4; + arg &= 0xf; + + if (!tmp) { + if (!(argw & ~0xfff)) { + FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(arg) | argw)); + return push_inst32(compiler, ADDWI | RD4(arg) | RN4(arg) | IMM12(argw)); + } + + if (compiler->cache_arg == SLJIT_MEM) { + if (argw == compiler->cache_argw) { + tmp = TMP_REG3; + argw = 0; + } + else if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, argw - compiler->cache_argw) != SLJIT_ERR_UNSUPPORTED) { + FAIL_IF(compiler->error); + compiler->cache_argw = argw; + tmp = TMP_REG3; + argw = 0; + } + } + + if (argw) { + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + compiler->cache_arg = SLJIT_MEM; + compiler->cache_argw = argw; + tmp = TMP_REG3; + argw = 0; + } + } + + argw &= 0x3; + if (!argw && IS_3_LO_REGS(reg, arg, tmp)) { + FAIL_IF(push_inst16(compiler, sljit_mem16[flags] | RD3(reg) | RN3(arg) | RM3(tmp))); + return push_inst16(compiler, ADD | SET_REGS44(arg, tmp)); + } + FAIL_IF(push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(tmp) | (argw << 4))); + return push_inst32(compiler, ADD_W | RD4(arg) | RN4(arg) | RM4(tmp) | (argw << 6)); + } + } + + SLJIT_ASSERT(!(arg & 0xf0)); + + if (compiler->cache_arg == arg) { + if (!((argw - compiler->cache_argw) & ~0xfff)) + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(TMP_REG3) | (argw - compiler->cache_argw)); + if (!((compiler->cache_argw - argw) & ~0xff)) + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM8 | RT4(reg) | RN4(TMP_REG3) | (compiler->cache_argw - argw)); + if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, argw - compiler->cache_argw) != SLJIT_ERR_UNSUPPORTED) { + FAIL_IF(compiler->error); + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(TMP_REG3) | 0); + } + } + + next_arg = (arg & 0xf) && (arg == next_arg); + arg &= 0xf; + if (arg && compiler->cache_arg == SLJIT_MEM && compiler->cache_argw == argw) + return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | RM4(TMP_REG3)); + + compiler->cache_argw = argw; + if (next_arg && emit_set_delta(compiler, TMP_REG3, arg, argw) != SLJIT_ERR_UNSUPPORTED) { + FAIL_IF(compiler->error); + compiler->cache_arg = SLJIT_MEM | arg; + arg = 0; + } + else { + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + compiler->cache_arg = SLJIT_MEM; + + if (next_arg) { + FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG3, arg))); + compiler->cache_arg = SLJIT_MEM | arg; + arg = 0; + } + } + + if (arg) + return push_inst32(compiler, sljit_mem32[flags] | RT4(reg) | RN4(arg) | 
RM4(TMP_REG3)); + return push_inst32(compiler, sljit_mem32[flags] | MEM_IMM12 | RT4(reg) | RN4(TMP_REG3) | 0); +} + +static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +{ + if (getput_arg_fast(compiler, flags, reg, arg, argw)) + return compiler->error; + compiler->cache_arg = 0; + compiler->cache_argw = 0; + return getput_arg(compiler, flags, reg, arg, argw, 0, 0); +} + +static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) +{ + if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) + return compiler->error; + return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si size; + sljit_ins push; + + CHECK_ERROR(); + check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + push = (1 << 4); + if (saveds >= 5) + push |= 1 << 11; + if (saveds >= 4) + push |= 1 << 10; + if (saveds >= 3) + push |= 1 << 8; + if (saveds >= 2) + push |= 1 << 7; + if (saveds >= 1) + push |= 1 << 6; + if (scratches >= 5) + push |= 1 << 5; + FAIL_IF(saveds >= 3 + ? push_inst32(compiler, PUSH_W | (1 << 14) | push) + : push_inst16(compiler, PUSH | push)); + + /* Stack must be aligned to 8 bytes: */ + size = (3 + saveds) * sizeof(sljit_uw); + local_size += size; + local_size = (local_size + 7) & ~7; + local_size -= size; + compiler->local_size = local_size; + if (local_size > 0) { + if (local_size <= (127 << 2)) + FAIL_IF(push_inst16(compiler, SUB_SP | (local_size >> 2))); + else + FAIL_IF(emit_op_imm(compiler, SLJIT_SUB | ARG2_IMM, SLJIT_LOCALS_REG, SLJIT_LOCALS_REG, local_size)); + } + + if (args >= 1) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SAVED_REG1, SLJIT_SCRATCH_REG1))); + if (args >= 2) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SAVED_REG2, SLJIT_SCRATCH_REG2))); + if (args >= 3) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(SLJIT_SAVED_REG3, SLJIT_SCRATCH_REG3))); + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si size; + + CHECK_ERROR_VOID(); + check_sljit_set_context(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + size = (3 + saveds) * sizeof(sljit_uw); + local_size += size; + local_size = (local_size + 7) & ~7; + local_size -= size; + compiler->local_size = local_size; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + sljit_ins pop; + + CHECK_ERROR(); + check_sljit_emit_return(compiler, op, src, srcw); + + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + + if (compiler->local_size > 0) { + if (compiler->local_size <= (127 << 2)) + 
FAIL_IF(push_inst16(compiler, ADD_SP | (compiler->local_size >> 2))); + else + FAIL_IF(emit_op_imm(compiler, SLJIT_ADD | ARG2_IMM, SLJIT_LOCALS_REG, SLJIT_LOCALS_REG, compiler->local_size)); + } + + pop = (1 << 4); + if (compiler->saveds >= 5) + pop |= 1 << 11; + if (compiler->saveds >= 4) + pop |= 1 << 10; + if (compiler->saveds >= 3) + pop |= 1 << 8; + if (compiler->saveds >= 2) + pop |= 1 << 7; + if (compiler->saveds >= 1) + pop |= 1 << 6; + if (compiler->scratches >= 5) + pop |= 1 << 5; + return compiler->saveds >= 3 + ? push_inst32(compiler, POP_W | (1 << 15) | pop) + : push_inst16(compiler, POP | pop); +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__GNUC__) +extern unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator); +extern int __aeabi_idivmod(int numerator, int denominator); +#else +#error "Software divmod functions are needed" +#endif + +#ifdef __cplusplus +} +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + CHECK_ERROR(); + check_sljit_emit_op0(compiler, op); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_BREAKPOINT: + push_inst16(compiler, BKPT); + break; + case SLJIT_NOP: + push_inst16(compiler, NOP); + break; + case SLJIT_UMUL: + case SLJIT_SMUL: + return push_inst32(compiler, (op == SLJIT_UMUL ? UMULL : SMULL) + | (reg_map[SLJIT_SCRATCH_REG2] << 8) + | (reg_map[SLJIT_SCRATCH_REG1] << 12) + | (reg_map[SLJIT_SCRATCH_REG1] << 16) + | reg_map[SLJIT_SCRATCH_REG2]); + case SLJIT_UDIV: + case SLJIT_SDIV: + if (compiler->scratches >= 4) { + FAIL_IF(push_inst32(compiler, 0xf84d2d04 /* str r2, [sp, #-4]! */)); + FAIL_IF(push_inst32(compiler, 0xf84dcd04 /* str ip, [sp, #-4]! */)); + } else if (compiler->scratches >= 3) + FAIL_IF(push_inst32(compiler, 0xf84d2d08 /* str r2, [sp, #-8]! */)); +#if defined(__GNUC__) + FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM, + (op == SLJIT_UDIV ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod)))); +#else +#error "Software divmod functions are needed" +#endif + if (compiler->scratches >= 4) { + FAIL_IF(push_inst32(compiler, 0xf85dcb04 /* ldr ip, [sp], #4 */)); + return push_inst32(compiler, 0xf85d2b04 /* ldr r2, [sp], #4 */); + } else if (compiler->scratches >= 3) + return push_inst32(compiler, 0xf85d2b08 /* ldr r2, [sp], #8 */); + return SLJIT_SUCCESS; + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si dst_r, flags; + sljit_si op_flags = GET_ALL_FLAGS(op); + + CHECK_ERROR(); + check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + dst_r = (dst >= SLJIT_SCRATCH_REG1 && dst <= TMP_REG3) ?
dst : TMP_REG1; + + op = GET_OPCODE(op); + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) { + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: + case SLJIT_MOV_P: + flags = WORD_SIZE; + break; + case SLJIT_MOV_UB: + flags = BYTE_SIZE; + if (src & SLJIT_IMM) + srcw = (sljit_ub)srcw; + break; + case SLJIT_MOV_SB: + flags = BYTE_SIZE | SIGNED; + if (src & SLJIT_IMM) + srcw = (sljit_sb)srcw; + break; + case SLJIT_MOV_UH: + flags = HALF_SIZE; + if (src & SLJIT_IMM) + srcw = (sljit_uh)srcw; + break; + case SLJIT_MOV_SH: + flags = HALF_SIZE | SIGNED; + if (src & SLJIT_IMM) + srcw = (sljit_sh)srcw; + break; + case SLJIT_MOVU: + case SLJIT_MOVU_UI: + case SLJIT_MOVU_SI: + case SLJIT_MOVU_P: + flags = WORD_SIZE | UPDATE; + break; + case SLJIT_MOVU_UB: + flags = BYTE_SIZE | UPDATE; + if (src & SLJIT_IMM) + srcw = (sljit_ub)srcw; + break; + case SLJIT_MOVU_SB: + flags = BYTE_SIZE | SIGNED | UPDATE; + if (src & SLJIT_IMM) + srcw = (sljit_sb)srcw; + break; + case SLJIT_MOVU_UH: + flags = HALF_SIZE | UPDATE; + if (src & SLJIT_IMM) + srcw = (sljit_uh)srcw; + break; + case SLJIT_MOVU_SH: + flags = HALF_SIZE | SIGNED | UPDATE; + if (src & SLJIT_IMM) + srcw = (sljit_sh)srcw; + break; + default: + SLJIT_ASSERT_STOP(); + flags = 0; + break; + } + + if (src & SLJIT_IMM) + FAIL_IF(emit_op_imm(compiler, SLJIT_MOV | ARG2_IMM, dst_r, TMP_REG1, srcw)); + else if (src & SLJIT_MEM) { + if (getput_arg_fast(compiler, flags, dst_r, src, srcw)) + FAIL_IF(compiler->error); + else + FAIL_IF(getput_arg(compiler, flags, dst_r, src, srcw, dst, dstw)); + } else { + if (dst_r != TMP_REG1) + return emit_op_imm(compiler, op, dst_r, TMP_REG1, src); + dst_r = src; + } + + if (dst & SLJIT_MEM) { + if (getput_arg_fast(compiler, flags | STORE, dst_r, dst, dstw)) + return compiler->error; + else + return getput_arg(compiler, flags | STORE, dst_r, dst, dstw, 0, 0); + } + return SLJIT_SUCCESS; + } + + if (op == SLJIT_NEG) { +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->skip_checks = 1; +#endif + return sljit_emit_op2(compiler, SLJIT_SUB | op_flags, dst, dstw, SLJIT_IMM, 0, src, srcw); + } + + flags = (GET_FLAGS(op_flags) ? SET_FLAGS : 0) | ((op_flags & SLJIT_KEEP_FLAGS) ? KEEP_FLAGS : 0); + if (src & SLJIT_MEM) { + if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG2, src, srcw)) + FAIL_IF(compiler->error); + else + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src, srcw, dst, dstw)); + src = TMP_REG2; + } + + if (src & SLJIT_IMM) + flags |= ARG2_IMM; + else + srcw = src; + + emit_op_imm(compiler, flags | op, dst_r, TMP_REG1, srcw); + + if (dst & SLJIT_MEM) { + if (getput_arg_fast(compiler, flags | STORE, dst_r, dst, dstw)) + return compiler->error; + else + return getput_arg(compiler, flags | STORE, dst_r, dst, dstw, 0, 0); + } + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si dst_r, flags; + + CHECK_ERROR(); + check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + dst_r = (dst >= SLJIT_SCRATCH_REG1 && dst <= TMP_REG3) ? dst : TMP_REG1; + flags = (GET_FLAGS(op) ? SET_FLAGS : 0) | ((op & SLJIT_KEEP_FLAGS) ? 
KEEP_FLAGS : 0); + + if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, WORD_SIZE | STORE | ARG_TEST, TMP_REG1, dst, dstw)) + flags |= SLOW_DEST; + + if (src1 & SLJIT_MEM) { + if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG1, src1, src1w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC1; + } + if (src2 & SLJIT_MEM) { + if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG2, src2, src2w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC2; + } + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src2, src2w, dst, dstw)); + + if (src1 & SLJIT_MEM) + src1 = TMP_REG1; + if (src2 & SLJIT_MEM) + src2 = TMP_REG2; + + if (src1 & SLJIT_IMM) + flags |= ARG1_IMM; + else + src1w = src1; + if (src2 & SLJIT_IMM) + flags |= ARG2_IMM; + else + src2w = src2; + + if (dst == SLJIT_UNUSED) + flags |= UNUSED_RETURN; + + if (GET_OPCODE(op) == SLJIT_MUL && (op & SLJIT_SET_O)) + flags |= SET_MULOV; + + emit_op_imm(compiler, flags | GET_OPCODE(op), dst_r, src1w, src2w); + + if (dst & SLJIT_MEM) { + if (!(flags & SLOW_DEST)) { + getput_arg_fast(compiler, WORD_SIZE | STORE, dst_r, dst, dstw); + return compiler->error; + } + return getput_arg(compiler, WORD_SIZE | STORE, TMP_REG1, dst, dstw, 0, 0); + } + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +{ + check_sljit_get_register_index(reg); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + CHECK_ERROR(); + check_sljit_emit_op_custom(compiler, instruction, size); + SLJIT_ASSERT(size == 2 || size == 4); + + if (size == 2) + return push_inst16(compiler, *(sljit_uh*)instruction); + return push_inst32(compiler, *(sljit_ins*)instruction); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ + return 1; +} + +#define FPU_LOAD (1 << 20) + +static sljit_si emit_fop_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +{ + sljit_sw tmp; + sljit_uw imm; + sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD)); + + SLJIT_ASSERT(arg & SLJIT_MEM); + + /* Fast loads and stores. 
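+ VLDR/VSTR only accept word aligned offsets up to +/-1020 (imm8 << 2), hence the 0x3fc checks below.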
*/ + if (SLJIT_UNLIKELY(arg & 0xf0)) { + FAIL_IF(push_inst32(compiler, ADD_W | RD4(TMP_REG2) | RN4(arg & 0xf) | RM4((arg & 0xf0) >> 4) | ((argw & 0x3) << 6))); + arg = SLJIT_MEM | TMP_REG2; + argw = 0; + } + + if ((arg & 0xf) && (argw & 0x3) == 0) { + if (!(argw & ~0x3fc)) + return push_inst32(compiler, inst | 0x800000 | RN4(arg & 0xf) | DD4(reg) | (argw >> 2)); + if (!(-argw & ~0x3fc)) + return push_inst32(compiler, inst | RN4(arg & 0xf) | DD4(reg) | (-argw >> 2)); + } + + SLJIT_ASSERT(!(arg & 0xf0)); + if (compiler->cache_arg == arg) { + tmp = argw - compiler->cache_argw; + if (!(tmp & ~0x3fc)) + return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg) | (tmp >> 2)); + if (!(-tmp & ~0x3fc)) + return push_inst32(compiler, inst | RN4(TMP_REG3) | DD4(reg) | (-tmp >> 2)); + if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, tmp) != SLJIT_ERR_UNSUPPORTED) { + FAIL_IF(compiler->error); + compiler->cache_argw = argw; + return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg)); + } + } + + if (arg & 0xf) { + if (emit_set_delta(compiler, TMP_REG1, arg & 0xf, argw) != SLJIT_ERR_UNSUPPORTED) { + FAIL_IF(compiler->error); + return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg)); + } + imm = get_imm(argw & ~0x3fc); + if (imm != INVALID_IMM) { + FAIL_IF(push_inst32(compiler, ADD_WI | RD4(TMP_REG1) | RN4(arg & 0xf) | imm)); + return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG1) | DD4(reg) | ((argw & 0x3fc) >> 2)); + } + imm = get_imm(-argw & ~0x3fc); + if (imm != INVALID_IMM) { + argw = -argw; + FAIL_IF(push_inst32(compiler, SUB_WI | RD4(TMP_REG1) | RN4(arg & 0xf) | imm)); + return push_inst32(compiler, inst | RN4(TMP_REG1) | DD4(reg) | ((argw & 0x3fc) >> 2)); + } + } + + compiler->cache_arg = arg; + compiler->cache_argw = argw; + + if (SLJIT_UNLIKELY(!(arg & 0xf))) + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + else { + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + if (arg & 0xf) + FAIL_IF(push_inst16(compiler, ADD | SET_REGS44(TMP_REG3, (arg & 0xf)))); + } + return push_inst32(compiler, inst | 0x800000 | RN4(TMP_REG3) | DD4(reg)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si dst_r; + + CHECK_ERROR(); + check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); + SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + op ^= SLJIT_SINGLE_OP; + + if (GET_OPCODE(op) == SLJIT_CMPD) { + if (dst & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, dst, dstw); + dst = TMP_FREG1; + } + if (src & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src, srcw); + src = TMP_FREG2; + } + FAIL_IF(push_inst32(compiler, VCMP_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst) | DM4(src))); + return push_inst32(compiler, VMRS); + } + + dst_r = (dst > SLJIT_FLOAT_REG6) ? 
TMP_FREG1 : dst; + if (src & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_r, src, srcw); + src = dst_r; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOVD: + if (src != dst_r) + FAIL_IF(push_inst32(compiler, VMOV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src))); + break; + case SLJIT_NEGD: + FAIL_IF(push_inst32(compiler, VNEG_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src))); + break; + case SLJIT_ABSD: + FAIL_IF(push_inst32(compiler, VABS_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DM4(src))); + break; + } + + if (dst & SLJIT_MEM) + return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si dst_r; + + CHECK_ERROR(); + check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + op ^= SLJIT_SINGLE_OP; + + dst_r = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; + if (src1 & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w); + src1 = TMP_FREG1; + } + if (src2 & SLJIT_MEM) { + emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w); + src2 = TMP_FREG2; + } + + switch (GET_OPCODE(op)) { + case SLJIT_ADDD: + FAIL_IF(push_inst32(compiler, VADD_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + break; + case SLJIT_SUBD: + FAIL_IF(push_inst32(compiler, VSUB_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + break; + case SLJIT_MULD: + FAIL_IF(push_inst32(compiler, VMUL_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + break; + case SLJIT_DIVD: + FAIL_IF(push_inst32(compiler, VDIV_F32 | (op & SLJIT_SINGLE_OP) | DD4(dst_r) | DN4(src1) | DM4(src2))); + break; + } + + if (dst & SLJIT_MEM) + return emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw); + return SLJIT_SUCCESS; +} + +#undef FPU_LOAD + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_enter(compiler, dst, dstw); + ADJUST_LOCAL_OFFSET(dst, dstw); + + /* For UNUSED dst. Uncommon, but possible. */ + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + if (dst <= TMP_REG3) + return push_inst16(compiler, MOV | SET_REGS44(dst, TMP_REG3)); + + /* Memory. */ + if (getput_arg_fast(compiler, WORD_SIZE | STORE, TMP_REG3, dst, dstw)) + return compiler->error; + /* TMP_REG3 is used for caching. 
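+ The return address is therefore moved to TMP_REG2 first, keeping TMP_REG3 free for getput_arg.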
*/ + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG2, TMP_REG3))); + compiler->cache_arg = 0; + compiler->cache_argw = 0; + return getput_arg(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, 0, 0); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_return(compiler, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= TMP_REG3) + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG3, src))); + else if (src & SLJIT_MEM) { + if (getput_arg_fast(compiler, WORD_SIZE, TMP_REG3, src, srcw)) + FAIL_IF(compiler->error); + else { + compiler->cache_arg = 0; + compiler->cache_argw = 0; + FAIL_IF(getput_arg(compiler, WORD_SIZE, TMP_REG2, src, srcw, 0, 0)); + FAIL_IF(push_inst16(compiler, MOV | SET_REGS44(TMP_REG3, TMP_REG2))); + } + } + else if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, TMP_REG3, srcw)); + return push_inst16(compiler, BLX | RN3(TMP_REG3)); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +static sljit_uw get_cc(sljit_si type) +{ + switch (type) { + case SLJIT_C_EQUAL: + case SLJIT_C_MUL_NOT_OVERFLOW: + case SLJIT_C_FLOAT_EQUAL: + return 0x0; + + case SLJIT_C_NOT_EQUAL: + case SLJIT_C_MUL_OVERFLOW: + case SLJIT_C_FLOAT_NOT_EQUAL: + return 0x1; + + case SLJIT_C_LESS: + case SLJIT_C_FLOAT_LESS: + return 0x3; + + case SLJIT_C_GREATER_EQUAL: + case SLJIT_C_FLOAT_GREATER_EQUAL: + return 0x2; + + case SLJIT_C_GREATER: + case SLJIT_C_FLOAT_GREATER: + return 0x8; + + case SLJIT_C_LESS_EQUAL: + case SLJIT_C_FLOAT_LESS_EQUAL: + return 0x9; + + case SLJIT_C_SIG_LESS: + return 0xb; + + case SLJIT_C_SIG_GREATER_EQUAL: + return 0xa; + + case SLJIT_C_SIG_GREATER: + return 0xc; + + case SLJIT_C_SIG_LESS_EQUAL: + return 0xd; + + case SLJIT_C_OVERFLOW: + case SLJIT_C_FLOAT_UNORDERED: + return 0x6; + + case SLJIT_C_NOT_OVERFLOW: + case SLJIT_C_FLOAT_ORDERED: + return 0x7; + + default: /* SLJIT_JUMP */ + return 0xe; + } +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + check_sljit_emit_label(compiler); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + return label; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + struct sljit_jump *jump; + sljit_si cc; + + CHECK_ERROR_PTR(); + check_sljit_emit_jump(compiler, type); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + /* In ARM, we don't need to touch the arguments. 
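+ The target is always materialized into TMP_REG1 by a rewritable MOVW/MOVT pair; conditional jumps only prefix the BX/BLX with an IT instruction.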
*/ + PTR_FAIL_IF(emit_imm32_const(compiler, TMP_REG1, 0)); + if (type < SLJIT_JUMP) { + jump->flags |= IS_COND; + cc = get_cc(type); + jump->flags |= cc << 8; + PTR_FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); + } + + jump->addr = compiler->size; + if (type <= SLJIT_JUMP) + PTR_FAIL_IF(push_inst16(compiler, BX | RN3(TMP_REG1))); + else { + jump->flags |= IS_BL; + PTR_FAIL_IF(push_inst16(compiler, BLX | RN3(TMP_REG1))); + } + + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + struct sljit_jump *jump; + + CHECK_ERROR(); + check_sljit_emit_ijump(compiler, type, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + /* In ARM, we don't need to touch the arguments. */ + if (src & SLJIT_IMM) { + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0)); + jump->u.target = srcw; + + FAIL_IF(emit_imm32_const(compiler, TMP_REG1, 0)); + jump->addr = compiler->size; + FAIL_IF(push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(TMP_REG1))); + } + else { + if (src <= TMP_REG3) + return push_inst16(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RN3(src)); + + FAIL_IF(emit_op_mem(compiler, WORD_SIZE, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, src, srcw)); + if (type >= SLJIT_FAST_CALL) + return push_inst16(compiler, BLX | RN3(TMP_REG1)); + } + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + sljit_si dst_r, flags = GET_ALL_FLAGS(op); + sljit_ins ins; + sljit_uw cc; + + CHECK_ERROR(); + check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + op = GET_OPCODE(op); + cc = get_cc(type); + dst_r = (dst <= TMP_REG3) ? dst : TMP_REG2; + + if (op < SLJIT_ADD) { + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | (((cc & 0x1) ^ 0x1) << 3) | 0x4)); + if (reg_map[dst_r] > 7) { + FAIL_IF(push_inst32(compiler, MOV_WI | RD4(dst_r) | 1)); + FAIL_IF(push_inst32(compiler, MOV_WI | RD4(dst_r) | 0)); + } else { + FAIL_IF(push_inst16(compiler, MOVSI | RDN3(dst_r) | 1)); + FAIL_IF(push_inst16(compiler, MOVSI | RDN3(dst_r) | 0)); + } + return dst_r == TMP_REG2 ? emit_op_mem(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw) : SLJIT_SUCCESS; + } + + ins = (op == SLJIT_AND ? ANDI : (op == SLJIT_OR ? ORRI : EORI)); + if ((op == SLJIT_OR || op == SLJIT_XOR) && dst <= TMP_REG3 && dst == src) { + /* Does not change the other bits. */ + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | 0x8)); + FAIL_IF(push_inst32(compiler, ins | RN4(src) | RD4(dst) | 1)); + if (flags & SLJIT_SET_E) { + /* The condition must always be set, even if the ORRI/EORI is not executed above. 
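+ The MOVS below re-reads dst, so the Z flag reflects the final value whichever way the IT block went. */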
*/ + if (reg_map[dst] <= 7) + return push_inst16(compiler, MOVS | RD3(TMP_REG1) | RN3(dst)); + return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst)); + } + return SLJIT_SUCCESS; + } + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, WORD_SIZE, TMP_REG1, src, srcw, dst, dstw)); + src = TMP_REG1; + srcw = 0; + } else if (src & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + src = TMP_REG1; + srcw = 0; + } + + FAIL_IF(push_inst16(compiler, IT | (cc << 4) | (((cc & 0x1) ^ 0x1) << 3) | 0x4)); + FAIL_IF(push_inst32(compiler, ins | RN4(src) | RD4(dst_r) | 1)); + FAIL_IF(push_inst32(compiler, ins | RN4(src) | RD4(dst_r) | 0)); + if (dst_r == TMP_REG2) + FAIL_IF(emit_op_mem2(compiler, WORD_SIZE | STORE, TMP_REG2, dst, dstw, 0, 0)); + + if (flags & SLJIT_SET_E) { + /* The condition must always be set, even if the ORR/EORI is not executed above. */ + if (reg_map[dst_r] <= 7) + return push_inst16(compiler, MOVS | RD3(TMP_REG1) | RN3(dst_r)); + return push_inst32(compiler, MOV_W | SET_FLAGS | RD4(TMP_REG1) | RM4(dst_r)); + } + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_si dst_r; + + CHECK_ERROR_PTR(); + check_sljit_emit_const(compiler, dst, dstw, init_value); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + dst_r = (dst <= TMP_REG3) ? dst : TMP_REG1; + PTR_FAIL_IF(emit_imm32_const(compiler, dst_r, init_value)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_SIZE | STORE, dst_r, dst, dstw)); + return const_; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ + inline_set_jump_addr(addr, new_addr, 1); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + sljit_uh* inst = (sljit_uh*)addr; + modify_imm32_const(inst, new_constant); + SLJIT_CACHE_FLUSH(inst, inst + 3); +} Property changes on: sys/contrib/sljit/sljitNativeARM_Thumb2.c ___________________________________________________________________ Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: sys/contrib/sljit/sljitNativeARM_v5.c =================================================================== --- sys/contrib/sljit/sljitNativeARM_v5.c (revision 0) +++ sys/contrib/sljit/sljitNativeARM_v5.c (working copy) @@ -0,0 +1,2515 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void)
+{
+#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+	return "ARMv7" SLJIT_CPUINFO;
+#elif (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+	return "ARMv5" SLJIT_CPUINFO;
+#else
+#error "Internal error: Unknown ARM architecture"
+#endif
+}
+
+/* Last register + 1. */
+#define TMP_REG1	(SLJIT_NO_REGISTERS + 1)
+#define TMP_REG2	(SLJIT_NO_REGISTERS + 2)
+#define TMP_REG3	(SLJIT_NO_REGISTERS + 3)
+#define TMP_PC		(SLJIT_NO_REGISTERS + 4)
+
+#define TMP_FREG1	(0)
+#define TMP_FREG2	(SLJIT_FLOAT_REG6 + 1)
+
+/* In ARM instruction words.
+   Cache lines are usually 32 byte aligned. */
+#define CONST_POOL_ALIGNMENT	8
+#define CONST_POOL_EMPTY	0xffffffff
+
+#define ALIGN_INSTRUCTION(ptr) \
+	(sljit_uw*)(((sljit_uw)(ptr) + (CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1) & ~((CONST_POOL_ALIGNMENT * sizeof(sljit_uw)) - 1))
+#define MAX_DIFFERENCE(max_diff) \
+	(((max_diff) / (sljit_si)sizeof(sljit_uw)) - (CONST_POOL_ALIGNMENT - 1))
+
+/* See sljit_emit_enter and sljit_emit_op0 if you want to change them. */
+static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = {
+	0, 0, 1, 2, 10, 11, 4, 5, 6, 7, 8, 13, 3, 12, 14, 15
+};
+
+#define RM(rm) (reg_map[rm])
+#define RD(rd) (reg_map[rd] << 12)
+#define RN(rn) (reg_map[rn] << 16)
+
+/* --------------------------------------------------------------------- */
+/* Instruction forms */
+/* --------------------------------------------------------------------- */
+
+/* The instruction includes the AL condition.
+   INST_NAME - CONDITIONAL removes this flag. */
+#define COND_MASK	0xf0000000
+#define CONDITIONAL	0xe0000000
+#define PUSH_POOL	0xff000000
+
+/* DP - Data Processing instruction (use with EMIT_DATA_PROCESS_INS). */
+#define ADC_DP	0x5
+#define ADD_DP	0x4
+#define AND_DP	0x0
+#define B	0xea000000
+#define BIC_DP	0xe
+#define BL	0xeb000000
+#define BLX	0xe12fff30
+#define BX	0xe12fff10
+#define CLZ	0xe16f0f10
+#define CMP_DP	0xa
+#define BKPT	0xe1200070
+#define EOR_DP	0x1
+#define MOV_DP	0xd
+#define MUL	0xe0000090
+#define MVN_DP	0xf
+#define NOP	0xe1a00000
+#define ORR_DP	0xc
+#define PUSH	0xe92d0000
+#define POP	0xe8bd0000
+#define RSB_DP	0x3
+#define RSC_DP	0x7
+#define SBC_DP	0x6
+#define SMULL	0xe0c00090
+#define SUB_DP	0x2
+#define UMULL	0xe0800090
+#define VABS_F32	0xeeb00ac0
+#define VADD_F32	0xee300a00
+#define VCMP_F32	0xeeb40a40
+#define VDIV_F32	0xee800a00
+#define VMOV_F32	0xeeb00a40
+#define VMRS	0xeef1fa10
+#define VMUL_F32	0xee200a00
+#define VNEG_F32	0xeeb10a40
+#define VSTR_F32	0xed000a00
+#define VSUB_F32	0xee300a40
+
+#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+/* Arm v7 specific instructions.
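+   MOVW/MOVT build a 32-bit immediate in two instructions instead of a
+   constant pool load: MOVW sets the low half and zeroes the rest, MOVT
+   replaces the high half. E.g., with an illustrative register and value:
+      movw r0, #0x5678   ; r0 = 0x00005678
+      movt r0, #0x1234   ; r0 = 0x12345678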
*/
+#define MOVW	0xe3000000
+#define MOVT	0xe3400000
+#define SXTB	0xe6af0070
+#define SXTH	0xe6bf0070
+#define UXTB	0xe6ef0070
+#define UXTH	0xe6ff0070
+#endif
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+
+static sljit_si push_cpool(struct sljit_compiler *compiler)
+{
+	/* Pushing the constant pool into the instruction stream. */
+	sljit_uw* inst;
+	sljit_uw* cpool_ptr;
+	sljit_uw* cpool_end;
+	sljit_si i;
+
+	/* The label could point to the address after the constant pool. */
+	if (compiler->last_label && compiler->last_label->size == compiler->size)
+		compiler->last_label->size += compiler->cpool_fill + (CONST_POOL_ALIGNMENT - 1) + 1;
+
+	SLJIT_ASSERT(compiler->cpool_fill > 0 && compiler->cpool_fill <= CPOOL_SIZE);
+	inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+	FAIL_IF(!inst);
+	compiler->size++;
+	*inst = 0xff000000 | compiler->cpool_fill;
+
+	for (i = 0; i < CONST_POOL_ALIGNMENT - 1; i++) {
+		inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+		FAIL_IF(!inst);
+		compiler->size++;
+		*inst = 0;
+	}
+
+	cpool_ptr = compiler->cpool;
+	cpool_end = cpool_ptr + compiler->cpool_fill;
+	while (cpool_ptr < cpool_end) {
+		inst = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+		FAIL_IF(!inst);
+		compiler->size++;
+		*inst = *cpool_ptr++;
+	}
+	compiler->cpool_diff = CONST_POOL_EMPTY;
+	compiler->cpool_fill = 0;
+	return SLJIT_SUCCESS;
+}
+
+static sljit_si push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+{
+	sljit_uw* ptr;
+
+	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
+		FAIL_IF(push_cpool(compiler));
+
+	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+	FAIL_IF(!ptr);
+	compiler->size++;
+	*ptr = inst;
+	return SLJIT_SUCCESS;
+}
+
+static sljit_si push_inst_with_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+{
+	sljit_uw* ptr;
+	sljit_uw cpool_index = CPOOL_SIZE;
+	sljit_uw* cpool_ptr;
+	sljit_uw* cpool_end;
+	sljit_ub* cpool_unique_ptr;
+
+	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)))
+		FAIL_IF(push_cpool(compiler));
+	else if (compiler->cpool_fill > 0) {
+		cpool_ptr = compiler->cpool;
+		cpool_end = cpool_ptr + compiler->cpool_fill;
+		cpool_unique_ptr = compiler->cpool_unique;
+		do {
+			if ((*cpool_ptr == literal) && !(*cpool_unique_ptr)) {
+				cpool_index = cpool_ptr - compiler->cpool;
+				break;
+			}
+			cpool_ptr++;
+			cpool_unique_ptr++;
+		} while (cpool_ptr < cpool_end);
+	}
+
+	if (cpool_index == CPOOL_SIZE) {
+		/* Must allocate a new entry in the literal pool.
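+		   The scan above reuses an equal, non-unique literal when one is
+		   present; literals added by push_inst_with_unique_literal below
+		   (e.g. for sljit_emit_const) are never shared, since they may be
+		   patched individually later. If the pool is already full, it is
+		   flushed first and this literal becomes entry 0.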
*/
+		if (compiler->cpool_fill < CPOOL_SIZE) {
+			cpool_index = compiler->cpool_fill;
+			compiler->cpool_fill++;
+		}
+		else {
+			FAIL_IF(push_cpool(compiler));
+			cpool_index = 0;
+			compiler->cpool_fill = 1;
+		}
+	}
+
+	SLJIT_ASSERT((inst & 0xfff) == 0);
+	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+	FAIL_IF(!ptr);
+	compiler->size++;
+	*ptr = inst | cpool_index;
+
+	compiler->cpool[cpool_index] = literal;
+	compiler->cpool_unique[cpool_index] = 0;
+	if (compiler->cpool_diff == CONST_POOL_EMPTY)
+		compiler->cpool_diff = compiler->size;
+	return SLJIT_SUCCESS;
+}
+
+static sljit_si push_inst_with_unique_literal(struct sljit_compiler *compiler, sljit_uw inst, sljit_uw literal)
+{
+	sljit_uw* ptr;
+	if (SLJIT_UNLIKELY((compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4092)) || compiler->cpool_fill >= CPOOL_SIZE))
+		FAIL_IF(push_cpool(compiler));
+
+	SLJIT_ASSERT(compiler->cpool_fill < CPOOL_SIZE && (inst & 0xfff) == 0);
+	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+	FAIL_IF(!ptr);
+	compiler->size++;
+	*ptr = inst | compiler->cpool_fill;
+
+	compiler->cpool[compiler->cpool_fill] = literal;
+	compiler->cpool_unique[compiler->cpool_fill] = 1;
+	compiler->cpool_fill++;
+	if (compiler->cpool_diff == CONST_POOL_EMPTY)
+		compiler->cpool_diff = compiler->size;
+	return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_si prepare_blx(struct sljit_compiler *compiler)
+{
+	/* Place for at least two instructions (doesn't matter whether the first has a literal). */
+	if (SLJIT_UNLIKELY(compiler->cpool_diff != CONST_POOL_EMPTY && compiler->size - compiler->cpool_diff >= MAX_DIFFERENCE(4088)))
+		return push_cpool(compiler);
+	return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_si emit_blx(struct sljit_compiler *compiler)
+{
+	/* Must closely follow the previous instruction (so that it can be converted to a bl instruction). */
+	SLJIT_ASSERT(compiler->cpool_diff == CONST_POOL_EMPTY || compiler->size - compiler->cpool_diff < MAX_DIFFERENCE(4092));
+	return push_inst(compiler, BLX | RM(TMP_REG1));
+}
+
+static sljit_uw patch_pc_relative_loads(sljit_uw *last_pc_patch, sljit_uw *code_ptr, sljit_uw* const_pool, sljit_uw cpool_size)
+{
+	sljit_uw diff;
+	sljit_uw ind;
+	sljit_uw counter = 0;
+	sljit_uw* clear_const_pool = const_pool;
+	sljit_uw* clear_const_pool_end = const_pool + cpool_size;
+
+	SLJIT_ASSERT(const_pool - code_ptr <= CONST_POOL_ALIGNMENT);
+	/* Set unused flag for all literals in the constant pool.
+	   I.e.: unused literals can belong to branches, which can be encoded as B or BL.
+	   We can "compress" the constant pool by discarding these literals. */
+	while (clear_const_pool < clear_const_pool_end)
+		*clear_const_pool++ = (sljit_uw)(-1);
+
+	while (last_pc_patch < code_ptr) {
+		/* Data transfer instruction with Rn == r15. */
+		if ((*last_pc_patch & 0x0c0f0000) == 0x040f0000) {
+			diff = const_pool - last_pc_patch;
+			ind = (*last_pc_patch) & 0xfff;
+
+			/* Must be a load instruction with immediate offset.
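+			   I.e. an "ldr rD, [pc, #imm]": bit 25 clear selects the
+			   immediate form and bit 20 set marks a load, as the assert
+			   below checks. For example (hypothetical destination):
+			      ldr r0, [pc, #0]   ; encoded as 0xe59f0000
+			   The 12-bit field still holds the constant pool index here;
+			   it is rewritten below into the real byte offset from pc+8.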
*/
+			SLJIT_ASSERT(ind < cpool_size && !(*last_pc_patch & (1 << 25)) && (*last_pc_patch & (1 << 20)));
+			if ((sljit_si)const_pool[ind] < 0) {
+				const_pool[ind] = counter;
+				ind = counter;
+				counter++;
+			}
+			else
+				ind = const_pool[ind];
+
+			SLJIT_ASSERT(diff >= 1);
+			if (diff >= 2 || ind > 0) {
+				diff = (diff + ind - 2) << 2;
+				SLJIT_ASSERT(diff <= 0xfff);
+				*last_pc_patch = (*last_pc_patch & ~0xfff) | diff;
+			}
+			else
+				*last_pc_patch = (*last_pc_patch & ~(0xfff | (1 << 23))) | 0x004;
+		}
+		last_pc_patch++;
+	}
+	return counter;
+}
+
+/* In some rare occasions we may need future patches. The probability is close to 0 in practice. */
+struct future_patch {
+	struct future_patch* next;
+	sljit_si index;
+	sljit_si value;
+};
+
+static SLJIT_INLINE sljit_si resolve_const_pool_index(struct future_patch **first_patch, sljit_uw cpool_current_index, sljit_uw *cpool_start_address, sljit_uw *buf_ptr)
+{
+	sljit_si value;
+	struct future_patch *curr_patch, *prev_patch;
+
+	/* Using the values generated by patch_pc_relative_loads. */
+	if (!*first_patch)
+		value = (sljit_si)cpool_start_address[cpool_current_index];
+	else {
+		curr_patch = *first_patch;
+		prev_patch = 0;
+		while (1) {
+			if (!curr_patch) {
+				value = (sljit_si)cpool_start_address[cpool_current_index];
+				break;
+			}
+			if ((sljit_uw)curr_patch->index == cpool_current_index) {
+				value = curr_patch->value;
+				if (prev_patch)
+					prev_patch->next = curr_patch->next;
+				else
+					*first_patch = curr_patch->next;
+				SLJIT_FREE(curr_patch);
+				break;
+			}
+			prev_patch = curr_patch;
+			curr_patch = curr_patch->next;
+		}
+	}
+
+	if (value >= 0) {
+		if ((sljit_uw)value > cpool_current_index) {
+			curr_patch = (struct future_patch*)SLJIT_MALLOC(sizeof(struct future_patch));
+			if (!curr_patch) {
+				while (*first_patch) {
+					curr_patch = *first_patch;
+					*first_patch = (*first_patch)->next;
+					SLJIT_FREE(curr_patch);
+				}
+				return SLJIT_ERR_ALLOC_FAILED;
+			}
+			curr_patch->next = *first_patch;
+			curr_patch->index = value;
+			curr_patch->value = cpool_start_address[value];
+			*first_patch = curr_patch;
+		}
+		cpool_start_address[value] = *buf_ptr;
+	}
+	return SLJIT_SUCCESS;
+}
+
+#else
+
+static sljit_si push_inst(struct sljit_compiler *compiler, sljit_uw inst)
+{
+	sljit_uw* ptr;
+
+	ptr = (sljit_uw*)ensure_buf(compiler, sizeof(sljit_uw));
+	FAIL_IF(!ptr);
+	compiler->size++;
+	*ptr = inst;
+	return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_si emit_imm(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm)
+{
+	FAIL_IF(push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff)));
+	return push_inst(compiler, MOVT | RD(reg) | ((imm >> 12) & 0xf0000) | ((imm >> 16) & 0xfff));
+}
+
+#endif
+
+static SLJIT_INLINE sljit_si detect_jump_type(struct sljit_jump *jump, sljit_uw *code_ptr, sljit_uw *code)
+{
+	sljit_sw diff;
+
+	if (jump->flags & SLJIT_REWRITABLE_JUMP)
+		return 0;
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+	if (jump->flags & IS_BL)
+		code_ptr--;
+
+	if (jump->flags & JUMP_ADDR)
+		diff = ((sljit_sw)jump->u.target - (sljit_sw)(code_ptr + 2));
+	else {
+		SLJIT_ASSERT(jump->flags & JUMP_LABEL);
+		diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)(code_ptr + 2));
+	}
+
+	/* Branch to Thumb code has not been optimized yet.
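+	   (a Thumb target would leave the low bits of the byte difference
+	   set). After the >>2 below, the offset must fit the signed 24-bit
+	   word immediate of B/BL, i.e. [-0x02000000, 0x01ffffff] words, or
+	   roughly +/-32 MB.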
*/ + if (diff & 0x3) + return 0; + + diff >>= 2; + if (jump->flags & IS_BL) { + if (diff <= 0x01ffffff && diff >= -0x02000000) { + *code_ptr = (BL - CONDITIONAL) | (*(code_ptr + 1) & COND_MASK); + jump->flags |= PATCH_B; + return 1; + } + } + else { + if (diff <= 0x01ffffff && diff >= -0x02000000) { + *code_ptr = (B - CONDITIONAL) | (*code_ptr & COND_MASK); + jump->flags |= PATCH_B; + } + } +#else + if (jump->flags & JUMP_ADDR) + diff = ((sljit_sw)jump->u.target - (sljit_sw)code_ptr); + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + diff = ((sljit_sw)(code + jump->u.label->size) - (sljit_sw)code_ptr); + } + + /* Branch to Thumb code has not been optimized yet. */ + if (diff & 0x3) + return 0; + + diff >>= 2; + if (diff <= 0x01ffffff && diff >= -0x02000000) { + code_ptr -= 2; + *code_ptr = ((jump->flags & IS_BL) ? (BL - CONDITIONAL) : (B - CONDITIONAL)) | (code_ptr[2] & COND_MASK); + jump->flags |= PATCH_B; + return 1; + } +#endif + return 0; +} + +static SLJIT_INLINE void inline_set_jump_addr(sljit_uw addr, sljit_uw new_addr, sljit_si flush) +{ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_uw *ptr = (sljit_uw*)addr; + sljit_uw *inst = (sljit_uw*)ptr[0]; + sljit_uw mov_pc = ptr[1]; + sljit_si bl = (mov_pc & 0x0000f000) != RD(TMP_PC); + sljit_sw diff = (sljit_sw)(((sljit_sw)new_addr - (sljit_sw)(inst + 2)) >> 2); + + if (diff <= 0x7fffff && diff >= -0x800000) { + /* Turn to branch. */ + if (!bl) { + inst[0] = (mov_pc & COND_MASK) | (B - CONDITIONAL) | (diff & 0xffffff); + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + } else { + inst[0] = (mov_pc & COND_MASK) | (BL - CONDITIONAL) | (diff & 0xffffff); + inst[1] = NOP; + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 2); + } + } + } else { + /* Get the position of the constant. 
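+		   mov_pc is the original "ldr rD, [pc, #imm]" here: bit 23 is the
+		   U (add offset) bit and the 12-bit immediate is a byte offset
+		   relative to pc+8, hence the ">> 2" and the 2-word adjustment
+		   below. With U clear the offset is 4, i.e. the word right after
+		   the instruction.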
*/ + if (mov_pc & (1 << 23)) + ptr = inst + ((mov_pc & 0xfff) >> 2) + 2; + else + ptr = inst + 1; + + if (*inst != mov_pc) { + inst[0] = mov_pc; + if (!bl) { + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + } else { + inst[1] = BLX | RM(TMP_REG1); + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 2); + } + } + } + *ptr = new_addr; + } +#else + sljit_uw *inst = (sljit_uw*)addr; + SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT); + inst[0] = MOVW | (inst[0] & 0xf000) | ((new_addr << 4) & 0xf0000) | (new_addr & 0xfff); + inst[1] = MOVT | (inst[1] & 0xf000) | ((new_addr >> 12) & 0xf0000) | ((new_addr >> 16) & 0xfff); + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 2); + } +#endif +} + +static sljit_uw get_imm(sljit_uw imm); + +static SLJIT_INLINE void inline_set_const(sljit_uw addr, sljit_sw new_constant, sljit_si flush) +{ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_uw *ptr = (sljit_uw*)addr; + sljit_uw *inst = (sljit_uw*)ptr[0]; + sljit_uw ldr_literal = ptr[1]; + sljit_uw src2; + + src2 = get_imm(new_constant); + if (src2) { + *inst = 0xe3a00000 | (ldr_literal & 0xf000) | src2; + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + return; + } + + src2 = get_imm(~new_constant); + if (src2) { + *inst = 0xe3e00000 | (ldr_literal & 0xf000) | src2; + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + return; + } + + if (ldr_literal & (1 << 23)) + ptr = inst + ((ldr_literal & 0xfff) >> 2) + 2; + else + ptr = inst + 1; + + if (*inst != ldr_literal) { + *inst = ldr_literal; + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 1); + } + } + *ptr = new_constant; +#else + sljit_uw *inst = (sljit_uw*)addr; + SLJIT_ASSERT((inst[0] & 0xfff00000) == MOVW && (inst[1] & 0xfff00000) == MOVT); + inst[0] = MOVW | (inst[0] & 0xf000) | ((new_constant << 4) & 0xf0000) | (new_constant & 0xfff); + inst[1] = MOVT | (inst[1] & 0xf000) | ((new_constant >> 12) & 0xf0000) | ((new_constant >> 16) & 0xfff); + if (flush) { + SLJIT_CACHE_FLUSH(inst, inst + 2); + } +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_uw *code; + sljit_uw *code_ptr; + sljit_uw *buf_ptr; + sljit_uw *buf_end; + sljit_uw size; + sljit_uw word_count; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + sljit_uw cpool_size; + sljit_uw cpool_skip_alignment; + sljit_uw cpool_current_index; + sljit_uw *cpool_start_address; + sljit_uw *last_pc_patch; + struct future_patch *first_patch; +#endif + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + + CHECK_ERROR_PTR(); + check_sljit_generate_code(compiler); + reverse_buf(compiler); + + /* Second code generation pass. 
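+	   The buffer size is a worst-case estimate: on ARMv5 every rewritable
+	   jump or constant (counted in compiler->patches) costs two extra
+	   words for the address/instruction records appended after the code
+	   below, and a non-empty constant pool adds its fill plus alignment
+	   padding.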
*/ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + size = compiler->size + (compiler->patches << 1); + if (compiler->cpool_fill > 0) + size += compiler->cpool_fill + CONST_POOL_ALIGNMENT - 1; +#else + size = compiler->size; +#endif + code = (sljit_uw*)SLJIT_MALLOC_EXEC(size * sizeof(sljit_uw)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + cpool_size = 0; + cpool_skip_alignment = 0; + cpool_current_index = 0; + cpool_start_address = NULL; + first_patch = NULL; + last_pc_patch = code; +#endif + + code_ptr = code; + word_count = 0; + + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + + if (label && label->size == 0) { + label->addr = (sljit_uw)code; + label->size = 0; + label = label->next; + } + + do { + buf_ptr = (sljit_uw*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + word_count++; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (cpool_size > 0) { + if (cpool_skip_alignment > 0) { + buf_ptr++; + cpool_skip_alignment--; + } + else { + if (SLJIT_UNLIKELY(resolve_const_pool_index(&first_patch, cpool_current_index, cpool_start_address, buf_ptr))) { + SLJIT_FREE_EXEC(code); + compiler->error = SLJIT_ERR_ALLOC_FAILED; + return NULL; + } + buf_ptr++; + if (++cpool_current_index >= cpool_size) { + SLJIT_ASSERT(!first_patch); + cpool_size = 0; + if (label && label->size == word_count) { + /* Points after the current instruction. */ + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + } + } + } + else if ((*buf_ptr & 0xff000000) != PUSH_POOL) { +#endif + *code_ptr = *buf_ptr++; + /* These structures are ordered by their address. */ + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + if (jump && jump->addr == word_count) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (detect_jump_type(jump, code_ptr, code)) + code_ptr--; + jump->addr = (sljit_uw)code_ptr; +#else + jump->addr = (sljit_uw)(code_ptr - 2); + if (detect_jump_type(jump, code_ptr, code)) + code_ptr -= 2; +#endif + jump = jump->next; + } + if (label && label->size == word_count) { + /* code_ptr can be affected above. */ + label->addr = (sljit_uw)(code_ptr + 1); + label->size = (code_ptr + 1) - code; + label = label->next; + } + if (const_ && const_->addr == word_count) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + const_->addr = (sljit_uw)code_ptr; +#else + const_->addr = (sljit_uw)(code_ptr - 1); +#endif + const_ = const_->next; + } + code_ptr++; +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + } + else { + /* Fortunately, no need to shift. */ + cpool_size = *buf_ptr++ & ~PUSH_POOL; + SLJIT_ASSERT(cpool_size > 0); + cpool_start_address = ALIGN_INSTRUCTION(code_ptr + 1); + cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, cpool_size); + if (cpool_current_index > 0) { + /* Unconditional branch. 
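+					   Jump over the just-emitted constants. The 24-bit
+					   offset counts words relative to pc+8, which is what
+					   the "- 2" in the computation below accounts for.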
*/ + *code_ptr = B | (((cpool_start_address - code_ptr) + cpool_current_index - 2) & ~PUSH_POOL); + code_ptr = cpool_start_address + cpool_current_index; + } + cpool_skip_alignment = CONST_POOL_ALIGNMENT - 1; + cpool_current_index = 0; + last_pc_patch = code_ptr; + } +#endif + } while (buf_ptr < buf_end); + buf = buf->next; + } while (buf); + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + SLJIT_ASSERT(cpool_size == 0); + if (compiler->cpool_fill > 0) { + cpool_start_address = ALIGN_INSTRUCTION(code_ptr); + cpool_current_index = patch_pc_relative_loads(last_pc_patch, code_ptr, cpool_start_address, compiler->cpool_fill); + if (cpool_current_index > 0) + code_ptr = cpool_start_address + cpool_current_index; + + buf_ptr = compiler->cpool; + buf_end = buf_ptr + compiler->cpool_fill; + cpool_current_index = 0; + while (buf_ptr < buf_end) { + if (SLJIT_UNLIKELY(resolve_const_pool_index(&first_patch, cpool_current_index, cpool_start_address, buf_ptr))) { + SLJIT_FREE_EXEC(code); + compiler->error = SLJIT_ERR_ALLOC_FAILED; + return NULL; + } + buf_ptr++; + cpool_current_index++; + } + SLJIT_ASSERT(!first_patch); + } +#endif + + jump = compiler->jumps; + while (jump) { + buf_ptr = (sljit_uw*)jump->addr; + + if (jump->flags & PATCH_B) { + if (!(jump->flags & JUMP_ADDR)) { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + SLJIT_ASSERT(((sljit_sw)jump->u.label->addr - (sljit_sw)(buf_ptr + 2)) <= 0x01ffffff && ((sljit_sw)jump->u.label->addr - (sljit_sw)(buf_ptr + 2)) >= -0x02000000); + *buf_ptr |= (((sljit_sw)jump->u.label->addr - (sljit_sw)(buf_ptr + 2)) >> 2) & 0x00ffffff; + } + else { + SLJIT_ASSERT(((sljit_sw)jump->u.target - (sljit_sw)(buf_ptr + 2)) <= 0x01ffffff && ((sljit_sw)jump->u.target - (sljit_sw)(buf_ptr + 2)) >= -0x02000000); + *buf_ptr |= (((sljit_sw)jump->u.target - (sljit_sw)(buf_ptr + 2)) >> 2) & 0x00ffffff; + } + } + else if (jump->flags & SLJIT_REWRITABLE_JUMP) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + jump->addr = (sljit_uw)code_ptr; + code_ptr[0] = (sljit_uw)buf_ptr; + code_ptr[1] = *buf_ptr; + inline_set_jump_addr((sljit_uw)code_ptr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); + code_ptr += 2; +#else + inline_set_jump_addr((sljit_uw)buf_ptr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); +#endif + } + else { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (jump->flags & IS_BL) + buf_ptr--; + if (*buf_ptr & (1 << 23)) + buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2; + else + buf_ptr += 1; + *buf_ptr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; +#else + inline_set_jump_addr((sljit_uw)buf_ptr, (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target, 0); +#endif + } + jump = jump->next; + } + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + const_ = compiler->consts; + while (const_) { + buf_ptr = (sljit_uw*)const_->addr; + const_->addr = (sljit_uw)code_ptr; + + code_ptr[0] = (sljit_uw)buf_ptr; + code_ptr[1] = *buf_ptr; + if (*buf_ptr & (1 << 23)) + buf_ptr += ((*buf_ptr & 0xfff) >> 2) + 2; + else + buf_ptr += 1; + /* Set the value again (can be a simple constant). 
*/ + inline_set_const((sljit_uw)code_ptr, *buf_ptr, 0); + code_ptr += 2; + + const_ = const_->next; + } +#endif + + SLJIT_ASSERT(code_ptr - code <= (sljit_si)size); + + SLJIT_CACHE_FLUSH(code, code_ptr); + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_size = size * sizeof(sljit_uw); + return code; +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +/* emit_op inp_flags. + WRITE_BACK must be the first, since it is a flag. */ +#define WRITE_BACK 0x01 +#define ALLOW_IMM 0x02 +#define ALLOW_INV_IMM 0x04 +#define ALLOW_ANY_IMM (ALLOW_IMM | ALLOW_INV_IMM) +#define ARG_TEST 0x08 + +/* Creates an index in data_transfer_insts array. */ +#define WORD_DATA 0x00 +#define BYTE_DATA 0x10 +#define HALF_DATA 0x20 +#define SIGNED_DATA 0x40 +#define LOAD_DATA 0x80 + +#define EMIT_INSTRUCTION(inst) \ + FAIL_IF(push_inst(compiler, (inst))) + +/* Condition: AL. */ +#define EMIT_DATA_PROCESS_INS(opcode, set_flags, dst, src1, src2) \ + (0xe0000000 | ((opcode) << 21) | (set_flags) | RD(dst) | RN(src1) | (src2)) + +static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si inp_flags, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w); + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si size; + sljit_uw push; + + CHECK_ERROR(); + check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + /* Push saved registers, temporary registers + stmdb sp!, {..., lr} */ + push = PUSH | (1 << 14); + if (scratches >= 5) + push |= 1 << 11; + if (scratches >= 4) + push |= 1 << 10; + if (saveds >= 5) + push |= 1 << 8; + if (saveds >= 4) + push |= 1 << 7; + if (saveds >= 3) + push |= 1 << 6; + if (saveds >= 2) + push |= 1 << 5; + if (saveds >= 1) + push |= 1 << 4; + EMIT_INSTRUCTION(push); + + /* Stack must be aligned to 8 bytes: */ + size = (1 + saveds) * sizeof(sljit_uw); + if (scratches >= 4) + size += (scratches - 3) * sizeof(sljit_uw); + local_size += size; + local_size = (local_size + 7) & ~7; + local_size -= size; + compiler->local_size = local_size; + if (local_size > 0) + FAIL_IF(emit_op(compiler, SLJIT_SUB, ALLOW_IMM, SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, local_size)); + + if (args >= 1) + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_SAVED_REG1, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG1))); + if (args >= 2) + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_SAVED_REG2, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG2))); + if (args >= 3) + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, SLJIT_SAVED_REG3, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG3))); + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si size; + + CHECK_ERROR_VOID(); + check_sljit_set_context(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + size = (1 + saveds) * sizeof(sljit_uw); + if (scratches >= 4) + size += (scratches - 3) * sizeof(sljit_uw); 
+	local_size += size;
+	local_size = (local_size + 7) & ~7;
+	local_size -= size;
+	compiler->local_size = local_size;
+}
+
+SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw)
+{
+	sljit_uw pop;
+
+	CHECK_ERROR();
+	check_sljit_emit_return(compiler, op, src, srcw);
+
+	FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+
+	if (compiler->local_size > 0)
+		FAIL_IF(emit_op(compiler, SLJIT_ADD, ALLOW_IMM, SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, compiler->local_size));
+
+	pop = POP | (1 << 15);
+	/* Pop saved registers, temporary registers
+	   ldmia sp!, {..., pc} */
+	if (compiler->scratches >= 5)
+		pop |= 1 << 11;
+	if (compiler->scratches >= 4)
+		pop |= 1 << 10;
+	if (compiler->saveds >= 5)
+		pop |= 1 << 8;
+	if (compiler->saveds >= 4)
+		pop |= 1 << 7;
+	if (compiler->saveds >= 3)
+		pop |= 1 << 6;
+	if (compiler->saveds >= 2)
+		pop |= 1 << 5;
+	if (compiler->saveds >= 1)
+		pop |= 1 << 4;
+
+	return push_inst(compiler, pop);
+}
+
+/* --------------------------------------------------------------------- */
+/* Operators */
+/* --------------------------------------------------------------------- */
+
+/* s/l - store/load (1 bit)
+   u/s - signed/unsigned (1 bit)
+   w/b/h/N - word/byte/half/NOT allowed (2 bit)
+   It contains 16 items, but not all are different. */
+
+static sljit_sw data_transfer_insts[16] = {
+/* s u w */ 0xe5000000 /* str */,
+/* s u b */ 0xe5400000 /* strb */,
+/* s u h */ 0xe10000b0 /* strh */,
+/* s u N */ 0x00000000 /* not allowed */,
+/* s s w */ 0xe5000000 /* str */,
+/* s s b */ 0xe5400000 /* strb */,
+/* s s h */ 0xe10000b0 /* strh */,
+/* s s N */ 0x00000000 /* not allowed */,
+
+/* l u w */ 0xe5100000 /* ldr */,
+/* l u b */ 0xe5500000 /* ldrb */,
+/* l u h */ 0xe11000b0 /* ldrh */,
+/* l u N */ 0x00000000 /* not allowed */,
+/* l s w */ 0xe5100000 /* ldr */,
+/* l s b */ 0xe11000d0 /* ldrsb */,
+/* l s h */ 0xe11000f0 /* ldrsh */,
+/* l s N */ 0x00000000 /* not allowed */,
+};
+
+#define EMIT_DATA_TRANSFER(type, add, wb, target, base1, base2) \
+	(data_transfer_insts[(type) >> 4] | ((add) << 23) | ((wb) << 21) | (reg_map[target] << 12) | (reg_map[base1] << 16) | (base2))
+/* Normal ldr/str instruction.
+   Type2: ldrsb, ldrh, ldrsh */
+#define IS_TYPE1_TRANSFER(type) \
+	(data_transfer_insts[(type) >> 4] & 0x04000000)
+#define TYPE2_TRANSFER_IMM(imm) \
+	(((imm) & 0xf) | (((imm) & 0xf0) << 4) | (1 << 22))
+
+/* flags: */
+  /* Arguments are swapped. */
+#define ARGS_SWAPPED	0x01
+  /* Inverted immediate. */
+#define INV_IMM		0x02
+  /* Source and destination are registers. */
+#define REG_DEST	0x04
+#define REG_SOURCE	0x08
+  /* One instruction is enough. */
+#define FAST_DEST	0x10
+  /* Multiple instructions are required. */
+#define SLOW_DEST	0x20
+/* SET_FLAGS must be (1 << 20) as it is also the value of S bit (can be used for optimization). */
+#define SET_FLAGS	(1 << 20)
+/* dst: reg
+   src1: reg
+   src2: reg or imm (if allowed)
+   SRC2_IMM must be (1 << 25) as it is also the value of I bit (can be used for optimization). */
+#define SRC2_IMM	(1 << 25)
+
+#define EMIT_DATA_PROCESS_INS_AND_RETURN(opcode) \
+	return push_inst(compiler, EMIT_DATA_PROCESS_INS(opcode, flags & SET_FLAGS, dst, src1, (src2 & SRC2_IMM) ?
src2 : RM(src2))) + +#define EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(opcode, dst, src1, src2) \ + return push_inst(compiler, EMIT_DATA_PROCESS_INS(opcode, flags & SET_FLAGS, dst, src1, src2)) + +#define EMIT_SHIFT_INS_AND_RETURN(opcode) \ + SLJIT_ASSERT(!(flags & INV_IMM) && !(src2 & SRC2_IMM)); \ + if (compiler->shift_imm != 0x20) { \ + SLJIT_ASSERT(src1 == TMP_REG1); \ + SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); \ + if (compiler->shift_imm != 0) \ + return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, (compiler->shift_imm << 7) | (opcode << 5) | reg_map[src2])); \ + return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, reg_map[src2])); \ + } \ + return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, flags & SET_FLAGS, dst, SLJIT_UNUSED, (reg_map[(flags & ARGS_SWAPPED) ? src1 : src2] << 8) | (opcode << 5) | 0x10 | ((flags & ARGS_SWAPPED) ? reg_map[src2] : reg_map[src1]))); + +static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, + sljit_si dst, sljit_si src1, sljit_si src2) +{ + sljit_sw mul_inst; + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); + if (dst != src2) { + if (src2 & SRC2_IMM) { + if (flags & INV_IMM) + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); + } + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, reg_map[src2]); + } + return SLJIT_SUCCESS; + + case SLJIT_MOV_UB: + case SLJIT_MOV_SB: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); + if ((flags & (REG_DEST | REG_SOURCE)) == (REG_DEST | REG_SOURCE)) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (op == SLJIT_MOV_UB) + return push_inst(compiler, EMIT_DATA_PROCESS_INS(AND_DP, 0, dst, src2, SRC2_IMM | 0xff)); + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | reg_map[src2])); + return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (24 << 7) | (op == SLJIT_MOV_UB ? 0x20 : 0x40) | reg_map[dst])); +#else + return push_inst(compiler, (op == SLJIT_MOV_UB ? UXTB : SXTB) | RD(dst) | RM(src2)); +#endif + } + else if (dst != src2) { + SLJIT_ASSERT(src2 & SRC2_IMM); + if (flags & INV_IMM) + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); + } + return SLJIT_SUCCESS; + + case SLJIT_MOV_UH: + case SLJIT_MOV_SH: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & ARGS_SWAPPED)); + if ((flags & (REG_DEST | REG_SOURCE)) == (REG_DEST | REG_SOURCE)) { +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | reg_map[src2])); + return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, (16 << 7) | (op == SLJIT_MOV_UH ? 0x20 : 0x40) | reg_map[dst])); +#else + return push_inst(compiler, (op == SLJIT_MOV_UH ? 
UXTH : SXTH) | RD(dst) | RM(src2)); +#endif + } + else if (dst != src2) { + SLJIT_ASSERT(src2 & SRC2_IMM); + if (flags & INV_IMM) + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); + } + return SLJIT_SUCCESS; + + case SLJIT_NOT: + if (src2 & SRC2_IMM) { + if (flags & INV_IMM) + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MOV_DP, dst, SLJIT_UNUSED, src2); + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, src2); + } + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(MVN_DP, dst, SLJIT_UNUSED, RM(src2)); + + case SLJIT_CLZ: + SLJIT_ASSERT(!(flags & INV_IMM)); + SLJIT_ASSERT(!(src2 & SRC2_IMM)); + FAIL_IF(push_inst(compiler, CLZ | RD(dst) | RM(src2))); + if (flags & SET_FLAGS) + EMIT_FULL_DATA_PROCESS_INS_AND_RETURN(CMP_DP, SLJIT_UNUSED, dst, SRC2_IMM); + return SLJIT_SUCCESS; + + case SLJIT_ADD: + SLJIT_ASSERT(!(flags & INV_IMM)); + EMIT_DATA_PROCESS_INS_AND_RETURN(ADD_DP); + + case SLJIT_ADDC: + SLJIT_ASSERT(!(flags & INV_IMM)); + EMIT_DATA_PROCESS_INS_AND_RETURN(ADC_DP); + + case SLJIT_SUB: + SLJIT_ASSERT(!(flags & INV_IMM)); + if (!(flags & ARGS_SWAPPED)) + EMIT_DATA_PROCESS_INS_AND_RETURN(SUB_DP); + EMIT_DATA_PROCESS_INS_AND_RETURN(RSB_DP); + + case SLJIT_SUBC: + SLJIT_ASSERT(!(flags & INV_IMM)); + if (!(flags & ARGS_SWAPPED)) + EMIT_DATA_PROCESS_INS_AND_RETURN(SBC_DP); + EMIT_DATA_PROCESS_INS_AND_RETURN(RSC_DP); + + case SLJIT_MUL: + SLJIT_ASSERT(!(flags & INV_IMM)); + SLJIT_ASSERT(!(src2 & SRC2_IMM)); + if (SLJIT_UNLIKELY(op & SLJIT_SET_O)) + mul_inst = SMULL | (reg_map[TMP_REG3] << 16) | (reg_map[dst] << 12); + else + mul_inst = MUL | (reg_map[dst] << 16); + + if (dst != src2) + FAIL_IF(push_inst(compiler, mul_inst | (reg_map[src1] << 8) | reg_map[src2])); + else if (dst != src1) + FAIL_IF(push_inst(compiler, mul_inst | (reg_map[src2] << 8) | reg_map[src1])); + else { + /* Rm and Rd must not be the same register. */ + SLJIT_ASSERT(dst != TMP_REG1); + FAIL_IF(push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, SLJIT_UNUSED, reg_map[src2]))); + FAIL_IF(push_inst(compiler, mul_inst | (reg_map[src2] << 8) | reg_map[TMP_REG1])); + } + + if (!(op & SLJIT_SET_O)) + return SLJIT_SUCCESS; + + /* We need to use TMP_REG3. */ + compiler->cache_arg = 0; + compiler->cache_argw = 0; + /* cmp TMP_REG2, dst asr #31. */ + return push_inst(compiler, EMIT_DATA_PROCESS_INS(CMP_DP, SET_FLAGS, SLJIT_UNUSED, TMP_REG3, RM(dst) | 0xfc0)); + + case SLJIT_AND: + if (!(flags & INV_IMM)) + EMIT_DATA_PROCESS_INS_AND_RETURN(AND_DP); + EMIT_DATA_PROCESS_INS_AND_RETURN(BIC_DP); + + case SLJIT_OR: + SLJIT_ASSERT(!(flags & INV_IMM)); + EMIT_DATA_PROCESS_INS_AND_RETURN(ORR_DP); + + case SLJIT_XOR: + SLJIT_ASSERT(!(flags & INV_IMM)); + EMIT_DATA_PROCESS_INS_AND_RETURN(EOR_DP); + + case SLJIT_SHL: + EMIT_SHIFT_INS_AND_RETURN(0); + + case SLJIT_LSHR: + EMIT_SHIFT_INS_AND_RETURN(1); + + case SLJIT_ASHR: + EMIT_SHIFT_INS_AND_RETURN(2); + } + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; +} + +#undef EMIT_DATA_PROCESS_INS_AND_RETURN +#undef EMIT_FULL_DATA_PROCESS_INS_AND_RETURN +#undef EMIT_SHIFT_INS_AND_RETURN + +/* Tests whether the immediate can be stored in the 12 bit imm field. + Returns with 0 if not possible. 
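+   An ARM data-processing immediate is an 8-bit value rotated right by an
+   even amount (0-30). For example (values purely illustrative):
+      0x000000ab -> 0xab, no rotation
+      0xff000000 -> 0xff ror 8
+      0x000003fc -> 0xff ror 30
+   whereas something like 0x00ff00ff has no such encoding and yields 0.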
*/
+static sljit_uw get_imm(sljit_uw imm)
+{
+	sljit_si rol;
+
+	if (imm <= 0xff)
+		return SRC2_IMM | imm;
+
+	if (!(imm & 0xff000000)) {
+		imm <<= 8;
+		rol = 8;
+	}
+	else {
+		imm = (imm << 24) | (imm >> 8);
+		rol = 0;
+	}
+
+	if (!(imm & 0xff000000)) {
+		imm <<= 8;
+		rol += 4;
+	}
+
+	if (!(imm & 0xf0000000)) {
+		imm <<= 4;
+		rol += 2;
+	}
+
+	if (!(imm & 0xc0000000)) {
+		imm <<= 2;
+		rol += 1;
+	}
+
+	if (!(imm & 0x00ffffff))
+		return SRC2_IMM | (imm >> 24) | (rol << 8);
+	else
+		return 0;
+}
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+static sljit_si generate_int(struct sljit_compiler *compiler, sljit_si reg, sljit_uw imm, sljit_si positive)
+{
+	sljit_uw mask;
+	sljit_uw imm1;
+	sljit_uw imm2;
+	sljit_si rol;
+
+	/* Step 1: Search for a zero byte (8 continuous zero bits). */
+	mask = 0xff000000;
+	rol = 8;
+	while(1) {
+		if (!(imm & mask)) {
+			/* Rol imm by rol. */
+			imm = (imm << rol) | (imm >> (32 - rol));
+			/* Calculate arm rol. */
+			rol = 4 + (rol >> 1);
+			break;
+		}
+		rol += 2;
+		mask >>= 2;
+		if (mask & 0x3) {
+			/* rol by 8. */
+			imm = (imm << 8) | (imm >> 24);
+			mask = 0xff00;
+			rol = 24;
+			while (1) {
+				if (!(imm & mask)) {
+					/* Rol imm by rol. */
+					imm = (imm << rol) | (imm >> (32 - rol));
+					/* Calculate arm rol. */
+					rol = (rol >> 1) - 8;
+					break;
+				}
+				rol += 2;
+				mask >>= 2;
+				if (mask & 0x3)
+					return 0;
+			}
+			break;
+		}
+	}
+
+	/* The low 8 bits must be zero. */
+	SLJIT_ASSERT(!(imm & 0xff));
+
+	if (!(imm & 0xff000000)) {
+		imm1 = SRC2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
+		imm2 = SRC2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
+	}
+	else if (imm & 0xc0000000) {
+		imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+		imm <<= 8;
+		rol += 4;
+
+		if (!(imm & 0xff000000)) {
+			imm <<= 8;
+			rol += 4;
+		}
+
+		if (!(imm & 0xf0000000)) {
+			imm <<= 4;
+			rol += 2;
+		}
+
+		if (!(imm & 0xc0000000)) {
+			imm <<= 2;
+			rol += 1;
+		}
+
+		if (!(imm & 0x00ffffff))
+			imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+		else
+			return 0;
+	}
+	else {
+		if (!(imm & 0xf0000000)) {
+			imm <<= 4;
+			rol += 2;
+		}
+
+		if (!(imm & 0xc0000000)) {
+			imm <<= 2;
+			rol += 1;
+		}
+
+		imm1 = SRC2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
+		imm <<= 8;
+		rol += 4;
+
+		if (!(imm & 0xf0000000)) {
+			imm <<= 4;
+			rol += 2;
+		}
+
+		if (!(imm & 0xc0000000)) {
+			imm <<= 2;
+			rol += 1;
+		}
+
+		if (!(imm & 0x00ffffff))
+			imm2 = SRC2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
+		else
+			return 0;
+	}
+
+	EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(positive ? MOV_DP : MVN_DP, 0, reg, SLJIT_UNUSED, imm1));
+	EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(positive ? ORR_DP : BIC_DP, 0, reg, reg, imm2));
+	return 1;
+}
+#endif
+
+static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sljit_uw imm)
+{
+	sljit_uw tmp;
+
+#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7)
+	if (!(imm & ~0xffff))
+		return push_inst(compiler, MOVW | RD(reg) | ((imm << 4) & 0xf0000) | (imm & 0xfff));
+#endif
+
+	/* Create imm by 1 inst. */
+	tmp = get_imm(imm);
+	if (tmp) {
+		EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, reg, SLJIT_UNUSED, tmp));
+		return SLJIT_SUCCESS;
+	}
+
+	tmp = get_imm(~imm);
+	if (tmp) {
+		EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MVN_DP, 0, reg, SLJIT_UNUSED, tmp));
+		return SLJIT_SUCCESS;
+	}
+
+#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5)
+	/* Create imm by 2 inst. */
+	FAIL_IF(generate_int(compiler, reg, imm, 1));
+	FAIL_IF(generate_int(compiler, reg, ~imm, 0));
+
+	/* Load integer.
*/
+	return push_inst_with_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, reg, TMP_PC, 0), imm);
+#else
+	return emit_imm(compiler, reg, imm);
+#endif
+}
+
+/* Helper function. Dst should be reg + value, using at most 1 instruction; the flags are not set. */
+static sljit_si emit_set_delta(struct sljit_compiler *compiler, sljit_si dst, sljit_si reg, sljit_sw value)
+{
+	if (value >= 0) {
+		value = get_imm(value);
+		if (value)
+			return push_inst(compiler, EMIT_DATA_PROCESS_INS(ADD_DP, 0, dst, reg, value));
+	}
+	else {
+		value = get_imm(-value);
+		if (value)
+			return push_inst(compiler, EMIT_DATA_PROCESS_INS(SUB_DP, 0, dst, reg, value));
+	}
+	return SLJIT_ERR_UNSUPPORTED;
+}
+
+/* Can perform an operation using at most 1 instruction. */
+static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw)
+{
+	sljit_uw imm;
+
+	if (arg & SLJIT_IMM) {
+		imm = get_imm(argw);
+		if (imm) {
+			if (inp_flags & ARG_TEST)
+				return 1;
+			EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, reg, SLJIT_UNUSED, imm));
+			return -1;
+		}
+		imm = get_imm(~argw);
+		if (imm) {
+			if (inp_flags & ARG_TEST)
+				return 1;
+			EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MVN_DP, 0, reg, SLJIT_UNUSED, imm));
+			return -1;
+		}
+		return (inp_flags & ARG_TEST) ? SLJIT_SUCCESS : 0;
+	}
+
+	SLJIT_ASSERT(arg & SLJIT_MEM);
+
+	/* Fast loads/stores. */
+	if (arg & 0xf) {
+		if (!(arg & 0xf0)) {
+			if (IS_TYPE1_TRANSFER(inp_flags)) {
+				if (argw >= 0 && argw <= 0xfff) {
+					if (inp_flags & ARG_TEST)
+						return 1;
+					EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, argw));
+					return -1;
+				}
+				if (argw < 0 && argw >= -0xfff) {
+					if (inp_flags & ARG_TEST)
+						return 1;
+					EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 0, inp_flags & WRITE_BACK, reg, arg & 0xf, -argw));
+					return -1;
+				}
+			}
+			else {
+				if (argw >= 0 && argw <= 0xff) {
+					if (inp_flags & ARG_TEST)
+						return 1;
+					EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, TYPE2_TRANSFER_IMM(argw)));
+					return -1;
+				}
+				if (argw < 0 && argw >= -0xff) {
+					if (inp_flags & ARG_TEST)
+						return 1;
+					argw = -argw;
+					EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 0, inp_flags & WRITE_BACK, reg, arg & 0xf, TYPE2_TRANSFER_IMM(argw)));
+					return -1;
+				}
+			}
+		}
+		else if ((argw & 0x3) == 0 || IS_TYPE1_TRANSFER(inp_flags)) {
+			if (inp_flags & ARG_TEST)
+				return 1;
+			EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf,
+				RM((arg >> 4) & 0xf) | (IS_TYPE1_TRANSFER(inp_flags) ? SRC2_IMM : 0) | ((argw & 0x3) << 7)));
+			return -1;
+		}
+	}
+
+	return (inp_flags & ARG_TEST) ? SLJIT_SUCCESS : 0;
+}
+
+/* See getput_arg below.
+   Note: can_cache is called only for binary operators. Those
+   operators always use word arguments without write back. */
+static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw)
+{
+	/* Immediate caching is not supported as it would be an operation on constant arguments. */
+	if (arg & SLJIT_IMM)
+		return 0;
+
+	/* Always a simple operation. */
+	if (arg & 0xf0)
+		return 0;
+
+	if (!(arg & 0xf)) {
+		/* Immediate access.
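+		   I.e. an absolute address with no base register. Caching pays
+		   off when the next access lands within the 12-bit displacement
+		   (0xfff bytes) of this one, so the address materialized in
+		   TMP_REG3 can serve both; e.g. loads from the (hypothetical)
+		   addresses 0x10000 and 0x10004 share a single base load.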
*/ + if ((next_arg & SLJIT_MEM) && ((sljit_uw)argw - (sljit_uw)next_argw <= 0xfff || (sljit_uw)next_argw - (sljit_uw)argw <= 0xfff)) + return 1; + return 0; + } + + if (argw <= 0xfffff && argw >= -0xfffff) + return 0; + + if (argw == next_argw && (next_arg & SLJIT_MEM)) + return 1; + + if (arg == next_arg && ((sljit_uw)argw - (sljit_uw)next_argw <= 0xfff || (sljit_uw)next_argw - (sljit_uw)argw <= 0xfff)) + return 1; + + return 0; +} + +#define GETPUT_ARG_DATA_TRANSFER(add, wb, target, base, imm) \ + if (max_delta & 0xf00) \ + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(inp_flags, add, wb, target, base, imm))); \ + else \ + FAIL_IF(push_inst(compiler, EMIT_DATA_TRANSFER(inp_flags, add, wb, target, base, TYPE2_TRANSFER_IMM(imm)))); + +#define TEST_WRITE_BACK() \ + if (inp_flags & WRITE_BACK) { \ + tmp_r = arg & 0xf; \ + if (reg == tmp_r) { \ + /* This can only happen for stores */ \ + /* since ldr reg, [reg, ...]! has no meaning */ \ + SLJIT_ASSERT(!(inp_flags & LOAD_DATA)); \ + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG3, SLJIT_UNUSED, RM(reg))); \ + reg = TMP_REG3; \ + } \ + } + +/* Emit the necessary instructions. See can_cache above. */ +static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + sljit_si tmp_r; + sljit_sw max_delta; + sljit_sw sign; + sljit_uw imm; + + if (arg & SLJIT_IMM) { + SLJIT_ASSERT(inp_flags & LOAD_DATA); + return load_immediate(compiler, reg, argw); + } + + SLJIT_ASSERT(arg & SLJIT_MEM); + + tmp_r = (inp_flags & LOAD_DATA) ? reg : TMP_REG3; + max_delta = IS_TYPE1_TRANSFER(inp_flags) ? 0xfff : 0xff; + + if ((arg & 0xf) == SLJIT_UNUSED) { + /* Write back is not used. */ + imm = (sljit_uw)(argw - compiler->cache_argw); + if ((compiler->cache_arg & SLJIT_IMM) && (imm <= (sljit_uw)max_delta || imm >= (sljit_uw)-max_delta)) { + if (imm <= (sljit_uw)max_delta) { + sign = 1; + argw = argw - compiler->cache_argw; + } + else { + sign = 0; + argw = compiler->cache_argw - argw; + } + + GETPUT_ARG_DATA_TRANSFER(sign, 0, reg, TMP_REG3, argw); + return SLJIT_SUCCESS; + } + + /* With write back, we can create some sophisticated loads, but + it is hard to decide whether we should convert downward (0s) or upward (1s). 
*/ + imm = (sljit_uw)(argw - next_argw); + if ((next_arg & SLJIT_MEM) && (imm <= (sljit_uw)max_delta || imm >= (sljit_uw)-max_delta)) { + SLJIT_ASSERT(inp_flags & LOAD_DATA); + + compiler->cache_arg = SLJIT_IMM; + compiler->cache_argw = argw; + tmp_r = TMP_REG3; + } + + FAIL_IF(load_immediate(compiler, tmp_r, argw)); + GETPUT_ARG_DATA_TRANSFER(1, 0, reg, tmp_r, 0); + return SLJIT_SUCCESS; + } + + if (arg & 0xf0) { + SLJIT_ASSERT((argw & 0x3) && !(max_delta & 0xf00)); + if (inp_flags & WRITE_BACK) + tmp_r = arg & 0xf; + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, tmp_r, arg & 0xf, RM((arg >> 4) & 0xf) | ((argw & 0x3) << 7))); + EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, 0, reg, tmp_r, TYPE2_TRANSFER_IMM(0))); + return SLJIT_SUCCESS; + } + + imm = (sljit_uw)(argw - compiler->cache_argw); + if (compiler->cache_arg == arg && imm <= (sljit_uw)max_delta) { + SLJIT_ASSERT(!(inp_flags & WRITE_BACK)); + GETPUT_ARG_DATA_TRANSFER(1, 0, reg, TMP_REG3, imm); + return SLJIT_SUCCESS; + } + if (compiler->cache_arg == arg && imm >= (sljit_uw)-max_delta) { + SLJIT_ASSERT(!(inp_flags & WRITE_BACK)); + imm = (sljit_uw)-(sljit_sw)imm; + GETPUT_ARG_DATA_TRANSFER(0, 0, reg, TMP_REG3, imm); + return SLJIT_SUCCESS; + } + + imm = get_imm(argw & ~max_delta); + if (imm) { + TEST_WRITE_BACK(); + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, tmp_r, arg & 0xf, imm)); + GETPUT_ARG_DATA_TRANSFER(1, inp_flags & WRITE_BACK, reg, tmp_r, argw & max_delta); + return SLJIT_SUCCESS; + } + + imm = get_imm(-argw & ~max_delta); + if (imm) { + argw = -argw; + TEST_WRITE_BACK(); + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(SUB_DP, 0, tmp_r, arg & 0xf, imm)); + GETPUT_ARG_DATA_TRANSFER(0, inp_flags & WRITE_BACK, reg, tmp_r, argw & max_delta); + return SLJIT_SUCCESS; + } + + if ((compiler->cache_arg & SLJIT_IMM) && compiler->cache_argw == argw) { + TEST_WRITE_BACK(); + EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, RM(TMP_REG3) | (max_delta & 0xf00 ? SRC2_IMM : 0))); + return SLJIT_SUCCESS; + } + + if (argw == next_argw && (next_arg & SLJIT_MEM)) { + SLJIT_ASSERT(inp_flags & LOAD_DATA); + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + + compiler->cache_arg = SLJIT_IMM; + compiler->cache_argw = argw; + + TEST_WRITE_BACK(); + EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, RM(TMP_REG3) | (max_delta & 0xf00 ? SRC2_IMM : 0))); + return SLJIT_SUCCESS; + } + + imm = (sljit_uw)(argw - next_argw); + if (arg == next_arg && !(inp_flags & WRITE_BACK) && (imm <= (sljit_uw)max_delta || imm >= (sljit_uw)-max_delta)) { + SLJIT_ASSERT(inp_flags & LOAD_DATA); + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG3, TMP_REG3, reg_map[arg & 0xf])); + + compiler->cache_arg = arg; + compiler->cache_argw = argw; + + GETPUT_ARG_DATA_TRANSFER(1, 0, reg, TMP_REG3, 0); + return SLJIT_SUCCESS; + } + + if ((arg & 0xf) == tmp_r) { + compiler->cache_arg = SLJIT_IMM; + compiler->cache_argw = argw; + tmp_r = TMP_REG3; + } + + FAIL_IF(load_immediate(compiler, tmp_r, argw)); + EMIT_INSTRUCTION(EMIT_DATA_TRANSFER(inp_flags, 1, inp_flags & WRITE_BACK, reg, arg & 0xf, reg_map[tmp_r] | (max_delta & 0xf00 ? 
SRC2_IMM : 0)));
+	return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw)
+{
+	if (getput_arg_fast(compiler, flags, reg, arg, argw))
+		return compiler->error;
+	compiler->cache_arg = 0;
+	compiler->cache_argw = 0;
+	return getput_arg(compiler, flags, reg, arg, argw, 0, 0);
+}
+
+static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w)
+{
+	if (getput_arg_fast(compiler, flags, reg, arg1, arg1w))
+		return compiler->error;
+	return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w);
+}
+
+static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si inp_flags,
+	sljit_si dst, sljit_sw dstw,
+	sljit_si src1, sljit_sw src1w,
+	sljit_si src2, sljit_sw src2w)
+{
+	/* arg1 goes to TMP_REG1 or src reg
+	   arg2 goes to TMP_REG2, imm or src reg
+	   TMP_REG3 can be used for caching
+	   result goes to TMP_REG2, so the store of the result can use TMP_REG1 and TMP_REG3. */
+
+	/* We prefer registers and simple constants. */
+	sljit_si dst_r;
+	sljit_si src1_r;
+	sljit_si src2_r = 0;
+	sljit_si sugg_src2_r = TMP_REG2;
+	sljit_si flags = GET_FLAGS(op) ? SET_FLAGS : 0;
+
+	compiler->cache_arg = 0;
+	compiler->cache_argw = 0;
+
+	/* Destination check. */
+	if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) {
+		if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM))
+			return SLJIT_SUCCESS;
+		dst_r = TMP_REG2;
+	}
+	else if (dst <= TMP_REG3) {
+		dst_r = dst;
+		flags |= REG_DEST;
+		if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI)
+			sugg_src2_r = dst_r;
+	}
+	else {
+		SLJIT_ASSERT(dst & SLJIT_MEM);
+		if (getput_arg_fast(compiler, inp_flags | ARG_TEST, TMP_REG2, dst, dstw)) {
+			flags |= FAST_DEST;
+			dst_r = TMP_REG2;
+		}
+		else {
+			flags |= SLOW_DEST;
+			dst_r = 0;
+		}
+	}
+
+	/* Source 1. */
+	if (src1 <= TMP_REG3)
+		src1_r = src1;
+	else if (src2 <= TMP_REG3) {
+		flags |= ARGS_SWAPPED;
+		src1_r = src2;
+		src2 = src1;
+		src2w = src1w;
+	}
+	else do { /* do { } while(0) is used because of breaks. */
+		src1_r = 0;
+		if ((inp_flags & ALLOW_ANY_IMM) && (src1 & SLJIT_IMM)) {
+			/* The second check will generate a hit. */
+			src2_r = get_imm(src1w);
+			if (src2_r) {
+				flags |= ARGS_SWAPPED;
+				src1 = src2;
+				src1w = src2w;
+				break;
+			}
+			if (inp_flags & ALLOW_INV_IMM) {
+				src2_r = get_imm(~src1w);
+				if (src2_r) {
+					flags |= ARGS_SWAPPED | INV_IMM;
+					src1 = src2;
+					src1w = src2w;
+					break;
+				}
+			}
+			if (GET_OPCODE(op) == SLJIT_ADD) {
+				src2_r = get_imm(-src1w);
+				if (src2_r) {
+					/* Note: ARGS_SWAPPED is intentionally not applied! */
+					src1 = src2;
+					src1w = src2w;
+					op = SLJIT_SUB | GET_ALL_FLAGS(op);
+					break;
+				}
+			}
+		}
+
+		if (getput_arg_fast(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w)) {
+			FAIL_IF(compiler->error);
+			src1_r = TMP_REG1;
+		}
+	} while (0);
+
+	/* Source 2. */
+	if (src2_r == 0) {
+		if (src2 <= TMP_REG3) {
+			src2_r = src2;
+			flags |= REG_SOURCE;
+			if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI)
+				dst_r = src2_r;
+		}
+		else do { /* do { } while(0) is used because of breaks.
*/ + if ((inp_flags & ALLOW_ANY_IMM) && (src2 & SLJIT_IMM)) { + src2_r = get_imm(src2w); + if (src2_r) + break; + if (inp_flags & ALLOW_INV_IMM) { + src2_r = get_imm(~src2w); + if (src2_r) { + flags |= INV_IMM; + break; + } + } + if (GET_OPCODE(op) == SLJIT_ADD) { + src2_r = get_imm(-src2w); + if (src2_r) { + op = SLJIT_SUB | GET_ALL_FLAGS(op); + flags &= ~ARGS_SWAPPED; + break; + } + } + if (GET_OPCODE(op) == SLJIT_SUB && !(flags & ARGS_SWAPPED)) { + src2_r = get_imm(-src2w); + if (src2_r) { + op = SLJIT_ADD | GET_ALL_FLAGS(op); + flags &= ~ARGS_SWAPPED; + break; + } + } + } + + /* src2_r is 0. */ + if (getput_arg_fast(compiler, inp_flags | LOAD_DATA, sugg_src2_r, src2, src2w)) { + FAIL_IF(compiler->error); + src2_r = sugg_src2_r; + } + } while (0); + } + + /* src1_r, src2_r and dst_r can be zero (=unprocessed) or non-zero. + If they are zero, they must not be registers. */ + if (src1_r == 0 && src2_r == 0 && dst_r == 0) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + SLJIT_ASSERT(!(flags & ARGS_SWAPPED)); + flags |= ARGS_SWAPPED; + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG2, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); + } + src1_r = TMP_REG1; + src2_r = TMP_REG2; + } + else if (src1_r == 0 && src2_r == 0) { + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); + src1_r = TMP_REG1; + } + else if (src1_r == 0 && dst_r == 0) { + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + src1_r = TMP_REG1; + } + else if (src2_r == 0 && dst_r == 0) { + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); + src2_r = sugg_src2_r; + } + + if (dst_r == 0) + dst_r = TMP_REG2; + + if (src1_r == 0) { + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, TMP_REG1, src1, src1w, 0, 0)); + src1_r = TMP_REG1; + } + + if (src2_r == 0) { + FAIL_IF(getput_arg(compiler, inp_flags | LOAD_DATA, sugg_src2_r, src2, src2w, 0, 0)); + src2_r = sugg_src2_r; + } + + FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); + + if (flags & (FAST_DEST | SLOW_DEST)) { + if (flags & FAST_DEST) + FAIL_IF(getput_arg_fast(compiler, inp_flags, dst_r, dst, dstw)); + else + FAIL_IF(getput_arg(compiler, inp_flags, dst_r, dst, dstw, 0, 0)); + } + return SLJIT_SUCCESS; +} + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__GNUC__) +extern unsigned int __aeabi_uidivmod(unsigned int numerator, unsigned int denominator); +extern int __aeabi_idivmod(int numerator, int denominator); +#else +#error "Software divmod functions are needed" +#endif + +#ifdef __cplusplus +} +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + CHECK_ERROR(); + check_sljit_emit_op0(compiler, op); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_BREAKPOINT: + EMIT_INSTRUCTION(BKPT); + break; + case SLJIT_NOP: + EMIT_INSTRUCTION(NOP); + break; + case SLJIT_UMUL: + case SLJIT_SMUL: +#if (defined SLJIT_CONFIG_ARM_V7 && SLJIT_CONFIG_ARM_V7) + return push_inst(compiler, (op == SLJIT_UMUL ? 
UMULL : SMULL) + | (reg_map[SLJIT_SCRATCH_REG2] << 16) + | (reg_map[SLJIT_SCRATCH_REG1] << 12) + | (reg_map[SLJIT_SCRATCH_REG1] << 8) + | reg_map[SLJIT_SCRATCH_REG2]); +#else + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG1, SLJIT_UNUSED, RM(SLJIT_SCRATCH_REG2))); + return push_inst(compiler, (op == SLJIT_UMUL ? UMULL : SMULL) + | (reg_map[SLJIT_SCRATCH_REG2] << 16) + | (reg_map[SLJIT_SCRATCH_REG1] << 12) + | (reg_map[SLJIT_SCRATCH_REG1] << 8) + | reg_map[TMP_REG1]); +#endif + case SLJIT_UDIV: + case SLJIT_SDIV: + if (compiler->scratches >= 3) + EMIT_INSTRUCTION(0xe52d2008 /* str r2, [sp, #-8]! */); +#if defined(__GNUC__) + FAIL_IF(sljit_emit_ijump(compiler, SLJIT_FAST_CALL, SLJIT_IMM, + (op == SLJIT_UDIV ? SLJIT_FUNC_OFFSET(__aeabi_uidivmod) : SLJIT_FUNC_OFFSET(__aeabi_idivmod)))); +#else +#error "Software divmod functions are needed" +#endif + if (compiler->scratches >= 3) + return push_inst(compiler, 0xe49d2008 /* ldr r2, [sp], #8 */); + return SLJIT_SUCCESS; + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: + case SLJIT_MOV_P: + return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_UB: + return emit_op(compiler, SLJIT_MOV_UB, ALLOW_ANY_IMM | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); + + case SLJIT_MOV_SB: + return emit_op(compiler, SLJIT_MOV_SB, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); + + case SLJIT_MOV_UH: + return emit_op(compiler, SLJIT_MOV_UH, ALLOW_ANY_IMM | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); + + case SLJIT_MOV_SH: + return emit_op(compiler, SLJIT_MOV_SH, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw); + + case SLJIT_MOVU: + case SLJIT_MOVU_UI: + case SLJIT_MOVU_SI: + case SLJIT_MOVU_P: + return emit_op(compiler, SLJIT_MOV, ALLOW_ANY_IMM | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOVU_UB: + return emit_op(compiler, SLJIT_MOV_UB, ALLOW_ANY_IMM | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); + + case SLJIT_MOVU_SB: + return emit_op(compiler, SLJIT_MOV_SB, ALLOW_ANY_IMM | SIGNED_DATA | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); + + case SLJIT_MOVU_UH: + return emit_op(compiler, SLJIT_MOV_UH, ALLOW_ANY_IMM | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); + + case SLJIT_MOVU_SH: + return emit_op(compiler, SLJIT_MOV_SH, ALLOW_ANY_IMM | SIGNED_DATA | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? 
(sljit_sh)srcw : srcw); + + case SLJIT_NOT: + return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_NEG: +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->skip_checks = 1; +#endif + return sljit_emit_op2(compiler, SLJIT_SUB | GET_ALL_FLAGS(op), dst, dstw, SLJIT_IMM, 0, src, srcw); + + case SLJIT_CLZ: + return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src, srcw); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + CHECK_ERROR(); + check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + switch (GET_OPCODE(op)) { + case SLJIT_ADD: + case SLJIT_ADDC: + case SLJIT_SUB: + case SLJIT_SUBC: + case SLJIT_OR: + case SLJIT_XOR: + return emit_op(compiler, op, ALLOW_IMM, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_MUL: + return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_AND: + return emit_op(compiler, op, ALLOW_ANY_IMM, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SHL: + case SLJIT_LSHR: + case SLJIT_ASHR: + if (src2 & SLJIT_IMM) { + compiler->shift_imm = src2w & 0x1f; + return emit_op(compiler, op, 0, dst, dstw, TMP_REG1, 0, src1, src1w); + } + else { + compiler->shift_imm = 0x20; + return emit_op(compiler, op, 0, dst, dstw, src1, src1w, src2, src2w); + } + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +{ + check_sljit_get_register_index(reg); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + CHECK_ERROR(); + check_sljit_emit_op_custom(compiler, instruction, size); + SLJIT_ASSERT(size == 4); + + return push_inst(compiler, *(sljit_uw*)instruction); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + +/* 0 - no fpu + 1 - vfp */ +static sljit_si arm_fpu_type = -1; + +static void init_compiler(void) +{ + if (arm_fpu_type != -1) + return; + + /* TODO: Only the OS can help to determine the correct fpu type. */ + arm_fpu_type = 1; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ + if (arm_fpu_type == -1) + init_compiler(); + return arm_fpu_type; +} + +#else + +#define arm_fpu_type 1 + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ + /* Always available. 
*/ + return 1; +} + +#endif + +#define FPU_LOAD (1 << 20) +#define EMIT_FPU_DATA_TRANSFER(inst, add, base, freg, offs) \ + ((inst) | ((add) << 23) | (reg_map[base] << 16) | (freg << 12) | (offs)) +#define EMIT_FPU_OPERATION(opcode, mode, dst, src1, src2) \ + ((opcode) | (mode) | ((dst) << 12) | (src1) | ((src2) << 16)) + +static sljit_si emit_fop_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +{ + sljit_sw tmp; + sljit_uw imm; + sljit_sw inst = VSTR_F32 | (flags & (SLJIT_SINGLE_OP | FPU_LOAD)); + SLJIT_ASSERT(arg & SLJIT_MEM); + + if (SLJIT_UNLIKELY(arg & 0xf0)) { + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG1, arg & 0xf, RM((arg >> 4) & 0xf) | ((argw & 0x3) << 7))); + arg = SLJIT_MEM | TMP_REG1; + argw = 0; + } + + /* Fast loads and stores. */ + if ((arg & 0xf)) { + if (!(argw & ~0x3fc)) + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, arg & 0xf, reg, argw >> 2)); + if (!(-argw & ~0x3fc)) + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, arg & 0xf, reg, (-argw) >> 2)); + } + + if (compiler->cache_arg == arg) { + tmp = argw - compiler->cache_argw; + if (!(tmp & ~0x3fc)) + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, tmp >> 2)); + if (!(-tmp & ~0x3fc)) + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, TMP_REG3, reg, -tmp >> 2)); + if (emit_set_delta(compiler, TMP_REG3, TMP_REG3, tmp) != SLJIT_ERR_UNSUPPORTED) { + FAIL_IF(compiler->error); + compiler->cache_argw = argw; + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, 0)); + } + } + + if (arg & 0xf) { + if (emit_set_delta(compiler, TMP_REG1, arg & 0xf, argw) != SLJIT_ERR_UNSUPPORTED) { + FAIL_IF(compiler->error); + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG1, reg, 0)); + } + imm = get_imm(argw & ~0x3fc); + if (imm) { + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG1, arg & 0xf, imm)); + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG1, reg, (argw & 0x3fc) >> 2)); + } + imm = get_imm(-argw & ~0x3fc); + if (imm) { + argw = -argw; + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(SUB_DP, 0, TMP_REG1, arg & 0xf, imm)); + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 0, TMP_REG1, reg, (argw & 0x3fc) >> 2)); + } + } + + compiler->cache_arg = arg; + compiler->cache_argw = argw; + if (arg & 0xf) { + FAIL_IF(load_immediate(compiler, TMP_REG1, argw)); + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(ADD_DP, 0, TMP_REG3, arg & 0xf, reg_map[TMP_REG1])); + } + else + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + + return push_inst(compiler, EMIT_FPU_DATA_TRANSFER(inst, 1, TMP_REG3, reg, 0)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si dst_fr; + + CHECK_ERROR(); + check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); + SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100), float_transfer_bit_error); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + op ^= SLJIT_SINGLE_OP; + + if (GET_OPCODE(op) == SLJIT_CMPD) { + if (dst > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, dst, dstw)); + dst = TMP_FREG1; + } + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src, srcw)); + src = TMP_FREG2; + } + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VCMP_F32, op & SLJIT_SINGLE_OP, dst, src, 0)); + EMIT_INSTRUCTION(VMRS); + 
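/* Note: the VMRS above copies the FPSCR condition flags (N, Z, C, V) into the ARM APSR, so the result of the preceding VCMP can be tested by ordinary integer conditional instructions. */ +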
return SLJIT_SUCCESS; + } + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; + + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, dst_fr, src, srcw)); + src = dst_fr; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOVD: + if (src != dst_fr && dst_fr != TMP_FREG1) + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VMOV_F32, op & SLJIT_SINGLE_OP, dst_fr, src, 0)); + break; + case SLJIT_NEGD: + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VNEG_F32, op & SLJIT_SINGLE_OP, dst_fr, src, 0)); + break; + case SLJIT_ABSD: + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VABS_F32, op & SLJIT_SINGLE_OP, dst_fr, src, 0)); + break; + } + + if (dst_fr == TMP_FREG1) { + if (GET_OPCODE(op) == SLJIT_MOVD) + dst_fr = src; + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), dst_fr, dst, dstw)); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si dst_fr; + + CHECK_ERROR(); + check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + op ^= SLJIT_SINGLE_OP; + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; + + if (src2 > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG2, src2, src2w)); + src2 = TMP_FREG2; + } + + if (src1 > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP) | FPU_LOAD, TMP_FREG1, src1, src1w)); + src1 = TMP_FREG1; + } + + switch (GET_OPCODE(op)) { + case SLJIT_ADDD: + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VADD_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); + break; + + case SLJIT_SUBD: + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VSUB_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); + break; + + case SLJIT_MULD: + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VMUL_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); + break; + + case SLJIT_DIVD: + EMIT_INSTRUCTION(EMIT_FPU_OPERATION(VDIV_F32, op & SLJIT_SINGLE_OP, dst_fr, src2, src1)); + break; + } + + if (dst_fr == TMP_FREG1) + FAIL_IF(emit_fop_mem(compiler, (op & SLJIT_SINGLE_OP), TMP_FREG1, dst, dstw)); + + return SLJIT_SUCCESS; +} + +#undef FPU_LOAD +#undef EMIT_FPU_DATA_TRANSFER +#undef EMIT_FPU_OPERATION + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_enter(compiler, dst, dstw); + ADJUST_LOCAL_OFFSET(dst, dstw); + + /* For UNUSED dst. Uncommon, but possible. */ + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + if (dst <= TMP_REG3) + return push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst, SLJIT_UNUSED, RM(TMP_REG3))); + + /* Memory. */ + if (getput_arg_fast(compiler, WORD_DATA, TMP_REG3, dst, dstw)) + return compiler->error; + /* TMP_REG3 is used for caching. 
*/ + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG2, SLJIT_UNUSED, RM(TMP_REG3))); + compiler->cache_arg = 0; + compiler->cache_argw = 0; + return getput_arg(compiler, WORD_DATA, TMP_REG2, dst, dstw, 0, 0); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_return(compiler, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= TMP_REG3) + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG3, SLJIT_UNUSED, RM(src))); + else if (src & SLJIT_MEM) { + if (getput_arg_fast(compiler, WORD_DATA | LOAD_DATA, TMP_REG3, src, srcw)) + FAIL_IF(compiler->error); + else { + compiler->cache_arg = 0; + compiler->cache_argw = 0; + FAIL_IF(getput_arg(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw, 0, 0)); + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, TMP_REG3, SLJIT_UNUSED, RM(TMP_REG2))); + } + } + else if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, TMP_REG3, srcw)); + return push_inst(compiler, BLX | RM(TMP_REG3)); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +static sljit_uw get_cc(sljit_si type) +{ + switch (type) { + case SLJIT_C_EQUAL: + case SLJIT_C_MUL_NOT_OVERFLOW: + case SLJIT_C_FLOAT_EQUAL: + return 0x00000000; + + case SLJIT_C_NOT_EQUAL: + case SLJIT_C_MUL_OVERFLOW: + case SLJIT_C_FLOAT_NOT_EQUAL: + return 0x10000000; + + case SLJIT_C_LESS: + case SLJIT_C_FLOAT_LESS: + return 0x30000000; + + case SLJIT_C_GREATER_EQUAL: + case SLJIT_C_FLOAT_GREATER_EQUAL: + return 0x20000000; + + case SLJIT_C_GREATER: + case SLJIT_C_FLOAT_GREATER: + return 0x80000000; + + case SLJIT_C_LESS_EQUAL: + case SLJIT_C_FLOAT_LESS_EQUAL: + return 0x90000000; + + case SLJIT_C_SIG_LESS: + return 0xb0000000; + + case SLJIT_C_SIG_GREATER_EQUAL: + return 0xa0000000; + + case SLJIT_C_SIG_GREATER: + return 0xc0000000; + + case SLJIT_C_SIG_LESS_EQUAL: + return 0xd0000000; + + case SLJIT_C_OVERFLOW: + case SLJIT_C_FLOAT_UNORDERED: + return 0x60000000; + + case SLJIT_C_NOT_OVERFLOW: + case SLJIT_C_FLOAT_ORDERED: + return 0x70000000; + + default: /* SLJIT_JUMP */ + return 0xe0000000; + } +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + check_sljit_emit_label(compiler); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + return label; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + struct sljit_jump *jump; + + CHECK_ERROR_PTR(); + check_sljit_emit_jump(compiler, type); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + /* In ARM, we don't need to touch the arguments. */ +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (type >= SLJIT_FAST_CALL) + PTR_FAIL_IF(prepare_blx(compiler)); + PTR_FAIL_IF(push_inst_with_unique_literal(compiler, ((EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, + type <= SLJIT_JUMP ? 
TMP_PC : TMP_REG1, TMP_PC, 0)) & ~COND_MASK) | get_cc(type), 0)); + + if (jump->flags & SLJIT_REWRITABLE_JUMP) { + jump->addr = compiler->size; + compiler->patches++; + } + + if (type >= SLJIT_FAST_CALL) { + jump->flags |= IS_BL; + PTR_FAIL_IF(emit_blx(compiler)); + } + + if (!(jump->flags & SLJIT_REWRITABLE_JUMP)) + jump->addr = compiler->size; +#else + if (type >= SLJIT_FAST_CALL) + jump->flags |= IS_BL; + PTR_FAIL_IF(emit_imm(compiler, TMP_REG1, 0)); + PTR_FAIL_IF(push_inst(compiler, (((type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1)) & ~COND_MASK) | get_cc(type))); + jump->addr = compiler->size; +#endif + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + struct sljit_jump *jump; + + CHECK_ERROR(); + check_sljit_emit_ijump(compiler, type, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + /* In ARM, we don't need to touch the arguments. */ + if (src & SLJIT_IMM) { + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_BL : 0)); + jump->u.target = srcw; + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + if (type >= SLJIT_FAST_CALL) + FAIL_IF(prepare_blx(compiler)); + FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, type <= SLJIT_JUMP ? TMP_PC : TMP_REG1, TMP_PC, 0), 0)); + if (type >= SLJIT_FAST_CALL) + FAIL_IF(emit_blx(compiler)); +#else + FAIL_IF(emit_imm(compiler, TMP_REG1, 0)); + FAIL_IF(push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG1))); +#endif + jump->addr = compiler->size; + } + else { + if (src <= TMP_REG3) + return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(src)); + + SLJIT_ASSERT(src & SLJIT_MEM); + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw)); + return push_inst(compiler, (type <= SLJIT_JUMP ? BX : BLX) | RM(TMP_REG2)); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + sljit_si dst_r, flags = GET_ALL_FLAGS(op); + sljit_uw cc, ins; + + CHECK_ERROR(); + check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + op = GET_OPCODE(op); + cc = get_cc(type); + dst_r = (dst <= TMP_REG3) ? dst : TMP_REG2; + + if (op < SLJIT_ADD) { + EMIT_INSTRUCTION(EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_r, SLJIT_UNUSED, SRC2_IMM | 0)); + EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(MOV_DP, 0, dst_r, SLJIT_UNUSED, SRC2_IMM | 1) & ~COND_MASK) | cc); + return (dst_r == TMP_REG2) ? emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw) : SLJIT_SUCCESS; + } + + ins = (op == SLJIT_AND ? AND_DP : (op == SLJIT_OR ? ORR_DP : EOR_DP)); + if ((op == SLJIT_OR || op == SLJIT_XOR) && dst <= TMP_REG3 && dst == src) { + EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(ins, 0, dst, dst, SRC2_IMM | 1) & ~COND_MASK) | cc); + /* The condition must always be set, even if the ORR/EOR is not executed above. */ + return (flags & SLJIT_SET_E) ? 
push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, SET_FLAGS, TMP_REG1, SLJIT_UNUSED, RM(dst))) : SLJIT_SUCCESS; + } + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + if (src & SLJIT_MEM) { + FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw)); + src = TMP_REG1; + srcw = 0; + } else if (src & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, srcw)); + src = TMP_REG1; + srcw = 0; + } + + EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(ins, 0, dst_r, src, SRC2_IMM | 1) & ~COND_MASK) | cc); + EMIT_INSTRUCTION((EMIT_DATA_PROCESS_INS(ins, 0, dst_r, src, SRC2_IMM | 0) & ~COND_MASK) | (cc ^ 0x10000000)); + if (dst_r == TMP_REG2) + FAIL_IF(emit_op_mem2(compiler, WORD_DATA, TMP_REG2, dst, dstw, 0, 0)); + + return (flags & SLJIT_SET_E) ? push_inst(compiler, EMIT_DATA_PROCESS_INS(MOV_DP, SET_FLAGS, TMP_REG1, SLJIT_UNUSED, RM(dst_r))) : SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_si reg; + + CHECK_ERROR_PTR(); + check_sljit_emit_const(compiler, dst, dstw, init_value); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + + reg = (dst <= TMP_REG3) ? dst : TMP_REG2; + +#if (defined SLJIT_CONFIG_ARM_V5 && SLJIT_CONFIG_ARM_V5) + PTR_FAIL_IF(push_inst_with_unique_literal(compiler, EMIT_DATA_TRANSFER(WORD_DATA | LOAD_DATA, 1, 0, reg, TMP_PC, 0), init_value)); + compiler->patches++; +#else + PTR_FAIL_IF(emit_imm(compiler, reg, init_value)); +#endif + set_const(const_, compiler); + + if (reg == TMP_REG2 && dst != SLJIT_UNUSED) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); + return const_; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ + inline_set_jump_addr(addr, new_addr, 1); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + inline_set_const(addr, new_constant, 1); +} Property changes on: sys/contrib/sljit/sljitNativeARM_v5.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativeMIPS_32.c =================================================================== --- sys/contrib/sljit/sljitNativeMIPS_32.c (revision 0) +++ sys/contrib/sljit/sljitNativeMIPS_32.c (working copy) @@ -0,0 +1,404 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* mips 32-bit arch dependent functions. */ + +static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si dst_ar, sljit_sw imm) +{ + if (!(imm & ~0xffff)) + return push_inst(compiler, ORI | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); + + if (imm < 0 && imm >= SIMM_MIN) + return push_inst(compiler, ADDIU | SA(0) | TA(dst_ar) | IMM(imm), dst_ar); + + FAIL_IF(push_inst(compiler, LUI | TA(dst_ar) | IMM(imm >> 16), dst_ar)); + return (imm & 0xffff) ? push_inst(compiler, ORI | SA(dst_ar) | TA(dst_ar) | IMM(imm), dst_ar) : SLJIT_SUCCESS; +} + +#define EMIT_LOGICAL(op_imm, op_norm) \ + if (flags & SRC2_IMM) { \ + if (op & SLJIT_SET_E) \ + FAIL_IF(push_inst(compiler, op_imm | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); \ + if (CHECK_FLAGS(SLJIT_SET_E)) \ + FAIL_IF(push_inst(compiler, op_imm | S(src1) | T(dst) | IMM(src2), DR(dst))); \ + } \ + else { \ + if (op & SLJIT_SET_E) \ + FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ + if (CHECK_FLAGS(SLJIT_SET_E)) \ + FAIL_IF(push_inst(compiler, op_norm | S(src1) | T(src2) | D(dst), DR(dst))); \ + } + +#define EMIT_SHIFT(op_imm, op_norm) \ + if (flags & SRC2_IMM) { \ + if (op & SLJIT_SET_E) \ + FAIL_IF(push_inst(compiler, op_imm | T(src1) | DA(EQUAL_FLAG) | SH_IMM(src2), EQUAL_FLAG)); \ + if (CHECK_FLAGS(SLJIT_SET_E)) \ + FAIL_IF(push_inst(compiler, op_imm | T(src1) | D(dst) | SH_IMM(src2), DR(dst))); \ + } \ + else { \ + if (op & SLJIT_SET_E) \ + FAIL_IF(push_inst(compiler, op_norm | S(src2) | T(src1) | DA(EQUAL_FLAG), EQUAL_FLAG)); \ + if (CHECK_FLAGS(SLJIT_SET_E)) \ + FAIL_IF(push_inst(compiler, op_norm | S(src2) | T(src1) | D(dst), DR(dst))); \ + } + +static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, + sljit_si dst, sljit_si src1, sljit_sw src2) +{ + sljit_si overflow_ra = 0; + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: + case SLJIT_MOV_P: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (dst != src2) + return push_inst(compiler, ADDU | S(src2) | TA(0) | D(dst), DR(dst)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_UB: + case SLJIT_MOV_SB: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_SB) { +#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + return push_inst(compiler, SEB | T(src2) | D(dst), DR(dst)); +#else + FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(24), DR(dst))); + return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(24), DR(dst)); +#endif + } + return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xff), DR(dst)); + } + else if (dst != src2) + 
SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; + + case SLJIT_MOV_UH: + case SLJIT_MOV_SH: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_SH) { +#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + return push_inst(compiler, SEH | T(src2) | D(dst), DR(dst)); +#else + FAIL_IF(push_inst(compiler, SLL | T(src2) | D(dst) | SH_IMM(16), DR(dst))); + return push_inst(compiler, SRA | T(dst) | D(dst) | SH_IMM(16), DR(dst)); +#endif + } + return push_inst(compiler, ANDI | S(src2) | T(dst) | IMM(0xffff), DR(dst)); + } + else if (dst != src2) + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; + + case SLJIT_NOT: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (op & SLJIT_SET_E) + FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (CHECK_FLAGS(SLJIT_SET_E)) + FAIL_IF(push_inst(compiler, NOR | S(src2) | T(src2) | D(dst), DR(dst))); + return SLJIT_SUCCESS; + + case SLJIT_CLZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); +#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + if (op & SLJIT_SET_E) + FAIL_IF(push_inst(compiler, CLZ | S(src2) | TA(EQUAL_FLAG) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (CHECK_FLAGS(SLJIT_SET_E)) + FAIL_IF(push_inst(compiler, CLZ | S(src2) | T(dst) | D(dst), DR(dst))); +#else + if (SLJIT_UNLIKELY(flags & UNUSED_DEST)) { + FAIL_IF(push_inst(compiler, SRL | T(src2) | DA(EQUAL_FLAG) | SH_IMM(31), EQUAL_FLAG)); + return push_inst(compiler, XORI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG); + } + /* Nearly all instructions are unmovable in the following sequence. */ + FAIL_IF(push_inst(compiler, ADDU_W | S(src2) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + /* Check zero. */ + FAIL_IF(push_inst(compiler, BEQ | S(TMP_REG1) | TA(0) | IMM(5), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, ORI | SA(0) | T(dst) | IMM(32), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, ADDIU_W | SA(0) | T(dst) | IMM(-1), DR(dst))); + /* Loop for searching the highest bit. */ + FAIL_IF(push_inst(compiler, ADDIU_W | S(dst) | T(dst) | IMM(1), DR(dst))); + FAIL_IF(push_inst(compiler, BGEZ | S(TMP_REG1) | IMM(-2), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, SLL | T(TMP_REG1) | D(TMP_REG1) | SH_IMM(1), UNMOVABLE_INS)); + if (op & SLJIT_SET_E) + return push_inst(compiler, ADDU_W | S(dst) | TA(0) | DA(EQUAL_FLAG), EQUAL_FLAG); +#endif + return SLJIT_SUCCESS; + + case SLJIT_ADD: + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_O) { + FAIL_IF(push_inst(compiler, SRL | T(src1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); + if (src2 < 0) + FAIL_IF(push_inst(compiler, XORI | SA(TMP_EREG1) | TA(TMP_EREG1) | IMM(1), TMP_EREG1)); + } + if (op & SLJIT_SET_E) + FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(src2), EQUAL_FLAG)); + if (op & SLJIT_SET_C) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); + else { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); + FAIL_IF(push_inst(compiler, OR | S(src1) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); + } + } + /* dst may be the same as src1 or src2. 
*/ + if (CHECK_FLAGS(SLJIT_SET_E)) + FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); + if (op & SLJIT_SET_O) { + FAIL_IF(push_inst(compiler, SRL | T(dst) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG)); + if (src2 < 0) + FAIL_IF(push_inst(compiler, XORI | SA(OVERFLOW_FLAG) | TA(OVERFLOW_FLAG) | IMM(1), OVERFLOW_FLAG)); + } + } + else { + if (op & SLJIT_SET_O) { + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); + FAIL_IF(push_inst(compiler, SRL | TA(TMP_EREG1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); + if (src1 != dst) + overflow_ra = DR(src1); + else if (src2 != dst) + overflow_ra = DR(src2); + else { + /* Rare occasion. */ + FAIL_IF(push_inst(compiler, ADDU | S(src1) | TA(0) | DA(TMP_EREG2), TMP_EREG2)); + overflow_ra = TMP_EREG2; + } + } + if (op & SLJIT_SET_E) + FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); + /* dst may be the same as src1 or src2. */ + if (CHECK_FLAGS(SLJIT_SET_E)) + FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); + if (op & SLJIT_SET_O) { + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(overflow_ra) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); + FAIL_IF(push_inst(compiler, SRL | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG)); + } + } + + /* a + b >= a | b (otherwise, the carry should be set to 1). */ + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(ULESS_FLAG) | DA(ULESS_FLAG), ULESS_FLAG)); + if (op & SLJIT_SET_O) + return push_inst(compiler, MOVN | SA(0) | TA(TMP_EREG1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); + return SLJIT_SUCCESS; + + case SLJIT_ADDC: + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_C) { + if (src2 >= 0) + FAIL_IF(push_inst(compiler, ORI | S(src1) | TA(TMP_EREG1) | IMM(src2), TMP_EREG1)); + else { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | TA(TMP_EREG1) | IMM(src2), TMP_EREG1)); + FAIL_IF(push_inst(compiler, OR | S(src1) | TA(TMP_EREG1) | DA(TMP_EREG1), TMP_EREG1)); + } + } + FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(src2), DR(dst))); + } else { + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, OR | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); + /* dst may be the same as src1 or src2. */ + FAIL_IF(push_inst(compiler, ADDU | S(src1) | T(src2) | D(dst), DR(dst))); + } + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, SLTU | S(dst) | TA(TMP_EREG1) | DA(TMP_EREG1), TMP_EREG1)); + + FAIL_IF(push_inst(compiler, ADDU | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); + if (!(op & SLJIT_SET_C)) + return SLJIT_SUCCESS; + + /* Set TMP_EREG2 to (dst == 0) && (ULESS_FLAG == 1). */ + FAIL_IF(push_inst(compiler, SLTIU | S(dst) | TA(TMP_EREG2) | IMM(1), TMP_EREG2)); + FAIL_IF(push_inst(compiler, AND | SA(TMP_EREG2) | TA(ULESS_FLAG) | DA(TMP_EREG2), TMP_EREG2)); + /* Set carry flag. 
*/ + return push_inst(compiler, OR | SA(TMP_EREG2) | TA(TMP_EREG1) | DA(ULESS_FLAG), ULESS_FLAG); + + case SLJIT_SUB: + if ((flags & SRC2_IMM) && ((op & (SLJIT_SET_S | SLJIT_SET_U)) || src2 == SIMM_MIN)) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_O) { + FAIL_IF(push_inst(compiler, SRL | T(src1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); + if (src2 < 0) + FAIL_IF(push_inst(compiler, XORI | SA(TMP_EREG1) | TA(TMP_EREG1) | IMM(1), TMP_EREG1)); + if (src1 != dst) + overflow_ra = DR(src1); + else { + /* Rare occasion. */ + FAIL_IF(push_inst(compiler, ADDU | S(src1) | TA(0) | DA(TMP_EREG2), TMP_EREG2)); + overflow_ra = TMP_EREG2; + } + } + if (op & SLJIT_SET_E) + FAIL_IF(push_inst(compiler, ADDIU | S(src1) | TA(EQUAL_FLAG) | IMM(-src2), EQUAL_FLAG)); + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(ULESS_FLAG) | IMM(src2), ULESS_FLAG)); + /* dst may be the same as src1 or src2. */ + if (CHECK_FLAGS(SLJIT_SET_E)) + FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); + } + else { + if (op & SLJIT_SET_O) { + FAIL_IF(push_inst(compiler, XOR | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); + FAIL_IF(push_inst(compiler, SRL | TA(TMP_EREG1) | DA(TMP_EREG1) | SH_IMM(31), TMP_EREG1)); + if (src1 != dst) + overflow_ra = DR(src1); + else { + /* Rare occasion. */ + FAIL_IF(push_inst(compiler, ADDU | S(src1) | TA(0) | DA(TMP_EREG2), TMP_EREG2)); + overflow_ra = TMP_EREG2; + } + } + if (op & SLJIT_SET_E) + FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | DA(EQUAL_FLAG), EQUAL_FLAG)); + if (op & (SLJIT_SET_U | SLJIT_SET_C)) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(ULESS_FLAG), ULESS_FLAG)); + if (op & SLJIT_SET_U) + FAIL_IF(push_inst(compiler, SLTU | S(src2) | T(src1) | DA(UGREATER_FLAG), UGREATER_FLAG)); + if (op & SLJIT_SET_S) { + FAIL_IF(push_inst(compiler, SLT | S(src1) | T(src2) | DA(LESS_FLAG), LESS_FLAG)); + FAIL_IF(push_inst(compiler, SLT | S(src2) | T(src1) | DA(GREATER_FLAG), GREATER_FLAG)); + } + /* dst may be the same as src1 or src2. */ + if (CHECK_FLAGS(SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_C)) + FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); + } + + if (op & SLJIT_SET_O) { + FAIL_IF(push_inst(compiler, XOR | S(dst) | TA(overflow_ra) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG)); + FAIL_IF(push_inst(compiler, SRL | TA(OVERFLOW_FLAG) | DA(OVERFLOW_FLAG) | SH_IMM(31), OVERFLOW_FLAG)); + return push_inst(compiler, MOVZ | SA(0) | TA(TMP_EREG1) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); + } + return SLJIT_SUCCESS; + + case SLJIT_SUBC: + if ((flags & SRC2_IMM) && src2 == SIMM_MIN) { + FAIL_IF(push_inst(compiler, ADDIU | SA(0) | T(TMP_REG2) | IMM(src2), DR(TMP_REG2))); + src2 = TMP_REG2; + flags &= ~SRC2_IMM; + } + + if (flags & SRC2_IMM) { + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, SLTIU | S(src1) | TA(TMP_EREG1) | IMM(-src2), TMP_EREG1)); + /* dst may be the same as src1 or src2. */ + FAIL_IF(push_inst(compiler, ADDIU | S(src1) | T(dst) | IMM(-src2), DR(dst))); + } + else { + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, SLTU | S(src1) | T(src2) | DA(TMP_EREG1), TMP_EREG1)); + /* dst may be the same as src1 or src2. 
*/ + FAIL_IF(push_inst(compiler, SUBU | S(src1) | T(src2) | D(dst), DR(dst))); + } + + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, MOVZ | SA(ULESS_FLAG) | T(dst) | DA(TMP_EREG1), TMP_EREG1)); + + FAIL_IF(push_inst(compiler, SUBU | S(dst) | TA(ULESS_FLAG) | D(dst), DR(dst))); + + if (op & SLJIT_SET_C) + FAIL_IF(push_inst(compiler, ADDU | SA(TMP_EREG1) | TA(0) | DA(ULESS_FLAG), ULESS_FLAG)); + + return SLJIT_SUCCESS; + + case SLJIT_MUL: + SLJIT_ASSERT(!(flags & SRC2_IMM)); + if (!(op & SLJIT_SET_O)) { +#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + return push_inst(compiler, MUL | S(src1) | T(src2) | D(dst), DR(dst)); +#else + FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); + return push_inst(compiler, MFLO | D(dst), DR(dst)); +#endif + } + FAIL_IF(push_inst(compiler, MULT | S(src1) | T(src2), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, MFHI | DA(TMP_EREG1), TMP_EREG1)); + FAIL_IF(push_inst(compiler, MFLO | D(dst), DR(dst))); + FAIL_IF(push_inst(compiler, SRA | T(dst) | DA(TMP_EREG2) | SH_IMM(31), TMP_EREG2)); + return push_inst(compiler, SUBU | SA(TMP_EREG1) | TA(TMP_EREG2) | DA(OVERFLOW_FLAG), OVERFLOW_FLAG); + + case SLJIT_AND: + EMIT_LOGICAL(ANDI, AND); + return SLJIT_SUCCESS; + + case SLJIT_OR: + EMIT_LOGICAL(ORI, OR); + return SLJIT_SUCCESS; + + case SLJIT_XOR: + EMIT_LOGICAL(XORI, XOR); + return SLJIT_SUCCESS; + + case SLJIT_SHL: + EMIT_SHIFT(SLL, SLLV); + return SLJIT_SUCCESS; + + case SLJIT_LSHR: + EMIT_SHIFT(SRL, SRLV); + return SLJIT_SUCCESS; + + case SLJIT_ASHR: + EMIT_SHIFT(SRA, SRAV); + return SLJIT_SUCCESS; + } + + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw init_value) +{ + FAIL_IF(push_inst(compiler, LUI | T(dst) | IMM(init_value >> 16), DR(dst))); + return push_inst(compiler, ORI | S(dst) | T(dst) | IMM(init_value), DR(dst)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 16) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | (new_addr & 0xffff); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 16) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | (new_constant & 0xffff); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} Property changes on: sys/contrib/sljit/sljitNativeMIPS_32.c ___________________________________________________________________ Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: sys/contrib/sljit/sljitNativeMIPS_common.c =================================================================== --- sys/contrib/sljit/sljitNativeMIPS_common.c (revision 0) +++ sys/contrib/sljit/sljitNativeMIPS_common.c (working copy) @@ -0,0 +1,1881 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. 
+ * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Latest MIPS architecture. */ +/* Automatically detect SLJIT_MIPS_32_64 */ + +SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +{ +#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + return "MIPS" SLJIT_CPUINFO; +#else + return "MIPS III" SLJIT_CPUINFO; +#endif +} + +/* Length of an instruction word + Both for mips-32 and mips-64 */ +typedef sljit_ui sljit_ins; + +#define TMP_REG1 (SLJIT_NO_REGISTERS + 1) +#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) +#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) + +/* For position independent code, t9 must contain the function address. */ +#define PIC_ADDR_REG TMP_REG2 + +/* TMP_EREG1 is used mainly for literal encoding on 64 bit. */ +#define TMP_EREG1 15 +#define TMP_EREG2 24 +/* Floating point status register. */ +#define FCSR_REG 31 +/* Return address register. */ +#define RETURN_ADDR_REG 31 + +/* Flags are kept in volatile registers. */ +#define EQUAL_FLAG 7 +/* And carry flag as well. */ +#define ULESS_FLAG 10 +#define UGREATER_FLAG 11 +#define LESS_FLAG 12 +#define GREATER_FLAG 13 +#define OVERFLOW_FLAG 14 + +#define TMP_FREG1 (0) +#define TMP_FREG2 ((SLJIT_FLOAT_REG6 + 1) << 1) + +static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { + 0, 2, 5, 6, 3, 8, 16, 17, 18, 19, 20, 29, 4, 25, 9 +}; + +/* --------------------------------------------------------------------- */ +/* Instruction forms */ +/* --------------------------------------------------------------------- */ + +#define S(s) (reg_map[s] << 21) +#define T(t) (reg_map[t] << 16) +#define D(d) (reg_map[d] << 11) +/* Absolute registers. 
*/ +#define SA(s) ((s) << 21) +#define TA(t) ((t) << 16) +#define DA(d) ((d) << 11) +#define FT(t) ((t) << 16) +#define FS(s) ((s) << 11) +#define FD(d) ((d) << 6) +#define IMM(imm) ((imm) & 0xffff) +#define SH_IMM(imm) ((imm & 0x1f) << 6) + +#define DR(dr) (reg_map[dr]) +#define HI(opcode) ((opcode) << 26) +#define LO(opcode) (opcode) +/* S = (16 << 21) D = (17 << 21) */ +#define FMT_SD (16 << 21) + +#define ABS_fmt (HI(17) | FMT_SD | LO(5)) +#define ADD_fmt (HI(17) | FMT_SD | LO(0)) +#define ADDU (HI(0) | LO(33)) +#define ADDIU (HI(9)) +#define AND (HI(0) | LO(36)) +#define ANDI (HI(12)) +#define B (HI(4)) +#define BAL (HI(1) | (17 << 16)) +#define BC1F (HI(17) | (8 << 21)) +#define BC1T (HI(17) | (8 << 21) | (1 << 16)) +#define BEQ (HI(4)) +#define BGEZ (HI(1) | (1 << 16)) +#define BGTZ (HI(7)) +#define BLEZ (HI(6)) +#define BLTZ (HI(1) | (0 << 16)) +#define BNE (HI(5)) +#define BREAK (HI(0) | LO(13)) +#define CFC1 (HI(17) | (2 << 21)) +#define C_UN_fmt (HI(17) | FMT_SD | LO(49)) +#define C_UEQ_fmt (HI(17) | FMT_SD | LO(51)) +#define C_ULE_fmt (HI(17) | FMT_SD | LO(55)) +#define C_ULT_fmt (HI(17) | FMT_SD | LO(53)) +#define DIV (HI(0) | LO(26)) +#define DIVU (HI(0) | LO(27)) +#define DIV_fmt (HI(17) | FMT_SD | LO(3)) +#define J (HI(2)) +#define JAL (HI(3)) +#define JALR (HI(0) | LO(9)) +#define JR (HI(0) | LO(8)) +#define LD (HI(55)) +#define LUI (HI(15)) +#define LW (HI(35)) +#define MFHI (HI(0) | LO(16)) +#define MFLO (HI(0) | LO(18)) +#define MOV_fmt (HI(17) | FMT_SD | LO(6)) +#define MOVN (HI(0) | LO(11)) +#define MOVZ (HI(0) | LO(10)) +#define MUL_fmt (HI(17) | FMT_SD | LO(2)) +#define MULT (HI(0) | LO(24)) +#define MULTU (HI(0) | LO(25)) +#define NEG_fmt (HI(17) | FMT_SD | LO(7)) +#define NOP (HI(0) | LO(0)) +#define NOR (HI(0) | LO(39)) +#define OR (HI(0) | LO(37)) +#define ORI (HI(13)) +#define SD (HI(63)) +#define SLT (HI(0) | LO(42)) +#define SLTI (HI(10)) +#define SLTIU (HI(11)) +#define SLTU (HI(0) | LO(43)) +#define SLL (HI(0) | LO(0)) +#define SLLV (HI(0) | LO(4)) +#define SRL (HI(0) | LO(2)) +#define SRLV (HI(0) | LO(6)) +#define SRA (HI(0) | LO(3)) +#define SRAV (HI(0) | LO(7)) +#define SUB_fmt (HI(17) | FMT_SD | LO(1)) +#define SUBU (HI(0) | LO(35)) +#define SW (HI(43)) +#define XOR (HI(0) | LO(38)) +#define XORI (HI(14)) + +#if (defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) +#define CLZ (HI(28) | LO(32)) +#define MUL (HI(28) | LO(2)) +#define SEB (HI(31) | (16 << 6) | LO(32)) +#define SEH (HI(31) | (24 << 6) | LO(32)) +#endif + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#define ADDU_W ADDU +#define ADDIU_W ADDIU +#define SLL_W SLL +#define SUBU_W SUBU +#else +#define ADDU_W DADDU +#define ADDIU_W DADDIU +#define SLL_W DSLL +#define SUBU_W DSUBU +#endif + +#define SIMM_MAX (0x7fff) +#define SIMM_MIN (-0x8000) +#define UIMM_MAX (0xffff) + +/* dest_reg is the absolute name of the register + Useful for reordering instructions in the delay slot. */ +static sljit_si push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_si delay_slot) +{ + SLJIT_ASSERT(delay_slot == MOVABLE_INS || delay_slot >= UNMOVABLE_INS + || delay_slot == ((ins >> 11) & 0x1f) || delay_slot == ((ins >> 16) & 0x1f)); + sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); + FAIL_IF(!ptr); + *ptr = ins; + compiler->size++; + compiler->delay_slot = delay_slot; + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_ins invert_branch(sljit_si flags) +{ + return (flags & IS_BIT26_COND) ? 
(1 << 26) : (1 << 16); +} + +static SLJIT_INLINE sljit_ins* optimize_jump(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code) +{ + sljit_sw diff; + sljit_uw target_addr; + sljit_ins *inst; + sljit_ins saved_inst; + + if (jump->flags & SLJIT_REWRITABLE_JUMP) + return code_ptr; + + if (jump->flags & JUMP_ADDR) + target_addr = jump->u.target; + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + target_addr = (sljit_uw)(code + jump->u.label->size); + } + inst = (sljit_ins*)jump->addr; + if (jump->flags & IS_COND) + inst--; + + /* B instructions. */ + if (jump->flags & IS_MOVABLE) { + diff = ((sljit_sw)target_addr - (sljit_sw)(inst)) >> 2; + if (diff <= SIMM_MAX && diff >= SIMM_MIN) { + jump->flags |= PATCH_B; + + if (!(jump->flags & IS_COND)) { + inst[0] = inst[-1]; + inst[-1] = (jump->flags & IS_JAL) ? BAL : B; + jump->addr -= sizeof(sljit_ins); + return inst; + } + saved_inst = inst[0]; + inst[0] = inst[-1]; + inst[-1] = saved_inst ^ invert_branch(jump->flags); + jump->addr -= 2 * sizeof(sljit_ins); + return inst; + } + } + + diff = ((sljit_sw)target_addr - (sljit_sw)(inst + 1)) >> 2; + if (diff <= SIMM_MAX && diff >= SIMM_MIN) { + jump->flags |= PATCH_B; + + if (!(jump->flags & IS_COND)) { + inst[0] = (jump->flags & IS_JAL) ? BAL : B; + inst[1] = NOP; + return inst + 1; + } + inst[0] = inst[0] ^ invert_branch(jump->flags); + inst[1] = NOP; + jump->addr -= sizeof(sljit_ins); + return inst + 1; + } + + if (jump->flags & IS_COND) { + if ((target_addr & ~0xfffffff) == ((jump->addr + 3 * sizeof(sljit_ins)) & ~0xfffffff)) { + jump->flags |= PATCH_J; + inst[0] = (inst[0] & 0xffff0000) | 3; + inst[1] = NOP; + inst[2] = J; + inst[3] = NOP; + jump->addr += sizeof(sljit_ins); + return inst + 3; + } + return code_ptr; + } + + /* J instructions. */ + if (jump->flags & IS_MOVABLE) { + if ((target_addr & ~0xfffffff) == (jump->addr & ~0xfffffff)) { + jump->flags |= PATCH_J; + inst[0] = inst[-1]; + inst[-1] = (jump->flags & IS_JAL) ? JAL : J; + jump->addr -= sizeof(sljit_ins); + return inst; + } + } + + if ((target_addr & ~0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~0xfffffff)) { + jump->flags |= PATCH_J; + inst[0] = (jump->flags & IS_JAL) ? JAL : J; + inst[1] = NOP; + return inst + 1; + } + + return code_ptr; +} + +#ifdef __GNUC__ +static __attribute__ ((noinline)) void sljit_cache_flush(void* code, void* code_ptr) +{ + SLJIT_CACHE_FLUSH(code, code_ptr); +} +#endif + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_ins *code; + sljit_ins *code_ptr; + sljit_ins *buf_ptr; + sljit_ins *buf_end; + sljit_uw word_count; + sljit_uw addr; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + + CHECK_ERROR_PTR(); + check_sljit_generate_code(compiler); + reverse_buf(compiler); + + code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + word_count = 0; + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + do { + buf_ptr = (sljit_ins*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + *code_ptr = *buf_ptr++; + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + /* These structures are ordered by their address. */ + if (label && label->size == word_count) { + /* Just recording the address. 
*/ + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == word_count) { +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + jump->addr = (sljit_uw)(code_ptr - 3); +#else +#error "Implementation required" +#endif + code_ptr = optimize_jump(jump, code_ptr, code); + jump = jump->next; + } + if (const_ && const_->addr == word_count) { + /* Just recording the address. */ + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + code_ptr ++; + word_count ++; + } while (buf_ptr < buf_end); + + buf = buf->next; + } while (buf); + + if (label && label->size == word_count) { + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size); + + jump = compiler->jumps; + while (jump) { + do { + addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; + buf_ptr = (sljit_ins*)jump->addr; + + if (jump->flags & PATCH_B) { + addr = (sljit_sw)(addr - (jump->addr + sizeof(sljit_ins))) >> 2; + SLJIT_ASSERT((sljit_sw)addr <= SIMM_MAX && (sljit_sw)addr >= SIMM_MIN); + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | (addr & 0xffff); + break; + } + if (jump->flags & PATCH_J) { + SLJIT_ASSERT((addr & ~0xfffffff) == ((jump->addr + sizeof(sljit_ins)) & ~0xfffffff)); + buf_ptr[0] |= (addr >> 2) & 0x03ffffff; + break; + } + + /* Set the fields of immediate loads. */ +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff); +#else +#error "Implementation required" +#endif + } while (0); + jump = jump->next; + } + + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_size = compiler->size * sizeof(sljit_ins); +#ifndef __GNUC__ + SLJIT_CACHE_FLUSH(code, code_ptr); +#else + /* GCC workaround for invalid code generation with -O2. */ + sljit_cache_flush(code, code_ptr); +#endif + return code; +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +/* Creates an index in data_transfer_insts array. */ +#define LOAD_DATA 0x01 +#define WORD_DATA 0x00 +#define BYTE_DATA 0x02 +#define HALF_DATA 0x04 +#define INT_DATA 0x06 +#define SIGNED_DATA 0x08 +/* Separates integer and floating point registers */ +#define GPR_REG 0x0f +#define DOUBLE_DATA 0x10 + +#define MEM_MASK 0x1f + +#define WRITE_BACK 0x00020 +#define ARG_TEST 0x00040 +#define ALT_KEEP_CACHE 0x00080 +#define CUMULATIVE_OP 0x00100 +#define LOGICAL_OP 0x00200 +#define IMM_OP 0x00400 +#define SRC2_IMM 0x00800 + +#define UNUSED_DEST 0x01000 +#define REG_DEST 0x02000 +#define REG1_SOURCE 0x04000 +#define REG2_SOURCE 0x08000 +#define SLOW_SRC1 0x10000 +#define SLOW_SRC2 0x20000 +#define SLOW_DEST 0x40000 + +/* Only these flags are set. UNUSED_DEST is not set when no flags should be set. 
*/ +#define CHECK_FLAGS(list) \ + (!(flags & UNUSED_DEST) || (op & GET_FLAGS(~(list)))) + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#define STACK_STORE SW +#define STACK_LOAD LW +#else +#define STACK_STORE SD +#define STACK_LOAD LD +#endif + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#include "sljitNativeMIPS_32.c" +#else +#include "sljitNativeMIPS_64.c" +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_ins base; + + CHECK_ERROR(); + check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + local_size += (saveds + 1 + 4) * sizeof(sljit_sw); + local_size = (local_size + 15) & ~0xf; + compiler->local_size = local_size; + + if (local_size <= SIMM_MAX) { + /* Frequent case. */ + FAIL_IF(push_inst(compiler, ADDIU_W | S(SLJIT_LOCALS_REG) | T(SLJIT_LOCALS_REG) | IMM(-local_size), DR(SLJIT_LOCALS_REG))); + base = S(SLJIT_LOCALS_REG); + } + else { + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), local_size)); + FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_LOCALS_REG) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, SUBU_W | S(SLJIT_LOCALS_REG) | T(TMP_REG1) | D(SLJIT_LOCALS_REG), DR(SLJIT_LOCALS_REG))); + base = S(TMP_REG2); + local_size = 0; + } + + FAIL_IF(push_inst(compiler, STACK_STORE | base | TA(RETURN_ADDR_REG) | IMM(local_size - 1 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); + if (saveds >= 1) + FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_REG1) | IMM(local_size - 2 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); + if (saveds >= 2) + FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_REG2) | IMM(local_size - 3 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); + if (saveds >= 3) + FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_REG3) | IMM(local_size - 4 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); + if (saveds >= 4) + FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_EREG1) | IMM(local_size - 5 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); + if (saveds >= 5) + FAIL_IF(push_inst(compiler, STACK_STORE | base | T(SLJIT_SAVED_EREG2) | IMM(local_size - 6 * (sljit_si)sizeof(sljit_sw)), MOVABLE_INS)); + + if (args >= 1) + FAIL_IF(push_inst(compiler, ADDU_W | SA(4) | TA(0) | D(SLJIT_SAVED_REG1), DR(SLJIT_SAVED_REG1))); + if (args >= 2) + FAIL_IF(push_inst(compiler, ADDU_W | SA(5) | TA(0) | D(SLJIT_SAVED_REG2), DR(SLJIT_SAVED_REG2))); + if (args >= 3) + FAIL_IF(push_inst(compiler, ADDU_W | SA(6) | TA(0) | D(SLJIT_SAVED_REG3), DR(SLJIT_SAVED_REG3))); + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + CHECK_ERROR_VOID(); + check_sljit_set_context(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + local_size += (saveds + 1 + 4) * sizeof(sljit_sw); + compiler->local_size = (local_size + 15) & ~0xf; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + sljit_si local_size; + sljit_ins base; + + CHECK_ERROR(); + 
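/* The epilogue below reloads the return address and the saved registers from the frame, then releases the stack in the delay slot of the JR that returns to the caller. */ +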
check_sljit_emit_return(compiler, op, src, srcw); + + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + + local_size = compiler->local_size; + if (local_size <= SIMM_MAX) + base = S(SLJIT_LOCALS_REG); + else { + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), local_size)); + FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_LOCALS_REG) | T(TMP_REG1) | D(TMP_REG1), DR(TMP_REG1))); + base = S(TMP_REG1); + local_size = 0; + } + + FAIL_IF(push_inst(compiler, STACK_LOAD | base | TA(RETURN_ADDR_REG) | IMM(local_size - 1 * (sljit_si)sizeof(sljit_sw)), RETURN_ADDR_REG)); + if (compiler->saveds >= 5) + FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_EREG2) | IMM(local_size - 6 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_EREG2))); + if (compiler->saveds >= 4) + FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_EREG1) | IMM(local_size - 5 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_EREG1))); + if (compiler->saveds >= 3) + FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_REG3) | IMM(local_size - 4 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_REG3))); + if (compiler->saveds >= 2) + FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_REG2) | IMM(local_size - 3 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_REG2))); + if (compiler->saveds >= 1) + FAIL_IF(push_inst(compiler, STACK_LOAD | base | T(SLJIT_SAVED_REG1) | IMM(local_size - 2 * (sljit_si)sizeof(sljit_sw)), DR(SLJIT_SAVED_REG1))); + + FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS)); + if (compiler->local_size <= SIMM_MAX) + return push_inst(compiler, ADDIU_W | S(SLJIT_LOCALS_REG) | T(SLJIT_LOCALS_REG) | IMM(compiler->local_size), UNMOVABLE_INS); + else + return push_inst(compiler, ADDU_W | S(TMP_REG1) | TA(0) | D(SLJIT_LOCALS_REG), UNMOVABLE_INS); +} + +#undef STACK_STORE +#undef STACK_LOAD + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#define ARCH_32_64(a, b) a +#else +#define ARCH_32_64(a, b) b +#endif + +static SLJIT_CONST sljit_ins data_transfer_insts[16 + 4] = { +/* u w s */ ARCH_32_64(HI(43) /* sw */, HI(63) /* sd */), +/* u w l */ ARCH_32_64(HI(35) /* lw */, HI(55) /* ld */), +/* u b s */ HI(40) /* sb */, +/* u b l */ HI(36) /* lbu */, +/* u h s */ HI(41) /* sh */, +/* u h l */ HI(37) /* lhu */, +/* u i s */ HI(43) /* sw */, +/* u i l */ ARCH_32_64(HI(35) /* lw */, HI(39) /* lwu */), + +/* s w s */ ARCH_32_64(HI(43) /* sw */, HI(63) /* sd */), +/* s w l */ ARCH_32_64(HI(35) /* lw */, HI(55) /* ld */), +/* s b s */ HI(40) /* sb */, +/* s b l */ HI(32) /* lb */, +/* s h s */ HI(41) /* sh */, +/* s h l */ HI(33) /* lh */, +/* s i s */ HI(43) /* sw */, +/* s i l */ HI(35) /* lw */, + +/* d s */ HI(61) /* sdc1 */, +/* d l */ HI(53) /* ldc1 */, +/* s s */ HI(57) /* swc1 */, +/* s l */ HI(49) /* lwc1 */, +}; + +#undef ARCH_32_64 + +/* reg_ar is an absoulute register! */ + +/* Can perform an operation using at most 1 instruction. */ +static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg_ar, sljit_si arg, sljit_sw argw) +{ + SLJIT_ASSERT(arg & SLJIT_MEM); + + if ((!(flags & WRITE_BACK) || !(arg & 0xf)) && !(arg & 0xf0) && argw <= SIMM_MAX && argw >= SIMM_MIN) { + /* Works for both absoulte and relative addresses. 
*/ + if (SLJIT_UNLIKELY(flags & ARG_TEST)) + return 1; + FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(arg & 0xf) + | TA(reg_ar) | IMM(argw), ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? reg_ar : MOVABLE_INS)); + return -1; + } + return 0; +} + +/* See getput_arg below. + Note: can_cache is called only for binary operators. Those + operators always use word arguments without write back. */ +static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); + + /* Simple operation except for updates. */ + if (arg & 0xf0) { + argw &= 0x3; + next_argw &= 0x3; + if (argw && argw == next_argw && (arg == next_arg || (arg & 0xf0) == (next_arg & 0xf0))) + return 1; + return 0; + } + + if (arg == next_arg) { + if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN)) + return 1; + return 0; + } + + return 0; +} + +/* Emit the necessary instructions. See can_cache above. */ +static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg_ar, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + sljit_si tmp_ar, base, delay_slot; + + SLJIT_ASSERT(arg & SLJIT_MEM); + if (!(next_arg & SLJIT_MEM)) { + next_arg = 0; + next_argw = 0; + } + + if ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) { + tmp_ar = reg_ar; + delay_slot = reg_ar; + } else { + tmp_ar = DR(TMP_REG1); + delay_slot = MOVABLE_INS; + } + base = arg & 0xf; + + if (SLJIT_UNLIKELY(arg & 0xf0)) { + argw &= 0x3; + if ((flags & WRITE_BACK) && reg_ar == DR(base)) { + SLJIT_ASSERT(!(flags & LOAD_DATA) && DR(TMP_REG1) != reg_ar); + FAIL_IF(push_inst(compiler, ADDU_W | SA(reg_ar) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + reg_ar = DR(TMP_REG1); + } + + /* Using the cache. */ + if (argw == compiler->cache_argw) { + if (!(flags & WRITE_BACK)) { + if (arg == compiler->cache_arg) + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + if ((SLJIT_MEM | (arg & 0xf0)) == compiler->cache_arg) { + if (arg == next_arg && argw == (next_argw & 0x3)) { + compiler->cache_arg = arg; + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(TMP_REG3), DR(TMP_REG3))); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + } + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | DA(tmp_ar), tmp_ar)); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); + } + } + else { + if ((SLJIT_MEM | (arg & 0xf0)) == compiler->cache_arg) { + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base))); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot); + } + } + } + + if (SLJIT_UNLIKELY(argw)) { + compiler->cache_arg = SLJIT_MEM | (arg & 0xf0); + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, SLL_W | T((arg >> 4) & 0xf) | D(TMP_REG3) | SH_IMM(argw), DR(TMP_REG3))); + } + + if (!(flags & WRITE_BACK)) { + if (arg == next_arg && argw == (next_argw & 0x3)) { + compiler->cache_arg = arg; + compiler->cache_argw = argw; + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? ((arg >> 4) & 0xf) : TMP_REG3) | D(TMP_REG3), DR(TMP_REG3))); + tmp_ar = DR(TMP_REG3); + } + else + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? 
((arg >> 4) & 0xf) : TMP_REG3) | DA(tmp_ar), tmp_ar)); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); + } + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(!argw ? ((arg >> 4) & 0xf) : TMP_REG3) | D(base), DR(base))); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot); + } + + if (SLJIT_UNLIKELY(flags & WRITE_BACK) && base) { + /* Update only applies if a base register exists. */ + if (reg_ar == DR(base)) { + SLJIT_ASSERT(!(flags & LOAD_DATA) && DR(TMP_REG1) != reg_ar); + if (argw <= SIMM_MAX && argw >= SIMM_MIN) { + FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar) | IMM(argw), MOVABLE_INS)); + if (argw) + return push_inst(compiler, ADDIU_W | S(base) | T(base) | IMM(argw), DR(base)); + return SLJIT_SUCCESS; + } + FAIL_IF(push_inst(compiler, ADDU_W | SA(reg_ar) | TA(0) | D(TMP_REG1), DR(TMP_REG1))); + reg_ar = DR(TMP_REG1); + } + + if (argw <= SIMM_MAX && argw >= SIMM_MIN) { + if (argw) + FAIL_IF(push_inst(compiler, ADDIU_W | S(base) | T(base) | IMM(argw), DR(base))); + } + else { + if (compiler->cache_arg == SLJIT_MEM && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) { + if (argw != compiler->cache_argw) { + FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); + compiler->cache_argw = argw; + } + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base))); + } + else { + compiler->cache_arg = SLJIT_MEM; + compiler->cache_argw = argw; + FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw)); + FAIL_IF(push_inst(compiler, ADDU_W | S(base) | T(TMP_REG3) | D(base), DR(base))); + } + } + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(base) | TA(reg_ar), delay_slot); + } + + if (compiler->cache_arg == arg && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) { + if (argw != compiler->cache_argw) { + FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); + compiler->cache_argw = argw; + } + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + } + + if (compiler->cache_arg == SLJIT_MEM && argw - compiler->cache_argw <= SIMM_MAX && argw - compiler->cache_argw >= SIMM_MIN) { + if (argw != compiler->cache_argw) + FAIL_IF(push_inst(compiler, ADDIU_W | S(TMP_REG3) | T(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); + } + else { + compiler->cache_arg = SLJIT_MEM; + FAIL_IF(load_immediate(compiler, DR(TMP_REG3), argw)); + } + compiler->cache_argw = argw; + + if (!base) + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + + if (arg == next_arg && next_argw - argw <= SIMM_MAX && next_argw - argw >= SIMM_MIN) { + compiler->cache_arg = arg; + FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | T(base) | D(TMP_REG3), DR(TMP_REG3))); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | S(TMP_REG3) | TA(reg_ar), delay_slot); + } + + FAIL_IF(push_inst(compiler, ADDU_W | S(TMP_REG3) | T(base) | DA(tmp_ar), tmp_ar)); + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | SA(tmp_ar) | TA(reg_ar), delay_slot); +} + +static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg_ar, sljit_si arg, sljit_sw argw) +{ + if 
(getput_arg_fast(compiler, flags, reg_ar, arg, argw)) + return compiler->error; + compiler->cache_arg = 0; + compiler->cache_argw = 0; + return getput_arg(compiler, flags, reg_ar, arg, argw, 0, 0); +} + +static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) +{ + if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) + return compiler->error; + return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); +} + +static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + /* arg1 goes to TMP_REG1 or src reg + arg2 goes to TMP_REG2, imm or src reg + TMP_REG3 can be used for caching + result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */ + sljit_si dst_r = TMP_REG2; + sljit_si src1_r; + sljit_sw src2_r = 0; + sljit_si sugg_src2_r = TMP_REG2; + + if (!(flags & ALT_KEEP_CACHE)) { + compiler->cache_arg = 0; + compiler->cache_argw = 0; + } + + if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM)) + return SLJIT_SUCCESS; + if (GET_FLAGS(op)) + flags |= UNUSED_DEST; + } + else if (dst <= TMP_REG3) { + dst_r = dst; + flags |= REG_DEST; + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + sugg_src2_r = dst_r; + } + else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, DR(TMP_REG1), dst, dstw)) + flags |= SLOW_DEST; + + if (flags & IMM_OP) { + if ((src2 & SLJIT_IMM) && src2w) { + if ((!(flags & LOGICAL_OP) && (src2w <= SIMM_MAX && src2w >= SIMM_MIN)) + || ((flags & LOGICAL_OP) && !(src2w & ~UIMM_MAX))) { + flags |= SRC2_IMM; + src2_r = src2w; + } + } + if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) { + if ((!(flags & LOGICAL_OP) && (src1w <= SIMM_MAX && src1w >= SIMM_MIN)) + || ((flags & LOGICAL_OP) && !(src1w & ~UIMM_MAX))) { + flags |= SRC2_IMM; + src2_r = src1w; + + /* And swap arguments. */ + src1 = src2; + src1w = src2w; + src2 = SLJIT_IMM; + /* src2w = src2_r unneeded. */ + } + } + } + + /* Source 1. */ + if (src1 <= TMP_REG3) { + src1_r = src1; + flags |= REG1_SOURCE; + } + else if (src1 & SLJIT_IMM) { + if (src1w) { + FAIL_IF(load_immediate(compiler, DR(TMP_REG1), src1w)); + src1_r = TMP_REG1; + } + else + src1_r = 0; + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, DR(TMP_REG1), src1, src1w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC1; + src1_r = TMP_REG1; + } + + /* Source 2. 
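Loaded into sugg_src2_r, which for plain move operations is the destination register itself, so the final move can be elided. 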
*/ + if (src2 <= TMP_REG3) { + src2_r = src2; + flags |= REG2_SOURCE; + if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + dst_r = src2_r; + } + else if (src2 & SLJIT_IMM) { + if (!(flags & SRC2_IMM)) { + if (src2w) { + FAIL_IF(load_immediate(compiler, DR(sugg_src2_r), src2w)); + src2_r = sugg_src2_r; + } + else { + src2_r = 0; + if ((op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) && (dst & SLJIT_MEM)) + dst_r = 0; + } + } + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, DR(sugg_src2_r), src2, src2w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC2; + src2_r = sugg_src2_r; + } + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + SLJIT_ASSERT(src2_r == TMP_REG2); + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, DR(TMP_REG2), src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, DR(TMP_REG1), src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, DR(TMP_REG1), src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, DR(TMP_REG2), src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, DR(TMP_REG1), src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, DR(sugg_src2_r), src2, src2w, dst, dstw)); + + FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); + + if (dst & SLJIT_MEM) { + if (!(flags & SLOW_DEST)) { + getput_arg_fast(compiler, flags, DR(dst_r), dst, dstw); + return compiler->error; + } + return getput_arg(compiler, flags, DR(dst_r), dst, dstw, 0, 0); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + CHECK_ERROR(); + check_sljit_emit_op0(compiler, op); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_BREAKPOINT: + return push_inst(compiler, BREAK, UNMOVABLE_INS); + case SLJIT_NOP: + return push_inst(compiler, NOP, UNMOVABLE_INS); + case SLJIT_UMUL: + case SLJIT_SMUL: + FAIL_IF(push_inst(compiler, (op == SLJIT_UMUL ? MULTU : MULT) | S(SLJIT_SCRATCH_REG1) | T(SLJIT_SCRATCH_REG2), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_SCRATCH_REG1), DR(SLJIT_SCRATCH_REG1))); + return push_inst(compiler, MFHI | D(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG2)); + case SLJIT_UDIV: + case SLJIT_SDIV: +#if !(defined SLJIT_MIPS_32_64 && SLJIT_MIPS_32_64) + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); +#endif + FAIL_IF(push_inst(compiler, (op == SLJIT_UDIV ? 
DIVU : DIV) | S(SLJIT_SCRATCH_REG1) | T(SLJIT_SCRATCH_REG2), MOVABLE_INS)); + FAIL_IF(push_inst(compiler, MFLO | D(SLJIT_SCRATCH_REG1), DR(SLJIT_SCRATCH_REG1))); + return push_inst(compiler, MFHI | D(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG2)); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# define flags 0 +#endif + + CHECK_ERROR(); + check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + switch (GET_OPCODE(op)) { + case SLJIT_MOV: + case SLJIT_MOV_P: + return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_UI: + return emit_op(compiler, SLJIT_MOV_UI, flags | INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_SI: + return emit_op(compiler, SLJIT_MOV_SI, flags | INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_UB: + return emit_op(compiler, SLJIT_MOV_UB, flags | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); + + case SLJIT_MOV_SB: + return emit_op(compiler, SLJIT_MOV_SB, flags | BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); + + case SLJIT_MOV_UH: + return emit_op(compiler, SLJIT_MOV_UH, flags | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); + + case SLJIT_MOV_SH: + return emit_op(compiler, SLJIT_MOV_SH, flags | HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw); + + case SLJIT_MOVU: + case SLJIT_MOVU_P: + return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOVU_UI: + return emit_op(compiler, SLJIT_MOV_UI, flags | INT_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOVU_SI: + return emit_op(compiler, SLJIT_MOV_SI, flags | INT_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOVU_UB: + return emit_op(compiler, SLJIT_MOV_UB, flags | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); + + case SLJIT_MOVU_SB: + return emit_op(compiler, SLJIT_MOV_SB, flags | BYTE_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); + + case SLJIT_MOVU_UH: + return emit_op(compiler, SLJIT_MOV_UH, flags | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); + + case SLJIT_MOVU_SH: + return emit_op(compiler, SLJIT_MOV_SH, flags | HALF_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? 
(sljit_sh)srcw : srcw); + + case SLJIT_NOT: + return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_NEG: + return emit_op(compiler, SLJIT_SUB | GET_ALL_FLAGS(op), flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw); + + case SLJIT_CLZ: + return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); + } + + return SLJIT_SUCCESS; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# undef flags +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# define flags 0 +#endif + + CHECK_ERROR(); + check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + switch (GET_OPCODE(op)) { + case SLJIT_ADD: + case SLJIT_ADDC: + return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SUB: + case SLJIT_SUBC: + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_MUL: + return emit_op(compiler, op, flags | CUMULATIVE_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_AND: + case SLJIT_OR: + case SLJIT_XOR: + return emit_op(compiler, op, flags | CUMULATIVE_OP | LOGICAL_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SHL: + case SLJIT_LSHR: + case SLJIT_ASHR: +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) + if (src2 & SLJIT_IMM) + src2w &= 0x1f; +#else + SLJIT_ASSERT_STOP(); +#endif + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + } + + return SLJIT_SUCCESS; +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +# undef flags +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +{ + check_sljit_get_register_index(reg); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + CHECK_ERROR(); + check_sljit_emit_op_custom(compiler, instruction, size); + SLJIT_ASSERT(size == 4); + + return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ +#if (defined SLJIT_QEMU && SLJIT_QEMU) + /* Qemu says fir is 0 by default. 
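The emulated FPU is still usable, so report it as available. 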
*/ + return 1; +#elif defined(__GNUC__) + sljit_sw fir; + asm ("cfc1 %0, $0" : "=r"(fir)); + return (fir >> 22) & 0x1; +#else +#error "FIR check is not implemented for this architecture" +#endif +} + +#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 7)) +#define FMT(op) (((op & SLJIT_SINGLE_OP) ^ SLJIT_SINGLE_OP) << (21 - 8)) + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si dst_fr; + + CHECK_ERROR(); + check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); + SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + if (GET_OPCODE(op) == SLJIT_CMPD) { + if (dst > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, dst, dstw, src, srcw)); + dst = TMP_FREG1; + } + else + dst <<= 1; + + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src, srcw, 0, 0)); + src = TMP_FREG2; + } + else + src <<= 1; + + /* src and dst are swapped. */ + if (op & SLJIT_SET_E) { + FAIL_IF(push_inst(compiler, C_UEQ_fmt | FMT(op) | FT(src) | FS(dst), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, CFC1 | TA(EQUAL_FLAG) | DA(FCSR_REG), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, SRL | TA(EQUAL_FLAG) | DA(EQUAL_FLAG) | SH_IMM(23), EQUAL_FLAG)); + FAIL_IF(push_inst(compiler, ANDI | SA(EQUAL_FLAG) | TA(EQUAL_FLAG) | IMM(1), EQUAL_FLAG)); + } + if (op & SLJIT_SET_S) { + /* Mixing the instructions for the two checks. */ + FAIL_IF(push_inst(compiler, C_ULT_fmt | FMT(op) | FT(src) | FS(dst), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, CFC1 | TA(ULESS_FLAG) | DA(FCSR_REG), ULESS_FLAG)); + FAIL_IF(push_inst(compiler, C_ULT_fmt | FMT(op) | FT(dst) | FS(src), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, SRL | TA(ULESS_FLAG) | DA(ULESS_FLAG) | SH_IMM(23), ULESS_FLAG)); + FAIL_IF(push_inst(compiler, ANDI | SA(ULESS_FLAG) | TA(ULESS_FLAG) | IMM(1), ULESS_FLAG)); + FAIL_IF(push_inst(compiler, CFC1 | TA(UGREATER_FLAG) | DA(FCSR_REG), UGREATER_FLAG)); + FAIL_IF(push_inst(compiler, SRL | TA(UGREATER_FLAG) | DA(UGREATER_FLAG) | SH_IMM(23), UGREATER_FLAG)); + FAIL_IF(push_inst(compiler, ANDI | SA(UGREATER_FLAG) | TA(UGREATER_FLAG) | IMM(1), UGREATER_FLAG)); + } + return push_inst(compiler, C_UN_fmt | FMT(op) | FT(src) | FS(dst), FCSR_FCC); + } + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? 
TMP_FREG1 : (dst << 1); + + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_fr, src, srcw, dst, dstw)); + src = dst_fr; + } + else + src <<= 1; + + switch (GET_OPCODE(op)) { + case SLJIT_MOVD: + if (src != dst_fr && dst_fr != TMP_FREG1) + FAIL_IF(push_inst(compiler, MOV_fmt | FMT(op) | FS(src) | FD(dst_fr), MOVABLE_INS)); + break; + case SLJIT_NEGD: + FAIL_IF(push_inst(compiler, NEG_fmt | FMT(op) | FS(src) | FD(dst_fr), MOVABLE_INS)); + break; + case SLJIT_ABSD: + FAIL_IF(push_inst(compiler, ABS_fmt | FMT(op) | FS(src) | FD(dst_fr), MOVABLE_INS)); + break; + } + + if (dst_fr == TMP_FREG1) { + if (GET_OPCODE(op) == SLJIT_MOVD) + dst_fr = src; + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_fr, dst, dstw, 0, 0)); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si dst_fr, flags = 0; + + CHECK_ERROR(); + check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG2 : (dst << 1); + + if (src1 > SLJIT_FLOAT_REG6) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { + FAIL_IF(compiler->error); + src1 = TMP_FREG1; + } else + flags |= SLOW_SRC1; + } + else + src1 <<= 1; + + if (src2 > SLJIT_FLOAT_REG6) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { + FAIL_IF(compiler->error); + src2 = TMP_FREG2; + } else + flags |= SLOW_SRC2; + } + else + src2 <<= 1; + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + + if (flags & SLOW_SRC1) + src1 = TMP_FREG1; + if (flags & SLOW_SRC2) + src2 = TMP_FREG2; + + switch (GET_OPCODE(op)) { + case SLJIT_ADDD: + FAIL_IF(push_inst(compiler, ADD_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + break; + + case SLJIT_SUBD: + FAIL_IF(push_inst(compiler, SUB_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + break; + + case SLJIT_MULD: + FAIL_IF(push_inst(compiler, MUL_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + break; + + case SLJIT_DIVD: + FAIL_IF(push_inst(compiler, DIV_fmt | FMT(op) | FT(src2) | FS(src1) | FD(dst_fr), MOVABLE_INS)); + break; + } + + if (dst_fr == TMP_FREG2) + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0)); + + return SLJIT_SUCCESS; +} + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct 
sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_enter(compiler, dst, dstw); + ADJUST_LOCAL_OFFSET(dst, dstw); + + /* For UNUSED dst. Uncommon, but possible. */ + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + if (dst <= TMP_REG3) + return push_inst(compiler, ADDU_W | SA(RETURN_ADDR_REG) | TA(0) | D(dst), DR(dst)); + + /* Memory. */ + return emit_op_mem(compiler, WORD_DATA, RETURN_ADDR_REG, dst, dstw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_return(compiler, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= TMP_REG3) + FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | DA(RETURN_ADDR_REG), RETURN_ADDR_REG)); + else if (src & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, RETURN_ADDR_REG, src, srcw)); + else if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, RETURN_ADDR_REG, srcw)); + + FAIL_IF(push_inst(compiler, JR | SA(RETURN_ADDR_REG), UNMOVABLE_INS)); + return push_inst(compiler, NOP, UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + check_sljit_emit_label(compiler); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + compiler->delay_slot = UNMOVABLE_INS; + return label; +} + +#if (defined SLJIT_CONFIG_MIPS_32 && SLJIT_CONFIG_MIPS_32) +#define JUMP_LENGTH 4 +#else +#error "Implementation required" +#endif + +#define BR_Z(src) \ + inst = BEQ | SA(src) | TA(0) | JUMP_LENGTH; \ + flags = IS_BIT26_COND; \ + delay_check = src; + +#define BR_NZ(src) \ + inst = BNE | SA(src) | TA(0) | JUMP_LENGTH; \ + flags = IS_BIT26_COND; \ + delay_check = src; + +#define BR_T() \ + inst = BC1T | JUMP_LENGTH; \ + flags = IS_BIT16_COND; \ + delay_check = FCSR_FCC; + +#define BR_F() \ + inst = BC1F | JUMP_LENGTH; \ + flags = IS_BIT16_COND; \ + delay_check = FCSR_FCC; + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + struct sljit_jump *jump; + sljit_ins inst; + sljit_si flags = 0; + sljit_si delay_check = UNMOVABLE_INS; + + CHECK_ERROR_PTR(); + check_sljit_emit_jump(compiler, type); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + switch (type) { + case SLJIT_C_EQUAL: + case SLJIT_C_FLOAT_NOT_EQUAL: + BR_NZ(EQUAL_FLAG); + break; + case SLJIT_C_NOT_EQUAL: + case SLJIT_C_FLOAT_EQUAL: + BR_Z(EQUAL_FLAG); + break; + case SLJIT_C_LESS: + case SLJIT_C_FLOAT_LESS: + BR_Z(ULESS_FLAG); + break; + case SLJIT_C_GREATER_EQUAL: + case SLJIT_C_FLOAT_GREATER_EQUAL: + BR_NZ(ULESS_FLAG); + break; + case SLJIT_C_GREATER: + case SLJIT_C_FLOAT_GREATER: + BR_Z(UGREATER_FLAG); + break; + case SLJIT_C_LESS_EQUAL: + case SLJIT_C_FLOAT_LESS_EQUAL: + BR_NZ(UGREATER_FLAG); + break; + case SLJIT_C_SIG_LESS: + BR_Z(LESS_FLAG); + break; + case SLJIT_C_SIG_GREATER_EQUAL: + BR_NZ(LESS_FLAG); + break; + case SLJIT_C_SIG_GREATER: + 
BR_Z(GREATER_FLAG); + break; + case SLJIT_C_SIG_LESS_EQUAL: + BR_NZ(GREATER_FLAG); + break; + case SLJIT_C_OVERFLOW: + case SLJIT_C_MUL_OVERFLOW: + BR_Z(OVERFLOW_FLAG); + break; + case SLJIT_C_NOT_OVERFLOW: + case SLJIT_C_MUL_NOT_OVERFLOW: + BR_NZ(OVERFLOW_FLAG); + break; + case SLJIT_C_FLOAT_UNORDERED: + BR_F(); + break; + case SLJIT_C_FLOAT_ORDERED: + BR_T(); + break; + default: + /* Not conditional branch. */ + inst = 0; + break; + } + + jump->flags |= flags; + if (compiler->delay_slot == MOVABLE_INS || (compiler->delay_slot != UNMOVABLE_INS && compiler->delay_slot != delay_check)) + jump->flags |= IS_MOVABLE; + + if (inst) + PTR_FAIL_IF(push_inst(compiler, inst, UNMOVABLE_INS)); + + PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + if (type <= SLJIT_JUMP) { + PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + } else { + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); + /* Cannot be optimized out if type is >= CALL0. */ + jump->flags |= IS_JAL | (type >= SLJIT_CALL0 ? SLJIT_REWRITABLE_JUMP : 0); + PTR_FAIL_IF(push_inst(compiler, JALR | S(TMP_REG2) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + jump->addr = compiler->size; + /* A NOP if type < CALL1. */ + PTR_FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SCRATCH_REG1) | TA(0) | DA(4), UNMOVABLE_INS)); + } + return jump; +} + +#define RESOLVE_IMM1() \ + if (src1 & SLJIT_IMM) { \ + if (src1w) { \ + PTR_FAIL_IF(load_immediate(compiler, DR(TMP_REG1), src1w)); \ + src1 = TMP_REG1; \ + } \ + else \ + src1 = 0; \ + } + +#define RESOLVE_IMM2() \ + if (src2 & SLJIT_IMM) { \ + if (src2w) { \ + PTR_FAIL_IF(load_immediate(compiler, DR(TMP_REG2), src2w)); \ + src2 = TMP_REG2; \ + } \ + else \ + src2 = 0; \ + } + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_cmp(struct sljit_compiler *compiler, sljit_si type, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + struct sljit_jump *jump; + sljit_si flags; + sljit_ins inst; + + CHECK_ERROR_PTR(); + check_sljit_emit_cmp(compiler, type, src1, src1w, src2, src2w); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + flags = ((type & SLJIT_INT_OP) ? INT_DATA : WORD_DATA) | LOAD_DATA; + if (src1 & SLJIT_MEM) { + PTR_FAIL_IF(emit_op_mem2(compiler, flags, DR(TMP_REG1), src1, src1w, src2, src2w)); + src1 = TMP_REG1; + } + if (src2 & SLJIT_MEM) { + PTR_FAIL_IF(emit_op_mem2(compiler, flags, DR(TMP_REG2), src2, src2w, 0, 0)); + src2 = TMP_REG2; + } + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + if (type <= SLJIT_C_NOT_EQUAL) { + RESOLVE_IMM1(); + RESOLVE_IMM2(); + jump->flags |= IS_BIT26_COND; + if (compiler->delay_slot == MOVABLE_INS || (compiler->delay_slot != UNMOVABLE_INS && compiler->delay_slot != DR(src1) && compiler->delay_slot != DR(src2))) + jump->flags |= IS_MOVABLE; + PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_C_EQUAL ? 
BNE : BEQ) | S(src1) | T(src2) | JUMP_LENGTH, UNMOVABLE_INS)); + } + else if (type >= SLJIT_C_SIG_LESS && (((src1 & SLJIT_IMM) && (src1w == 0)) || ((src2 & SLJIT_IMM) && (src2w == 0)))) { + inst = NOP; + if ((src1 & SLJIT_IMM) && (src1w == 0)) { + RESOLVE_IMM2(); + switch (type) { + case SLJIT_C_SIG_LESS: + inst = BLEZ; + jump->flags |= IS_BIT26_COND; + break; + case SLJIT_C_SIG_GREATER_EQUAL: + inst = BGTZ; + jump->flags |= IS_BIT26_COND; + break; + case SLJIT_C_SIG_GREATER: + inst = BGEZ; + jump->flags |= IS_BIT16_COND; + break; + case SLJIT_C_SIG_LESS_EQUAL: + inst = BLTZ; + jump->flags |= IS_BIT16_COND; + break; + } + src1 = src2; + } + else { + RESOLVE_IMM1(); + switch (type) { + case SLJIT_C_SIG_LESS: + inst = BGEZ; + jump->flags |= IS_BIT16_COND; + break; + case SLJIT_C_SIG_GREATER_EQUAL: + inst = BLTZ; + jump->flags |= IS_BIT16_COND; + break; + case SLJIT_C_SIG_GREATER: + inst = BLEZ; + jump->flags |= IS_BIT26_COND; + break; + case SLJIT_C_SIG_LESS_EQUAL: + inst = BGTZ; + jump->flags |= IS_BIT26_COND; + break; + } + } + PTR_FAIL_IF(push_inst(compiler, inst | S(src1) | JUMP_LENGTH, UNMOVABLE_INS)); + } + else { + if (type == SLJIT_C_LESS || type == SLJIT_C_GREATER_EQUAL || type == SLJIT_C_SIG_LESS || type == SLJIT_C_SIG_GREATER_EQUAL) { + RESOLVE_IMM1(); + if ((src2 & SLJIT_IMM) && src2w <= SIMM_MAX && src2w >= SIMM_MIN) + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTIU : SLTI) | S(src1) | T(TMP_REG1) | IMM(src2w), DR(TMP_REG1))); + else { + RESOLVE_IMM2(); + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTU : SLT) | S(src1) | T(src2) | D(TMP_REG1), DR(TMP_REG1))); + } + type = (type == SLJIT_C_LESS || type == SLJIT_C_SIG_LESS) ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL; + } + else { + RESOLVE_IMM2(); + if ((src1 & SLJIT_IMM) && src1w <= SIMM_MAX && src1w >= SIMM_MIN) + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTIU : SLTI) | S(src2) | T(TMP_REG1) | IMM(src1w), DR(TMP_REG1))); + else { + RESOLVE_IMM1(); + PTR_FAIL_IF(push_inst(compiler, (type <= SLJIT_C_LESS_EQUAL ? SLTU : SLT) | S(src2) | T(src1) | D(TMP_REG1), DR(TMP_REG1))); + } + type = (type == SLJIT_C_GREATER || type == SLJIT_C_SIG_GREATER) ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL; + } + + jump->flags |= IS_BIT26_COND; + PTR_FAIL_IF(push_inst(compiler, (type == SLJIT_C_EQUAL ? 
BNE : BEQ) | S(TMP_REG1) | TA(0) | JUMP_LENGTH, UNMOVABLE_INS)); + } + + PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + return jump; +} + +#undef RESOLVE_IMM1 +#undef RESOLVE_IMM2 + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_fcmp(struct sljit_compiler *compiler, sljit_si type, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + struct sljit_jump *jump; + sljit_ins inst; + sljit_si if_true; + + CHECK_ERROR_PTR(); + check_sljit_emit_fcmp(compiler, type, src1, src1w, src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + if (src1 > SLJIT_FLOAT_REG6) { + PTR_FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(type) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + src1 = TMP_FREG1; + } + else + src1 <<= 1; + + if (src2 > SLJIT_FLOAT_REG6) { + PTR_FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(type) | LOAD_DATA, TMP_FREG2, src2, src2w, 0, 0)); + src2 = TMP_FREG2; + } + else + src2 <<= 1; + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + jump->flags |= IS_BIT16_COND; + + switch (type & 0xff) { + case SLJIT_C_FLOAT_EQUAL: + inst = C_UEQ_fmt; + if_true = 1; + break; + case SLJIT_C_FLOAT_NOT_EQUAL: + inst = C_UEQ_fmt; + if_true = 0; + break; + case SLJIT_C_FLOAT_LESS: + inst = C_ULT_fmt; + if_true = 1; + break; + case SLJIT_C_FLOAT_GREATER_EQUAL: + inst = C_ULT_fmt; + if_true = 0; + break; + case SLJIT_C_FLOAT_GREATER: + inst = C_ULE_fmt; + if_true = 0; + break; + case SLJIT_C_FLOAT_LESS_EQUAL: + inst = C_ULE_fmt; + if_true = 1; + break; + case SLJIT_C_FLOAT_UNORDERED: + inst = C_UN_fmt; + if_true = 1; + break; + case SLJIT_C_FLOAT_ORDERED: + default: /* Make compilers happy. */ + inst = C_UN_fmt; + if_true = 0; + break; + } + + PTR_FAIL_IF(push_inst(compiler, inst | FMT(type) | FT(src2) | FS(src1), UNMOVABLE_INS)); + /* Intentionally the other opcode. */ + PTR_FAIL_IF(push_inst(compiler, (if_true ? BC1F : BC1T) | JUMP_LENGTH, UNMOVABLE_INS)); + PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + PTR_FAIL_IF(push_inst(compiler, JR | S(TMP_REG2), UNMOVABLE_INS)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + return jump; +} + +#undef JUMP_LENGTH +#undef BR_Z +#undef BR_NZ +#undef BR_T +#undef BR_F + +#undef FLOAT_DATA +#undef FMT + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + sljit_si src_r = TMP_REG2; + struct sljit_jump *jump = NULL; + + CHECK_ERROR(); + check_sljit_emit_ijump(compiler, type, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= TMP_REG3) { + if (DR(src) != 4) + src_r = src; + else + FAIL_IF(push_inst(compiler, ADDU_W | S(src) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); + } + + if (type >= SLJIT_CALL0) { + SLJIT_ASSERT(DR(PIC_ADDR_REG) == 25 && PIC_ADDR_REG == TMP_REG2); + if (src & (SLJIT_IMM | SLJIT_MEM)) { + if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, DR(PIC_ADDR_REG), srcw)); + else { + SLJIT_ASSERT(src_r == TMP_REG2 && (src & SLJIT_MEM)); + FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); + } + FAIL_IF(push_inst(compiler, JALR | S(PIC_ADDR_REG) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + /* We need an extra instruction in any case. 
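The JALR delay slot is filled by moving the first scratch register into $a0, which doubles as the argument setup for CALL1 and above. 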
*/ + return push_inst(compiler, ADDU_W | S(SLJIT_SCRATCH_REG1) | TA(0) | DA(4), UNMOVABLE_INS); + } + + /* Register input. */ + if (type >= SLJIT_CALL1) + FAIL_IF(push_inst(compiler, ADDU_W | S(SLJIT_SCRATCH_REG1) | TA(0) | DA(4), 4)); + FAIL_IF(push_inst(compiler, JALR | S(src_r) | DA(RETURN_ADDR_REG), UNMOVABLE_INS)); + return push_inst(compiler, ADDU_W | S(src_r) | TA(0) | D(PIC_ADDR_REG), UNMOVABLE_INS); + } + + if (src & SLJIT_IMM) { + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | ((type >= SLJIT_FAST_CALL) ? IS_JAL : 0)); + jump->u.target = srcw; + + if (compiler->delay_slot != UNMOVABLE_INS) + jump->flags |= IS_MOVABLE; + + FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + } + else if (src & SLJIT_MEM) + FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); + + FAIL_IF(push_inst(compiler, JR | S(src_r), UNMOVABLE_INS)); + if (jump) + jump->addr = compiler->size; + FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + sljit_si sugg_dst_ar, dst_ar; + sljit_si flags = GET_ALL_FLAGS(op); + + CHECK_ERROR(); + check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + ADJUST_LOCAL_OFFSET(dst, dstw); + + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + op = GET_OPCODE(op); + sugg_dst_ar = DR((op < SLJIT_ADD && dst <= TMP_REG3) ? dst : TMP_REG2); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + if (op >= SLJIT_ADD && (src & SLJIT_MEM)) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, DR(TMP_REG1), src, srcw, dst, dstw)); + src = TMP_REG1; + srcw = 0; + } + + switch (type) { + case SLJIT_C_EQUAL: + case SLJIT_C_NOT_EQUAL: + FAIL_IF(push_inst(compiler, SLTIU | SA(EQUAL_FLAG) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); + dst_ar = sugg_dst_ar; + break; + case SLJIT_C_LESS: + case SLJIT_C_GREATER_EQUAL: + case SLJIT_C_FLOAT_LESS: + case SLJIT_C_FLOAT_GREATER_EQUAL: + dst_ar = ULESS_FLAG; + break; + case SLJIT_C_GREATER: + case SLJIT_C_LESS_EQUAL: + case SLJIT_C_FLOAT_GREATER: + case SLJIT_C_FLOAT_LESS_EQUAL: + dst_ar = UGREATER_FLAG; + break; + case SLJIT_C_SIG_LESS: + case SLJIT_C_SIG_GREATER_EQUAL: + dst_ar = LESS_FLAG; + break; + case SLJIT_C_SIG_GREATER: + case SLJIT_C_SIG_LESS_EQUAL: + dst_ar = GREATER_FLAG; + break; + case SLJIT_C_OVERFLOW: + case SLJIT_C_NOT_OVERFLOW: + dst_ar = OVERFLOW_FLAG; + break; + case SLJIT_C_MUL_OVERFLOW: + case SLJIT_C_MUL_NOT_OVERFLOW: + FAIL_IF(push_inst(compiler, SLTIU | SA(OVERFLOW_FLAG) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); + dst_ar = sugg_dst_ar; + type ^= 0x1; /* Flip type bit for the XORI below. 
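SLTIU already produced the negated flag, so the polarity of the final correction must be reversed as well. 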
*/ + break; + case SLJIT_C_FLOAT_EQUAL: + case SLJIT_C_FLOAT_NOT_EQUAL: + dst_ar = EQUAL_FLAG; + break; + + case SLJIT_C_FLOAT_UNORDERED: + case SLJIT_C_FLOAT_ORDERED: + FAIL_IF(push_inst(compiler, CFC1 | TA(sugg_dst_ar) | DA(FCSR_REG), sugg_dst_ar)); + FAIL_IF(push_inst(compiler, SRL | TA(sugg_dst_ar) | DA(sugg_dst_ar) | SH_IMM(23), sugg_dst_ar)); + FAIL_IF(push_inst(compiler, ANDI | SA(sugg_dst_ar) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); + dst_ar = sugg_dst_ar; + break; + + default: + SLJIT_ASSERT_STOP(); + dst_ar = sugg_dst_ar; + break; + } + + if (type & 0x1) { + FAIL_IF(push_inst(compiler, XORI | SA(dst_ar) | TA(sugg_dst_ar) | IMM(1), sugg_dst_ar)); + dst_ar = sugg_dst_ar; + } + + if (op >= SLJIT_ADD) { + if (DR(TMP_REG2) != dst_ar) + FAIL_IF(push_inst(compiler, ADDU_W | SA(dst_ar) | TA(0) | D(TMP_REG2), DR(TMP_REG2))); + return emit_op(compiler, op | flags, CUMULATIVE_OP | LOGICAL_OP | IMM_OP | ALT_KEEP_CACHE, dst, dstw, src, srcw, TMP_REG2, 0); + } + + if (dst & SLJIT_MEM) + return emit_op_mem(compiler, WORD_DATA, dst_ar, dst, dstw); + + if (sugg_dst_ar != dst_ar) + return push_inst(compiler, ADDU_W | SA(dst_ar) | TA(0) | DA(sugg_dst_ar), sugg_dst_ar); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_si reg; + + CHECK_ERROR_PTR(); + check_sljit_emit_const(compiler, dst, dstw, init_value); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + reg = (dst <= TMP_REG3) ? dst : TMP_REG2; + + PTR_FAIL_IF(emit_const(compiler, reg, init_value)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + return const_; +} Property changes on: sys/contrib/sljit/sljitNativeMIPS_common.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativePPC_32.c =================================================================== --- sys/contrib/sljit/sljitNativePPC_32.c (revision 0) +++ sys/contrib/sljit/sljitNativePPC_32.c (working copy) @@ -0,0 +1,269 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* ppc 32-bit arch dependent functions. */ + +static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm) +{ + if (imm <= SIMM_MAX && imm >= SIMM_MIN) + return push_inst(compiler, ADDI | D(reg) | A(0) | IMM(imm)); + + if (!(imm & ~0xffff)) + return push_inst(compiler, ORI | S(ZERO_REG) | A(reg) | IMM(imm)); + + FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 16))); + return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS; +} + +#define INS_CLEAR_LEFT(dst, src, from) \ + (RLWINM | S(src) | A(dst) | ((from) << 6) | (31 << 1)) + +static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, + sljit_si dst, sljit_si src1, sljit_si src2) +{ + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: + case SLJIT_MOV_P: + SLJIT_ASSERT(src1 == TMP_REG1); + if (dst != src2) + return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_UB: + case SLJIT_MOV_SB: + SLJIT_ASSERT(src1 == TMP_REG1); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_SB) + return push_inst(compiler, EXTSB | S(src2) | A(dst)); + return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); + } + else if ((flags & REG_DEST) && op == SLJIT_MOV_SB) + return push_inst(compiler, EXTSB | S(src2) | A(dst)); + else { + SLJIT_ASSERT(dst == src2); + } + return SLJIT_SUCCESS; + + case SLJIT_MOV_UH: + case SLJIT_MOV_SH: + SLJIT_ASSERT(src1 == TMP_REG1); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_SH) + return push_inst(compiler, EXTSH | S(src2) | A(dst)); + return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); + } + else { + SLJIT_ASSERT(dst == src2); + } + return SLJIT_SUCCESS; + + case SLJIT_NOT: + SLJIT_ASSERT(src1 == TMP_REG1); + return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2)); + + case SLJIT_NEG: + SLJIT_ASSERT(src1 == TMP_REG1); + return push_inst(compiler, NEG | OERC(flags) | D(dst) | A(src2)); + + case SLJIT_CLZ: + SLJIT_ASSERT(src1 == TMP_REG1); + return push_inst(compiler, CNTLZW | RC(flags) | S(src2) | A(dst)); + + case SLJIT_ADD: + if (flags & ALT_FORM1) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. */ + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ADDI | D(dst) | A(src1) | compiler->imm); + } + if (flags & ALT_FORM2) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. */ + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + } + if (flags & ALT_FORM3) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ADDIC | D(dst) | A(src1) | compiler->imm); + } + if (flags & ALT_FORM4) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. 
*/ + FAIL_IF(push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff))); + return push_inst(compiler, ADDIS | D(dst) | A(dst) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))); + } + if (!(flags & ALT_SET_FLAGS)) + return push_inst(compiler, ADD | D(dst) | A(src1) | B(src2)); + return push_inst(compiler, ADDC | OERC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); + + case SLJIT_ADDC: + if (flags & ALT_FORM1) { + FAIL_IF(push_inst(compiler, MFXER | D(0))); + FAIL_IF(push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2))); + return push_inst(compiler, MTXER | S(0)); + } + return push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2)); + + case SLJIT_SUB: + if (flags & ALT_FORM1) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. */ + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm); + } + if (flags & (ALT_FORM2 | ALT_FORM3)) { + SLJIT_ASSERT(src2 == TMP_REG2); + if (flags & ALT_FORM2) + FAIL_IF(push_inst(compiler, CMPI | CRD(0) | A(src1) | compiler->imm)); + if (flags & ALT_FORM3) + return push_inst(compiler, CMPLI | CRD(4) | A(src1) | compiler->imm); + return SLJIT_SUCCESS; + } + if (flags & (ALT_FORM4 | ALT_FORM5)) { + if (flags & ALT_FORM4) + FAIL_IF(push_inst(compiler, CMPL | CRD(4) | A(src1) | B(src2))); + if (flags & ALT_FORM5) + FAIL_IF(push_inst(compiler, CMP | CRD(0) | A(src1) | B(src2))); + return SLJIT_SUCCESS; + } + if (!(flags & ALT_SET_FLAGS)) + return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); + if (flags & ALT_FORM6) + FAIL_IF(push_inst(compiler, CMPL | CRD(4) | A(src1) | B(src2))); + return push_inst(compiler, SUBFC | OERC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); + + case SLJIT_SUBC: + if (flags & ALT_FORM1) { + FAIL_IF(push_inst(compiler, MFXER | D(0))); + FAIL_IF(push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1))); + return push_inst(compiler, MTXER | S(0)); + } + return push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1)); + + case SLJIT_MUL: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, MULLI | D(dst) | A(src1) | compiler->imm); + } + return push_inst(compiler, MULLW | OERC(flags) | D(dst) | A(src2) | B(src1)); + + case SLJIT_AND: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ANDI | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM2) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ANDIS | S(src1) | A(dst) | compiler->imm); + } + return push_inst(compiler, AND | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_OR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ORI | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM2) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ORIS | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM3) { + SLJIT_ASSERT(src2 == TMP_REG2); + FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm))); + return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + } + return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_XOR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, XORI | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM2) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, XORIS | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM3) { + SLJIT_ASSERT(src2 == TMP_REG2); + 
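/* 32-bit immediate: apply the low half with XORI, then the high half with XORIS. */ +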
FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm))); + return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + } + return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_SHL: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + compiler->imm &= 0x1f; + return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1)); + } + return push_inst(compiler, SLW | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_LSHR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + compiler->imm &= 0x1f; + return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1)); + } + return push_inst(compiler, SRW | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_ASHR: + if (flags & ALT_FORM3) + FAIL_IF(push_inst(compiler, MFXER | D(0))); + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + compiler->imm &= 0x1f; + FAIL_IF(push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11))); + } + else + FAIL_IF(push_inst(compiler, SRAW | RC(flags) | S(src1) | A(dst) | B(src2))); + return (flags & ALT_FORM3) ? push_inst(compiler, MTXER | S(0)) : SLJIT_SUCCESS; + } + + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_si reg, sljit_sw init_value) +{ + FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 16))); + return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 16) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | (new_addr & 0xffff); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 16) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | (new_constant & 0xffff); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} Property changes on: sys/contrib/sljit/sljitNativePPC_32.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativePPC_64.c =================================================================== --- sys/contrib/sljit/sljitNativePPC_64.c (revision 0) +++ sys/contrib/sljit/sljitNativePPC_64.c (working copy) @@ -0,0 +1,421 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* ppc 64-bit arch dependent functions. */ + +#if defined(__GNUC__) || (defined(__IBM_GCC_ASM) && __IBM_GCC_ASM) +#define ASM_SLJIT_CLZ(src, dst) \ + __asm__ volatile ( "cntlzd %0, %1" : "=r"(dst) : "r"(src) ) +#elif defined(__xlc__) +#error "Please enable GCC syntax for inline assembly statements" +#else +#error "Must implement count leading zeroes" +#endif + +#define RLDI(dst, src, sh, mb, type) \ + (HI(30) | S(src) | A(dst) | ((type) << 2) | (((sh) & 0x1f) << 11) | (((sh) & 0x20) >> 4) | (((mb) & 0x1f) << 6) | ((mb) & 0x20)) + +#define PUSH_RLDICR(reg, shift) \ + push_inst(compiler, RLDI(reg, reg, 63 - shift, shift, 1)) + +static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm) +{ + sljit_uw tmp; + sljit_uw shift; + sljit_uw tmp2; + sljit_uw shift2; + + if (imm <= SIMM_MAX && imm >= SIMM_MIN) + return push_inst(compiler, ADDI | D(reg) | A(0) | IMM(imm)); + + if (!(imm & ~0xffff)) + return push_inst(compiler, ORI | S(ZERO_REG) | A(reg) | IMM(imm)); + + if (imm <= SLJIT_W(0x7fffffff) && imm >= SLJIT_W(-0x80000000)) { + FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 16))); + return (imm & 0xffff) ? push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)) : SLJIT_SUCCESS; + } + + /* Count leading zeroes. */ + tmp = (imm >= 0) ? imm : ~imm; + ASM_SLJIT_CLZ(tmp, shift); + SLJIT_ASSERT(shift > 0); + shift--; + tmp = (imm << shift); + + if ((tmp & ~0xffff000000000000ul) == 0) { + FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); + shift += 15; + return PUSH_RLDICR(reg, shift); + } + + if ((tmp & ~0xffffffff00000000ul) == 0) { + FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(tmp >> 48))); + FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp >> 32))); + shift += 31; + return PUSH_RLDICR(reg, shift); + } + + /* Cut out the 16 bit from immediate. */ + shift += 15; + tmp2 = imm & ((1ul << (63 - shift)) - 1); + + if (tmp2 <= 0xffff) { + FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); + FAIL_IF(PUSH_RLDICR(reg, shift)); + return push_inst(compiler, ORI | S(reg) | A(reg) | tmp2); + } + + if (tmp2 <= 0xffffffff) { + FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); + FAIL_IF(PUSH_RLDICR(reg, shift)); + FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | (tmp2 >> 16))); + return (imm & 0xffff) ? 
push_inst(compiler, ORI | S(reg) | A(reg) | IMM(tmp2)) : SLJIT_SUCCESS; + } + + ASM_SLJIT_CLZ(tmp2, shift2); + tmp2 <<= shift2; + + if ((tmp2 & ~0xffff000000000000ul) == 0) { + FAIL_IF(push_inst(compiler, ADDI | D(reg) | A(0) | IMM(tmp >> 48))); + shift2 += 15; + shift += (63 - shift2); + FAIL_IF(PUSH_RLDICR(reg, shift)); + FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | (tmp2 >> 48))); + return PUSH_RLDICR(reg, shift2); + } + + /* The general version. */ + FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(imm >> 48))); + FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm >> 32))); + FAIL_IF(PUSH_RLDICR(reg, 31)); + FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(imm >> 16))); + return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(imm)); +} + +/* Simplified mnemonics: clrldi. */ +#define INS_CLEAR_LEFT(dst, src, from) \ + (RLDICL | S(src) | A(dst) | ((from) << 6) | (1 << 5)) + +/* Sign extension for integer operations. */ +#define UN_EXTS() \ + if ((flags & (ALT_SIGN_EXT | REG2_SOURCE)) == (ALT_SIGN_EXT | REG2_SOURCE)) { \ + FAIL_IF(push_inst(compiler, EXTSW | S(src2) | A(TMP_REG2))); \ + src2 = TMP_REG2; \ + } + +#define BIN_EXTS() \ + if (flags & ALT_SIGN_EXT) { \ + if (flags & REG1_SOURCE) { \ + FAIL_IF(push_inst(compiler, EXTSW | S(src1) | A(TMP_REG1))); \ + src1 = TMP_REG1; \ + } \ + if (flags & REG2_SOURCE) { \ + FAIL_IF(push_inst(compiler, EXTSW | S(src2) | A(TMP_REG2))); \ + src2 = TMP_REG2; \ + } \ + } + +#define BIN_IMM_EXTS() \ + if ((flags & (ALT_SIGN_EXT | REG1_SOURCE)) == (ALT_SIGN_EXT | REG1_SOURCE)) { \ + FAIL_IF(push_inst(compiler, EXTSW | S(src1) | A(TMP_REG1))); \ + src1 = TMP_REG1; \ + } + +static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, + sljit_si dst, sljit_si src1, sljit_si src2) +{ + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_P: + SLJIT_ASSERT(src1 == TMP_REG1); + if (dst != src2) + return push_inst(compiler, OR | S(src2) | A(dst) | B(src2)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: + SLJIT_ASSERT(src1 == TMP_REG1); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_SI) + return push_inst(compiler, EXTSW | S(src2) | A(dst)); + return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 0)); + } + else { + SLJIT_ASSERT(dst == src2); + } + return SLJIT_SUCCESS; + + case SLJIT_MOV_UB: + case SLJIT_MOV_SB: + SLJIT_ASSERT(src1 == TMP_REG1); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_SB) + return push_inst(compiler, EXTSB | S(src2) | A(dst)); + return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 24)); + } + else if ((flags & REG_DEST) && op == SLJIT_MOV_SB) + return push_inst(compiler, EXTSB | S(src2) | A(dst)); + else { + SLJIT_ASSERT(dst == src2); + } + return SLJIT_SUCCESS; + + case SLJIT_MOV_UH: + case SLJIT_MOV_SH: + SLJIT_ASSERT(src1 == TMP_REG1); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_SH) + return push_inst(compiler, EXTSH | S(src2) | A(dst)); + return push_inst(compiler, INS_CLEAR_LEFT(dst, src2, 16)); + } + else { + SLJIT_ASSERT(dst == src2); + } + return SLJIT_SUCCESS; + + case SLJIT_NOT: + SLJIT_ASSERT(src1 == TMP_REG1); + UN_EXTS(); + return push_inst(compiler, NOR | RC(flags) | S(src2) | A(dst) | B(src2)); + + case SLJIT_NEG: + SLJIT_ASSERT(src1 == TMP_REG1); + UN_EXTS(); + return push_inst(compiler, NEG | OERC(flags) | D(dst) | A(src2)); + + case SLJIT_CLZ: + SLJIT_ASSERT(src1 == TMP_REG1); + if 
(flags & ALT_FORM1) + return push_inst(compiler, CNTLZW | RC(flags) | S(src2) | A(dst)); + return push_inst(compiler, CNTLZD | RC(flags) | S(src2) | A(dst)); + + case SLJIT_ADD: + if (flags & ALT_FORM1) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. */ + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ADDI | D(dst) | A(src1) | compiler->imm); + } + if (flags & ALT_FORM2) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. */ + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ADDIS | D(dst) | A(src1) | compiler->imm); + } + if (flags & ALT_FORM3) { + SLJIT_ASSERT(src2 == TMP_REG2); + BIN_IMM_EXTS(); + return push_inst(compiler, ADDIC | D(dst) | A(src1) | compiler->imm); + } + if (flags & ALT_FORM4) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. */ + FAIL_IF(push_inst(compiler, ADDI | D(dst) | A(src1) | (compiler->imm & 0xffff))); + return push_inst(compiler, ADDIS | D(dst) | A(dst) | (((compiler->imm >> 16) & 0xffff) + ((compiler->imm >> 15) & 0x1))); + } + if (!(flags & ALT_SET_FLAGS)) + return push_inst(compiler, ADD | D(dst) | A(src1) | B(src2)); + BIN_EXTS(); + return push_inst(compiler, ADDC | OERC(ALT_SET_FLAGS) | D(dst) | A(src1) | B(src2)); + + case SLJIT_ADDC: + if (flags & ALT_FORM1) { + FAIL_IF(push_inst(compiler, MFXER | D(0))); + FAIL_IF(push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2))); + return push_inst(compiler, MTXER | S(0)); + } + BIN_EXTS(); + return push_inst(compiler, ADDE | D(dst) | A(src1) | B(src2)); + + case SLJIT_SUB: + if (flags & ALT_FORM1) { + /* Flags are not set: BIN_IMM_EXTS unnecessary. */ + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, SUBFIC | D(dst) | A(src1) | compiler->imm); + } + if (flags & (ALT_FORM2 | ALT_FORM3)) { + SLJIT_ASSERT(src2 == TMP_REG2); + if (flags & ALT_FORM2) + FAIL_IF(push_inst(compiler, CMPI | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm)); + if (flags & ALT_FORM3) + return push_inst(compiler, CMPLI | CRD(4 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | compiler->imm); + return SLJIT_SUCCESS; + } + if (flags & (ALT_FORM4 | ALT_FORM5)) { + if (flags & ALT_FORM4) + FAIL_IF(push_inst(compiler, CMPL | CRD(4 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | B(src2))); + if (flags & ALT_FORM5) + return push_inst(compiler, CMP | CRD(0 | ((flags & ALT_SIGN_EXT) ? 0 : 1)) | A(src1) | B(src2)); + return SLJIT_SUCCESS; + } + if (!(flags & ALT_SET_FLAGS)) + return push_inst(compiler, SUBF | D(dst) | A(src2) | B(src1)); + BIN_EXTS(); + if (flags & ALT_FORM6) + FAIL_IF(push_inst(compiler, CMPL | CRD(4 | ((flags & ALT_SIGN_EXT) ?
0 : 1)) | A(src1) | B(src2))); + return push_inst(compiler, SUBFC | OERC(ALT_SET_FLAGS) | D(dst) | A(src2) | B(src1)); + + case SLJIT_SUBC: + if (flags & ALT_FORM1) { + FAIL_IF(push_inst(compiler, MFXER | D(0))); + FAIL_IF(push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1))); + return push_inst(compiler, MTXER | S(0)); + } + BIN_EXTS(); + return push_inst(compiler, SUBFE | D(dst) | A(src2) | B(src1)); + + case SLJIT_MUL: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, MULLI | D(dst) | A(src1) | compiler->imm); + } + BIN_EXTS(); + if (flags & ALT_FORM2) + return push_inst(compiler, MULLW | OERC(flags) | D(dst) | A(src2) | B(src1)); + return push_inst(compiler, MULLD | OERC(flags) | D(dst) | A(src2) | B(src1)); + + case SLJIT_AND: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ANDI | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM2) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ANDIS | S(src1) | A(dst) | compiler->imm); + } + return push_inst(compiler, AND | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_OR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ORI | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM2) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, ORIS | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM3) { + SLJIT_ASSERT(src2 == TMP_REG2); + FAIL_IF(push_inst(compiler, ORI | S(src1) | A(dst) | IMM(compiler->imm))); + return push_inst(compiler, ORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + } + return push_inst(compiler, OR | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_XOR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, XORI | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM2) { + SLJIT_ASSERT(src2 == TMP_REG2); + return push_inst(compiler, XORIS | S(src1) | A(dst) | compiler->imm); + } + if (flags & ALT_FORM3) { + SLJIT_ASSERT(src2 == TMP_REG2); + FAIL_IF(push_inst(compiler, XORI | S(src1) | A(dst) | IMM(compiler->imm))); + return push_inst(compiler, XORIS | S(dst) | A(dst) | IMM(compiler->imm >> 16)); + } + return push_inst(compiler, XOR | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_SHL: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + if (flags & ALT_FORM2) { + compiler->imm &= 0x1f; + return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11) | ((31 - compiler->imm) << 1)); + } + else { + compiler->imm &= 0x3f; + return push_inst(compiler, RLDI(dst, src1, compiler->imm, 63 - compiler->imm, 1) | RC(flags)); + } + } + return push_inst(compiler, ((flags & ALT_FORM2) ? SLW : SLD) | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_LSHR: + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + if (flags & ALT_FORM2) { + compiler->imm &= 0x1f; + return push_inst(compiler, RLWINM | RC(flags) | S(src1) | A(dst) | (((32 - compiler->imm) & 0x1f) << 11) | (compiler->imm << 6) | (31 << 1)); + } + else { + compiler->imm &= 0x3f; + return push_inst(compiler, RLDI(dst, src1, 64 - compiler->imm, compiler->imm, 0) | RC(flags)); + } + } + return push_inst(compiler, ((flags & ALT_FORM2) ? 
SRW : SRD) | RC(flags) | S(src1) | A(dst) | B(src2)); + + case SLJIT_ASHR: + if (flags & ALT_FORM3) + FAIL_IF(push_inst(compiler, MFXER | D(0))); + if (flags & ALT_FORM1) { + SLJIT_ASSERT(src2 == TMP_REG2); + if (flags & ALT_FORM2) { + compiler->imm &= 0x1f; + FAIL_IF(push_inst(compiler, SRAWI | RC(flags) | S(src1) | A(dst) | (compiler->imm << 11))); + } + else { + compiler->imm &= 0x3f; + FAIL_IF(push_inst(compiler, SRADI | RC(flags) | S(src1) | A(dst) | ((compiler->imm & 0x1f) << 11) | ((compiler->imm & 0x20) >> 4))); + } + } + else + FAIL_IF(push_inst(compiler, ((flags & ALT_FORM2) ? SRAW : SRAD) | RC(flags) | S(src1) | A(dst) | B(src2))); + return (flags & ALT_FORM3) ? push_inst(compiler, MTXER | S(0)) : SLJIT_SUCCESS; + } + + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_si reg, sljit_sw init_value) +{ + FAIL_IF(push_inst(compiler, ADDIS | D(reg) | A(0) | IMM(init_value >> 48))); + FAIL_IF(push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value >> 32))); + FAIL_IF(PUSH_RLDICR(reg, 31)); + FAIL_IF(push_inst(compiler, ORIS | S(reg) | A(reg) | IMM(init_value >> 16))); + return push_inst(compiler, ORI | S(reg) | A(reg) | IMM(init_value)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_addr >> 48) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | ((new_addr >> 32) & 0xffff); + inst[3] = (inst[3] & 0xffff0000) | ((new_addr >> 16) & 0xffff); + inst[4] = (inst[4] & 0xffff0000) | (new_addr & 0xffff); + SLJIT_CACHE_FLUSH(inst, inst + 5); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffff0000) | ((new_constant >> 48) & 0xffff); + inst[1] = (inst[1] & 0xffff0000) | ((new_constant >> 32) & 0xffff); + inst[3] = (inst[3] & 0xffff0000) | ((new_constant >> 16) & 0xffff); + inst[4] = (inst[4] & 0xffff0000) | (new_constant & 0xffff); + SLJIT_CACHE_FLUSH(inst, inst + 5); +} Property changes on: sys/contrib/sljit/sljitNativePPC_64.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativePPC_common.c =================================================================== --- sys/contrib/sljit/sljitNativePPC_common.c (revision 0) +++ sys/contrib/sljit/sljitNativePPC_common.c (working copy) @@ -0,0 +1,2014 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +{ + return "PowerPC" SLJIT_CPUINFO; +} + +/* Length of an instruction word. + Both for ppc-32 and ppc-64. */ +typedef sljit_ui sljit_ins; + +#ifdef _AIX +#include <sys/cache.h> +#endif + +static void ppc_cache_flush(sljit_ins *from, sljit_ins *to) +{ +#ifdef _AIX + _sync_cache_range((caddr_t)from, (int)((size_t)to - (size_t)from)); +#elif defined(__GNUC__) || (defined(__IBM_GCC_ASM) && __IBM_GCC_ASM) +# if defined(_ARCH_PWR) || defined(_ARCH_PWR2) + /* Cache flush for POWER architecture. */ + while (from < to) { + __asm__ volatile ( + "clf 0, %0\n" + "dcs\n" + : : "r"(from) + ); + from++; + } + __asm__ volatile ( "ics" ); +# elif defined(_ARCH_COM) && !defined(_ARCH_PPC) +# error "Cache flush is not implemented for PowerPC/POWER common mode." +# else + /* Cache flush for PowerPC architecture. */ + while (from < to) { + __asm__ volatile ( + "dcbf 0, %0\n" + "sync\n" + "icbi 0, %0\n" + : : "r"(from) + ); + from++; + } + __asm__ volatile ( "isync" ); +# endif +# ifdef __xlc__ +# warning "This file may fail to compile if -qfuncsect is used" +# endif +#elif defined(__xlc__) +#error "Please enable GCC syntax for inline assembly statements with -qasm=gcc" +#else +#error "This platform requires a cache flush implementation." +#endif /* _AIX */ +} + +#define TMP_REG1 (SLJIT_NO_REGISTERS + 1) +#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) +#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) +#define ZERO_REG (SLJIT_NO_REGISTERS + 4) + +#define TMP_FREG1 (0) +#define TMP_FREG2 (SLJIT_FLOAT_REG6 + 1) + +static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 5] = { + 0, 3, 4, 5, 6, 7, 30, 29, 28, 27, 26, 1, 8, 9, 10, 31 +}; + +/* --------------------------------------------------------------------- */ +/* Instruction forms */ +/* --------------------------------------------------------------------- */ +#define D(d) (reg_map[d] << 21) +#define S(s) (reg_map[s] << 21) +#define A(a) (reg_map[a] << 16) +#define B(b) (reg_map[b] << 11) +#define C(c) (reg_map[c] << 6) +#define FD(fd) ((fd) << 21) +#define FA(fa) ((fa) << 16) +#define FB(fb) ((fb) << 11) +#define FC(fc) ((fc) << 6) +#define IMM(imm) ((imm) & 0xffff) +#define CRD(d) ((d) << 21) + +/* Instruction bit sections. + OE and Rc flag (see ALT_SET_FLAGS). */ +#define OERC(flags) (((flags & ALT_SET_FLAGS) >> 10) | (flags & ALT_SET_FLAGS)) +/* Rc flag (see ALT_SET_FLAGS).
*/ +#define RC(flags) ((flags & ALT_SET_FLAGS) >> 10) +#define HI(opcode) ((opcode) << 26) +#define LO(opcode) ((opcode) << 1) + +#define ADD (HI(31) | LO(266)) +#define ADDC (HI(31) | LO(10)) +#define ADDE (HI(31) | LO(138)) +#define ADDI (HI(14)) +#define ADDIC (HI(13)) +#define ADDIS (HI(15)) +#define ADDME (HI(31) | LO(234)) +#define AND (HI(31) | LO(28)) +#define ANDI (HI(28)) +#define ANDIS (HI(29)) +#define Bx (HI(18)) +#define BCx (HI(16)) +#define BCCTR (HI(19) | LO(528) | (3 << 11)) +#define BLR (HI(19) | LO(16) | (0x14 << 21)) +#define CNTLZD (HI(31) | LO(58)) +#define CNTLZW (HI(31) | LO(26)) +#define CMP (HI(31) | LO(0)) +#define CMPI (HI(11)) +#define CMPL (HI(31) | LO(32)) +#define CMPLI (HI(10)) +#define CROR (HI(19) | LO(449)) +#define DIVD (HI(31) | LO(489)) +#define DIVDU (HI(31) | LO(457)) +#define DIVW (HI(31) | LO(491)) +#define DIVWU (HI(31) | LO(459)) +#define EXTSB (HI(31) | LO(954)) +#define EXTSH (HI(31) | LO(922)) +#define EXTSW (HI(31) | LO(986)) +#define FABS (HI(63) | LO(264)) +#define FADD (HI(63) | LO(21)) +#define FADDS (HI(59) | LO(21)) +#define FCMPU (HI(63) | LO(0)) +#define FDIV (HI(63) | LO(18)) +#define FDIVS (HI(59) | LO(18)) +#define FMR (HI(63) | LO(72)) +#define FMUL (HI(63) | LO(25)) +#define FMULS (HI(59) | LO(25)) +#define FNEG (HI(63) | LO(40)) +#define FSUB (HI(63) | LO(20)) +#define FSUBS (HI(59) | LO(20)) +#define LD (HI(58) | 0) +#define LWZ (HI(32)) +#define MFCR (HI(31) | LO(19)) +#define MFLR (HI(31) | LO(339) | 0x80000) +#define MFXER (HI(31) | LO(339) | 0x10000) +#define MTCTR (HI(31) | LO(467) | 0x90000) +#define MTLR (HI(31) | LO(467) | 0x80000) +#define MTXER (HI(31) | LO(467) | 0x10000) +#define MULHD (HI(31) | LO(73)) +#define MULHDU (HI(31) | LO(9)) +#define MULHW (HI(31) | LO(75)) +#define MULHWU (HI(31) | LO(11)) +#define MULLD (HI(31) | LO(233)) +#define MULLI (HI(7)) +#define MULLW (HI(31) | LO(235)) +#define NEG (HI(31) | LO(104)) +#define NOP (HI(24)) +#define NOR (HI(31) | LO(124)) +#define OR (HI(31) | LO(444)) +#define ORI (HI(24)) +#define ORIS (HI(25)) +#define RLDICL (HI(30)) +#define RLWINM (HI(21)) +#define SLD (HI(31) | LO(27)) +#define SLW (HI(31) | LO(24)) +#define SRAD (HI(31) | LO(794)) +#define SRADI (HI(31) | LO(413 << 1)) +#define SRAW (HI(31) | LO(792)) +#define SRAWI (HI(31) | LO(824)) +#define SRD (HI(31) | LO(539)) +#define SRW (HI(31) | LO(536)) +#define STD (HI(62) | 0) +#define STDU (HI(62) | 1) +#define STDUX (HI(31) | LO(181)) +#define STW (HI(36)) +#define STWU (HI(37)) +#define STWUX (HI(31) | LO(183)) +#define SUBF (HI(31) | LO(40)) +#define SUBFC (HI(31) | LO(8)) +#define SUBFE (HI(31) | LO(136)) +#define SUBFIC (HI(8)) +#define XOR (HI(31) | LO(316)) +#define XORI (HI(26)) +#define XORIS (HI(27)) + +#define SIMM_MAX (0x7fff) +#define SIMM_MIN (-0x8000) +#define UIMM_MAX (0xffff) + +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_function_context(void** func_ptr, struct sljit_function_context* context, sljit_sw addr, void* func) +{ + sljit_sw* ptrs; + if (func_ptr) + *func_ptr = (void*)context; + ptrs = (sljit_sw*)func; + context->addr = addr ? 
addr : ptrs[0]; + context->r2 = ptrs[1]; + context->r11 = ptrs[2]; +} +#endif + +static sljit_si push_inst(struct sljit_compiler *compiler, sljit_ins ins) +{ + sljit_ins *ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); + FAIL_IF(!ptr); + *ptr = ins; + compiler->size++; + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si optimize_jump(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code) +{ + sljit_sw diff; + sljit_uw target_addr; + + if (jump->flags & SLJIT_REWRITABLE_JUMP) + return 0; + + if (jump->flags & JUMP_ADDR) + target_addr = jump->u.target; + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + target_addr = (sljit_uw)(code + jump->u.label->size); + } + diff = ((sljit_sw)target_addr - (sljit_sw)(code_ptr)) & ~0x3l; + + if (jump->flags & UNCOND_B) { + if (diff <= 0x01ffffff && diff >= -0x02000000) { + jump->flags |= PATCH_B; + return 1; + } + if (target_addr <= 0x03ffffff) { + jump->flags |= PATCH_B | ABSOLUTE_B; + return 1; + } + } + else { + if (diff <= 0x7fff && diff >= -0x8000) { + jump->flags |= PATCH_B; + return 1; + } + if (target_addr <= 0xffff) { + jump->flags |= PATCH_B | ABSOLUTE_B; + return 1; + } + } + return 0; +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_ins *code; + sljit_ins *code_ptr; + sljit_ins *buf_ptr; + sljit_ins *buf_end; + sljit_uw word_count; + sljit_uw addr; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + + CHECK_ERROR_PTR(); + check_sljit_generate_code(compiler); + reverse_buf(compiler); + +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + compiler->size += (compiler->size & 0x1) + (sizeof(struct sljit_function_context) / sizeof(sljit_ins)); +#else + compiler->size += (sizeof(struct sljit_function_context) / sizeof(sljit_ins)); +#endif +#endif + code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + word_count = 0; + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + do { + buf_ptr = (sljit_ins*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + *code_ptr = *buf_ptr++; + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + /* These structures are ordered by their address. */ + if (label && label->size == word_count) { + /* Just recording the address. */ + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == word_count) { +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + jump->addr = (sljit_uw)(code_ptr - 3); +#else + jump->addr = (sljit_uw)(code_ptr - 6); +#endif + if (optimize_jump(jump, code_ptr, code)) { +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + code_ptr[-3] = code_ptr[0]; + code_ptr -= 3; +#else + code_ptr[-6] = code_ptr[0]; + code_ptr -= 6; +#endif + } + jump = jump->next; + } + if (const_ && const_->addr == word_count) { + /* Just recording the address. 
*/ + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + code_ptr ++; + word_count ++; + } while (buf_ptr < buf_end); + + buf = buf->next; + } while (buf); + + if (label && label->size == word_count) { + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) + SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size - (sizeof(struct sljit_function_context) / sizeof(sljit_ins))); +#else + SLJIT_ASSERT(code_ptr - code <= (sljit_sw)compiler->size); +#endif + + jump = compiler->jumps; + while (jump) { + do { + addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; + buf_ptr = (sljit_ins*)jump->addr; + if (jump->flags & PATCH_B) { + if (jump->flags & UNCOND_B) { + if (!(jump->flags & ABSOLUTE_B)) { + addr = addr - jump->addr; + SLJIT_ASSERT((sljit_sw)addr <= 0x01ffffff && (sljit_sw)addr >= -0x02000000); + *buf_ptr = Bx | (addr & 0x03fffffc) | ((*buf_ptr) & 0x1); + } + else { + SLJIT_ASSERT(addr <= 0x03ffffff); + *buf_ptr = Bx | (addr & 0x03fffffc) | 0x2 | ((*buf_ptr) & 0x1); + } + } + else { + if (!(jump->flags & ABSOLUTE_B)) { + addr = addr - jump->addr; + SLJIT_ASSERT((sljit_sw)addr <= 0x7fff && (sljit_sw)addr >= -0x8000); + *buf_ptr = BCx | (addr & 0xfffc) | ((*buf_ptr) & 0x03ff0001); + } + else { + addr = addr & ~0x3l; + SLJIT_ASSERT(addr <= 0xffff); + *buf_ptr = BCx | (addr & 0xfffc) | 0x2 | ((*buf_ptr) & 0x03ff0001); + } + + } + break; + } + /* Set the fields of immediate loads. */ +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | (addr & 0xffff); +#else + buf_ptr[0] = (buf_ptr[0] & 0xffff0000) | ((addr >> 48) & 0xffff); + buf_ptr[1] = (buf_ptr[1] & 0xffff0000) | ((addr >> 32) & 0xffff); + buf_ptr[3] = (buf_ptr[3] & 0xffff0000) | ((addr >> 16) & 0xffff); + buf_ptr[4] = (buf_ptr[4] & 0xffff0000) | (addr & 0xffff); +#endif + } while (0); + jump = jump->next; + } + + SLJIT_CACHE_FLUSH(code, code_ptr); + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_size = compiler->size * sizeof(sljit_ins); + +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (((sljit_sw)code_ptr) & 0x4) + code_ptr++; + sljit_set_function_context(NULL, (struct sljit_function_context*)code_ptr, (sljit_sw)code, (void*)sljit_generate_code); + return code_ptr; +#else + sljit_set_function_context(NULL, (struct sljit_function_context*)code_ptr, (sljit_sw)code, (void*)sljit_generate_code); + return code_ptr; +#endif +#else + return code; +#endif +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +/* inp_flags: */ + +/* Creates an index into the data_transfer_insts array. */ +#define LOAD_DATA 0x01 +#define INDEXED 0x02 +#define WRITE_BACK 0x04 +#define WORD_DATA 0x00 +#define BYTE_DATA 0x08 +#define HALF_DATA 0x10 +#define INT_DATA 0x18 +#define SIGNED_DATA 0x20 +/* Separates integer and floating-point registers. */ +#define GPR_REG 0x3f +#define DOUBLE_DATA 0x40 + +#define MEM_MASK 0x7f + +/* Other inp_flags. */ + +#define ARG_TEST 0x000100 +/* Integer operation that sets flags -> requires exts on 64-bit systems. */ +#define ALT_SIGN_EXT 0x000200 +/* This flag affects the RC() and OERC() macros. */ +#define ALT_SET_FLAGS 0x000400
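+/* Worked example (illustrative): with ALT_SET_FLAGS (0x400) present, RC(flags) is 0x400 >> 10 = 0x1, the Rc (record) bit, and OERC(flags) is 0x401, the OE and Rc bits together; ADD | OERC(ALT_SET_FLAGS) therefore encodes "addo.", which updates both XER[OV] and CR0. */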
+#define ALT_KEEP_CACHE 0x000800 +#define ALT_FORM1 0x010000 +#define ALT_FORM2 0x020000 +#define ALT_FORM3 0x040000 +#define ALT_FORM4 0x080000 +#define ALT_FORM5 0x100000 +#define ALT_FORM6 0x200000 + +/* Source and destination are registers. */ +#define REG_DEST 0x000001 +#define REG1_SOURCE 0x000002 +#define REG2_SOURCE 0x000004 +/* getput_arg_fast returned true. */ +#define FAST_DEST 0x000008 +/* Multiple instructions are required. */ +#define SLOW_DEST 0x000010 +/* +ALT_SIGN_EXT 0x000200 +ALT_SET_FLAGS 0x000400 +ALT_FORM1 0x010000 +... +ALT_FORM6 0x200000 */ + +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) +#include "sljitNativePPC_32.c" +#else +#include "sljitNativePPC_64.c" +#endif + +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) +#define STACK_STORE STW +#define STACK_LOAD LWZ +#else +#define STACK_STORE STD +#define STACK_LOAD LD +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + CHECK_ERROR(); + check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + FAIL_IF(push_inst(compiler, MFLR | D(0))); + FAIL_IF(push_inst(compiler, STACK_STORE | S(ZERO_REG) | A(SLJIT_LOCALS_REG) | IMM(-(sljit_si)(sizeof(sljit_sw))) )); + if (saveds >= 1) + FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_REG1) | A(SLJIT_LOCALS_REG) | IMM(-2 * (sljit_si)(sizeof(sljit_sw))) )); + if (saveds >= 2) + FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_REG2) | A(SLJIT_LOCALS_REG) | IMM(-3 * (sljit_si)(sizeof(sljit_sw))) )); + if (saveds >= 3) + FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_REG3) | A(SLJIT_LOCALS_REG) | IMM(-4 * (sljit_si)(sizeof(sljit_sw))) )); + if (saveds >= 4) + FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_EREG1) | A(SLJIT_LOCALS_REG) | IMM(-5 * (sljit_si)(sizeof(sljit_sw))) )); + if (saveds >= 5) + FAIL_IF(push_inst(compiler, STACK_STORE | S(SLJIT_SAVED_EREG2) | A(SLJIT_LOCALS_REG) | IMM(-6 * (sljit_si)(sizeof(sljit_sw))) )); + FAIL_IF(push_inst(compiler, STACK_STORE | S(0) | A(SLJIT_LOCALS_REG) | IMM(sizeof(sljit_sw)) )); + + FAIL_IF(push_inst(compiler, ADDI | D(ZERO_REG) | A(0) | 0)); + if (args >= 1) + FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG1) | A(SLJIT_SAVED_REG1) | B(SLJIT_SCRATCH_REG1))); + if (args >= 2) + FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG2) | A(SLJIT_SAVED_REG2) | B(SLJIT_SCRATCH_REG2))); + if (args >= 3) + FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG3) | A(SLJIT_SAVED_REG3) | B(SLJIT_SCRATCH_REG3))); + +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) + compiler->local_size = (1 + saveds + 6 + 8) * sizeof(sljit_sw) + local_size; +#else + compiler->local_size = (1 + saveds + 2) * sizeof(sljit_sw) + local_size; +#endif + compiler->local_size = (compiler->local_size + 15) & ~0xf;
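+ /* The size computed above covers the link-register save word, ZERO_REG and the saved registers (plus additional ABI-reserved words in the indirect-call, i.e. function-descriptor, case), rounded up to a 16-byte multiple, the stack alignment the PowerPC ABIs require. */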
+ +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + if (compiler->local_size <= SIMM_MAX) + FAIL_IF(push_inst(compiler, STWU | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | IMM(-compiler->local_size))); + else { + FAIL_IF(load_immediate(compiler, 0, -compiler->local_size)); + FAIL_IF(push_inst(compiler, STWUX | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | B(0))); + } +#else + if (compiler->local_size <= SIMM_MAX) + FAIL_IF(push_inst(compiler, STDU | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | IMM(-compiler->local_size))); + else { + FAIL_IF(load_immediate(compiler, 0, -compiler->local_size)); + FAIL_IF(push_inst(compiler, STDUX | S(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | B(0))); + } +#endif + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + CHECK_ERROR_VOID(); + check_sljit_set_context(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + +#if (defined SLJIT_INDIRECT_CALL && SLJIT_INDIRECT_CALL) + compiler->local_size = (1 + saveds + 6 + 8) * sizeof(sljit_sw) + local_size; +#else + compiler->local_size = (1 + saveds + 2) * sizeof(sljit_sw) + local_size; +#endif + compiler->local_size = (compiler->local_size + 15) & ~0xf; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_return(compiler, op, src, srcw); + + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + + if (compiler->local_size <= SIMM_MAX) + FAIL_IF(push_inst(compiler, ADDI | D(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | IMM(compiler->local_size))); + else { + FAIL_IF(load_immediate(compiler, 0, compiler->local_size)); + FAIL_IF(push_inst(compiler, ADD | D(SLJIT_LOCALS_REG) | A(SLJIT_LOCALS_REG) | B(0))); + } + + FAIL_IF(push_inst(compiler, STACK_LOAD | D(0) | A(SLJIT_LOCALS_REG) | IMM(sizeof(sljit_sw)))); + if (compiler->saveds >= 5) + FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_EREG2) | A(SLJIT_LOCALS_REG) | IMM(-6 * (sljit_si)(sizeof(sljit_sw))) )); + if (compiler->saveds >= 4) + FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_EREG1) | A(SLJIT_LOCALS_REG) | IMM(-5 * (sljit_si)(sizeof(sljit_sw))) )); + if (compiler->saveds >= 3) + FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_REG3) | A(SLJIT_LOCALS_REG) | IMM(-4 * (sljit_si)(sizeof(sljit_sw))) )); + if (compiler->saveds >= 2) + FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_REG2) | A(SLJIT_LOCALS_REG) | IMM(-3 * (sljit_si)(sizeof(sljit_sw))) )); + if (compiler->saveds >= 1) + FAIL_IF(push_inst(compiler, STACK_LOAD | D(SLJIT_SAVED_REG1) | A(SLJIT_LOCALS_REG) | IMM(-2 * (sljit_si)(sizeof(sljit_sw))) )); + FAIL_IF(push_inst(compiler, STACK_LOAD | D(ZERO_REG) | A(SLJIT_LOCALS_REG) | IMM(-(sljit_si)(sizeof(sljit_sw))) )); + + FAIL_IF(push_inst(compiler, MTLR | S(0))); + FAIL_IF(push_inst(compiler, BLR)); + + return SLJIT_SUCCESS; +} + +#undef STACK_STORE +#undef STACK_LOAD + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +/* i/x - immediate/indexed form + n/w - no write-back / write-back (1 bit) + s/l - store/load (1 bit) + u/s - signed/unsigned (1 bit) + w/b/h/i - word/byte/half/int allowed (2 bit) + It contains 32 items, but not all are different. */ + +/* 64-bit only: [reg+imm] must be aligned to 4 bytes. */ +#define ADDR_MODE2 0x10000 +/* 64-bit only: there is no lwau instruction. */ +#define UPDATE_REQ 0x20000 + +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) +#define ARCH_32_64(a, b) a +#define INST_CODE_AND_DST(inst, flags, reg) \ + ((inst) | (((flags) & MEM_MASK) <= GPR_REG ?
D(reg) : FD(reg))) +#else +#define ARCH_32_64(a, b) b +#define INST_CODE_AND_DST(inst, flags, reg) \ + (((inst) & ~(ADDR_MODE2 | UPDATE_REQ)) | (((flags) & MEM_MASK) <= GPR_REG ? D(reg) : FD(reg))) +#endif + +static SLJIT_CONST sljit_ins data_transfer_insts[64 + 8] = { + +/* -------- Unsigned -------- */ + +/* Word. */ + +/* u w n i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | ADDR_MODE2 | 0x0 /* std */), +/* u w n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | ADDR_MODE2 | 0x0 /* ld */), +/* u w n x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */), +/* u w n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */), + +/* u w w i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | ADDR_MODE2 | 0x1 /* stdu */), +/* u w w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | ADDR_MODE2 | 0x1 /* ldu */), +/* u w w x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */), +/* u w w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */), + +/* Byte. */ + +/* u b n i s */ HI(38) /* stb */, +/* u b n i l */ HI(34) /* lbz */, +/* u b n x s */ HI(31) | LO(215) /* stbx */, +/* u b n x l */ HI(31) | LO(87) /* lbzx */, + +/* u b w i s */ HI(39) /* stbu */, +/* u b w i l */ HI(35) /* lbzu */, +/* u b w x s */ HI(31) | LO(247) /* stbux */, +/* u b w x l */ HI(31) | LO(119) /* lbzux */, + +/* Half. */ + +/* u h n i s */ HI(44) /* sth */, +/* u h n i l */ HI(40) /* lhz */, +/* u h n x s */ HI(31) | LO(407) /* sthx */, +/* u h n x l */ HI(31) | LO(279) /* lhzx */, + +/* u h w i s */ HI(45) /* sthu */, +/* u h w i l */ HI(41) /* lhzu */, +/* u h w x s */ HI(31) | LO(439) /* sthux */, +/* u h w x l */ HI(31) | LO(311) /* lhzux */, + +/* Int. */ + +/* u i n i s */ HI(36) /* stw */, +/* u i n i l */ HI(32) /* lwz */, +/* u i n x s */ HI(31) | LO(151) /* stwx */, +/* u i n x l */ HI(31) | LO(23) /* lwzx */, + +/* u i w i s */ HI(37) /* stwu */, +/* u i w i l */ HI(33) /* lwzu */, +/* u i w x s */ HI(31) | LO(183) /* stwux */, +/* u i w x l */ HI(31) | LO(55) /* lwzux */, + +/* -------- Signed -------- */ + +/* Word. */ + +/* s w n i s */ ARCH_32_64(HI(36) /* stw */, HI(62) | ADDR_MODE2 | 0x0 /* std */), +/* s w n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | ADDR_MODE2 | 0x0 /* ld */), +/* s w n x s */ ARCH_32_64(HI(31) | LO(151) /* stwx */, HI(31) | LO(149) /* stdx */), +/* s w n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(21) /* ldx */), + +/* s w w i s */ ARCH_32_64(HI(37) /* stwu */, HI(62) | ADDR_MODE2 | 0x1 /* stdu */), +/* s w w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | ADDR_MODE2 | 0x1 /* ldu */), +/* s w w x s */ ARCH_32_64(HI(31) | LO(183) /* stwux */, HI(31) | LO(181) /* stdux */), +/* s w w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(53) /* ldux */), + +/* Byte. */ + +/* s b n i s */ HI(38) /* stb */, +/* s b n i l */ HI(34) /* lbz */ /* EXTS_REQ */, +/* s b n x s */ HI(31) | LO(215) /* stbx */, +/* s b n x l */ HI(31) | LO(87) /* lbzx */ /* EXTS_REQ */, + +/* s b w i s */ HI(39) /* stbu */, +/* s b w i l */ HI(35) /* lbzu */ /* EXTS_REQ */, +/* s b w x s */ HI(31) | LO(247) /* stbux */, +/* s b w x l */ HI(31) | LO(119) /* lbzux */ /* EXTS_REQ */, + +/* Half. */ + +/* s h n i s */ HI(44) /* sth */, +/* s h n i l */ HI(42) /* lha */, +/* s h n x s */ HI(31) | LO(407) /* sthx */, +/* s h n x l */ HI(31) | LO(343) /* lhax */, + +/* s h w i s */ HI(45) /* sthu */, +/* s h w i l */ HI(43) /* lhau */, +/* s h w x s */ HI(31) | LO(439) /* sthux */, +/* s h w x l */ HI(31) | LO(375) /* lhaux */, + +/* Int. 
*/ + +/* s i n i s */ HI(36) /* stw */, +/* s i n i l */ ARCH_32_64(HI(32) /* lwz */, HI(58) | ADDR_MODE2 | 0x2 /* lwa */), +/* s i n x s */ HI(31) | LO(151) /* stwx */, +/* s i n x l */ ARCH_32_64(HI(31) | LO(23) /* lwzx */, HI(31) | LO(341) /* lwax */), + +/* s i w i s */ HI(37) /* stwu */, +/* s i w i l */ ARCH_32_64(HI(33) /* lwzu */, HI(58) | ADDR_MODE2 | UPDATE_REQ | 0x2 /* lwa */), +/* s i w x s */ HI(31) | LO(183) /* stwux */, +/* s i w x l */ ARCH_32_64(HI(31) | LO(55) /* lwzux */, HI(31) | LO(373) /* lwaux */), + +/* -------- Double -------- */ + +/* d n i s */ HI(54) /* stfd */, +/* d n i l */ HI(50) /* lfd */, +/* d n x s */ HI(31) | LO(727) /* stfdx */, +/* d n x l */ HI(31) | LO(599) /* lfdx */, + +/* s n i s */ HI(52) /* stfs */, +/* s n i l */ HI(48) /* lfs */, +/* s n x s */ HI(31) | LO(663) /* stfsx */, +/* s n x l */ HI(31) | LO(535) /* lfsx */, + +}; + +#undef ARCH_32_64 + +/* Simple cases (no caching is required). */ +static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw) +{ + sljit_ins inst; +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + sljit_si tmp_reg; +#endif + + SLJIT_ASSERT(arg & SLJIT_MEM); + if (!(arg & 0xf)) { +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + if (argw <= SIMM_MAX && argw >= SIMM_MIN) { + if (inp_flags & ARG_TEST) + return 1; + + inst = data_transfer_insts[(inp_flags & ~WRITE_BACK) & MEM_MASK]; + SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); + push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | IMM(argw)); + return -1; + } +#else + inst = data_transfer_insts[(inp_flags & ~WRITE_BACK) & MEM_MASK]; + if (argw <= SIMM_MAX && argw >= SIMM_MIN && + (!(inst & ADDR_MODE2) || (argw & 0x3) == 0)) { + if (inp_flags & ARG_TEST) + return 1; + + push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | IMM(argw)); + return -1; + } +#endif + return 0; + } + + if (!(arg & 0xf0)) { +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + if (argw <= SIMM_MAX && argw >= SIMM_MIN) { + if (inp_flags & ARG_TEST) + return 1; + + inst = data_transfer_insts[inp_flags & MEM_MASK]; + SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); + push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | IMM(argw)); + return -1; + } +#else + inst = data_transfer_insts[inp_flags & MEM_MASK]; + if (argw <= SIMM_MAX && argw >= SIMM_MIN && (!(inst & ADDR_MODE2) || (argw & 0x3) == 0)) { + if (inp_flags & ARG_TEST) + return 1; + + if ((inp_flags & WRITE_BACK) && (inst & UPDATE_REQ)) { + tmp_reg = (inp_flags & LOAD_DATA) ? (arg & 0xf) : TMP_REG3; + if (push_inst(compiler, ADDI | D(tmp_reg) | A(arg & 0xf) | IMM(argw))) + return -1; + arg = tmp_reg | SLJIT_MEM; + argw = 0; + } + push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | IMM(argw)); + return -1; + } +#endif + } + else if (!(argw & 0x3)) { + if (inp_flags & ARG_TEST) + return 1; + inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; + SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); + push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B((arg >> 4) & 0xf)); + return -1; + } + return 0; +} + +/* See getput_arg below. + Note: can_cache is called only for binary operators. Those operators always + use word arguments without write-back. */
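+/* Illustrative example: two consecutive word loads from nearby absolute addresses, say 0x12340000 and 0x12340008, share one address computation: the first getput_arg() call builds the base in TMP_REG3 and records it in cache_arg/cache_argw, so the second is emitted as a single load with a 16-bit displacement from TMP_REG3. */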
+static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); + + if (!(arg & 0xf)) + return (next_arg & SLJIT_MEM) && ((sljit_uw)argw - (sljit_uw)next_argw <= SIMM_MAX || (sljit_uw)next_argw - (sljit_uw)argw <= SIMM_MAX); + + if (arg & 0xf0) + return ((arg & 0xf0) == (next_arg & 0xf0) && (argw & 0x3) == (next_argw & 0x3)); + + if (argw <= SIMM_MAX && argw >= SIMM_MIN) { + if (arg == next_arg && (next_argw <= SIMM_MAX && next_argw >= SIMM_MIN)) + return 1; + } + + if (arg == next_arg && ((sljit_uw)argw - (sljit_uw)next_argw <= SIMM_MAX || (sljit_uw)next_argw - (sljit_uw)argw <= SIMM_MAX)) + return 1; + + return 0; +} + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define ADJUST_CACHED_IMM(imm) \ + if ((inst & ADDR_MODE2) && (imm & 0x3)) { \ + /* Adjust cached value. Fortunately this is really a rare case. */ \ + compiler->cache_argw += imm & 0x3; \ + FAIL_IF(push_inst(compiler, ADDI | D(TMP_REG3) | A(TMP_REG3) | (imm & 0x3))); \ + imm &= ~0x3; \ + } +#else +#define ADJUST_CACHED_IMM(imm) +#endif + +/* Emit the necessary instructions. See can_cache above. */ +static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si inp_flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + sljit_si tmp_r; + sljit_ins inst; + + SLJIT_ASSERT(arg & SLJIT_MEM); + + tmp_r = ((inp_flags & LOAD_DATA) && ((inp_flags) & MEM_MASK) <= GPR_REG) ? reg : TMP_REG1; + /* Special case for "mov reg, [reg, ... ]". */ + if ((arg & 0xf) == tmp_r) + tmp_r = TMP_REG1; + + if (!(arg & 0xf)) { + inst = data_transfer_insts[(inp_flags & ~WRITE_BACK) & MEM_MASK]; + if ((compiler->cache_arg & SLJIT_IMM) && (((sljit_uw)argw - (sljit_uw)compiler->cache_argw) <= SIMM_MAX || ((sljit_uw)compiler->cache_argw - (sljit_uw)argw) <= SIMM_MAX)) { + argw = argw - compiler->cache_argw; + ADJUST_CACHED_IMM(argw); + SLJIT_ASSERT(!(inst & UPDATE_REQ)); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3) | IMM(argw)); + } + + if ((next_arg & SLJIT_MEM) && (argw - next_argw <= SIMM_MAX || next_argw - argw <= SIMM_MAX)) { + SLJIT_ASSERT(inp_flags & LOAD_DATA); + + compiler->cache_arg = SLJIT_IMM; + compiler->cache_argw = argw; + tmp_r = TMP_REG3; + } + + FAIL_IF(load_immediate(compiler, tmp_r, argw)); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(tmp_r)); + } + + if (SLJIT_UNLIKELY(arg & 0xf0)) { + argw &= 0x3; + /* Otherwise getput_arg_fast would capture it. */
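+ /* Illustration: for a two-register address such as r1 + (r2 << 2), as used for word-sized array indexing, argw is the shift amount; r2 is shifted left into tmp_r (rlwinm on ppc-32, an RLDI form on ppc-64) and the indexed (x-form) load or store is then emitted on r1 + tmp_r. */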
+ SLJIT_ASSERT(argw); + + if ((SLJIT_MEM | (arg & 0xf0)) == compiler->cache_arg && argw == compiler->cache_argw) + tmp_r = TMP_REG3; + else { + if ((arg & 0xf0) == (next_arg & 0xf0) && argw == (next_argw & 0x3)) { + compiler->cache_arg = SLJIT_MEM | (arg & 0xf0); + compiler->cache_argw = argw; + tmp_r = TMP_REG3; + } +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + FAIL_IF(push_inst(compiler, RLWINM | S((arg >> 4) & 0xf) | A(tmp_r) | (argw << 11) | ((31 - argw) << 1))); +#else + FAIL_IF(push_inst(compiler, RLDI(tmp_r, (arg >> 4) & 0xf, argw, 63 - argw, 1))); +#endif + } + inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; + SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(tmp_r)); + } + + inst = data_transfer_insts[inp_flags & MEM_MASK]; + + if (compiler->cache_arg == arg && ((sljit_uw)argw - (sljit_uw)compiler->cache_argw <= SIMM_MAX || (sljit_uw)compiler->cache_argw - (sljit_uw)argw <= SIMM_MAX)) { + SLJIT_ASSERT(!(inp_flags & WRITE_BACK)); + argw = argw - compiler->cache_argw; + ADJUST_CACHED_IMM(argw); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3) | IMM(argw)); + } + + if ((compiler->cache_arg & SLJIT_IMM) && compiler->cache_argw == argw) { + inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; + SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(TMP_REG3)); + } + + if (argw == next_argw && (next_arg & SLJIT_MEM)) { + SLJIT_ASSERT(inp_flags & LOAD_DATA); + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + + compiler->cache_arg = SLJIT_IMM; + compiler->cache_argw = argw; + + inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; + SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(TMP_REG3)); + } + + if (arg == next_arg && !(inp_flags & WRITE_BACK) && ((sljit_uw)argw - (sljit_uw)next_argw <= SIMM_MAX || (sljit_uw)next_argw - (sljit_uw)argw <= SIMM_MAX)) { + SLJIT_ASSERT(inp_flags & LOAD_DATA); + FAIL_IF(load_immediate(compiler, TMP_REG3, argw)); + FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | A(TMP_REG3) | B(arg & 0xf))); + + compiler->cache_arg = arg; + compiler->cache_argw = argw; + + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(TMP_REG3)); + } + + /* Get the indexed version instead of the normal one. */ + inst = data_transfer_insts[(inp_flags | INDEXED) & MEM_MASK]; + SLJIT_ASSERT(!(inst & (ADDR_MODE2 | UPDATE_REQ))); + FAIL_IF(load_immediate(compiler, tmp_r, argw)); + return push_inst(compiler, INST_CODE_AND_DST(inst, inp_flags, reg) | A(arg & 0xf) | B(tmp_r)); +} + +static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) +{ + if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) + return compiler->error; + return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); +} + +static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si input_flags, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + /* arg1 goes to TMP_REG1 or src reg + arg2 goes to TMP_REG2, imm or src reg + TMP_REG3 can be used for caching + result goes to TMP_REG2, so storing the result can use TMP_REG1 and TMP_REG3.
*/ + sljit_si dst_r; + sljit_si src1_r; + sljit_si src2_r; + sljit_si sugg_src2_r = TMP_REG2; + sljit_si flags = input_flags & (ALT_FORM1 | ALT_FORM2 | ALT_FORM3 | ALT_FORM4 | ALT_FORM5 | ALT_FORM6 | ALT_SIGN_EXT | ALT_SET_FLAGS); + + if (!(input_flags & ALT_KEEP_CACHE)) { + compiler->cache_arg = 0; + compiler->cache_argw = 0; + } + + /* Destination check. */ + if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM)) + return SLJIT_SUCCESS; + dst_r = TMP_REG2; + } + else if (dst <= ZERO_REG) { + dst_r = dst; + flags |= REG_DEST; + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + sugg_src2_r = dst_r; + } + else { + SLJIT_ASSERT(dst & SLJIT_MEM); + if (getput_arg_fast(compiler, input_flags | ARG_TEST, TMP_REG2, dst, dstw)) { + flags |= FAST_DEST; + dst_r = TMP_REG2; + } + else { + flags |= SLOW_DEST; + dst_r = 0; + } + } + + /* Source 1. */ + if (src1 <= ZERO_REG) { + src1_r = src1; + flags |= REG1_SOURCE; + } + else if (src1 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); + src1_r = TMP_REG1; + } + else if (getput_arg_fast(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w)) { + FAIL_IF(compiler->error); + src1_r = TMP_REG1; + } + else + src1_r = 0; + + /* Source 2. */ + if (src2 <= ZERO_REG) { + src2_r = src2; + flags |= REG2_SOURCE; + if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + dst_r = src2_r; + } + else if (src2 & SLJIT_IMM) { + FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w)); + src2_r = sugg_src2_r; + } + else if (getput_arg_fast(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w)) { + FAIL_IF(compiler->error); + src2_r = sugg_src2_r; + } + else + src2_r = 0; + + /* src1_r, src2_r and dst_r can be zero (=unprocessed). + All arguments are complex addressing modes, and it is a binary operator. 
*/ + if (src1_r == 0 && src2_r == 0 && dst_r == 0) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); + } + src1_r = TMP_REG1; + src2_r = TMP_REG2; + } + else if (src1_r == 0 && src2_r == 0) { + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); + src1_r = TMP_REG1; + } + else if (src1_r == 0 && dst_r == 0) { + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + src1_r = TMP_REG1; + } + else if (src2_r == 0 && dst_r == 0) { + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); + src2_r = sugg_src2_r; + } + + if (dst_r == 0) + dst_r = TMP_REG2; + + if (src1_r == 0) { + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, TMP_REG1, src1, src1w, 0, 0)); + src1_r = TMP_REG1; + } + + if (src2_r == 0) { + FAIL_IF(getput_arg(compiler, input_flags | LOAD_DATA, sugg_src2_r, src2, src2w, 0, 0)); + src2_r = sugg_src2_r; + } + + FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); + + if (flags & (FAST_DEST | SLOW_DEST)) { + if (flags & FAST_DEST) + FAIL_IF(getput_arg_fast(compiler, input_flags, dst_r, dst, dstw)); + else + FAIL_IF(getput_arg(compiler, input_flags, dst_r, dst, dstw, 0, 0)); + } + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + CHECK_ERROR(); + check_sljit_emit_op0(compiler, op); + + switch (GET_OPCODE(op)) { + case SLJIT_BREAKPOINT: + case SLJIT_NOP: + return push_inst(compiler, NOP); + break; + case SLJIT_UMUL: + case SLJIT_SMUL: + FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG1))); +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + FAIL_IF(push_inst(compiler, MULLD | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); + return push_inst(compiler, (GET_OPCODE(op) == SLJIT_UMUL ? MULHDU : MULHD) | D(SLJIT_SCRATCH_REG2) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2)); +#else + FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); + return push_inst(compiler, (GET_OPCODE(op) == SLJIT_UMUL ? MULHWU : MULHW) | D(SLJIT_SCRATCH_REG2) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2)); +#endif + case SLJIT_UDIV: + case SLJIT_SDIV: + FAIL_IF(push_inst(compiler, OR | S(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG1))); +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (op & SLJIT_INT_OP) { + FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_UDIV ? DIVWU : DIVW) | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); + FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG1) | B(SLJIT_SCRATCH_REG2))); + return push_inst(compiler, SUBF | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG2) | B(TMP_REG1)); + } + FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_UDIV ? 
DIVDU : DIVD) | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); + FAIL_IF(push_inst(compiler, MULLD | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG1) | B(SLJIT_SCRATCH_REG2))); + return push_inst(compiler, SUBF | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG2) | B(TMP_REG1)); +#else + FAIL_IF(push_inst(compiler, (GET_OPCODE(op) == SLJIT_UDIV ? DIVWU : DIVW) | D(SLJIT_SCRATCH_REG1) | A(TMP_REG1) | B(SLJIT_SCRATCH_REG2))); + FAIL_IF(push_inst(compiler, MULLW | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG1) | B(SLJIT_SCRATCH_REG2))); + return push_inst(compiler, SUBF | D(SLJIT_SCRATCH_REG2) | A(SLJIT_SCRATCH_REG2) | B(TMP_REG1)); +#endif + } + + return SLJIT_SUCCESS; +} + +#define EMIT_MOV(type, type_flags, type_cast) \ + emit_op(compiler, (src & SLJIT_IMM) ? SLJIT_MOV : type, flags | (type_flags), dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? type_cast srcw : srcw) + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si flags = GET_FLAGS(op) ? ALT_SET_FLAGS : 0; + sljit_si op_flags = GET_ALL_FLAGS(op); + + CHECK_ERROR(); + check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + op = GET_OPCODE(op); + if ((src & SLJIT_IMM) && srcw == 0) + src = ZERO_REG; + + if (op_flags & SLJIT_SET_O) + FAIL_IF(push_inst(compiler, MTXER | S(ZERO_REG))); + + if (op_flags & SLJIT_INT_OP) { + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) { + if (src <= ZERO_REG && src == dst) { + if (!TYPE_CAST_NEEDED(op)) + return SLJIT_SUCCESS; + } +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (op == SLJIT_MOV_SI && (src & SLJIT_MEM)) + op = SLJIT_MOV_UI; + if (op == SLJIT_MOVU_SI && (src & SLJIT_MEM)) + op = SLJIT_MOVU_UI; + if (op == SLJIT_MOV_UI && (src & SLJIT_IMM)) + op = SLJIT_MOV_SI; + if (op == SLJIT_MOVU_UI && (src & SLJIT_IMM)) + op = SLJIT_MOVU_SI; +#endif + } +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + else { + /* Most operations expect sign extended arguments. 
*/ + flags |= INT_DATA | SIGNED_DATA; + if (src & SLJIT_IMM) + srcw = (sljit_si)srcw; + } +#endif + } + + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_P: +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: +#endif + return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + case SLJIT_MOV_UI: + return EMIT_MOV(SLJIT_MOV_UI, INT_DATA, (sljit_ui)); + + case SLJIT_MOV_SI: + return EMIT_MOV(SLJIT_MOV_SI, INT_DATA | SIGNED_DATA, (sljit_si)); +#endif + + case SLJIT_MOV_UB: + return EMIT_MOV(SLJIT_MOV_UB, BYTE_DATA, (sljit_ub)); + + case SLJIT_MOV_SB: + return EMIT_MOV(SLJIT_MOV_SB, BYTE_DATA | SIGNED_DATA, (sljit_sb)); + + case SLJIT_MOV_UH: + return EMIT_MOV(SLJIT_MOV_UH, HALF_DATA, (sljit_uh)); + + case SLJIT_MOV_SH: + return EMIT_MOV(SLJIT_MOV_SH, HALF_DATA | SIGNED_DATA, (sljit_sh)); + + case SLJIT_MOVU: + case SLJIT_MOVU_P: +#if (defined SLJIT_CONFIG_PPC_32 && SLJIT_CONFIG_PPC_32) + case SLJIT_MOVU_UI: + case SLJIT_MOVU_SI: +#endif + return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + case SLJIT_MOVU_UI: + return EMIT_MOV(SLJIT_MOV_UI, INT_DATA | WRITE_BACK, (sljit_ui)); + + case SLJIT_MOVU_SI: + return EMIT_MOV(SLJIT_MOV_SI, INT_DATA | SIGNED_DATA | WRITE_BACK, (sljit_si)); +#endif + + case SLJIT_MOVU_UB: + return EMIT_MOV(SLJIT_MOV_UB, BYTE_DATA | WRITE_BACK, (sljit_ub)); + + case SLJIT_MOVU_SB: + return EMIT_MOV(SLJIT_MOV_SB, BYTE_DATA | SIGNED_DATA | WRITE_BACK, (sljit_sb)); + + case SLJIT_MOVU_UH: + return EMIT_MOV(SLJIT_MOV_UH, HALF_DATA | WRITE_BACK, (sljit_uh)); + + case SLJIT_MOVU_SH: + return EMIT_MOV(SLJIT_MOV_SH, HALF_DATA | SIGNED_DATA | WRITE_BACK, (sljit_sh)); + + case SLJIT_NOT: + return emit_op(compiler, SLJIT_NOT, flags, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_NEG: + return emit_op(compiler, SLJIT_NEG, flags, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_CLZ: +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + return emit_op(compiler, SLJIT_CLZ, flags | (!(op_flags & SLJIT_INT_OP) ? 
0 : ALT_FORM1), dst, dstw, TMP_REG1, 0, src, srcw); +#else + return emit_op(compiler, SLJIT_CLZ, flags, dst, dstw, TMP_REG1, 0, src, srcw); +#endif + } + + return SLJIT_SUCCESS; +} + +#undef EMIT_MOV + +#define TEST_SL_IMM(src, srcw) \ + (((src) & SLJIT_IMM) && (srcw) <= SIMM_MAX && (srcw) >= SIMM_MIN) + +#define TEST_UL_IMM(src, srcw) \ + (((src) & SLJIT_IMM) && !((srcw) & ~0xffff)) + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define TEST_SH_IMM(src, srcw) \ + (((src) & SLJIT_IMM) && !((srcw) & 0xffff) && (srcw) <= SLJIT_W(0x7fffffff) && (srcw) >= SLJIT_W(-0x80000000)) +#else +#define TEST_SH_IMM(src, srcw) \ + (((src) & SLJIT_IMM) && !((srcw) & 0xffff)) +#endif + +#define TEST_UH_IMM(src, srcw) \ + (((src) & SLJIT_IMM) && !((srcw) & ~0xffff0000)) + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define TEST_ADD_IMM(src, srcw) \ + (((src) & SLJIT_IMM) && (srcw) <= SLJIT_W(0x7fff7fff) && (srcw) >= SLJIT_W(-0x80000000)) +#else +#define TEST_ADD_IMM(src, srcw) \ + ((src) & SLJIT_IMM) +#endif + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) +#define TEST_UI_IMM(src, srcw) \ + (((src) & SLJIT_IMM) && !((srcw) & ~0xffffffff)) +#else +#define TEST_UI_IMM(src, srcw) \ + ((src) & SLJIT_IMM) +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si flags = GET_FLAGS(op) ? ALT_SET_FLAGS : 0; + + CHECK_ERROR(); + check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + if ((src1 & SLJIT_IMM) && src1w == 0) + src1 = ZERO_REG; + if ((src2 & SLJIT_IMM) && src2w == 0) + src2 = ZERO_REG; + +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (op & SLJIT_INT_OP) { + /* Most operations expect sign extended arguments. */ + flags |= INT_DATA | SIGNED_DATA; + if (src1 & SLJIT_IMM) + src1w = (sljit_si)(src1w); + if (src2 & SLJIT_IMM) + src2w = (sljit_si)(src2w); + if (GET_FLAGS(op)) + flags |= ALT_SIGN_EXT; + } +#endif + if (op & SLJIT_SET_O) + FAIL_IF(push_inst(compiler, MTXER | S(ZERO_REG))); + if (src2 == TMP_REG2) + flags |= ALT_KEEP_CACHE; + + switch (GET_OPCODE(op)) { + case SLJIT_ADD: + if (!GET_FLAGS(op) && ((src1 | src2) & SLJIT_IMM)) { + if (TEST_SL_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_SL_IMM(src1, src1w)) { + compiler->imm = src1w & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0); + } + if (TEST_SH_IMM(src2, src2w)) { + compiler->imm = (src2w >> 16) & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_SH_IMM(src1, src1w)) { + compiler->imm = (src1w >> 16) & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); + } + /* Range between -1 and -32768 is covered above. 
*/ + if (TEST_ADD_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffffffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_ADD_IMM(src1, src1w)) { + compiler->imm = src1w & 0xffffffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4, dst, dstw, src2, src2w, TMP_REG2, 0); + } + } + if (!(GET_FLAGS(op) & (SLJIT_SET_E | SLJIT_SET_O))) { + if (TEST_SL_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_SL_IMM(src1, src1w)) { + compiler->imm = src1w & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0); + } + } + return emit_op(compiler, SLJIT_ADD, flags, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_ADDC: + return emit_op(compiler, SLJIT_ADDC, flags | (!(op & SLJIT_KEEP_FLAGS) ? 0 : ALT_FORM1), dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SUB: + if (!GET_FLAGS(op) && ((src1 | src2) & SLJIT_IMM)) { + if (TEST_SL_IMM(src2, -src2w)) { + compiler->imm = (-src2w) & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_SL_IMM(src1, src1w)) { + compiler->imm = src1w & 0xffff; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0); + } + if (TEST_SH_IMM(src2, -src2w)) { + compiler->imm = ((-src2w) >> 16) & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); + } + /* Range between -1 and -32768 is covered above. */ + if (TEST_ADD_IMM(src2, -src2w)) { + compiler->imm = -src2w & 0xffffffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM4, dst, dstw, src1, src1w, TMP_REG2, 0); + } + } + if (dst == SLJIT_UNUSED && (op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U)) && !(op & (SLJIT_SET_O | SLJIT_SET_C))) { + if (!(op & SLJIT_SET_U)) { + /* We know ALT_SIGN_EXT is set if it is an SLJIT_INT_OP on 64 bit systems. */ + if (TEST_SL_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffff; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (GET_FLAGS(op) == SLJIT_SET_E && TEST_SL_IMM(src1, src1w)) { + compiler->imm = src1w & 0xffff; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); + } + } + if (!(op & (SLJIT_SET_E | SLJIT_SET_S))) { + /* We know ALT_SIGN_EXT is set if it is an SLJIT_INT_OP on 64 bit systems. */ + if (TEST_UL_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffff; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); + } + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM4, dst, dstw, src1, src1w, src2, src2w); + } + if ((src2 & SLJIT_IMM) && src2w >= 0 && src2w <= 0x7fff) { + compiler->imm = src2w; + return emit_op(compiler, SLJIT_SUB, flags | ALT_FORM2 | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); + } + return emit_op(compiler, SLJIT_SUB, flags | ((op & SLJIT_SET_U) ? ALT_FORM4 : 0) | ((op & (SLJIT_SET_E | SLJIT_SET_S)) ? ALT_FORM5 : 0), dst, dstw, src1, src1w, src2, src2w); + } + if (!(op & (SLJIT_SET_E | SLJIT_SET_S | SLJIT_SET_U | SLJIT_SET_O))) { + if (TEST_SL_IMM(src2, -src2w)) { + compiler->imm = (-src2w) & 0xffff; + return emit_op(compiler, SLJIT_ADD, flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); + } + } + /* We know ALT_SIGN_EXT is set if it is an SLJIT_INT_OP on 64 bit systems. 
*/ + return emit_op(compiler, SLJIT_SUB, flags | (!(op & SLJIT_SET_U) ? 0 : ALT_FORM6), dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SUBC: + return emit_op(compiler, SLJIT_SUBC, flags | (!(op & SLJIT_KEEP_FLAGS) ? 0 : ALT_FORM1), dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_MUL: +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (op & SLJIT_INT_OP) + flags |= ALT_FORM2; +#endif + if (!GET_FLAGS(op)) { + if (TEST_SL_IMM(src2, src2w)) { + compiler->imm = src2w & 0xffff; + return emit_op(compiler, SLJIT_MUL, flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_SL_IMM(src1, src1w)) { + compiler->imm = src1w & 0xffff; + return emit_op(compiler, SLJIT_MUL, flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0); + } + } + return emit_op(compiler, SLJIT_MUL, flags, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_AND: + case SLJIT_OR: + case SLJIT_XOR: + /* Commutative unsigned operations. */ + if (!GET_FLAGS(op) || GET_OPCODE(op) == SLJIT_AND) { + if (TEST_UL_IMM(src2, src2w)) { + compiler->imm = src2w; + return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_UL_IMM(src1, src1w)) { + compiler->imm = src1w; + return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM1, dst, dstw, src2, src2w, TMP_REG2, 0); + } + if (TEST_UH_IMM(src2, src2w)) { + compiler->imm = (src2w >> 16) & 0xffff; + return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM2, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_UH_IMM(src1, src1w)) { + compiler->imm = (src1w >> 16) & 0xffff; + return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM2, dst, dstw, src2, src2w, TMP_REG2, 0); + } + } + if (!GET_FLAGS(op) && GET_OPCODE(op) != SLJIT_AND) { + if (TEST_UI_IMM(src2, src2w)) { + compiler->imm = src2w; + return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM3, dst, dstw, src1, src1w, TMP_REG2, 0); + } + if (TEST_UI_IMM(src1, src1w)) { + compiler->imm = src1w; + return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM3, dst, dstw, src2, src2w, TMP_REG2, 0); + } + } + return emit_op(compiler, GET_OPCODE(op), flags, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_ASHR: + if (op & SLJIT_KEEP_FLAGS) + flags |= ALT_FORM3; + /* Fall through. */ + case SLJIT_SHL: + case SLJIT_LSHR: +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (op & SLJIT_INT_OP) + flags |= ALT_FORM2; +#endif + if (src2 & SLJIT_IMM) { + compiler->imm = src2w; + return emit_op(compiler, GET_OPCODE(op), flags | ALT_FORM1, dst, dstw, src1, src1w, TMP_REG2, 0); + } + return emit_op(compiler, GET_OPCODE(op), flags, dst, dstw, src1, src1w, src2, src2w); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +{ + check_sljit_get_register_index(reg); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + CHECK_ERROR(); + check_sljit_emit_op_custom(compiler, instruction, size); + SLJIT_ASSERT(size == 4); + + return push_inst(compiler, *(sljit_ins*)instruction); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ + /* Always available. 
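Every supported PowerPC target implements the FPU, so no runtime check is needed.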
*/ + return 1; +} + +#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 6)) +#define SELECT_FOP(op, single, double) ((op & SLJIT_SINGLE_OP) ? single : double) + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si dst_fr; + + CHECK_ERROR(); + check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); + SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x4), float_transfer_bit_error); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + if (GET_OPCODE(op) == SLJIT_CMPD) { + if (dst > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, dst, dstw, src, srcw)); + dst = TMP_FREG1; + } + + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src, srcw, 0, 0)); + src = TMP_FREG2; + } + + return push_inst(compiler, FCMPU | CRD(4) | FA(dst) | FB(src)); + } + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG1 : dst; + + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_fr, src, srcw, dst, dstw)); + src = dst_fr; + } + + switch (GET_OPCODE(op)) { + case SLJIT_MOVD: + if (src != dst_fr && dst_fr != TMP_FREG1) + FAIL_IF(push_inst(compiler, FMR | FD(dst_fr) | FB(src))); + break; + case SLJIT_NEGD: + FAIL_IF(push_inst(compiler, FNEG | FD(dst_fr) | FB(src))); + break; + case SLJIT_ABSD: + FAIL_IF(push_inst(compiler, FABS | FD(dst_fr) | FB(src))); + break; + } + + if (dst_fr == TMP_FREG1) { + if (GET_OPCODE(op) == SLJIT_MOVD) + dst_fr = src; + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_fr, dst, dstw, 0, 0)); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si dst_fr, flags = 0; + + CHECK_ERROR(); + check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? 
TMP_FREG2 : dst; + + if (src1 > SLJIT_FLOAT_REG6) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { + FAIL_IF(compiler->error); + src1 = TMP_FREG1; + } else + flags |= ALT_FORM1; + } + + if (src2 > SLJIT_FLOAT_REG6) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { + FAIL_IF(compiler->error); + src2 = TMP_FREG2; + } else + flags |= ALT_FORM2; + } + + if ((flags & (ALT_FORM1 | ALT_FORM2)) == (ALT_FORM1 | ALT_FORM2)) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + } + } + else if (flags & ALT_FORM1) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + else if (flags & ALT_FORM2) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + + if (flags & ALT_FORM1) + src1 = TMP_FREG1; + if (flags & ALT_FORM2) + src2 = TMP_FREG2; + + switch (GET_OPCODE(op)) { + case SLJIT_ADDD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADD) | FD(dst_fr) | FA(src1) | FB(src2))); + break; + + case SLJIT_SUBD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUB) | FD(dst_fr) | FA(src1) | FB(src2))); + break; + + case SLJIT_MULD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMUL) | FD(dst_fr) | FA(src1) | FC(src2) /* FMUL uses FC as src2 */)); + break; + + case SLJIT_DIVD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIV) | FD(dst_fr) | FA(src1) | FB(src2))); + break; + } + + if (dst_fr == TMP_FREG2) + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0)); + + return SLJIT_SUCCESS; +} + +#undef FLOAT_DATA +#undef SELECT_FOP + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_enter(compiler, dst, dstw); + ADJUST_LOCAL_OFFSET(dst, dstw); + + /* For UNUSED dst. Uncommon, but possible. */ + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + if (dst <= ZERO_REG) + return push_inst(compiler, MFLR | D(dst)); + + /* Memory. 
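The link register is moved to TMP_REG2 first and then stored to the destination operand.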
*/ + FAIL_IF(push_inst(compiler, MFLR | D(TMP_REG2))); + return emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_return(compiler, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= ZERO_REG) + FAIL_IF(push_inst(compiler, MTLR | S(src))); + else { + if (src & SLJIT_MEM) + FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); + else if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, TMP_REG2, srcw)); + FAIL_IF(push_inst(compiler, MTLR | S(TMP_REG2))); + } + return push_inst(compiler, BLR); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + check_sljit_emit_label(compiler); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + return label; +} + +static sljit_ins get_bo_bi_flags(sljit_si type) +{ + switch (type) { + case SLJIT_C_EQUAL: + return (12 << 21) | (2 << 16); + + case SLJIT_C_NOT_EQUAL: + return (4 << 21) | (2 << 16); + + case SLJIT_C_LESS: + case SLJIT_C_FLOAT_LESS: + return (12 << 21) | ((4 + 0) << 16); + + case SLJIT_C_GREATER_EQUAL: + case SLJIT_C_FLOAT_GREATER_EQUAL: + return (4 << 21) | ((4 + 0) << 16); + + case SLJIT_C_GREATER: + case SLJIT_C_FLOAT_GREATER: + return (12 << 21) | ((4 + 1) << 16); + + case SLJIT_C_LESS_EQUAL: + case SLJIT_C_FLOAT_LESS_EQUAL: + return (4 << 21) | ((4 + 1) << 16); + + case SLJIT_C_SIG_LESS: + return (12 << 21) | (0 << 16); + + case SLJIT_C_SIG_GREATER_EQUAL: + return (4 << 21) | (0 << 16); + + case SLJIT_C_SIG_GREATER: + return (12 << 21) | (1 << 16); + + case SLJIT_C_SIG_LESS_EQUAL: + return (4 << 21) | (1 << 16); + + case SLJIT_C_OVERFLOW: + case SLJIT_C_MUL_OVERFLOW: + return (12 << 21) | (3 << 16); + + case SLJIT_C_NOT_OVERFLOW: + case SLJIT_C_MUL_NOT_OVERFLOW: + return (4 << 21) | (3 << 16); + + case SLJIT_C_FLOAT_EQUAL: + return (12 << 21) | ((4 + 2) << 16); + + case SLJIT_C_FLOAT_NOT_EQUAL: + return (4 << 21) | ((4 + 2) << 16); + + case SLJIT_C_FLOAT_UNORDERED: + return (12 << 21) | ((4 + 3) << 16); + + case SLJIT_C_FLOAT_ORDERED: + return (4 << 21) | ((4 + 3) << 16); + + default: + SLJIT_ASSERT(type >= SLJIT_JUMP && type <= SLJIT_CALL3); + return (20 << 21); + } +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + struct sljit_jump *jump; + sljit_ins bo_bi_flags; + + CHECK_ERROR_PTR(); + check_sljit_emit_jump(compiler, type); + + bo_bi_flags = get_bo_bi_flags(type & 0xff); + if (!bo_bi_flags) + return NULL; + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + /* In PPC, we don't need to touch the arguments. 
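The argument registers can be left untouched; only the branch type needs to be recorded below.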
*/ + if (type >= SLJIT_JUMP) + jump->flags |= UNCOND_B; + + PTR_FAIL_IF(emit_const(compiler, TMP_REG1, 0)); + PTR_FAIL_IF(push_inst(compiler, MTCTR | S(TMP_REG1))); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, BCCTR | bo_bi_flags | (type >= SLJIT_FAST_CALL ? 1 : 0))); + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + struct sljit_jump *jump = NULL; + sljit_si src_r; + + CHECK_ERROR(); + check_sljit_emit_ijump(compiler, type, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= ZERO_REG) + src_r = src; + else if (src & SLJIT_IMM) { + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR | UNCOND_B); + jump->u.target = srcw; + + FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + src_r = TMP_REG2; + } + else { + FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, TMP_REG2, 0, TMP_REG1, 0, src, srcw)); + src_r = TMP_REG2; + } + + FAIL_IF(push_inst(compiler, MTCTR | S(src_r))); + if (jump) + jump->addr = compiler->size; + return push_inst(compiler, BCCTR | (20 << 21) | (type >= SLJIT_FAST_CALL ? 1 : 0)); +} + +/* Get a bit from CR, all other bits are zeroed. */ +#define GET_CR_BIT(bit, dst) \ + FAIL_IF(push_inst(compiler, MFCR | D(dst))); \ + FAIL_IF(push_inst(compiler, RLWINM | S(dst) | A(dst) | ((1 + (bit)) << 11) | (31 << 6) | (31 << 1))); + +#define INVERT_BIT(dst) \ + FAIL_IF(push_inst(compiler, XORI | S(dst) | A(dst) | 0x1)); + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + sljit_si reg, input_flags; + sljit_si flags = GET_ALL_FLAGS(op); + + CHECK_ERROR(); + check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + ADJUST_LOCAL_OFFSET(dst, dstw); + + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + op = GET_OPCODE(op); + reg = (op < SLJIT_ADD && dst <= ZERO_REG) ? dst : TMP_REG2; + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + if (op >= SLJIT_ADD && (src & SLJIT_MEM)) { + ADJUST_LOCAL_OFFSET(src, srcw); +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + input_flags = (flags & SLJIT_INT_OP) ? 
INT_DATA : WORD_DATA; +#else + input_flags = WORD_DATA; +#endif + FAIL_IF(emit_op_mem2(compiler, input_flags | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw)); + src = TMP_REG1; + srcw = 0; + } + + switch (type) { + case SLJIT_C_EQUAL: + GET_CR_BIT(2, reg); + break; + + case SLJIT_C_NOT_EQUAL: + GET_CR_BIT(2, reg); + INVERT_BIT(reg); + break; + + case SLJIT_C_LESS: + case SLJIT_C_FLOAT_LESS: + GET_CR_BIT(4 + 0, reg); + break; + + case SLJIT_C_GREATER_EQUAL: + case SLJIT_C_FLOAT_GREATER_EQUAL: + GET_CR_BIT(4 + 0, reg); + INVERT_BIT(reg); + break; + + case SLJIT_C_GREATER: + case SLJIT_C_FLOAT_GREATER: + GET_CR_BIT(4 + 1, reg); + break; + + case SLJIT_C_LESS_EQUAL: + case SLJIT_C_FLOAT_LESS_EQUAL: + GET_CR_BIT(4 + 1, reg); + INVERT_BIT(reg); + break; + + case SLJIT_C_SIG_LESS: + GET_CR_BIT(0, reg); + break; + + case SLJIT_C_SIG_GREATER_EQUAL: + GET_CR_BIT(0, reg); + INVERT_BIT(reg); + break; + + case SLJIT_C_SIG_GREATER: + GET_CR_BIT(1, reg); + break; + + case SLJIT_C_SIG_LESS_EQUAL: + GET_CR_BIT(1, reg); + INVERT_BIT(reg); + break; + + case SLJIT_C_OVERFLOW: + case SLJIT_C_MUL_OVERFLOW: + GET_CR_BIT(3, reg); + break; + + case SLJIT_C_NOT_OVERFLOW: + case SLJIT_C_MUL_NOT_OVERFLOW: + GET_CR_BIT(3, reg); + INVERT_BIT(reg); + break; + + case SLJIT_C_FLOAT_EQUAL: + GET_CR_BIT(4 + 2, reg); + break; + + case SLJIT_C_FLOAT_NOT_EQUAL: + GET_CR_BIT(4 + 2, reg); + INVERT_BIT(reg); + break; + + case SLJIT_C_FLOAT_UNORDERED: + GET_CR_BIT(4 + 3, reg); + break; + + case SLJIT_C_FLOAT_ORDERED: + GET_CR_BIT(4 + 3, reg); + INVERT_BIT(reg); + break; + + default: + SLJIT_ASSERT_STOP(); + break; + } + + if (op < SLJIT_ADD) { +#if (defined SLJIT_CONFIG_PPC_64 && SLJIT_CONFIG_PPC_64) + if (op == SLJIT_MOV) + input_flags = WORD_DATA; + else { + op = SLJIT_MOV_UI; + input_flags = INT_DATA; + } +#else + op = SLJIT_MOV; + input_flags = WORD_DATA; +#endif + return (reg == TMP_REG2) ? emit_op(compiler, op, input_flags, dst, dstw, TMP_REG1, 0, TMP_REG2, 0) : SLJIT_SUCCESS; + } + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->skip_checks = 1; +#endif + return sljit_emit_op2(compiler, op | flags, dst, dstw, src, srcw, TMP_REG2, 0); +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +{ + struct sljit_const *const_; + sljit_si reg; + + CHECK_ERROR_PTR(); + check_sljit_emit_const(compiler, dst, dstw, init_value); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + reg = (dst <= ZERO_REG) ? 
dst : TMP_REG2; + + PTR_FAIL_IF(emit_const(compiler, reg, init_value)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op(compiler, SLJIT_MOV, WORD_DATA, dst, dstw, TMP_REG1, 0, TMP_REG2, 0)); + return const_; +} Property changes on: sys/contrib/sljit/sljitNativePPC_common.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Index: sys/contrib/sljit/sljitNativeSPARC_32.c =================================================================== --- sys/contrib/sljit/sljitNativeSPARC_32.c (revision 0) +++ sys/contrib/sljit/sljitNativeSPARC_32.c (working copy) @@ -0,0 +1,164 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +static sljit_si load_immediate(struct sljit_compiler *compiler, sljit_si dst, sljit_sw imm) +{ + if (imm <= SIMM_MAX && imm >= SIMM_MIN) + return push_inst(compiler, OR | D(dst) | S1(0) | IMM(imm), DR(dst)); + + FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((imm >> 10) & 0x3fffff), DR(dst))); + return (imm & 0x3ff) ? push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (imm & 0x3ff), DR(dst)) : SLJIT_SUCCESS; +} + +#define ARG2(flags, src2) ((flags & SRC2_IMM) ? 
IMM(src2) : S2(src2)) + +static SLJIT_INLINE sljit_si emit_single_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, + sljit_si dst, sljit_si src1, sljit_sw src2) +{ + SLJIT_COMPILE_ASSERT(ICC_IS_SET == SET_FLAGS, icc_is_set_and_set_flags_must_be_the_same); + + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: + case SLJIT_MOV_P: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if (dst != src2) + return push_inst(compiler, OR | D(dst) | S1(0) | S2(src2), DR(dst)); + return SLJIT_SUCCESS; + + case SLJIT_MOV_UB: + case SLJIT_MOV_SB: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + if (op == SLJIT_MOV_UB) + return push_inst(compiler, AND | D(dst) | S1(src2) | IMM(0xff), DR(dst)); + FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(24), DR(dst))); + return push_inst(compiler, SRA | D(dst) | S1(dst) | IMM(24), DR(dst)); + } + else if (dst != src2) + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; + + case SLJIT_MOV_UH: + case SLJIT_MOV_SH: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + if ((flags & (REG_DEST | REG2_SOURCE)) == (REG_DEST | REG2_SOURCE)) { + FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src2) | IMM(16), DR(dst))); + return push_inst(compiler, (op == SLJIT_MOV_SH ? SRA : SRL) | D(dst) | S1(dst) | IMM(16), DR(dst)); + } + else if (dst != src2) + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; + + case SLJIT_NOT: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + return push_inst(compiler, XNOR | (flags & SET_FLAGS) | D(dst) | S1(0) | S2(src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_CLZ: + SLJIT_ASSERT(src1 == TMP_REG1 && !(flags & SRC2_IMM)); + /* sparc 32 does not support SLJIT_KEEP_FLAGS. Not sure I can fix this. */ + FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(src2) | S2(0), SET_FLAGS)); + FAIL_IF(push_inst(compiler, OR | D(TMP_REG1) | S1(0) | S2(src2), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, BICC | DA(0x1) | (7 & DISP_MASK), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(0) | IMM(32), UNMOVABLE_INS | (flags & SET_FLAGS))); + FAIL_IF(push_inst(compiler, OR | D(dst) | S1(0) | IMM(-1), DR(dst))); + + /* Loop. 
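Each iteration tests the sign, shifts the value left one bit, and increments dst in the delay slot until the top bit becomes set.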
*/ + FAIL_IF(push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(0), SET_FLAGS)); + FAIL_IF(push_inst(compiler, SLL | D(TMP_REG1) | S1(TMP_REG1) | IMM(1), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, BICC | DA(0xe) | (-2 & DISP_MASK), UNMOVABLE_INS)); + return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(dst) | IMM(1), UNMOVABLE_INS | (flags & SET_FLAGS)); + + case SLJIT_ADD: + return push_inst(compiler, ADD | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_ADDC: + return push_inst(compiler, ADDC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_SUB: + return push_inst(compiler, SUB | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_SUBC: + return push_inst(compiler, SUBC | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_MUL: + FAIL_IF(push_inst(compiler, SMUL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + if (!(flags & SET_FLAGS)) + return SLJIT_SUCCESS; + FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(dst) | IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, RDY | D(TMP_REG4), DR(TMP_REG4))); + return push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(TMP_REG1) | S2(TMP_REG4), MOVABLE_INS | SET_FLAGS); + + case SLJIT_AND: + return push_inst(compiler, AND | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_OR: + return push_inst(compiler, OR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_XOR: + return push_inst(compiler, XOR | (flags & SET_FLAGS) | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst) | (flags & SET_FLAGS)); + + case SLJIT_SHL: + FAIL_IF(push_inst(compiler, SLL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); + + case SLJIT_LSHR: + FAIL_IF(push_inst(compiler, SRL | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + return !(flags & SET_FLAGS) ? SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); + + case SLJIT_ASHR: + FAIL_IF(push_inst(compiler, SRA | D(dst) | S1(src1) | ARG2(flags, src2), DR(dst))); + return !(flags & SET_FLAGS) ? 
SLJIT_SUCCESS : push_inst(compiler, SUB | SET_FLAGS | D(0) | S1(dst) | S2(0), SET_FLAGS); + } + + SLJIT_ASSERT_STOP(); + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw init_value) +{ + FAIL_IF(push_inst(compiler, SETHI | D(dst) | ((init_value >> 10) & 0x3fffff), DR(dst))); + return push_inst(compiler, OR | D(dst) | S1(dst) | IMM_ARG | (init_value & 0x3ff), DR(dst)); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffc00000) | ((new_addr >> 10) & 0x3fffff); + inst[1] = (inst[1] & 0xfffffc00) | (new_addr & 0x3ff); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + sljit_ins *inst = (sljit_ins*)addr; + + inst[0] = (inst[0] & 0xffc00000) | ((new_constant >> 10) & 0x3fffff); + inst[1] = (inst[1] & 0xfffffc00) | (new_constant & 0x3ff); + SLJIT_CACHE_FLUSH(inst, inst + 2); +} Property changes on: sys/contrib/sljit/sljitNativeSPARC_32.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativeSPARC_common.c =================================================================== --- sys/contrib/sljit/sljitNativeSPARC_common.c (revision 0) +++ sys/contrib/sljit/sljitNativeSPARC_common.c (working copy) @@ -0,0 +1,1348 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +{ + return "SPARC" SLJIT_CPUINFO; +} + +/* Length of an instruction word + Both for sparc-32 and sparc-64 */ +typedef sljit_ui sljit_ins; + +static void sparc_cache_flush(sljit_ins *from, sljit_ins *to) +{ + if (SLJIT_UNLIKELY(from == to)) + return; + + do { + __asm__ volatile ( + "flush %0\n" + : : "r"(from) + ); + /* Operates at least on doubleword. 
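Stepping by two instruction words per flush therefore covers the whole range.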
*/ + from += 2; + } while (from < to); + + if (from == to) { + /* Flush the last word. */ + to --; + __asm__ volatile ( + "flush %0\n" + : : "r"(to) + ); + } +} + +/* TMP_REG2 is not used by getput_arg */ +#define TMP_REG1 (SLJIT_NO_REGISTERS + 1) +#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) +#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) +#define TMP_REG4 (SLJIT_NO_REGISTERS + 4) +#define LINK_REG (SLJIT_NO_REGISTERS + 5) + +#define TMP_FREG1 (0) +#define TMP_FREG2 ((SLJIT_FLOAT_REG6 + 1) << 1) + +static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 7] = { + 0, 8, 9, 10, 11, 12, 16, 17, 18, 19, 20, 14, 1, 24, 25, 26, 15 +}; + +/* --------------------------------------------------------------------- */ +/* Instruction forms */ +/* --------------------------------------------------------------------- */ + +#define D(d) (reg_map[d] << 25) +#define DA(d) ((d) << 25) +#define S1(s1) (reg_map[s1] << 14) +#define S2(s2) (reg_map[s2]) +#define S1A(s1) ((s1) << 14) +#define S2A(s2) (s2) +#define IMM_ARG 0x2000 +#define DOP(op) ((op) << 5) +#define IMM(imm) (((imm) & 0x1fff) | IMM_ARG) + +#define DR(dr) (reg_map[dr]) +#define OPC1(opcode) ((opcode) << 30) +#define OPC2(opcode) ((opcode) << 22) +#define OPC3(opcode) ((opcode) << 19) +#define SET_FLAGS OPC3(0x10) + +#define ADD (OPC1(0x2) | OPC3(0x00)) +#define ADDC (OPC1(0x2) | OPC3(0x08)) +#define AND (OPC1(0x2) | OPC3(0x01)) +#define ANDN (OPC1(0x2) | OPC3(0x05)) +#define CALL (OPC1(0x1)) +#define FABSS (OPC1(0x2) | OPC3(0x34) | DOP(0x09)) +#define FADDD (OPC1(0x2) | OPC3(0x34) | DOP(0x42)) +#define FADDS (OPC1(0x2) | OPC3(0x34) | DOP(0x41)) +#define FCMPD (OPC1(0x2) | OPC3(0x35) | DOP(0x52)) +#define FCMPS (OPC1(0x2) | OPC3(0x35) | DOP(0x51)) +#define FDIVD (OPC1(0x2) | OPC3(0x34) | DOP(0x4e)) +#define FDIVS (OPC1(0x2) | OPC3(0x34) | DOP(0x4d)) +#define FMOVS (OPC1(0x2) | OPC3(0x34) | DOP(0x01)) +#define FMULD (OPC1(0x2) | OPC3(0x34) | DOP(0x4a)) +#define FMULS (OPC1(0x2) | OPC3(0x34) | DOP(0x49)) +#define FNEGS (OPC1(0x2) | OPC3(0x34) | DOP(0x05)) +#define FSUBD (OPC1(0x2) | OPC3(0x34) | DOP(0x46)) +#define FSUBS (OPC1(0x2) | OPC3(0x34) | DOP(0x45)) +#define JMPL (OPC1(0x2) | OPC3(0x38)) +#define NOP (OPC1(0x0) | OPC2(0x04)) +#define OR (OPC1(0x2) | OPC3(0x02)) +#define ORN (OPC1(0x2) | OPC3(0x06)) +#define RDY (OPC1(0x2) | OPC3(0x28) | S1A(0)) +#define RESTORE (OPC1(0x2) | OPC3(0x3d)) +#define SAVE (OPC1(0x2) | OPC3(0x3c)) +#define SETHI (OPC1(0x0) | OPC2(0x04)) +#define SLL (OPC1(0x2) | OPC3(0x25)) +#define SLLX (OPC1(0x2) | OPC3(0x25) | (1 << 12)) +#define SRA (OPC1(0x2) | OPC3(0x27)) +#define SRAX (OPC1(0x2) | OPC3(0x27) | (1 << 12)) +#define SRL (OPC1(0x2) | OPC3(0x26)) +#define SRLX (OPC1(0x2) | OPC3(0x26) | (1 << 12)) +#define SUB (OPC1(0x2) | OPC3(0x04)) +#define SUBC (OPC1(0x2) | OPC3(0x0c)) +#define TA (OPC1(0x2) | OPC3(0x3a) | (8 << 25)) +#define WRY (OPC1(0x2) | OPC3(0x30) | DA(0)) +#define XOR (OPC1(0x2) | OPC3(0x03)) +#define XNOR (OPC1(0x2) | OPC3(0x07)) + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#define MAX_DISP (0x1fffff) +#define MIN_DISP (-0x200000) +#define DISP_MASK (0x3fffff) + +#define BICC (OPC1(0x0) | OPC2(0x2)) +#define FBFCC (OPC1(0x0) | OPC2(0x6)) +#define SLL_W SLL +#define SDIV (OPC1(0x2) | OPC3(0x0f)) +#define SMUL (OPC1(0x2) | OPC3(0x0b)) +#define UDIV (OPC1(0x2) | OPC3(0x0e)) +#define UMUL (OPC1(0x2) | OPC3(0x0a)) +#else +#define SLL_W SLLX +#endif + +#define SIMM_MAX (0x0fff) +#define SIMM_MIN (-0x1000) + +/* dest_reg is the absolute name of the register + Useful for reordering instructions in 
the delay slot. */ +static sljit_si push_inst(struct sljit_compiler *compiler, sljit_ins ins, sljit_si delay_slot) +{ + sljit_ins *ptr; + SLJIT_ASSERT((delay_slot & DST_INS_MASK) == UNMOVABLE_INS + || (delay_slot & DST_INS_MASK) == MOVABLE_INS + || (delay_slot & DST_INS_MASK) == ((ins >> 25) & 0x1f)); + ptr = (sljit_ins*)ensure_buf(compiler, sizeof(sljit_ins)); + FAIL_IF(!ptr); + *ptr = ins; + compiler->size++; + compiler->delay_slot = delay_slot; + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_ins* optimize_jump(struct sljit_jump *jump, sljit_ins *code_ptr, sljit_ins *code) +{ + sljit_sw diff; + sljit_uw target_addr; + sljit_ins *inst; + sljit_ins saved_inst; + + if (jump->flags & SLJIT_REWRITABLE_JUMP) + return code_ptr; + + if (jump->flags & JUMP_ADDR) + target_addr = jump->u.target; + else { + SLJIT_ASSERT(jump->flags & JUMP_LABEL); + target_addr = (sljit_uw)(code + jump->u.label->size); + } + inst = (sljit_ins*)jump->addr; + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + if (jump->flags & IS_CALL) { + /* Call is always patchable on sparc 32. */ + jump->flags |= PATCH_CALL; + if (jump->flags & IS_MOVABLE) { + inst[0] = inst[-1]; + inst[-1] = CALL; + jump->addr -= sizeof(sljit_ins); + return inst; + } + inst[0] = CALL; + inst[1] = NOP; + return inst + 1; + } +#else + /* Both calls and BPr instructions shall not pass this point. */ +#error "Implementation required" +#endif + + if (jump->flags & IS_COND) + inst--; + + if (jump->flags & IS_MOVABLE) { + diff = ((sljit_sw)target_addr - (sljit_sw)(inst - 1)) >> 2; + if (diff <= MAX_DISP && diff >= MIN_DISP) { + jump->flags |= PATCH_B; + inst--; + if (jump->flags & IS_COND) { + saved_inst = inst[0]; + inst[0] = inst[1] ^ (1 << 28); + inst[1] = saved_inst; + } else { + inst[1] = inst[0]; + inst[0] = BICC | DA(0x8); + } + jump->addr = (sljit_uw)inst; + return inst + 1; + } + } + + diff = ((sljit_sw)target_addr - (sljit_sw)(inst)) >> 2; + if (diff <= MAX_DISP && diff >= MIN_DISP) { + jump->flags |= PATCH_B; + if (jump->flags & IS_COND) + inst[0] ^= (1 << 28); + else + inst[0] = BICC | DA(0x8); + inst[1] = NOP; + jump->addr = (sljit_uw)inst; + return inst + 1; + } + + return code_ptr; +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_ins *code; + sljit_ins *code_ptr; + sljit_ins *buf_ptr; + sljit_ins *buf_end; + sljit_uw word_count; + sljit_uw addr; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + + CHECK_ERROR_PTR(); + check_sljit_generate_code(compiler); + reverse_buf(compiler); + + code = (sljit_ins*)SLJIT_MALLOC_EXEC(compiler->size * sizeof(sljit_ins)); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + word_count = 0; + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + do { + buf_ptr = (sljit_ins*)buf->memory; + buf_end = buf_ptr + (buf->used_size >> 2); + do { + *code_ptr = *buf_ptr++; + SLJIT_ASSERT(!label || label->size >= word_count); + SLJIT_ASSERT(!jump || jump->addr >= word_count); + SLJIT_ASSERT(!const_ || const_->addr >= word_count); + /* These structures are ordered by their address. */ + if (label && label->size == word_count) { + /* Just recording the address. 
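The word count recorded at compile time is replaced by the final address in the generated code.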
*/ + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + if (jump && jump->addr == word_count) { +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + jump->addr = (sljit_uw)(code_ptr - 3); +#else + jump->addr = (sljit_uw)(code_ptr - 6); +#endif + code_ptr = optimize_jump(jump, code_ptr, code); + jump = jump->next; + } + if (const_ && const_->addr == word_count) { + /* Just recording the address. */ + const_->addr = (sljit_uw)code_ptr; + const_ = const_->next; + } + code_ptr ++; + word_count ++; + } while (buf_ptr < buf_end); + + buf = buf->next; + } while (buf); + + if (label && label->size == word_count) { + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + SLJIT_ASSERT(code_ptr - code <= (sljit_si)compiler->size); + + jump = compiler->jumps; + while (jump) { + do { + addr = (jump->flags & JUMP_LABEL) ? jump->u.label->addr : jump->u.target; + buf_ptr = (sljit_ins*)jump->addr; + + if (jump->flags & PATCH_CALL) { + addr = (sljit_sw)(addr - jump->addr) >> 2; + SLJIT_ASSERT((sljit_sw)addr <= 0x1fffffff && (sljit_sw)addr >= -0x20000000); + buf_ptr[0] = CALL | (addr & 0x3fffffff); + break; + } + if (jump->flags & PATCH_B) { + addr = (sljit_sw)(addr - jump->addr) >> 2; + SLJIT_ASSERT((sljit_sw)addr <= MAX_DISP && (sljit_sw)addr >= MIN_DISP); + buf_ptr[0] = (buf_ptr[0] & ~DISP_MASK) | (addr & DISP_MASK); + break; + } + + /* Set the fields of immediate loads. */ +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + buf_ptr[0] = (buf_ptr[0] & 0xffc00000) | ((addr >> 10) & 0x3fffff); + buf_ptr[1] = (buf_ptr[1] & 0xfffffc00) | (addr & 0x3ff); +#else +#error "Implementation required" +#endif + } while (0); + jump = jump->next; + } + + + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_size = compiler->size * sizeof(sljit_ins); + SLJIT_CACHE_FLUSH(code, code_ptr); + return code; +} + +/* --------------------------------------------------------------------- */ +/* Entry, exit */ +/* --------------------------------------------------------------------- */ + +/* Creates an index in data_transfer_insts array. */ +#define LOAD_DATA 0x01 +#define WORD_DATA 0x00 +#define BYTE_DATA 0x02 +#define HALF_DATA 0x04 +#define INT_DATA 0x06 +#define SIGNED_DATA 0x08 +/* Separates integer and floating point registers */ +#define GPR_REG 0x0f +#define DOUBLE_DATA 0x10 + +#define MEM_MASK 0x1f + +#define WRITE_BACK 0x00020 +#define ARG_TEST 0x00040 +#define ALT_KEEP_CACHE 0x00080 +#define CUMULATIVE_OP 0x00100 +#define IMM_OP 0x00200 +#define SRC2_IMM 0x00400 + +#define REG_DEST 0x00800 +#define REG2_SOURCE 0x01000 +#define SLOW_SRC1 0x02000 +#define SLOW_SRC2 0x04000 +#define SLOW_DEST 0x08000 + +/* SET_FLAGS (0x10 << 19) also belongs here! 
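It is stored in the same flags word as the values above, so their bits must not overlap it.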
*/ + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#include "sljitNativeSPARC_32.c" +#else +#include "sljitNativeSPARC_64.c" +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + CHECK_ERROR(); + check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + local_size += 23 * sizeof(sljit_sw); + local_size = (local_size + 7) & ~0x7; + compiler->local_size = local_size; + + if (local_size <= SIMM_MAX) { + FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_LOCALS_REG) | S1(SLJIT_LOCALS_REG) | IMM(-local_size), UNMOVABLE_INS)); + } + else { + FAIL_IF(load_immediate(compiler, TMP_REG1, -local_size)); + FAIL_IF(push_inst(compiler, SAVE | D(SLJIT_LOCALS_REG) | S1(SLJIT_LOCALS_REG) | S2(TMP_REG1), UNMOVABLE_INS)); + } + + if (args >= 1) + FAIL_IF(push_inst(compiler, OR | D(SLJIT_SAVED_REG1) | S1(0) | S2A(24), DR(SLJIT_SAVED_REG1))); + if (args >= 2) + FAIL_IF(push_inst(compiler, OR | D(SLJIT_SAVED_REG2) | S1(0) | S2A(25), DR(SLJIT_SAVED_REG2))); + if (args >= 3) + FAIL_IF(push_inst(compiler, OR | D(SLJIT_SAVED_REG3) | S1(0) | S2A(26), DR(SLJIT_SAVED_REG3))); + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + CHECK_ERROR_VOID(); + check_sljit_set_context(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + local_size += 23 * sizeof(sljit_sw); + compiler->local_size = (local_size + 7) & ~0x7; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_return(compiler, op, src, srcw); + + if (op != SLJIT_MOV || !(src <= TMP_REG3)) { + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + src = SLJIT_SCRATCH_REG1; + } + + FAIL_IF(push_inst(compiler, JMPL | D(0) | S1A(31) | IMM(8), UNMOVABLE_INS)); + return push_inst(compiler, RESTORE | D(SLJIT_SCRATCH_REG1) | S1(src) | S2(0), UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) +#define ARCH_32_64(a, b) a +#else +#define ARCH_32_64(a, b) b +#endif + +static SLJIT_CONST sljit_ins data_transfer_insts[16 + 4] = { +/* u w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */), +/* u w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */), +/* u b s */ OPC1(3) | OPC3(0x05) /* stb */, +/* u b l */ OPC1(3) | OPC3(0x01) /* ldub */, +/* u h s */ OPC1(3) | OPC3(0x06) /* sth */, +/* u h l */ OPC1(3) | OPC3(0x02) /* lduh */, +/* u i s */ OPC1(3) | OPC3(0x04) /* stw */, +/* u i l */ OPC1(3) | OPC3(0x00) /* lduw */, + +/* s w s */ ARCH_32_64(OPC1(3) | OPC3(0x04) /* stw */, OPC1(3) | OPC3(0x0e) /* stx */), +/* s w l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x0b) /* ldx */), +/* s b s */ OPC1(3) | OPC3(0x05) /* stb */, +/* s b l */ OPC1(3) | OPC3(0x09) /* ldsb */, +/* s h s */ OPC1(3) | 
OPC3(0x06) /* sth */, +/* s h l */ OPC1(3) | OPC3(0x0a) /* ldsh */, +/* s i s */ OPC1(3) | OPC3(0x04) /* stw */, +/* s i l */ ARCH_32_64(OPC1(3) | OPC3(0x00) /* lduw */, OPC1(3) | OPC3(0x08) /* ldsw */), + +/* d s */ OPC1(3) | OPC3(0x27), +/* d l */ OPC1(3) | OPC3(0x23), +/* s s */ OPC1(3) | OPC3(0x24), +/* s l */ OPC1(3) | OPC3(0x20), +}; + +#undef ARCH_32_64 + +/* Can perform an operation using at most 1 instruction. */ +static sljit_si getput_arg_fast(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +{ + SLJIT_ASSERT(arg & SLJIT_MEM); + + if (!(flags & WRITE_BACK) || !(arg & 0xf)) { + if ((!(arg & 0xf0) && argw <= SIMM_MAX && argw >= SIMM_MIN) + || ((arg & 0xf0) && (argw & 0x3) == 0)) { + /* Works for both absolute and relative addresses (immediate case). */ + if (SLJIT_UNLIKELY(flags & ARG_TEST)) + return 1; + FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] + | ((flags & MEM_MASK) <= GPR_REG ? D(reg) : DA(reg)) + | S1(arg & 0xf) | ((arg & 0xf0) ? S2((arg >> 4) & 0xf) : IMM(argw)), + ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS)); + return -1; + } + } + return 0; +} + +/* See getput_arg below. + Note: can_cache is called only for binary operators. Those + operators always use word arguments without write back. */ +static sljit_si can_cache(sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + SLJIT_ASSERT((arg & SLJIT_MEM) && (next_arg & SLJIT_MEM)); + + /* Simple operation except for updates. */ + if (arg & 0xf0) { + argw &= 0x3; + SLJIT_ASSERT(argw); + next_argw &= 0x3; + if ((arg & 0xf0) == (next_arg & 0xf0) && argw == next_argw) + return 1; + return 0; + } + + if (((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN)) + return 1; + return 0; +} + +/* Emit the necessary instructions. See can_cache above. */ +static sljit_si getput_arg(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw, sljit_si next_arg, sljit_sw next_argw) +{ + sljit_si base, arg2, delay_slot; + sljit_ins dest; + + SLJIT_ASSERT(arg & SLJIT_MEM); + if (!(next_arg & SLJIT_MEM)) { + next_arg = 0; + next_argw = 0; + } + + base = arg & 0xf; + if (SLJIT_UNLIKELY(arg & 0xf0)) { + argw &= 0x3; + SLJIT_ASSERT(argw != 0); + + /* Using the cache. */ + if (((SLJIT_MEM | (arg & 0xf0)) == compiler->cache_arg) && (argw == compiler->cache_argw)) + arg2 = TMP_REG3; + else { + if ((arg & 0xf0) == (next_arg & 0xf0) && argw == (next_argw & 0x3)) { + compiler->cache_arg = SLJIT_MEM | (arg & 0xf0); + compiler->cache_argw = argw; + arg2 = TMP_REG3; + } + else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base && (reg << 4) != (arg & 0xf0)) + arg2 = reg; + else /* It must be a mov operation, so tmp1 must be free to use. */ + arg2 = TMP_REG1; + FAIL_IF(push_inst(compiler, SLL_W | D(arg2) | S1((arg >> 4) & 0xf) | IMM_ARG | argw, DR(arg2))); + } + } + else { + /* Using the cache. 
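TMP_REG3 still holds a nearby base address, so it is enough to adjust it by the small difference.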
*/ + if ((compiler->cache_arg == SLJIT_MEM) && (argw - compiler->cache_argw) <= SIMM_MAX && (argw - compiler->cache_argw) >= SIMM_MIN) { + if (argw != compiler->cache_argw) { + FAIL_IF(push_inst(compiler, ADD | D(TMP_REG3) | S1(TMP_REG3) | IMM(argw - compiler->cache_argw), DR(TMP_REG3))); + compiler->cache_argw = argw; + } + arg2 = TMP_REG3; + } else { + if ((next_argw - argw) <= SIMM_MAX && (next_argw - argw) >= SIMM_MIN) { + compiler->cache_arg = SLJIT_MEM; + compiler->cache_argw = argw; + arg2 = TMP_REG3; + } + else if ((flags & LOAD_DATA) && ((flags & MEM_MASK) <= GPR_REG) && reg != base) + arg2 = reg; + else /* It must be a mov operation, so tmp1 must be free to use. */ + arg2 = TMP_REG1; + FAIL_IF(load_immediate(compiler, arg2, argw)); + } + } + + dest = ((flags & MEM_MASK) <= GPR_REG ? D(reg) : DA(reg)); + delay_slot = ((flags & MEM_MASK) <= GPR_REG && (flags & LOAD_DATA)) ? DR(reg) : MOVABLE_INS; + if (!base) + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(arg2) | IMM(0), delay_slot); + if (!(flags & WRITE_BACK)) + return push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot); + FAIL_IF(push_inst(compiler, data_transfer_insts[flags & MEM_MASK] | dest | S1(base) | S2(arg2), delay_slot)); + return push_inst(compiler, ADD | D(base) | S1(base) | S2(arg2), DR(base)); +} + +static SLJIT_INLINE sljit_si emit_op_mem(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg, sljit_sw argw) +{ + if (getput_arg_fast(compiler, flags, reg, arg, argw)) + return compiler->error; + compiler->cache_arg = 0; + compiler->cache_argw = 0; + return getput_arg(compiler, flags, reg, arg, argw, 0, 0); +} + +static SLJIT_INLINE sljit_si emit_op_mem2(struct sljit_compiler *compiler, sljit_si flags, sljit_si reg, sljit_si arg1, sljit_sw arg1w, sljit_si arg2, sljit_sw arg2w) +{ + if (getput_arg_fast(compiler, flags, reg, arg1, arg1w)) + return compiler->error; + return getput_arg(compiler, flags, reg, arg1, arg1w, arg2, arg2w); +} + +static sljit_si emit_op(struct sljit_compiler *compiler, sljit_si op, sljit_si flags, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + /* arg1 goes to TMP_REG1 or src reg + arg2 goes to TMP_REG2, imm or src reg + TMP_REG3 can be used for caching + result goes to TMP_REG2, so put result can use TMP_REG1 and TMP_REG3. */ + sljit_si dst_r = TMP_REG2; + sljit_si src1_r; + sljit_sw src2_r = 0; + sljit_si sugg_src2_r = TMP_REG2; + + if (!(flags & ALT_KEEP_CACHE)) { + compiler->cache_arg = 0; + compiler->cache_argw = 0; + } + + if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI && !(src2 & SLJIT_MEM)) + return SLJIT_SUCCESS; + } + else if (dst <= TMP_REG3) { + dst_r = dst; + flags |= REG_DEST; + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + sugg_src2_r = dst_r; + } + else if ((dst & SLJIT_MEM) && !getput_arg_fast(compiler, flags | ARG_TEST, TMP_REG1, dst, dstw)) + flags |= SLOW_DEST; + + if (flags & IMM_OP) { + if ((src2 & SLJIT_IMM) && src2w) { + if (src2w <= SIMM_MAX && src2w >= SIMM_MIN) { + flags |= SRC2_IMM; + src2_r = src2w; + } + } + if (!(flags & SRC2_IMM) && (flags & CUMULATIVE_OP) && (src1 & SLJIT_IMM) && src1w) { + if (src1w <= SIMM_MAX && src1w >= SIMM_MIN) { + flags |= SRC2_IMM; + src2_r = src1w; + + /* And swap arguments. */ + src1 = src2; + src1w = src2w; + src2 = SLJIT_IMM; + /* src2w = src2_r unneeded. */ + } + } + } + + /* Source 1. 
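The operand is either used directly as a register, materialized from an immediate into TMP_REG1, or loaded from memory, possibly via the slow path.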
*/ + if (src1 <= TMP_REG3) + src1_r = src1; + else if (src1 & SLJIT_IMM) { + if (src1w) { + FAIL_IF(load_immediate(compiler, TMP_REG1, src1w)); + src1_r = TMP_REG1; + } + else + src1_r = 0; + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC1; + src1_r = TMP_REG1; + } + + /* Source 2. */ + if (src2 <= TMP_REG3) { + src2_r = src2; + flags |= REG2_SOURCE; + if (!(flags & REG_DEST) && op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) + dst_r = src2_r; + } + else if (src2 & SLJIT_IMM) { + if (!(flags & SRC2_IMM)) { + if (src2w) { + FAIL_IF(load_immediate(compiler, sugg_src2_r, src2w)); + src2_r = sugg_src2_r; + } + else { + src2_r = 0; + if ((op >= SLJIT_MOV && op <= SLJIT_MOVU_SI) && (dst & SLJIT_MEM)) + dst_r = 0; + } + } + } + else { + if (getput_arg_fast(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w)) + FAIL_IF(compiler->error); + else + flags |= SLOW_SRC2; + src2_r = sugg_src2_r; + } + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + SLJIT_ASSERT(src2_r == TMP_REG2); + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, TMP_REG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, flags | LOAD_DATA, sugg_src2_r, src2, src2w, dst, dstw)); + + FAIL_IF(emit_single_op(compiler, op, flags, dst_r, src1_r, src2_r)); + + if (dst & SLJIT_MEM) { + if (!(flags & SLOW_DEST)) { + getput_arg_fast(compiler, flags, dst_r, dst, dstw); + return compiler->error; + } + return getput_arg(compiler, flags, dst_r, dst, dstw, 0, 0); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + CHECK_ERROR(); + check_sljit_emit_op0(compiler, op); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_BREAKPOINT: + return push_inst(compiler, TA, UNMOVABLE_INS); + case SLJIT_NOP: + return push_inst(compiler, NOP, UNMOVABLE_INS); + case SLJIT_UMUL: + case SLJIT_SMUL: +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + FAIL_IF(push_inst(compiler, (op == SLJIT_UMUL ? UMUL : SMUL) | D(SLJIT_SCRATCH_REG1) | S1(SLJIT_SCRATCH_REG1) | S2(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG1))); + return push_inst(compiler, RDY | D(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG2)); +#else +#error "Implementation required" +#endif + case SLJIT_UDIV: + case SLJIT_SDIV: +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + if (op == SLJIT_UDIV) + FAIL_IF(push_inst(compiler, WRY | S1(0), MOVABLE_INS)); + else { + FAIL_IF(push_inst(compiler, SRA | D(TMP_REG1) | S1(SLJIT_SCRATCH_REG1) | IMM(31), DR(TMP_REG1))); + FAIL_IF(push_inst(compiler, WRY | S1(TMP_REG1), MOVABLE_INS)); + } + FAIL_IF(push_inst(compiler, OR | D(TMP_REG2) | S1(0) | S2(SLJIT_SCRATCH_REG1), DR(TMP_REG2))); + FAIL_IF(push_inst(compiler, (op == SLJIT_UDIV ? 
UDIV : SDIV) | D(SLJIT_SCRATCH_REG1) | S1(SLJIT_SCRATCH_REG1) | S2(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG1))); + FAIL_IF(push_inst(compiler, SMUL | D(SLJIT_SCRATCH_REG2) | S1(SLJIT_SCRATCH_REG1) | S2(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG2))); + FAIL_IF(push_inst(compiler, SUB | D(SLJIT_SCRATCH_REG2) | S1(TMP_REG2) | S2(SLJIT_SCRATCH_REG2), DR(SLJIT_SCRATCH_REG2))); + return SLJIT_SUCCESS; +#else +#error "Implementation required" +#endif + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si flags = GET_FLAGS(op) ? SET_FLAGS : 0; + + CHECK_ERROR(); + check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_P: + return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_UI: + return emit_op(compiler, SLJIT_MOV_UI, flags | INT_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_SI: + return emit_op(compiler, SLJIT_MOV_SI, flags | INT_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOV_UB: + return emit_op(compiler, SLJIT_MOV_UB, flags | BYTE_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); + + case SLJIT_MOV_SB: + return emit_op(compiler, SLJIT_MOV_SB, flags | BYTE_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); + + case SLJIT_MOV_UH: + return emit_op(compiler, SLJIT_MOV_UH, flags | HALF_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); + + case SLJIT_MOV_SH: + return emit_op(compiler, SLJIT_MOV_SH, flags | HALF_DATA | SIGNED_DATA, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw); + + case SLJIT_MOVU: + case SLJIT_MOVU_P: + return emit_op(compiler, SLJIT_MOV, flags | WORD_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOVU_UI: + return emit_op(compiler, SLJIT_MOV_UI, flags | INT_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOVU_SI: + return emit_op(compiler, SLJIT_MOV_SI, flags | INT_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_MOVU_UB: + return emit_op(compiler, SLJIT_MOV_UB, flags | BYTE_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_ub)srcw : srcw); + + case SLJIT_MOVU_SB: + return emit_op(compiler, SLJIT_MOV_SB, flags | BYTE_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sb)srcw : srcw); + + case SLJIT_MOVU_UH: + return emit_op(compiler, SLJIT_MOV_UH, flags | HALF_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_uh)srcw : srcw); + + case SLJIT_MOVU_SH: + return emit_op(compiler, SLJIT_MOV_SH, flags | HALF_DATA | SIGNED_DATA | WRITE_BACK, dst, dstw, TMP_REG1, 0, src, (src & SLJIT_IMM) ? (sljit_sh)srcw : srcw); + + case SLJIT_NOT: + case SLJIT_CLZ: + return emit_op(compiler, op, flags, dst, dstw, TMP_REG1, 0, src, srcw); + + case SLJIT_NEG: + return emit_op(compiler, SLJIT_SUB, flags | IMM_OP, dst, dstw, SLJIT_IMM, 0, src, srcw); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si flags = GET_FLAGS(op) ? 
SET_FLAGS : 0; + + CHECK_ERROR(); + check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + op = GET_OPCODE(op); + switch (op) { + case SLJIT_ADD: + case SLJIT_ADDC: + case SLJIT_MUL: + case SLJIT_AND: + case SLJIT_OR: + case SLJIT_XOR: + return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SUB: + case SLJIT_SUBC: + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + + case SLJIT_SHL: + case SLJIT_LSHR: + case SLJIT_ASHR: +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + if (src2 & SLJIT_IMM) + src2w &= 0x1f; +#else + SLJIT_ASSERT_STOP(); +#endif + return emit_op(compiler, op, flags | IMM_OP, dst, dstw, src1, src1w, src2, src2w); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +{ + check_sljit_get_register_index(reg); + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + CHECK_ERROR(); + check_sljit_emit_op_custom(compiler, instruction, size); + SLJIT_ASSERT(size == 4); + + return push_inst(compiler, *(sljit_ins*)instruction, UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ + return 1; +} + +#define FLOAT_DATA(op) (DOUBLE_DATA | ((op & SLJIT_SINGLE_OP) >> 7)) +#define SELECT_FOP(op, single, double) ((op & SLJIT_SINGLE_OP) ? single : double) + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si dst_fr; + + CHECK_ERROR(); + check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); + SLJIT_COMPILE_ASSERT((SLJIT_SINGLE_OP == 0x100) && !(DOUBLE_DATA & 0x2), float_transfer_bit_error); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + if (GET_OPCODE(op) == SLJIT_CMPD) { + if (dst > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, dst, dstw, src, srcw)); + dst = TMP_FREG1; + } + else + dst <<= 1; + + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src, srcw, 0, 0)); + src = TMP_FREG2; + } + else + src <<= 1; + + return push_inst(compiler, SELECT_FOP(op, FCMPS, FCMPD) | S1A(dst) | S2A(src), FCC_IS_SET | MOVABLE_INS); + } + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? 
TMP_FREG1 : (dst << 1); + + if (src > SLJIT_FLOAT_REG6) { + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op) | LOAD_DATA, dst_fr, src, srcw, dst, dstw)); + src = dst_fr; + } + else + src <<= 1; + + switch (GET_OPCODE(op)) { + case SLJIT_MOVD: + if (src != dst_fr && dst_fr != TMP_FREG1) { + FAIL_IF(push_inst(compiler, FMOVS | DA(dst_fr) | S2A(src), MOVABLE_INS)); + if (!(op & SLJIT_SINGLE_OP)) + FAIL_IF(push_inst(compiler, FMOVS | DA(dst_fr | 1) | S2A(src | 1), MOVABLE_INS)); + } + break; + case SLJIT_NEGD: + FAIL_IF(push_inst(compiler, FNEGS | DA(dst_fr) | S2A(src), MOVABLE_INS)); + if (dst_fr != src && !(op & SLJIT_SINGLE_OP)) + FAIL_IF(push_inst(compiler, FMOVS | DA(dst_fr | 1) | S2A(src | 1), MOVABLE_INS)); + break; + case SLJIT_ABSD: + FAIL_IF(push_inst(compiler, FABSS | DA(dst_fr) | S2A(src), MOVABLE_INS)); + if (dst_fr != src && !(op & SLJIT_SINGLE_OP)) + FAIL_IF(push_inst(compiler, FMOVS | DA(dst_fr | 1) | S2A(src | 1), MOVABLE_INS)); + break; + } + + if (dst_fr == TMP_FREG1) { + if (GET_OPCODE(op) == SLJIT_MOVD) + dst_fr = src; + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), dst_fr, dst, dstw, 0, 0)); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si dst_fr, flags = 0; + + CHECK_ERROR(); + check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + + dst_fr = (dst > SLJIT_FLOAT_REG6) ? TMP_FREG2 : (dst << 1); + + if (src1 > SLJIT_FLOAT_REG6) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w)) { + FAIL_IF(compiler->error); + src1 = TMP_FREG1; + } else + flags |= SLOW_SRC1; + } + else + src1 <<= 1; + + if (src2 > SLJIT_FLOAT_REG6) { + if (getput_arg_fast(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w)) { + FAIL_IF(compiler->error); + src2 = TMP_FREG2; + } else + flags |= SLOW_SRC2; + } + else + src2 <<= 1; + + if ((flags & (SLOW_SRC1 | SLOW_SRC2)) == (SLOW_SRC1 | SLOW_SRC2)) { + if (!can_cache(src1, src1w, src2, src2w) && can_cache(src1, src1w, dst, dstw)) { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, src1, src1w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + } + else { + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, src2, src2w)); + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + } + } + else if (flags & SLOW_SRC1) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG1, src1, src1w, dst, dstw)); + else if (flags & SLOW_SRC2) + FAIL_IF(getput_arg(compiler, FLOAT_DATA(op) | LOAD_DATA, TMP_FREG2, src2, src2w, dst, dstw)); + + if (flags & SLOW_SRC1) + src1 = TMP_FREG1; + if (flags & SLOW_SRC2) + src2 = TMP_FREG2; + + switch (GET_OPCODE(op)) { + case SLJIT_ADDD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FADDS, FADDD) | DA(dst_fr) | S1A(src1) | S2A(src2), MOVABLE_INS)); + break; + + case SLJIT_SUBD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FSUBS, FSUBD) | DA(dst_fr) | S1A(src1) | S2A(src2), MOVABLE_INS)); + break; + + case SLJIT_MULD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FMULS, FMULD) | DA(dst_fr) | S1A(src1) | S2A(src2), MOVABLE_INS)); + break; + + case SLJIT_DIVD: + FAIL_IF(push_inst(compiler, SELECT_FOP(op, FDIVS, FDIVD) | DA(dst_fr) | S1A(src1) | S2A(src2), MOVABLE_INS)); + 
break; + } + + if (dst_fr == TMP_FREG2) + FAIL_IF(emit_op_mem2(compiler, FLOAT_DATA(op), TMP_FREG2, dst, dstw, 0, 0)); + + return SLJIT_SUCCESS; +} + +#undef FLOAT_DATA +#undef SELECT_FOP + +/* --------------------------------------------------------------------- */ +/* Other instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_enter(compiler, dst, dstw); + ADJUST_LOCAL_OFFSET(dst, dstw); + + /* For UNUSED dst. Uncommon, but possible. */ + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + if (dst <= TMP_REG3) + return push_inst(compiler, OR | D(dst) | S1(0) | S2(LINK_REG), DR(dst)); + + /* Memory. */ + return emit_op_mem(compiler, WORD_DATA, LINK_REG, dst, dstw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + check_sljit_emit_fast_return(compiler, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= TMP_REG3) + FAIL_IF(push_inst(compiler, OR | D(LINK_REG) | S1(0) | S2(src), DR(LINK_REG))); + else if (src & SLJIT_MEM) + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, LINK_REG, src, srcw)); + else if (src & SLJIT_IMM) + FAIL_IF(load_immediate(compiler, LINK_REG, srcw)); + + FAIL_IF(push_inst(compiler, JMPL | D(0) | S1(LINK_REG) | IMM(8), UNMOVABLE_INS)); + return push_inst(compiler, NOP, UNMOVABLE_INS); +} + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + struct sljit_label *label; + + CHECK_ERROR_PTR(); + check_sljit_emit_label(compiler); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + compiler->delay_slot = UNMOVABLE_INS; + return label; +} + +static sljit_ins get_cc(sljit_si type) +{ + switch (type) { + case SLJIT_C_EQUAL: + case SLJIT_C_MUL_NOT_OVERFLOW: + return DA(0x1); + + case SLJIT_C_NOT_EQUAL: + case SLJIT_C_MUL_OVERFLOW: + return DA(0x9); + + case SLJIT_C_LESS: + return DA(0x5); + + case SLJIT_C_GREATER_EQUAL: + return DA(0xd); + + case SLJIT_C_GREATER: + return DA(0xc); + + case SLJIT_C_LESS_EQUAL: + return DA(0x4); + + case SLJIT_C_SIG_LESS: + return DA(0x3); + + case SLJIT_C_SIG_GREATER_EQUAL: + return DA(0xb); + + case SLJIT_C_SIG_GREATER: + return DA(0xa); + + case SLJIT_C_SIG_LESS_EQUAL: + return DA(0x2); + + case SLJIT_C_OVERFLOW: + return DA(0x7); + + case SLJIT_C_NOT_OVERFLOW: + return DA(0xf); + + case SLJIT_C_FLOAT_EQUAL: + return DA(0x9); + + case SLJIT_C_FLOAT_NOT_EQUAL: /* Unordered. */ + return DA(0x1); + + case SLJIT_C_FLOAT_LESS: + return DA(0x4); + + case SLJIT_C_FLOAT_GREATER_EQUAL: /* Unordered. */ + return DA(0xc); + + case SLJIT_C_FLOAT_LESS_EQUAL: + return DA(0xd); + + case SLJIT_C_FLOAT_GREATER: /* Unordered. 
*/ + return DA(0x5); + + case SLJIT_C_FLOAT_UNORDERED: + return DA(0x7); + + case SLJIT_C_FLOAT_ORDERED: + return DA(0xf); + + default: + SLJIT_ASSERT_STOP(); + return DA(0x8); + } +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + struct sljit_jump *jump; + + CHECK_ERROR_PTR(); + check_sljit_emit_jump(compiler, type); + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF(!jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + if (type < SLJIT_C_FLOAT_EQUAL) { + jump->flags |= IS_COND; + if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & ICC_IS_SET)) + jump->flags |= IS_MOVABLE; +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + PTR_FAIL_IF(push_inst(compiler, BICC | get_cc(type ^ 1) | 5, UNMOVABLE_INS)); +#else +#error "Implementation required" +#endif + } + else if (type < SLJIT_JUMP) { + jump->flags |= IS_COND; + if (((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) && !(compiler->delay_slot & FCC_IS_SET)) + jump->flags |= IS_MOVABLE; +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + PTR_FAIL_IF(push_inst(compiler, FBFCC | get_cc(type ^ 1) | 5, UNMOVABLE_INS)); +#else +#error "Implementation required" +#endif + } else { + if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) + jump->flags |= IS_MOVABLE; + if (type >= SLJIT_FAST_CALL) + jump->flags |= IS_CALL; + } + + PTR_FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + PTR_FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? LINK_REG : 0) | S1(TMP_REG2) | IMM(0), UNMOVABLE_INS)); + jump->addr = compiler->size; + PTR_FAIL_IF(push_inst(compiler, NOP, UNMOVABLE_INS)); + + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + struct sljit_jump *jump = NULL; + sljit_si src_r; + + CHECK_ERROR(); + check_sljit_emit_ijump(compiler, type, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if (src <= TMP_REG3) + src_r = src; + else if (src & SLJIT_IMM) { + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF(!jump); + set_jump(jump, compiler, JUMP_ADDR); + jump->u.target = srcw; + if ((compiler->delay_slot & DST_INS_MASK) != UNMOVABLE_INS) + jump->flags |= IS_MOVABLE; + if (type >= SLJIT_FAST_CALL) + jump->flags |= IS_CALL; + + FAIL_IF(emit_const(compiler, TMP_REG2, 0)); + src_r = TMP_REG2; + } + else { + FAIL_IF(emit_op_mem(compiler, WORD_DATA | LOAD_DATA, TMP_REG2, src, srcw)); + src_r = TMP_REG2; + } + + FAIL_IF(push_inst(compiler, JMPL | D(type >= SLJIT_FAST_CALL ? LINK_REG : 0) | S1(src_r) | IMM(0), UNMOVABLE_INS)); + if (jump) + jump->addr = compiler->size; + return push_inst(compiler, NOP, UNMOVABLE_INS); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + sljit_si reg, flags = (GET_FLAGS(op) ? SET_FLAGS : 0); + + CHECK_ERROR(); + check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + ADJUST_LOCAL_OFFSET(dst, dstw); + + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + +#if (defined SLJIT_CONFIG_SPARC_32 && SLJIT_CONFIG_SPARC_32) + op = GET_OPCODE(op); + reg = (op < SLJIT_ADD && dst <= TMP_REG3) ? 
dst : TMP_REG2; + + compiler->cache_arg = 0; + compiler->cache_argw = 0; + if (op >= SLJIT_ADD && (src & SLJIT_MEM)) { + ADJUST_LOCAL_OFFSET(src, srcw); + FAIL_IF(emit_op_mem2(compiler, WORD_DATA | LOAD_DATA, TMP_REG1, src, srcw, dst, dstw)); + src = TMP_REG1; + srcw = 0; + } + + if (type < SLJIT_C_FLOAT_EQUAL) + FAIL_IF(push_inst(compiler, BICC | get_cc(type) | 3, UNMOVABLE_INS)); + else + FAIL_IF(push_inst(compiler, FBFCC | get_cc(type) | 3, UNMOVABLE_INS)); + + FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(1), UNMOVABLE_INS)); + FAIL_IF(push_inst(compiler, OR | D(reg) | S1(0) | IMM(0), UNMOVABLE_INS)); + + if (op >= SLJIT_ADD) + return emit_op(compiler, op, flags | CUMULATIVE_OP | IMM_OP | ALT_KEEP_CACHE, dst, dstw, src, srcw, TMP_REG2, 0); + + return (reg == TMP_REG2) ? emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw) : SLJIT_SUCCESS; +#else +#error "Implementation required" +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +{ + sljit_si reg; + struct sljit_const *const_; + + CHECK_ERROR_PTR(); + check_sljit_emit_const(compiler, dst, dstw, init_value); + ADJUST_LOCAL_OFFSET(dst, dstw); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + + reg = (dst <= TMP_REG3) ? dst : TMP_REG2; + + PTR_FAIL_IF(emit_const(compiler, reg, init_value)); + + if (dst & SLJIT_MEM) + PTR_FAIL_IF(emit_op_mem(compiler, WORD_DATA, TMP_REG2, dst, dstw)); + + return const_; +} Property changes on: sys/contrib/sljit/sljitNativeSPARC_common.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativeX86_32.c =================================================================== --- sys/contrib/sljit/sljitNativeX86_32.c (revision 0) +++ sys/contrib/sljit/sljitNativeX86_32.c (working copy) @@ -0,0 +1,547 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* x86 32-bit arch dependent functions. */ + +static sljit_si emit_do_imm(struct sljit_compiler *compiler, sljit_ub opcode, sljit_sw imm) +{ + sljit_ub *inst; + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + sizeof(sljit_sw)); + FAIL_IF(!inst); + INC_SIZE(1 + sizeof(sljit_sw)); + *inst++ = opcode; + *(sljit_sw*)inst = imm; + return SLJIT_SUCCESS; +} + +static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_si type) +{ + if (type == SLJIT_JUMP) { + *code_ptr++ = JMP_i32; + jump->addr++; + } + else if (type >= SLJIT_FAST_CALL) { + *code_ptr++ = CALL_i32; + jump->addr++; + } + else { + *code_ptr++ = GROUP_0F; + *code_ptr++ = get_jump_code(type); + jump->addr += 2; + } + + if (jump->flags & JUMP_LABEL) + jump->flags |= PATCH_MW; + else + *(sljit_sw*)code_ptr = jump->u.target - (jump->addr + 4); + code_ptr += 4; + + return code_ptr; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si size; + sljit_si locals_offset; + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; + compiler->args = args; + compiler->flags_saved = 0; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (args * 2) : 0) + (args > 2 ? 2 : 0); +#else + size = 1 + (saveds <= 3 ? saveds : 3) + (args > 0 ? (2 + args * 3) : 0); +#endif + inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + + INC_SIZE(size); + PUSH_REG(reg_map[TMP_REGISTER]); +#if !(defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (args > 0) { + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[TMP_REGISTER] << 3) | 0x4 /* esp */; + } +#endif + if (saveds > 2) + PUSH_REG(reg_map[SLJIT_SAVED_REG3]); + if (saveds > 1) + PUSH_REG(reg_map[SLJIT_SAVED_REG2]); + if (saveds > 0) + PUSH_REG(reg_map[SLJIT_SAVED_REG1]); + +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (args > 0) { + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[SLJIT_SCRATCH_REG3]; + } + if (args > 1) { + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[SLJIT_SCRATCH_REG2]; + } + if (args > 2) { + *inst++ = MOV_r_rm; + *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG3] << 3) | 0x4 /* esp */; + *inst++ = 0x24; + *inst++ = sizeof(sljit_sw) * (3 + 2); /* saveds >= 3 as well. 
*/ + } +#else + if (args > 0) { + *inst++ = MOV_r_rm; + *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG1] << 3) | reg_map[TMP_REGISTER]; + *inst++ = sizeof(sljit_sw) * 2; + } + if (args > 1) { + *inst++ = MOV_r_rm; + *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG2] << 3) | reg_map[TMP_REGISTER]; + *inst++ = sizeof(sljit_sw) * 3; + } + if (args > 2) { + *inst++ = MOV_r_rm; + *inst++ = MOD_DISP8 | (reg_map[SLJIT_SAVED_REG3] << 3) | reg_map[TMP_REGISTER]; + *inst++ = sizeof(sljit_sw) * 4; + } +#endif + +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + locals_offset = 2 * sizeof(sljit_uw); +#else + SLJIT_COMPILE_ASSERT(FIXED_LOCALS_OFFSET >= 2 * sizeof(sljit_uw), require_at_least_two_words); + locals_offset = FIXED_LOCALS_OFFSET; +#endif + compiler->scratches_start = locals_offset; + if (scratches > 3) + locals_offset += (scratches - 3) * sizeof(sljit_uw); + compiler->saveds_start = locals_offset; + if (saveds > 3) + locals_offset += (saveds - 3) * sizeof(sljit_uw); + compiler->locals_offset = locals_offset; + local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1)); + + compiler->local_size = local_size; +#ifdef _WIN32 + if (local_size > 1024) { +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_SCRATCH_REG1], local_size)); +#else + local_size -= FIXED_LOCALS_OFFSET; + FAIL_IF(emit_do_imm(compiler, MOV_r_i32 + reg_map[SLJIT_SCRATCH_REG1], local_size)); + FAIL_IF(emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, + SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, FIXED_LOCALS_OFFSET)); +#endif + FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack))); + } +#endif + + SLJIT_ASSERT(local_size > 0); + return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, + SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, local_size); +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si locals_offset; + + CHECK_ERROR_VOID(); + check_sljit_set_context(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; + compiler->args = args; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + locals_offset = 2 * sizeof(sljit_uw); +#else + locals_offset = FIXED_LOCALS_OFFSET; +#endif + compiler->scratches_start = locals_offset; + if (scratches > 3) + locals_offset += (scratches - 3) * sizeof(sljit_uw); + compiler->saveds_start = locals_offset; + if (saveds > 3) + locals_offset += (saveds - 3) * sizeof(sljit_uw); + compiler->locals_offset = locals_offset; + compiler->local_size = locals_offset + ((local_size + sizeof(sljit_uw) - 1) & ~(sizeof(sljit_uw) - 1)); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + sljit_si size; + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_return(compiler, op, src, srcw); + SLJIT_ASSERT(compiler->args >= 0); + + compiler->flags_saved = 0; + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + + SLJIT_ASSERT(compiler->local_size > 0); + FAIL_IF(emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32, + SLJIT_LOCALS_REG, 0, SLJIT_LOCALS_REG, 0, SLJIT_IMM, compiler->local_size)); + + size = 2 + 
(compiler->saveds <= 3 ? compiler->saveds : 3); +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (compiler->args > 2) + size += 2; +#else + if (compiler->args > 0) + size += 2; +#endif + inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + + INC_SIZE(size); + + if (compiler->saveds > 0) + POP_REG(reg_map[SLJIT_SAVED_REG1]); + if (compiler->saveds > 1) + POP_REG(reg_map[SLJIT_SAVED_REG2]); + if (compiler->saveds > 2) + POP_REG(reg_map[SLJIT_SAVED_REG3]); + POP_REG(reg_map[TMP_REGISTER]); +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (compiler->args > 2) + RET_I16(sizeof(sljit_sw)); + else + RET(); +#else + RET(); +#endif + + return SLJIT_SUCCESS; +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +/* Size contains the flags as well. */ +static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si size, + /* The register or immediate operand. */ + sljit_si a, sljit_sw imma, + /* The general operand (not immediate). */ + sljit_si b, sljit_sw immb) +{ + sljit_ub *inst; + sljit_ub *buf_ptr; + sljit_si flags = size & ~0xf; + sljit_si inst_size; + + /* Both cannot be switched on. */ + SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS)); + /* Size flags not allowed for typed instructions. */ + SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0); + /* Both size flags cannot be switched on. */ + SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG)); +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + /* SSE2 and immediate is not possible. */ + SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2)); + SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3) + && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66) + && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66)); +#endif + + size &= 0xf; + inst_size = size; + +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + if (flags & (EX86_PREF_F2 | EX86_PREF_F3)) + inst_size++; +#endif + if (flags & EX86_PREF_66) + inst_size++; + + /* Calculate size of b. */ + inst_size += 1; /* mod r/m byte. */ + if (b & SLJIT_MEM) { + if ((b & 0x0f) == SLJIT_UNUSED) + inst_size += sizeof(sljit_sw); + else if (immb != 0 && !(b & 0xf0)) { + /* Immediate operand. */ + if (immb <= 127 && immb >= -128) + inst_size += sizeof(sljit_sb); + else + inst_size += sizeof(sljit_sw); + } + + if ((b & 0xf) == SLJIT_LOCALS_REG && !(b & 0xf0)) + b |= SLJIT_LOCALS_REG << 4; + + if ((b & 0xf0) != SLJIT_UNUSED) + inst_size += 1; /* SIB byte. */ + } + + /* Calculate size of a. */ + if (a & SLJIT_IMM) { + if (flags & EX86_BIN_INS) { + if (imma <= 127 && imma >= -128) { + inst_size += 1; + flags |= EX86_BYTE_ARG; + } else + inst_size += 4; + } + else if (flags & EX86_SHIFT_INS) { + imma &= 0x1f; + if (imma != 1) { + inst_size ++; + flags |= EX86_BYTE_ARG; + } + } else if (flags & EX86_BYTE_ARG) + inst_size++; + else if (flags & EX86_HALF_ARG) + inst_size += sizeof(short); + else + inst_size += sizeof(sljit_sw); + } + else + SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG); + + inst = (sljit_ub*)ensure_buf(compiler, 1 + inst_size); + PTR_FAIL_IF(!inst); + + /* Encoding the byte. 
*/ + INC_SIZE(inst_size); +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + if (flags & EX86_PREF_F2) + *inst++ = 0xf2; + if (flags & EX86_PREF_F3) + *inst++ = 0xf3; +#endif + if (flags & EX86_PREF_66) + *inst++ = 0x66; + + buf_ptr = inst + size; + + /* Encode mod/rm byte. */ + if (!(flags & EX86_SHIFT_INS)) { + if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM)) + *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81; + + if ((a & SLJIT_IMM) || (a == 0)) + *buf_ptr = 0; +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + else if (!(flags & EX86_SSE2)) + *buf_ptr = reg_map[a] << 3; + else + *buf_ptr = a << 3; +#else + else + *buf_ptr = reg_map[a] << 3; +#endif + } + else { + if (a & SLJIT_IMM) { + if (imma == 1) + *inst = GROUP_SHIFT_1; + else + *inst = GROUP_SHIFT_N; + } else + *inst = GROUP_SHIFT_CL; + *buf_ptr = 0; + } + + if (!(b & SLJIT_MEM)) +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2)) ? reg_map[b] : b); +#else + *buf_ptr++ |= MOD_REG + reg_map[b]; +#endif + else if ((b & 0x0f) != SLJIT_UNUSED) { + if ((b & 0xf0) == SLJIT_UNUSED || (b & 0xf0) == (SLJIT_LOCALS_REG << 4)) { + if (immb != 0) { + if (immb <= 127 && immb >= -128) + *buf_ptr |= 0x40; + else + *buf_ptr |= 0x80; + } + + if ((b & 0xf0) == SLJIT_UNUSED) + *buf_ptr++ |= reg_map[b & 0x0f]; + else { + *buf_ptr++ |= 0x04; + *buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3); + } + + if (immb != 0) { + if (immb <= 127 && immb >= -128) + *buf_ptr++ = immb; /* 8 bit displacement. */ + else { + *(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */ + buf_ptr += sizeof(sljit_sw); + } + } + } + else { + *buf_ptr++ |= 0x04; + *buf_ptr++ = reg_map[b & 0x0f] | (reg_map[(b >> 4) & 0x0f] << 3) | (immb << 6); + } + } + else { + *buf_ptr++ |= 0x05; + *(sljit_sw*)buf_ptr = immb; /* 32 bit displacement. */ + buf_ptr += sizeof(sljit_sw); + } + + if (a & SLJIT_IMM) { + if (flags & EX86_BYTE_ARG) + *buf_ptr = imma; + else if (flags & EX86_HALF_ARG) + *(short*)buf_ptr = imma; + else if (!(flags & EX86_SHIFT_INS)) + *(sljit_sw*)buf_ptr = imma; + } + + return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1); +} + +/* --------------------------------------------------------------------- */ +/* Call / return instructions */ +/* --------------------------------------------------------------------- */ + +static SLJIT_INLINE sljit_si call_with_args(struct sljit_compiler *compiler, sljit_si type) +{ + sljit_ub *inst; + +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + inst = (sljit_ub*)ensure_buf(compiler, type >= SLJIT_CALL3 ? 1 + 2 + 1 : 1 + 2); + FAIL_IF(!inst); + INC_SIZE(type >= SLJIT_CALL3 ? 
2 + 1 : 2); + + if (type >= SLJIT_CALL3) + PUSH_REG(reg_map[SLJIT_SCRATCH_REG3]); + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_SCRATCH_REG3] << 3) | reg_map[SLJIT_SCRATCH_REG1]; +#else + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 * (type - SLJIT_CALL0)); + FAIL_IF(!inst); + INC_SIZE(4 * (type - SLJIT_CALL0)); + + *inst++ = MOV_rm_r; + *inst++ = MOD_DISP8 | (reg_map[SLJIT_SCRATCH_REG1] << 3) | 0x4 /* SIB */; + *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_LOCALS_REG]; + *inst++ = 0; + if (type >= SLJIT_CALL2) { + *inst++ = MOV_rm_r; + *inst++ = MOD_DISP8 | (reg_map[SLJIT_SCRATCH_REG2] << 3) | 0x4 /* SIB */; + *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_LOCALS_REG]; + *inst++ = sizeof(sljit_sw); + } + if (type >= SLJIT_CALL3) { + *inst++ = MOV_rm_r; + *inst++ = MOD_DISP8 | (reg_map[SLJIT_SCRATCH_REG3] << 3) | 0x4 /* SIB */; + *inst++ = (0x4 /* none*/ << 3) | reg_map[SLJIT_LOCALS_REG]; + *inst++ = 2 * sizeof(sljit_sw); + } +#endif + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_fast_enter(compiler, dst, dstw); + ADJUST_LOCAL_OFFSET(dst, dstw); + + CHECK_EXTRA_REGS(dst, dstw, (void)0); + + /* For UNUSED dst. Uncommon, but possible. */ + if (dst == SLJIT_UNUSED) + dst = TMP_REGISTER; + + if (dst <= TMP_REGISTER) { + /* Unused dest is possible here. */ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + + INC_SIZE(1); + POP_REG(reg_map[dst]); + return SLJIT_SUCCESS; + } + + /* Memory. */ + inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); + FAIL_IF(!inst); + *inst++ = POP_rm; + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_fast_return(compiler, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + CHECK_EXTRA_REGS(src, srcw, (void)0); + + if (src <= TMP_REGISTER) { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 1); + FAIL_IF(!inst); + + INC_SIZE(1 + 1); + PUSH_REG(reg_map[src]); + } + else if (src & SLJIT_MEM) { + inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_FF; + *inst |= PUSH_rm; + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + } + else { + /* SLJIT_IMM. */ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 5 + 1); + FAIL_IF(!inst); + + INC_SIZE(5 + 1); + *inst++ = PUSH_i32; + *(sljit_sw*)inst = srcw; + inst += sizeof(sljit_sw); + } + + RET(); + return SLJIT_SUCCESS; +} Property changes on: sys/contrib/sljit/sljitNativeX86_32.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativeX86_64.c =================================================================== --- sys/contrib/sljit/sljitNativeX86_64.c (revision 0) +++ sys/contrib/sljit/sljitNativeX86_64.c (working copy) @@ -0,0 +1,810 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. 
Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* x86 64-bit arch dependent functions. */ + +static sljit_si emit_load_imm64(struct sljit_compiler *compiler, sljit_si reg, sljit_sw imm) +{ + sljit_ub *inst; + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 2 + sizeof(sljit_sw)); + FAIL_IF(!inst); + INC_SIZE(2 + sizeof(sljit_sw)); + *inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B); + *inst++ = MOV_r_i32 + (reg_map[reg] & 0x7); + *(sljit_sw*)inst = imm; + return SLJIT_SUCCESS; +} + +static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_si type) +{ + if (type < SLJIT_JUMP) { + /* Invert type. */ + *code_ptr++ = get_jump_code(type ^ 0x1) - 0x10; + *code_ptr++ = 10 + 3; + } + + SLJIT_COMPILE_ASSERT(reg_map[TMP_REG3] == 9, tmp3_is_9_first); + *code_ptr++ = REX_W | REX_B; + *code_ptr++ = MOV_r_i32 + 1; + jump->addr = (sljit_uw)code_ptr; + + if (jump->flags & JUMP_LABEL) + jump->flags |= PATCH_MD; + else + *(sljit_sw*)code_ptr = jump->u.target; + + code_ptr += sizeof(sljit_sw); + *code_ptr++ = REX_B; + *code_ptr++ = GROUP_FF; + *code_ptr++ = (type >= SLJIT_FAST_CALL) ? (MOD_REG | CALL_rm | 1) : (MOD_REG | JMP_rm | 1); + + return code_ptr; +} + +static sljit_ub* generate_fixed_jump(sljit_ub *code_ptr, sljit_sw addr, sljit_si type) +{ + sljit_sw delta = addr - ((sljit_sw)code_ptr + 1 + sizeof(sljit_si)); + + if (delta <= SLJIT_W(0x7fffffff) && delta >= SLJIT_W(-0x80000000)) { + *code_ptr++ = (type == 2) ? CALL_i32 : JMP_i32; + *(sljit_sw*)code_ptr = delta; + } + else { + SLJIT_COMPILE_ASSERT(reg_map[TMP_REG3] == 9, tmp3_is_9_second); + *code_ptr++ = REX_W | REX_B; + *code_ptr++ = MOV_r_i32 + 1; + *(sljit_sw*)code_ptr = addr; + code_ptr += sizeof(sljit_sw); + *code_ptr++ = REX_B; + *code_ptr++ = GROUP_FF; + *code_ptr++ = (type == 2) ? 
(MOD_REG | CALL_rm | 1) : (MOD_REG | JMP_rm | 1); + } + + return code_ptr; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_enter(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si size, pushed_size; + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_enter(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; + compiler->flags_saved = 0; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + size = saveds; + /* Including the return address saved by the call instruction. */ + pushed_size = (saveds + 1) * sizeof(sljit_sw); +#ifndef _WIN64 + if (saveds >= 2) + size += saveds - 1; +#else + if (saveds >= 4) + size += saveds - 3; + if (scratches >= 5) { + size += (5 - 4) * 2; + pushed_size += sizeof(sljit_sw); + } +#endif + size += args * 3; + if (size > 0) { + inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + + INC_SIZE(size); + if (saveds >= 5) { + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_EREG2] >= 8, saved_ereg2_is_hireg); + *inst++ = REX_B; + PUSH_REG(reg_lmap[SLJIT_SAVED_EREG2]); + } + if (saveds >= 4) { + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_EREG1] >= 8, saved_ereg1_is_hireg); + *inst++ = REX_B; + PUSH_REG(reg_lmap[SLJIT_SAVED_EREG1]); + } + if (saveds >= 3) { +#ifndef _WIN64 + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG3] >= 8, saved_reg3_is_hireg); + *inst++ = REX_B; +#else + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG3] < 8, saved_reg3_is_loreg); +#endif + PUSH_REG(reg_lmap[SLJIT_SAVED_REG3]); + } + if (saveds >= 2) { +#ifndef _WIN64 + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG2] >= 8, saved_reg2_is_hireg); + *inst++ = REX_B; +#else + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG2] < 8, saved_reg2_is_loreg); +#endif + PUSH_REG(reg_lmap[SLJIT_SAVED_REG2]); + } + if (saveds >= 1) { + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SAVED_REG1] < 8, saved_reg1_is_loreg); + PUSH_REG(reg_lmap[SLJIT_SAVED_REG1]); + } +#ifdef _WIN64 + if (scratches >= 5) { + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_TEMPORARY_EREG2] >= 8, temporary_ereg2_is_hireg); + *inst++ = REX_B; + PUSH_REG(reg_lmap[SLJIT_TEMPORARY_EREG2]); + } +#endif + +#ifndef _WIN64 + if (args > 0) { + *inst++ = REX_W; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG1] << 3) | 0x7 /* rdi */; + } + if (args > 1) { + *inst++ = REX_W | REX_R; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_lmap[SLJIT_SAVED_REG2] << 3) | 0x6 /* rsi */; + } + if (args > 2) { + *inst++ = REX_W | REX_R; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_lmap[SLJIT_SAVED_REG3] << 3) | 0x2 /* rdx */; + } +#else + if (args > 0) { + *inst++ = REX_W; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG1] << 3) | 0x1 /* rcx */; + } + if (args > 1) { + *inst++ = REX_W; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG2] << 3) | 0x2 /* rdx */; + } + if (args > 2) { + *inst++ = REX_W | REX_B; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[SLJIT_SAVED_REG3] << 3) | 0x0 /* r8 */; + } +#endif + } + + local_size = ((local_size + FIXED_LOCALS_OFFSET + pushed_size + 16 - 1) & ~(16 - 1)) - pushed_size; + compiler->local_size = local_size; +#ifdef _WIN64 + if (local_size > 1024) { + /* Allocate stack for the callback, which grows the stack. 
*/ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 + (3 + sizeof(sljit_si))); + FAIL_IF(!inst); + INC_SIZE(4 + (3 + sizeof(sljit_si))); + *inst++ = REX_W; + *inst++ = GROUP_BINARY_83; + *inst++ = MOD_REG | SUB | 4; + /* Pushed size must be divisible by 8. */ + SLJIT_ASSERT(!(pushed_size & 0x7)); + if (pushed_size & 0x8) { + *inst++ = 5 * sizeof(sljit_sw); + local_size -= 5 * sizeof(sljit_sw); + } else { + *inst++ = 4 * sizeof(sljit_sw); + local_size -= 4 * sizeof(sljit_sw); + } + /* Second instruction */ + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SCRATCH_REG1] < 8, temporary_reg1_is_loreg); + *inst++ = REX_W; + *inst++ = MOV_rm_i32; + *inst++ = MOD_REG | reg_lmap[SLJIT_SCRATCH_REG1]; + *(sljit_si*)inst = local_size; +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->skip_checks = 1; +#endif + FAIL_IF(sljit_emit_ijump(compiler, SLJIT_CALL1, SLJIT_IMM, SLJIT_FUNC_OFFSET(sljit_grow_stack))); + } +#endif + SLJIT_ASSERT(local_size > 0); + if (local_size <= 127) { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + *inst++ = REX_W; + *inst++ = GROUP_BINARY_83; + *inst++ = MOD_REG | SUB | 4; + *inst++ = local_size; + } + else { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 7); + FAIL_IF(!inst); + INC_SIZE(7); + *inst++ = REX_W; + *inst++ = GROUP_BINARY_81; + *inst++ = MOD_REG | SUB | 4; + *(sljit_si*)inst = local_size; + inst += sizeof(sljit_si); + } +#ifdef _WIN64 + /* Save xmm6 with MOVAPS instruction. */ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); + FAIL_IF(!inst); + INC_SIZE(5); + *inst++ = GROUP_0F; + *(sljit_si*)inst = 0x20247429; +#endif + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_context(struct sljit_compiler *compiler, sljit_si args, sljit_si scratches, sljit_si saveds, sljit_si local_size) +{ + sljit_si pushed_size; + + CHECK_ERROR_VOID(); + check_sljit_set_context(compiler, args, scratches, saveds, local_size); + + compiler->scratches = scratches; + compiler->saveds = saveds; +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->logical_local_size = local_size; +#endif + + /* Including the return address saved by the call instruction. */ + pushed_size = (saveds + 1) * sizeof(sljit_sw); +#ifdef _WIN64 + if (scratches >= 5) + pushed_size += sizeof(sljit_sw); +#endif + compiler->local_size = ((local_size + FIXED_LOCALS_OFFSET + pushed_size + 16 - 1) & ~(16 - 1)) - pushed_size; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_return(struct sljit_compiler *compiler, sljit_si op, sljit_si src, sljit_sw srcw) +{ + sljit_si size; + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_return(compiler, op, src, srcw); + + compiler->flags_saved = 0; + FAIL_IF(emit_mov_before_return(compiler, op, src, srcw)); + +#ifdef _WIN64 + /* Restore xmm6 with MOVAPS instruction. 
*/ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); + FAIL_IF(!inst); + INC_SIZE(5); + *inst++ = GROUP_0F; + *(sljit_si*)inst = 0x20247428; +#endif + SLJIT_ASSERT(compiler->local_size > 0); + if (compiler->local_size <= 127) { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + *inst++ = REX_W; + *inst++ = GROUP_BINARY_83; + *inst++ = MOD_REG | ADD | 4; + *inst = compiler->local_size; + } + else { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 7); + FAIL_IF(!inst); + INC_SIZE(7); + *inst++ = REX_W; + *inst++ = GROUP_BINARY_81; + *inst++ = MOD_REG | ADD | 4; + *(sljit_si*)inst = compiler->local_size; + } + + size = 1 + compiler->saveds; +#ifndef _WIN64 + if (compiler->saveds >= 2) + size += compiler->saveds - 1; +#else + if (compiler->saveds >= 4) + size += compiler->saveds - 3; + if (compiler->scratches >= 5) + size += (5 - 4) * 2; +#endif + inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + + INC_SIZE(size); + +#ifdef _WIN64 + if (compiler->scratches >= 5) { + *inst++ = REX_B; + POP_REG(reg_lmap[SLJIT_TEMPORARY_EREG2]); + } +#endif + if (compiler->saveds >= 1) + POP_REG(reg_map[SLJIT_SAVED_REG1]); + if (compiler->saveds >= 2) { +#ifndef _WIN64 + *inst++ = REX_B; +#endif + POP_REG(reg_lmap[SLJIT_SAVED_REG2]); + } + if (compiler->saveds >= 3) { +#ifndef _WIN64 + *inst++ = REX_B; +#endif + POP_REG(reg_lmap[SLJIT_SAVED_REG3]); + } + if (compiler->saveds >= 4) { + *inst++ = REX_B; + POP_REG(reg_lmap[SLJIT_SAVED_EREG1]); + } + if (compiler->saveds >= 5) { + *inst++ = REX_B; + POP_REG(reg_lmap[SLJIT_SAVED_EREG2]); + } + + RET(); + return SLJIT_SUCCESS; +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +static sljit_si emit_do_imm32(struct sljit_compiler *compiler, sljit_ub rex, sljit_ub opcode, sljit_sw imm) +{ + sljit_ub *inst; + sljit_si length = 1 + (rex ? 1 : 0) + sizeof(sljit_si); + + inst = (sljit_ub*)ensure_buf(compiler, 1 + length); + FAIL_IF(!inst); + INC_SIZE(length); + if (rex) + *inst++ = rex; + *inst++ = opcode; + *(sljit_si*)inst = imm; + return SLJIT_SUCCESS; +} + +static sljit_ub* emit_x86_instruction(struct sljit_compiler *compiler, sljit_si size, + /* The register or immediate operand. */ + sljit_si a, sljit_sw imma, + /* The general operand (not immediate). */ + sljit_si b, sljit_sw immb) +{ + sljit_ub *inst; + sljit_ub *buf_ptr; + sljit_ub rex = 0; + sljit_si flags = size & ~0xf; + sljit_si inst_size; + + /* The immediate operand must be 32 bit. */ + SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma)); + /* Both cannot be switched on. */ + SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS)); + /* Size flags not allowed for typed instructions. */ + SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0); + /* Both size flags cannot be switched on. */ + SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG)); +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + /* SSE2 and immediate is not possible. 
*/ + SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2)); + SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3) + && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66) + && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66)); +#endif + + size &= 0xf; + inst_size = size; + + if ((b & SLJIT_MEM) && !(b & 0xf0) && NOT_HALFWORD(immb)) { + if (emit_load_imm64(compiler, TMP_REG3, immb)) + return NULL; + immb = 0; + if (b & 0xf) + b |= TMP_REG3 << 4; + else + b |= TMP_REG3; + } + + if (!compiler->mode32 && !(flags & EX86_NO_REXW)) + rex |= REX_W; + else if (flags & EX86_REX) + rex |= REX; + +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + if (flags & (EX86_PREF_F2 | EX86_PREF_F3)) + inst_size++; +#endif + if (flags & EX86_PREF_66) + inst_size++; + + /* Calculate size of b. */ + inst_size += 1; /* mod r/m byte. */ + if (b & SLJIT_MEM) { + if ((b & 0x0f) == SLJIT_UNUSED) + inst_size += 1 + sizeof(sljit_si); /* SIB byte required to avoid RIP based addressing. */ + else { + if (reg_map[b & 0x0f] >= 8) + rex |= REX_B; + if (immb != 0 && !(b & 0xf0)) { + /* Immediate operand. */ + if (immb <= 127 && immb >= -128) + inst_size += sizeof(sljit_sb); + else + inst_size += sizeof(sljit_si); + } + } + + if ((b & 0xf) == SLJIT_LOCALS_REG && !(b & 0xf0)) + b |= SLJIT_LOCALS_REG << 4; + + if ((b & 0xf0) != SLJIT_UNUSED) { + inst_size += 1; /* SIB byte. */ + if (reg_map[(b >> 4) & 0x0f] >= 8) + rex |= REX_X; + } + } +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + else if (!(flags & EX86_SSE2) && reg_map[b] >= 8) + rex |= REX_B; +#else + else if (reg_map[b] >= 8) + rex |= REX_B; +#endif + + if (a & SLJIT_IMM) { + if (flags & EX86_BIN_INS) { + if (imma <= 127 && imma >= -128) { + inst_size += 1; + flags |= EX86_BYTE_ARG; + } else + inst_size += 4; + } + else if (flags & EX86_SHIFT_INS) { + imma &= compiler->mode32 ? 0x1f : 0x3f; + if (imma != 1) { + inst_size ++; + flags |= EX86_BYTE_ARG; + } + } else if (flags & EX86_BYTE_ARG) + inst_size++; + else if (flags & EX86_HALF_ARG) + inst_size += sizeof(short); + else + inst_size += sizeof(sljit_si); + } + else { + SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG); + /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */ +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + if (!(flags & EX86_SSE2) && reg_map[a] >= 8) + rex |= REX_R; +#else + if (reg_map[a] >= 8) + rex |= REX_R; +#endif + } + + if (rex) + inst_size++; + + inst = (sljit_ub*)ensure_buf(compiler, 1 + inst_size); + PTR_FAIL_IF(!inst); + + /* Encoding the byte. */ + INC_SIZE(inst_size); +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + if (flags & EX86_PREF_F2) + *inst++ = 0xf2; + if (flags & EX86_PREF_F3) + *inst++ = 0xf3; +#endif + if (flags & EX86_PREF_66) + *inst++ = 0x66; + if (rex) + *inst++ = rex; + buf_ptr = inst + size; + + /* Encode mod/rm byte. */ + if (!(flags & EX86_SHIFT_INS)) { + if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM)) + *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81; + + if ((a & SLJIT_IMM) || (a == 0)) + *buf_ptr = 0; +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + else if (!(flags & EX86_SSE2)) + *buf_ptr = reg_lmap[a] << 3; + else + *buf_ptr = a << 3; +#else + else + *buf_ptr = reg_lmap[a] << 3; +#endif + } + else { + if (a & SLJIT_IMM) { + if (imma == 1) + *inst = GROUP_SHIFT_1; + else + *inst = GROUP_SHIFT_N; + } else + *inst = GROUP_SHIFT_CL; + *buf_ptr = 0; + } + + if (!(b & SLJIT_MEM)) +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2)) ? 
reg_lmap[b] : b); +#else + *buf_ptr++ |= MOD_REG + reg_lmap[b]; +#endif + else if ((b & 0x0f) != SLJIT_UNUSED) { + if ((b & 0xf0) == SLJIT_UNUSED || (b & 0xf0) == (SLJIT_LOCALS_REG << 4)) { + if (immb != 0) { + if (immb <= 127 && immb >= -128) + *buf_ptr |= 0x40; + else + *buf_ptr |= 0x80; + } + + if ((b & 0xf0) == SLJIT_UNUSED) + *buf_ptr++ |= reg_lmap[b & 0x0f]; + else { + *buf_ptr++ |= 0x04; + *buf_ptr++ = reg_lmap[b & 0x0f] | (reg_lmap[(b >> 4) & 0x0f] << 3); + } + + if (immb != 0) { + if (immb <= 127 && immb >= -128) + *buf_ptr++ = immb; /* 8 bit displacement. */ + else { + *(sljit_si*)buf_ptr = immb; /* 32 bit displacement. */ + buf_ptr += sizeof(sljit_si); + } + } + } + else { + *buf_ptr++ |= 0x04; + *buf_ptr++ = reg_lmap[b & 0x0f] | (reg_lmap[(b >> 4) & 0x0f] << 3) | (immb << 6); + } + } + else { + *buf_ptr++ |= 0x04; + *buf_ptr++ = 0x25; + *(sljit_si*)buf_ptr = immb; /* 32 bit displacement. */ + buf_ptr += sizeof(sljit_si); + } + + if (a & SLJIT_IMM) { + if (flags & EX86_BYTE_ARG) + *buf_ptr = imma; + else if (flags & EX86_HALF_ARG) + *(short*)buf_ptr = imma; + else if (!(flags & EX86_SHIFT_INS)) + *(sljit_si*)buf_ptr = imma; + } + + return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1); +} + +/* --------------------------------------------------------------------- */ +/* Call / return instructions */ +/* --------------------------------------------------------------------- */ + +static SLJIT_INLINE sljit_si call_with_args(struct sljit_compiler *compiler, sljit_si type) +{ + sljit_ub *inst; + +#ifndef _WIN64 + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SCRATCH_REG2] == 6 && reg_map[SLJIT_SCRATCH_REG1] < 8 && reg_map[SLJIT_SCRATCH_REG3] < 8, args_registers); + + inst = (sljit_ub*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6)); + FAIL_IF(!inst); + INC_SIZE((type < SLJIT_CALL3) ? 3 : 6); + if (type >= SLJIT_CALL3) { + *inst++ = REX_W; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (0x2 /* rdx */ << 3) | reg_lmap[SLJIT_SCRATCH_REG3]; + } + *inst++ = REX_W; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (0x7 /* rdi */ << 3) | reg_lmap[SLJIT_SCRATCH_REG1]; +#else + SLJIT_COMPILE_ASSERT(reg_map[SLJIT_SCRATCH_REG2] == 2 && reg_map[SLJIT_SCRATCH_REG1] < 8 && reg_map[SLJIT_SCRATCH_REG3] < 8, args_registers); + + inst = (sljit_ub*)ensure_buf(compiler, 1 + ((type < SLJIT_CALL3) ? 3 : 6)); + FAIL_IF(!inst); + INC_SIZE((type < SLJIT_CALL3) ? 3 : 6); + if (type >= SLJIT_CALL3) { + *inst++ = REX_W | REX_R; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (0x0 /* r8 */ << 3) | reg_lmap[SLJIT_SCRATCH_REG3]; + } + *inst++ = REX_W; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (0x1 /* rcx */ << 3) | reg_lmap[SLJIT_SCRATCH_REG1]; +#endif + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_enter(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw) +{ + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_fast_enter(compiler, dst, dstw); + ADJUST_LOCAL_OFFSET(dst, dstw); + + /* For UNUSED dst. Uncommon, but possible. */ + if (dst == SLJIT_UNUSED) + dst = TMP_REGISTER; + + if (dst <= TMP_REGISTER) { + if (reg_map[dst] < 8) { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + POP_REG(reg_lmap[dst]); + return SLJIT_SUCCESS; + } + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 2); + FAIL_IF(!inst); + INC_SIZE(2); + *inst++ = REX_B; + POP_REG(reg_lmap[dst]); + return SLJIT_SUCCESS; + } + + /* REX_W is not necessary (src is not immediate). 
*/ + compiler->mode32 = 1; + inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); + FAIL_IF(!inst); + *inst++ = POP_rm; + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fast_return(struct sljit_compiler *compiler, sljit_si src, sljit_sw srcw) +{ + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_fast_return(compiler, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + if ((src & SLJIT_IMM) && NOT_HALFWORD(srcw)) { + FAIL_IF(emit_load_imm64(compiler, TMP_REGISTER, srcw)); + src = TMP_REGISTER; + } + + if (src <= TMP_REGISTER) { + if (reg_map[src] < 8) { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 1); + FAIL_IF(!inst); + + INC_SIZE(1 + 1); + PUSH_REG(reg_lmap[src]); + } + else { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 2 + 1); + FAIL_IF(!inst); + + INC_SIZE(2 + 1); + *inst++ = REX_B; + PUSH_REG(reg_lmap[src]); + } + } + else if (src & SLJIT_MEM) { + /* REX_W is not necessary (src is not immediate). */ + compiler->mode32 = 1; + inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_FF; + *inst |= PUSH_rm; + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + } + else { + SLJIT_ASSERT(IS_HALFWORD(srcw)); + /* SLJIT_IMM. */ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 5 + 1); + FAIL_IF(!inst); + + INC_SIZE(5 + 1); + *inst++ = PUSH_i32; + *(sljit_si*)inst = srcw; + inst += sizeof(sljit_si); + } + + RET(); + return SLJIT_SUCCESS; +} + + +/* --------------------------------------------------------------------- */ +/* Extend input */ +/* --------------------------------------------------------------------- */ + +static sljit_si emit_mov_int(struct sljit_compiler *compiler, sljit_si sign, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + sljit_si dst_r; + + compiler->mode32 = 0; + + if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM)) + return SLJIT_SUCCESS; /* Empty instruction. */ + + if (src & SLJIT_IMM) { + if (dst <= TMP_REGISTER) { + if (sign || ((sljit_uw)srcw <= 0x7fffffff)) { + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_si)srcw, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_i32; + return SLJIT_SUCCESS; + } + return emit_load_imm64(compiler, dst, srcw); + } + compiler->mode32 = 1; + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, (sljit_sw)(sljit_si)srcw, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_i32; + compiler->mode32 = 0; + return SLJIT_SUCCESS; + } + + dst_r = (dst <= TMP_REGISTER) ? 
dst : TMP_REGISTER; + + if ((dst & SLJIT_MEM) && (src <= TMP_REGISTER)) + dst_r = src; + else { + if (sign) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = MOVSXD_r_rm; + } else { + compiler->mode32 = 1; + FAIL_IF(emit_mov(compiler, dst_r, 0, src, srcw)); + compiler->mode32 = 0; + } + } + + if (dst & SLJIT_MEM) { + compiler->mode32 = 1; + inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_r; + compiler->mode32 = 0; + } + + return SLJIT_SUCCESS; +} Property changes on: sys/contrib/sljit/sljitNativeX86_64.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitNativeX86_common.c =================================================================== --- sys/contrib/sljit/sljitNativeX86_common.c (revision 0) +++ sys/contrib/sljit/sljitNativeX86_common.c (working copy) @@ -0,0 +1,2836 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +SLJIT_API_FUNC_ATTRIBUTE SLJIT_CONST char* sljit_get_platform_name(void) +{ + return "x86" SLJIT_CPUINFO; +} + +/* + 32b register indexes: + 0 - EAX + 1 - ECX + 2 - EDX + 3 - EBX + 4 - none + 5 - EBP + 6 - ESI + 7 - EDI +*/ + +/* + 64b register indexes: + 0 - RAX + 1 - RCX + 2 - RDX + 3 - RBX + 4 - none + 5 - RBP + 6 - RSI + 7 - RDI + 8 - R8 - From now on REX prefix is required + 9 - R9 + 10 - R10 + 11 - R11 + 12 - R12 + 13 - R13 + 14 - R14 + 15 - R15 +*/ + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + +/* Last register + 1. 
*/ +#define TMP_REGISTER (SLJIT_NO_REGISTERS + 1) + +static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 2] = { + 0, 0, 2, 1, 0, 0, 3, 6, 7, 0, 0, 4, 5 +}; + +#define CHECK_EXTRA_REGS(p, w, do) \ + if (p >= SLJIT_TEMPORARY_EREG1 && p <= SLJIT_TEMPORARY_EREG2) { \ + w = compiler->scratches_start + (p - SLJIT_TEMPORARY_EREG1) * sizeof(sljit_sw); \ + p = SLJIT_MEM1(SLJIT_LOCALS_REG); \ + do; \ + } \ + else if (p >= SLJIT_SAVED_EREG1 && p <= SLJIT_SAVED_EREG2) { \ + w = compiler->saveds_start + (p - SLJIT_SAVED_EREG1) * sizeof(sljit_sw); \ + p = SLJIT_MEM1(SLJIT_LOCALS_REG); \ + do; \ + } + +#else /* SLJIT_CONFIG_X86_32 */ + +/* Last register + 1. */ +#define TMP_REGISTER (SLJIT_NO_REGISTERS + 1) +#define TMP_REG2 (SLJIT_NO_REGISTERS + 2) +#define TMP_REG3 (SLJIT_NO_REGISTERS + 3) + +/* Note: r12 & 0x7 == 0b100, which is decoded as SIB byte present + Note: avoid using r12 and r13 for memory addressing; + therefore r12 is better for SAVED_EREG than SAVED_REG. */ +#ifndef _WIN64 +/* 1st passed in rdi, 2nd argument passed in rsi, 3rd in rdx. */ +static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { + 0, 0, 6, 1, 8, 11, 3, 15, 14, 13, 12, 4, 2, 7, 9 +}; +/* low-map. reg_map & 0x7. */ +static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { + 0, 0, 6, 1, 0, 3, 3, 7, 6, 5, 4, 4, 2, 7, 1 +}; +#else +/* 1st passed in rcx, 2nd argument passed in rdx, 3rd in r8. */ +static SLJIT_CONST sljit_ub reg_map[SLJIT_NO_REGISTERS + 4] = { + 0, 0, 2, 1, 11, 13, 3, 6, 7, 14, 15, 4, 10, 8, 9 +}; +/* low-map. reg_map & 0x7. */ +static SLJIT_CONST sljit_ub reg_lmap[SLJIT_NO_REGISTERS + 4] = { + 0, 0, 2, 1, 3, 5, 3, 6, 7, 6, 7, 4, 2, 0, 1 +}; +#endif + +#define REX_W 0x48 +#define REX_R 0x44 +#define REX_X 0x42 +#define REX_B 0x41 +#define REX 0x40 + +#define IS_HALFWORD(x) ((x) <= 0x7fffffffll && (x) >= -0x80000000ll) +#define NOT_HALFWORD(x) ((x) > 0x7fffffffll || (x) < -0x80000000ll) + +#define CHECK_EXTRA_REGS(p, w, do) + +#endif /* SLJIT_CONFIG_X86_32 */ + +#if (defined SLJIT_SSE2 && SLJIT_SSE2) +#define TMP_FREG (0) +#endif + +/* Size flags for emit_x86_instruction: */ +#define EX86_BIN_INS 0x0010 +#define EX86_SHIFT_INS 0x0020 +#define EX86_REX 0x0040 +#define EX86_NO_REXW 0x0080 +#define EX86_BYTE_ARG 0x0100 +#define EX86_HALF_ARG 0x0200 +#define EX86_PREF_66 0x0400 + +#if (defined SLJIT_SSE2 && SLJIT_SSE2) +#define EX86_SSE2 0x0800 +#define EX86_PREF_F2 0x1000 +#define EX86_PREF_F3 0x2000 +#endif + +/* --------------------------------------------------------------------- */ +/* Instruction forms */ +/* --------------------------------------------------------------------- */ + +#define ADD (/* BINARY */ 0 << 3) +#define ADD_EAX_i32 0x05 +#define ADD_r_rm 0x03 +#define ADD_rm_r 0x01 +#define ADDSD_x_xm 0x58 +#define ADC (/* BINARY */ 2 << 3) +#define ADC_EAX_i32 0x15 +#define ADC_r_rm 0x13 +#define ADC_rm_r 0x11 +#define AND (/* BINARY */ 4 << 3) +#define AND_EAX_i32 0x25 +#define AND_r_rm 0x23 +#define AND_rm_r 0x21 +#define ANDPD_x_xm 0x54 +#define BSR_r_rm (/* GROUP_0F */ 0xbd) +#define CALL_i32 0xe8 +#define CALL_rm (/* GROUP_FF */ 2 << 3) +#define CDQ 0x99 +#define CMOVNE_r_rm (/* GROUP_0F */ 0x45) +#define CMP (/* BINARY */ 7 << 3) +#define CMP_EAX_i32 0x3d +#define CMP_r_rm 0x3b +#define CMP_rm_r 0x39 +#define DIV (/* GROUP_F7 */ 6 << 3) +#define DIVSD_x_xm 0x5e +#define INT3 0xcc +#define IDIV (/* GROUP_F7 */ 7 << 3) +#define IMUL (/* GROUP_F7 */ 5 << 3) +#define IMUL_r_rm (/* GROUP_0F */ 0xaf) +#define IMUL_r_rm_i8 0x6b +#define IMUL_r_rm_i32 0x69 +#define JE_i8 0x74 +#define JMP_i8
0xeb +#define JMP_i32 0xe9 +#define JMP_rm (/* GROUP_FF */ 4 << 3) +#define LEA_r_m 0x8d +#define MOV_r_rm 0x8b +#define MOV_r_i32 0xb8 +#define MOV_rm_r 0x89 +#define MOV_rm_i32 0xc7 +#define MOV_rm8_i8 0xc6 +#define MOV_rm8_r8 0x88 +#define MOVSD_x_xm 0x10 +#define MOVSD_xm_x 0x11 +#define MOVSXD_r_rm 0x63 +#define MOVSX_r_rm8 (/* GROUP_0F */ 0xbe) +#define MOVSX_r_rm16 (/* GROUP_0F */ 0xbf) +#define MOVZX_r_rm8 (/* GROUP_0F */ 0xb6) +#define MOVZX_r_rm16 (/* GROUP_0F */ 0xb7) +#define MUL (/* GROUP_F7 */ 4 << 3) +#define MULSD_x_xm 0x59 +#define NEG_rm (/* GROUP_F7 */ 3 << 3) +#define NOP 0x90 +#define NOT_rm (/* GROUP_F7 */ 2 << 3) +#define OR (/* BINARY */ 1 << 3) +#define OR_r_rm 0x0b +#define OR_EAX_i32 0x0d +#define OR_rm_r 0x09 +#define POP_r 0x58 +#define POP_rm 0x8f +#define POPF 0x9d +#define PUSH_i32 0x68 +#define PUSH_r 0x50 +#define PUSH_rm (/* GROUP_FF */ 6 << 3) +#define PUSHF 0x9c +#define RET_near 0xc3 +#define RET_i16 0xc2 +#define SBB (/* BINARY */ 3 << 3) +#define SBB_EAX_i32 0x1d +#define SBB_r_rm 0x1b +#define SBB_rm_r 0x19 +#define SAR (/* SHIFT */ 7 << 3) +#define SHL (/* SHIFT */ 4 << 3) +#define SHR (/* SHIFT */ 5 << 3) +#define SUB (/* BINARY */ 5 << 3) +#define SUB_EAX_i32 0x2d +#define SUB_r_rm 0x2b +#define SUB_rm_r 0x29 +#define SUBSD_x_xm 0x5c +#define TEST_EAX_i32 0xa9 +#define TEST_rm_r 0x85 +#define UCOMISD_x_xm 0x2e +#define XCHG_EAX_r 0x90 +#define XCHG_r_rm 0x87 +#define XOR (/* BINARY */ 6 << 3) +#define XOR_EAX_i32 0x35 +#define XOR_r_rm 0x33 +#define XOR_rm_r 0x31 +#define XORPD_x_xm 0x57 + +#define GROUP_0F 0x0f +#define GROUP_F7 0xf7 +#define GROUP_FF 0xff +#define GROUP_BINARY_81 0x81 +#define GROUP_BINARY_83 0x83 +#define GROUP_SHIFT_1 0xd1 +#define GROUP_SHIFT_N 0xc1 +#define GROUP_SHIFT_CL 0xd3 + +#define MOD_REG 0xc0 +#define MOD_DISP8 0x40 + +#define INC_SIZE(s) (*inst++ = (s), compiler->size += (s)) + +#define PUSH_REG(r) (*inst++ = (PUSH_r + (r))) +#define POP_REG(r) (*inst++ = (POP_r + (r))) +#define RET() (*inst++ = (RET_near)) +#define RET_I16(n) (*inst++ = (RET_i16), *inst++ = n, *inst++ = 0) +/* r32, r/m32 */ +#define MOV_RM(mod, reg, rm) (*inst++ = (MOV_r_rm), *inst++ = (mod) << 6 | (reg) << 3 | (rm)) + +/* Multithreading does not affect these static variables, since they store + built-in CPU features. They may be overwritten by different threads that + detect the CPU features at the same time, but every thread writes the + same values. */ +#if (defined SLJIT_SSE2 && SLJIT_SSE2) && (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) +static sljit_si cpu_has_sse2 = -1; +#endif +static sljit_si cpu_has_cmov = -1; + +#if defined(_MSC_VER) && (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#if _MSC_VER >= 1400 +#include <intrin.h> +#else +#error "MSVC does not support inline assembly in 64 bit mode" +#endif +#endif /* _MSC_VER && SLJIT_CONFIG_X86_64 */ + +static void get_cpu_features(void) +{ + sljit_ui features; + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) + /* AT&T syntax. */ + __asm__ ( + "pushl %%ebx\n" + "movl $0x1, %%eax\n" + "cpuid\n" + "popl %%ebx\n" + "movl %%edx, %0\n" + : "=g" (features) + : + : "%eax", "%ecx", "%edx" + ); +#elif defined(_MSC_VER) || defined(__BORLANDC__) + /* Intel syntax.
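A hedged sketch of the same leaf-1 query using the <cpuid.h> helper available on GCC/Clang (illustrative alternative only, not used by this file; get_cpu_features() keeps EDX bit 26, SSE2, and bit 15, CMOV):

   #include <cpuid.h>   // GCC/Clang only

   static void detect_features(int *has_sse2, int *has_cmov)
   {
       unsigned int eax, ebx, ecx, edx;
       if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
           *has_sse2 = (edx >> 26) & 1;   // SSE2 feature bit
           *has_cmov = (edx >> 15) & 1;   // CMOV feature bit
       }
   }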
*/ + __asm { + mov eax, 1 + push ebx + cpuid + pop ebx + mov features, edx + } +#else +# error "SLJIT_DETECT_SSE2 is not implemented for this C compiler" +#endif + +#else /* SLJIT_CONFIG_X86_32 */ + +#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_C) + /* AT&T syntax. */ + __asm__ ( + "pushq %%rbx\n" + "movl $0x1, %%eax\n" + "cpuid\n" + "popq %%rbx\n" + "movl %%edx, %0\n" + : "=g" (features) + : + : "%rax", "%rcx", "%rdx" + ); +#elif defined(_MSC_VER) && _MSC_VER >= 1400 + int CPUInfo[4]; + + __cpuid(CPUInfo, 1); + features = (sljit_ui)CPUInfo[3]; +#else + __asm { + mov eax, 1 + push rbx + cpuid + pop rbx + mov features, edx + } +#endif + +#endif /* SLJIT_CONFIG_X86_32 */ + +#if (defined SLJIT_SSE2 && SLJIT_SSE2) && (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) + cpu_has_sse2 = (features >> 26) & 0x1; +#endif + cpu_has_cmov = (features >> 15) & 0x1; +} + +static sljit_ub get_jump_code(sljit_si type) +{ + switch (type) { + case SLJIT_C_EQUAL: + case SLJIT_C_FLOAT_EQUAL: + return 0x84 /* je */; + + case SLJIT_C_NOT_EQUAL: + case SLJIT_C_FLOAT_NOT_EQUAL: + return 0x85 /* jne */; + + case SLJIT_C_LESS: + case SLJIT_C_FLOAT_LESS: + return 0x82 /* jc */; + + case SLJIT_C_GREATER_EQUAL: + case SLJIT_C_FLOAT_GREATER_EQUAL: + return 0x83 /* jae */; + + case SLJIT_C_GREATER: + case SLJIT_C_FLOAT_GREATER: + return 0x87 /* jnbe */; + + case SLJIT_C_LESS_EQUAL: + case SLJIT_C_FLOAT_LESS_EQUAL: + return 0x86 /* jbe */; + + case SLJIT_C_SIG_LESS: + return 0x8c /* jl */; + + case SLJIT_C_SIG_GREATER_EQUAL: + return 0x8d /* jnl */; + + case SLJIT_C_SIG_GREATER: + return 0x8f /* jnle */; + + case SLJIT_C_SIG_LESS_EQUAL: + return 0x8e /* jle */; + + case SLJIT_C_OVERFLOW: + case SLJIT_C_MUL_OVERFLOW: + return 0x80 /* jo */; + + case SLJIT_C_NOT_OVERFLOW: + case SLJIT_C_MUL_NOT_OVERFLOW: + return 0x81 /* jno */; + + case SLJIT_C_FLOAT_UNORDERED: + return 0x8a /* jp */; + + case SLJIT_C_FLOAT_ORDERED: + return 0x8b /* jpo */; + } + return 0; +} + +static sljit_ub* generate_far_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_si type); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +static sljit_ub* generate_fixed_jump(sljit_ub *code_ptr, sljit_sw addr, sljit_si type); +#endif + +static sljit_ub* generate_near_jump_code(struct sljit_jump *jump, sljit_ub *code_ptr, sljit_ub *code, sljit_si type) +{ + sljit_si short_jump; + sljit_uw label_addr; + + if (jump->flags & JUMP_LABEL) + label_addr = (sljit_uw)(code + jump->u.label->size); + else + label_addr = jump->u.target; + short_jump = (sljit_sw)(label_addr - (jump->addr + 2)) >= -128 && (sljit_sw)(label_addr - (jump->addr + 2)) <= 127; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if ((sljit_sw)(label_addr - (jump->addr + 1)) > 0x7fffffffll || (sljit_sw)(label_addr - (jump->addr + 1)) < -0x80000000ll) + return generate_far_jump_code(jump, code_ptr, type); +#endif + + if (type == SLJIT_JUMP) { + if (short_jump) + *code_ptr++ = JMP_i8; + else + *code_ptr++ = JMP_i32; + jump->addr++; + } + else if (type >= SLJIT_FAST_CALL) { + short_jump = 0; + *code_ptr++ = CALL_i32; + jump->addr++; + } + else if (short_jump) { + *code_ptr++ = get_jump_code(type) - 0x10; + jump->addr++; + } + else { + *code_ptr++ = GROUP_0F; + *code_ptr++ = get_jump_code(type); + jump->addr += 2; + } + + if (short_jump) { + jump->flags |= PATCH_MB; + code_ptr += sizeof(sljit_sb); + } else { + jump->flags |= PATCH_MW; +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + code_ptr += sizeof(sljit_sw); +#else + code_ptr += 
sizeof(sljit_si); +#endif + } + + return code_ptr; +} + +SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler) +{ + struct sljit_memory_fragment *buf; + sljit_ub *code; + sljit_ub *code_ptr; + sljit_ub *buf_ptr; + sljit_ub *buf_end; + sljit_ub len; + + struct sljit_label *label; + struct sljit_jump *jump; + struct sljit_const *const_; + + CHECK_ERROR_PTR(); + check_sljit_generate_code(compiler); + reverse_buf(compiler); + + /* Second code generation pass. */ + code = (sljit_ub*)SLJIT_MALLOC_EXEC(compiler->size); + PTR_FAIL_WITH_EXEC_IF(code); + buf = compiler->buf; + + code_ptr = code; + label = compiler->labels; + jump = compiler->jumps; + const_ = compiler->consts; + do { + buf_ptr = buf->memory; + buf_end = buf_ptr + buf->used_size; + do { + len = *buf_ptr++; + if (len > 0) { + /* The code is already generated. */ + SLJIT_MEMMOVE(code_ptr, buf_ptr, len); + code_ptr += len; + buf_ptr += len; + } + else { + if (*buf_ptr >= 4) { + jump->addr = (sljit_uw)code_ptr; + if (!(jump->flags & SLJIT_REWRITABLE_JUMP)) + code_ptr = generate_near_jump_code(jump, code_ptr, code, *buf_ptr - 4); + else + code_ptr = generate_far_jump_code(jump, code_ptr, *buf_ptr - 4); + jump = jump->next; + } + else if (*buf_ptr == 0) { + label->addr = (sljit_uw)code_ptr; + label->size = code_ptr - code; + label = label->next; + } + else if (*buf_ptr == 1) { + const_->addr = ((sljit_uw)code_ptr) - sizeof(sljit_sw); + const_ = const_->next; + } + else { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + *code_ptr++ = (*buf_ptr == 2) ? CALL_i32 : JMP_i32; + buf_ptr++; + *(sljit_sw*)code_ptr = *(sljit_sw*)buf_ptr - ((sljit_sw)code_ptr + sizeof(sljit_sw)); + code_ptr += sizeof(sljit_sw); + buf_ptr += sizeof(sljit_sw) - 1; +#else + code_ptr = generate_fixed_jump(code_ptr, *(sljit_sw*)(buf_ptr + 1), *buf_ptr); + buf_ptr += sizeof(sljit_sw); +#endif + } + buf_ptr++; + } + } while (buf_ptr < buf_end); + SLJIT_ASSERT(buf_ptr == buf_end); + buf = buf->next; + } while (buf); + + SLJIT_ASSERT(!label); + SLJIT_ASSERT(!jump); + SLJIT_ASSERT(!const_); + + jump = compiler->jumps; + while (jump) { + if (jump->flags & PATCH_MB) { + SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_sb))) >= -128 && (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_sb))) <= 127); + *(sljit_ub*)jump->addr = (sljit_ub)(jump->u.label->addr - (jump->addr + sizeof(sljit_sb))); + } else if (jump->flags & PATCH_MW) { + if (jump->flags & JUMP_LABEL) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + *(sljit_sw*)jump->addr = (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_sw))); +#else + SLJIT_ASSERT((sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_si))) >= -0x80000000ll && (sljit_sw)(jump->u.label->addr - (jump->addr + sizeof(sljit_si))) <= 0x7fffffffll); + *(sljit_si*)jump->addr = (sljit_si)(jump->u.label->addr - (jump->addr + sizeof(sljit_si))); +#endif + } + else { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + *(sljit_sw*)jump->addr = (sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_sw))); +#else + SLJIT_ASSERT((sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_si))) >= -0x80000000ll && (sljit_sw)(jump->u.target - (jump->addr + sizeof(sljit_si))) <= 0x7fffffffll); + *(sljit_si*)jump->addr = (sljit_si)(jump->u.target - (jump->addr + sizeof(sljit_si))); +#endif + } + } +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + else if (jump->flags & PATCH_MD) + *(sljit_sw*)jump->addr = jump->u.label->addr; +#endif + + jump = 
jump->next; + } + + /* We may waste some space because of short jumps. */ + SLJIT_ASSERT(code_ptr <= code + compiler->size); + compiler->error = SLJIT_ERR_COMPILED; + compiler->executable_size = compiler->size; + return (void*)code; +} + +/* --------------------------------------------------------------------- */ +/* Operators */ +/* --------------------------------------------------------------------- */ + +static sljit_si emit_cum_binary(struct sljit_compiler *compiler, + sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w); + +static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, + sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w); + +static sljit_si emit_mov(struct sljit_compiler *compiler, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw); + +static SLJIT_INLINE sljit_si emit_save_flags(struct sljit_compiler *compiler) +{ + sljit_ub *inst; + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); + FAIL_IF(!inst); + INC_SIZE(5); +#else + inst = (sljit_ub*)ensure_buf(compiler, 1 + 6); + FAIL_IF(!inst); + INC_SIZE(6); + *inst++ = REX_W; +#endif + *inst++ = LEA_r_m; /* lea esp/rsp, [esp/rsp + sizeof(sljit_sw)] */ + *inst++ = 0x64; + *inst++ = 0x24; + *inst++ = (sljit_ub)sizeof(sljit_sw); + *inst++ = PUSHF; + compiler->flags_saved = 1; + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si emit_restore_flags(struct sljit_compiler *compiler, sljit_si keep_flags) +{ + sljit_ub *inst; + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); + FAIL_IF(!inst); + INC_SIZE(5); + *inst++ = POPF; +#else + inst = (sljit_ub*)ensure_buf(compiler, 1 + 6); + FAIL_IF(!inst); + INC_SIZE(6); + *inst++ = POPF; + *inst++ = REX_W; +#endif + *inst++ = LEA_r_m; /* lea esp/rsp, [esp/rsp - sizeof(sljit_sw)] */ + *inst++ = 0x64; + *inst++ = 0x24; + *inst++ = (sljit_ub)-(sljit_sb)sizeof(sljit_sw); + compiler->flags_saved = keep_flags; + return SLJIT_SUCCESS; +} + +#ifdef _WIN32 +#include <malloc.h> + +static void SLJIT_CALL sljit_grow_stack(sljit_sw local_size) +{ + /* Workaround for calling the internal _chkstk() function on Windows. + This function touches all 4k pages that belong to the requested stack + space, whose size is passed in local_size. This is necessary on Windows, + where the stack can only grow in 4k steps. However, this function just + burns CPU cycles if the stack is already large enough, and you cannot + know that in advance. I think this is a bad design even if it has its + reasons. */ + alloca(local_size); +} + +#endif + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +#include "sljitNativeX86_32.c" +#else +#include "sljitNativeX86_64.c" +#endif + +static sljit_si emit_mov(struct sljit_compiler *compiler, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + + if (dst == SLJIT_UNUSED) { + /* No destination: there is no need to set up flags.
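(One remark on the emit_save_flags/emit_restore_flags pair above: LEA is used for the stack-pointer adjustment precisely because it never modifies EFLAGS. The LEA first moves esp/rsp one word up, then PUSHF moves it back down while writing the flags, so the pair has no net stack-pointer change and leaves the saved flags in the reserved slot at [esp]/[rsp]; the shift code further down relies on this layout. A stack-effect model, as an illustrative sketch only:

   sp = sp + wordsize;                     // lea sp, [sp + wordsize]: flags untouched
   sp = sp - wordsize; mem[sp] = eflags;   // pushf
   // net: sp unchanged, flags stored at the original mem[sp]

Restoring mirrors this with POPF followed by the opposite LEA.)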
*/ + if (src & SLJIT_MEM) { + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src, srcw); + FAIL_IF(!inst); + *inst = MOV_r_rm; + } + return SLJIT_SUCCESS; + } + if (src <= TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, src, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_r; + return SLJIT_SUCCESS; + } + if (src & SLJIT_IMM) { + if (dst <= TMP_REGISTER) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw); +#else + if (!compiler->mode32) { + if (NOT_HALFWORD(srcw)) + return emit_load_imm64(compiler, dst, srcw); + } + else + return emit_do_imm32(compiler, (reg_map[dst] >= 8) ? REX_B : 0, MOV_r_i32 + reg_lmap[dst], srcw); +#endif + } +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (!compiler->mode32 && NOT_HALFWORD(srcw)) { + FAIL_IF(emit_load_imm64(compiler, TMP_REG2, srcw)); + inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_r; + return SLJIT_SUCCESS; + } +#endif + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_i32; + return SLJIT_SUCCESS; + } + if (dst <= TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, dst, 0, src, srcw); + FAIL_IF(!inst); + *inst = MOV_r_rm; + return SLJIT_SUCCESS; + } + + /* Memory to memory move. Requires two instruction. */ + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src, srcw); + FAIL_IF(!inst); + *inst = MOV_r_rm; + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_r; + return SLJIT_SUCCESS; +} + +#define EMIT_MOV(compiler, dst, dstw, src, srcw) \ + FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw)); + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op0(struct sljit_compiler *compiler, sljit_si op) +{ + sljit_ub *inst; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + sljit_si size; +#endif + + CHECK_ERROR(); + check_sljit_emit_op0(compiler, op); + + switch (GET_OPCODE(op)) { + case SLJIT_BREAKPOINT: + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + *inst = INT3; + break; + case SLJIT_NOP: + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + *inst = NOP; + break; + case SLJIT_UMUL: + case SLJIT_SMUL: + case SLJIT_UDIV: + case SLJIT_SDIV: + compiler->flags_saved = 0; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +#ifdef _WIN64 + SLJIT_COMPILE_ASSERT( + reg_map[SLJIT_SCRATCH_REG1] == 0 + && reg_map[SLJIT_SCRATCH_REG2] == 2 + && reg_map[TMP_REGISTER] > 7, + invalid_register_assignment_for_div_mul); +#else + SLJIT_COMPILE_ASSERT( + reg_map[SLJIT_SCRATCH_REG1] == 0 + && reg_map[SLJIT_SCRATCH_REG2] < 7 + && reg_map[TMP_REGISTER] == 2, + invalid_register_assignment_for_div_mul); +#endif + compiler->mode32 = op & SLJIT_INT_OP; +#endif + + op = GET_OPCODE(op); + if (op == SLJIT_UDIV) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64) + EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_SCRATCH_REG2, 0); + inst = emit_x86_instruction(compiler, 1, SLJIT_SCRATCH_REG2, 0, SLJIT_SCRATCH_REG2, 0); +#else + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, TMP_REGISTER, 0); +#endif + FAIL_IF(!inst); + *inst = XOR_r_rm; + } + + if (op == SLJIT_SDIV) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) || defined(_WIN64) + EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_SCRATCH_REG2, 0); +#endif + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + inst = 
(sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + *inst = CDQ; +#else + if (compiler->mode32) { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + *inst = CDQ; + } else { + inst = (sljit_ub*)ensure_buf(compiler, 1 + 2); + FAIL_IF(!inst); + INC_SIZE(2); + *inst++ = REX_W; + *inst = CDQ; + } +#endif + } + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + inst = (sljit_ub*)ensure_buf(compiler, 1 + 2); + FAIL_IF(!inst); + INC_SIZE(2); + *inst++ = GROUP_F7; + *inst = MOD_REG | ((op >= SLJIT_UDIV) ? reg_map[TMP_REGISTER] : reg_map[SLJIT_SCRATCH_REG2]); +#else +#ifdef _WIN64 + size = (!compiler->mode32 || op >= SLJIT_UDIV) ? 3 : 2; +#else + size = (!compiler->mode32) ? 3 : 2; +#endif + inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + INC_SIZE(size); +#ifdef _WIN64 + if (!compiler->mode32) + *inst++ = REX_W | ((op >= SLJIT_UDIV) ? REX_B : 0); + else if (op >= SLJIT_UDIV) + *inst++ = REX_B; + *inst++ = GROUP_F7; + *inst = MOD_REG | ((op >= SLJIT_UDIV) ? reg_lmap[TMP_REGISTER] : reg_lmap[SLJIT_SCRATCH_REG2]); +#else + if (!compiler->mode32) + *inst++ = REX_W; + *inst++ = GROUP_F7; + *inst = MOD_REG | reg_map[SLJIT_SCRATCH_REG2]; +#endif +#endif + switch (op) { + case SLJIT_UMUL: + *inst |= MUL; + break; + case SLJIT_SMUL: + *inst |= IMUL; + break; + case SLJIT_UDIV: + *inst |= DIV; + break; + case SLJIT_SDIV: + *inst |= IDIV; + break; + } +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && !defined(_WIN64) + EMIT_MOV(compiler, SLJIT_SCRATCH_REG2, 0, TMP_REGISTER, 0); +#endif + break; + } + + return SLJIT_SUCCESS; +} + +#define ENCODE_PREFIX(prefix) \ + do { \ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); \ + FAIL_IF(!inst); \ + INC_SIZE(1); \ + *inst = (prefix); \ + } while (0) + +static sljit_si emit_mov_byte(struct sljit_compiler *compiler, sljit_si sign, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + sljit_si dst_r; +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + sljit_si work_r; +#endif + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 0; +#endif + + if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM)) + return SLJIT_SUCCESS; /* Empty instruction. */ + + if (src & SLJIT_IMM) { + if (dst <= TMP_REGISTER) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw); +#else + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0); + FAIL_IF(!inst); + *inst = MOV_rm_i32; + return SLJIT_SUCCESS; +#endif + } + inst = emit_x86_instruction(compiler, 1 | EX86_BYTE_ARG | EX86_NO_REXW, SLJIT_IMM, srcw, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm8_i8; + return SLJIT_SUCCESS; + } + + dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + + if ((dst & SLJIT_MEM) && src <= TMP_REGISTER) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (reg_map[src] >= 4) { + SLJIT_ASSERT(dst_r == TMP_REGISTER); + EMIT_MOV(compiler, TMP_REGISTER, 0, src, 0); + } else + dst_r = src; +#else + dst_r = src; +#endif + } +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + else if (src <= TMP_REGISTER && reg_map[src] >= 4) { + /* src, dst are registers. */ + SLJIT_ASSERT(dst >= SLJIT_SCRATCH_REG1 && dst <= TMP_REGISTER); + if (reg_map[dst] < 4) { + if (dst != src) + EMIT_MOV(compiler, dst, 0, src, 0); + inst = emit_x86_instruction(compiler, 2, dst, 0, dst, 0); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = sign ? 
MOVSX_r_rm8 : MOVZX_r_rm8; + } + else { + if (dst != src) + EMIT_MOV(compiler, dst, 0, src, 0); + if (sign) { + /* shl reg, 24 */ + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0); + FAIL_IF(!inst); + *inst |= SHL; + /* sar reg, 24 */ + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 24, dst, 0); + FAIL_IF(!inst); + *inst |= SAR; + } + else { + inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 0xff, dst, 0); + FAIL_IF(!inst); + *(inst + 1) |= AND; + } + } + return SLJIT_SUCCESS; + } +#endif + else { + /* src can be memory addr or reg_map[src] < 4 on x86_32 architectures. */ + inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = sign ? MOVSX_r_rm8 : MOVZX_r_rm8; + } + + if (dst & SLJIT_MEM) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (dst_r == TMP_REGISTER) { + /* Find a non-used register, whose reg_map[src] < 4. */ + if ((dst & 0xf) == SLJIT_SCRATCH_REG1) { + if ((dst & 0xf0) == (SLJIT_SCRATCH_REG2 << 4)) + work_r = SLJIT_SCRATCH_REG3; + else + work_r = SLJIT_SCRATCH_REG2; + } + else { + if ((dst & 0xf0) != (SLJIT_SCRATCH_REG1 << 4)) + work_r = SLJIT_SCRATCH_REG1; + else if ((dst & 0xf) == SLJIT_SCRATCH_REG2) + work_r = SLJIT_SCRATCH_REG3; + else + work_r = SLJIT_SCRATCH_REG2; + } + + if (work_r == SLJIT_SCRATCH_REG1) { + ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REGISTER]); + } + else { + inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0); + FAIL_IF(!inst); + *inst = XCHG_r_rm; + } + + inst = emit_x86_instruction(compiler, 1, work_r, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm8_r8; + + if (work_r == SLJIT_SCRATCH_REG1) { + ENCODE_PREFIX(XCHG_EAX_r + reg_map[TMP_REGISTER]); + } + else { + inst = emit_x86_instruction(compiler, 1, work_r, 0, dst_r, 0); + FAIL_IF(!inst); + *inst = XCHG_r_rm; + } + } + else { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm8_r8; + } +#else + inst = emit_x86_instruction(compiler, 1 | EX86_REX | EX86_NO_REXW, dst_r, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm8_r8; +#endif + } + + return SLJIT_SUCCESS; +} + +static sljit_si emit_mov_half(struct sljit_compiler *compiler, sljit_si sign, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + sljit_si dst_r; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 0; +#endif + + if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM)) + return SLJIT_SUCCESS; /* Empty instruction. */ + + if (src & SLJIT_IMM) { + if (dst <= TMP_REGISTER) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + return emit_do_imm(compiler, MOV_r_i32 + reg_map[dst], srcw); +#else + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, srcw, dst, 0); + FAIL_IF(!inst); + *inst = MOV_rm_i32; + return SLJIT_SUCCESS; +#endif + } + inst = emit_x86_instruction(compiler, 1 | EX86_HALF_ARG | EX86_NO_REXW | EX86_PREF_66, SLJIT_IMM, srcw, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_i32; + return SLJIT_SUCCESS; + } + + dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + + if ((dst & SLJIT_MEM) && src <= TMP_REGISTER) + dst_r = src; + else { + inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = sign ? 
MOVSX_r_rm16 : MOVZX_r_rm16; + } + + if (dst & SLJIT_MEM) { + inst = emit_x86_instruction(compiler, 1 | EX86_NO_REXW | EX86_PREF_66, dst_r, 0, dst, dstw); + FAIL_IF(!inst); + *inst = MOV_rm_r; + } + + return SLJIT_SUCCESS; +} + +static sljit_si emit_unary(struct sljit_compiler *compiler, sljit_ub opcode, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + + if (dst == SLJIT_UNUSED) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= opcode; + return SLJIT_SUCCESS; + } + if (dst == src && dstw == srcw) { + /* Same input and output */ + inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= opcode; + return SLJIT_SUCCESS; + } + if (dst <= TMP_REGISTER) { + EMIT_MOV(compiler, dst, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= opcode; + return SLJIT_SUCCESS; + } + EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= opcode; + EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + return SLJIT_SUCCESS; +} + +static sljit_si emit_not_with_flags(struct sljit_compiler *compiler, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + + if (dst == SLJIT_UNUSED) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= NOT_rm; + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst = OR_r_rm; + return SLJIT_SUCCESS; + } + if (dst <= TMP_REGISTER) { + EMIT_MOV(compiler, dst, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= NOT_rm; + inst = emit_x86_instruction(compiler, 1, dst, 0, dst, 0); + FAIL_IF(!inst); + *inst = OR_r_rm; + return SLJIT_SUCCESS; + } + EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= NOT_rm; + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst = OR_r_rm; + EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + return SLJIT_SUCCESS; +} + +static sljit_si emit_clz(struct sljit_compiler *compiler, sljit_si op_flags, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + sljit_si dst_r; + + SLJIT_UNUSED_ARG(op_flags); + if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) { + /* Just set the zero flag. */ + EMIT_MOV(compiler, TMP_REGISTER, 0, src, srcw); + inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst++ = GROUP_F7; + *inst |= NOT_rm; +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 31, TMP_REGISTER, 0); +#else + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, !(op_flags & SLJIT_INT_OP) ? 
63 : 31, TMP_REGISTER, 0); +#endif + FAIL_IF(!inst); + *inst |= SHR; + return SLJIT_SUCCESS; + } + + if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { + EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_IMM, srcw); + src = TMP_REGISTER; + srcw = 0; + } + + inst = emit_x86_instruction(compiler, 2, TMP_REGISTER, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = BSR_r_rm; + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (dst <= TMP_REGISTER) + dst_r = dst; + else { + /* Find an unused temporary register. */ + if ((dst & 0xf) != SLJIT_SCRATCH_REG1 && (dst & 0xf0) != (SLJIT_SCRATCH_REG1 << 4)) + dst_r = SLJIT_SCRATCH_REG1; + else if ((dst & 0xf) != SLJIT_SCRATCH_REG2 && (dst & 0xf0) != (SLJIT_SCRATCH_REG2 << 4)) + dst_r = SLJIT_SCRATCH_REG2; + else + dst_r = SLJIT_SCRATCH_REG3; + EMIT_MOV(compiler, dst, dstw, dst_r, 0); + } + EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, 32 + 31); +#else + dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REG2; + compiler->mode32 = 0; + EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, !(op_flags & SLJIT_INT_OP) ? 64 + 63 : 32 + 31); + compiler->mode32 = op_flags & SLJIT_INT_OP; +#endif + + if (cpu_has_cmov == -1) + get_cpu_features(); + + if (cpu_has_cmov) { + inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = CMOVNE_r_rm; + } else { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + + *inst++ = JE_i8; + *inst++ = 2; + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_map[dst_r] << 3) | reg_map[TMP_REGISTER]; +#else + inst = (sljit_ub*)ensure_buf(compiler, 1 + 5); + FAIL_IF(!inst); + INC_SIZE(5); + + *inst++ = JE_i8; + *inst++ = 3; + *inst++ = REX_W | (reg_map[dst_r] >= 8 ? REX_R : 0) | (reg_map[TMP_REGISTER] >= 8 ? REX_B : 0); + *inst++ = MOV_r_rm; + *inst++ = MOD_REG | (reg_lmap[dst_r] << 3) | reg_lmap[TMP_REGISTER]; +#endif + } + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0); +#else + inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_INT_OP) ? 
63 : 31, dst_r, 0); +#endif + FAIL_IF(!inst); + *(inst + 1) |= XOR; + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (dst & SLJIT_MEM) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw); + FAIL_IF(!inst); + *inst = XCHG_r_rm; + } +#else + if (dst & SLJIT_MEM) + EMIT_MOV(compiler, dst, dstw, TMP_REG2, 0); +#endif + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_ub* inst; + sljit_si update = 0; + sljit_si op_flags = GET_ALL_FLAGS(op); +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + sljit_si dst_is_ereg = 0; + sljit_si src_is_ereg = 0; +#else +# define src_is_ereg 0 +#endif + + CHECK_ERROR(); + check_sljit_emit_op1(compiler, op, dst, dstw, src, srcw); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src, srcw); + + CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1); + CHECK_EXTRA_REGS(src, srcw, src_is_ereg = 1); +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = op_flags & SLJIT_INT_OP; +#endif + + op = GET_OPCODE(op); + if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 0; +#endif + + if (op_flags & SLJIT_INT_OP) { + if (src <= TMP_REGISTER && src == dst) { + if (!TYPE_CAST_NEEDED(op)) + return SLJIT_SUCCESS; + } +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (op == SLJIT_MOV_SI && (src & SLJIT_MEM)) + op = SLJIT_MOV_UI; + if (op == SLJIT_MOVU_SI && (src & SLJIT_MEM)) + op = SLJIT_MOVU_UI; + if (op == SLJIT_MOV_UI && (src & SLJIT_IMM)) + op = SLJIT_MOV_SI; + if (op == SLJIT_MOVU_UI && (src & SLJIT_IMM)) + op = SLJIT_MOVU_SI; +#endif + } + + SLJIT_COMPILE_ASSERT(SLJIT_MOV + 8 == SLJIT_MOVU, movu_offset); + if (op >= SLJIT_MOVU) { + update = 1; + op -= 8; + } + + if (src & SLJIT_IMM) { + switch (op) { + case SLJIT_MOV_UB: + srcw = (sljit_ub)srcw; + break; + case SLJIT_MOV_SB: + srcw = (sljit_sb)srcw; + break; + case SLJIT_MOV_UH: + srcw = (sljit_uh)srcw; + break; + case SLJIT_MOV_SH: + srcw = (sljit_sh)srcw; + break; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + case SLJIT_MOV_UI: + srcw = (sljit_ui)srcw; + break; + case SLJIT_MOV_SI: + srcw = (sljit_si)srcw; + break; +#endif + } +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (SLJIT_UNLIKELY(dst_is_ereg)) + return emit_mov(compiler, dst, dstw, src, srcw); +#endif + } + + if (SLJIT_UNLIKELY(update) && (src & SLJIT_MEM) && !src_is_ereg && (src & 0xf) && (srcw != 0 || (src & 0xf0) != 0)) { + inst = emit_x86_instruction(compiler, 1, src & 0xf, 0, src, srcw); + FAIL_IF(!inst); + *inst = LEA_r_m; + src &= SLJIT_MEM | 0xf; + srcw = 0; + } + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (SLJIT_UNLIKELY(dst_is_ereg) && (!(op == SLJIT_MOV || op == SLJIT_MOV_UI || op == SLJIT_MOV_SI || op == SLJIT_MOV_P) || (src & SLJIT_MEM))) { + SLJIT_ASSERT(dst == SLJIT_MEM1(SLJIT_LOCALS_REG)); + dst = TMP_REGISTER; + } +#endif + + switch (op) { + case SLJIT_MOV: + case SLJIT_MOV_P: +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + case SLJIT_MOV_UI: + case SLJIT_MOV_SI: +#endif + FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw)); + break; + case SLJIT_MOV_UB: + FAIL_IF(emit_mov_byte(compiler, 0, dst, dstw, src, srcw)); + break; + case SLJIT_MOV_SB: + FAIL_IF(emit_mov_byte(compiler, 1, dst, dstw, src, srcw)); + break; + case SLJIT_MOV_UH: + FAIL_IF(emit_mov_half(compiler, 0, dst, dstw, src, srcw)); + break; 
+ case SLJIT_MOV_SH: + FAIL_IF(emit_mov_half(compiler, 1, dst, dstw, src, srcw)); + break; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + case SLJIT_MOV_UI: + FAIL_IF(emit_mov_int(compiler, 0, dst, dstw, src, srcw)); + break; + case SLJIT_MOV_SI: + FAIL_IF(emit_mov_int(compiler, 1, dst, dstw, src, srcw)); + break; +#endif + } + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REGISTER) + return emit_mov(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), dstw, TMP_REGISTER, 0); +#endif + + if (SLJIT_UNLIKELY(update) && (dst & SLJIT_MEM) && (dst & 0xf) && (dstw != 0 || (dst & 0xf0) != 0)) { + inst = emit_x86_instruction(compiler, 1, dst & 0xf, 0, dst, dstw); + FAIL_IF(!inst); + *inst = LEA_r_m; + } + return SLJIT_SUCCESS; + } + + if (SLJIT_UNLIKELY(GET_FLAGS(op_flags))) + compiler->flags_saved = 0; + + switch (op) { + case SLJIT_NOT: + if (SLJIT_UNLIKELY(op_flags & SLJIT_SET_E)) + return emit_not_with_flags(compiler, dst, dstw, src, srcw); + return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw); + + case SLJIT_NEG: + if (SLJIT_UNLIKELY(op_flags & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) + FAIL_IF(emit_save_flags(compiler)); + return emit_unary(compiler, NEG_rm, dst, dstw, src, srcw); + + case SLJIT_CLZ: + if (SLJIT_UNLIKELY(op_flags & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) + FAIL_IF(emit_save_flags(compiler)); + return emit_clz(compiler, op_flags, dst, dstw, src, srcw); + } + + return SLJIT_SUCCESS; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) +# undef src_is_ereg +#endif +} + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + +#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \ + if (IS_HALFWORD(immw) || compiler->mode32) { \ + inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \ + FAIL_IF(!inst); \ + *(inst + 1) |= (op_imm); \ + } \ + else { \ + FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immw)); \ + inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, arg, argw); \ + FAIL_IF(!inst); \ + *inst = (op_mr); \ + } + +#define BINARY_EAX_IMM(op_eax_imm, immw) \ + FAIL_IF(emit_do_imm32(compiler, (!compiler->mode32) ? 
REX_W : 0, (op_eax_imm), immw)) + +#else + +#define BINARY_IMM(op_imm, op_mr, immw, arg, argw) \ + inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, immw, arg, argw); \ + FAIL_IF(!inst); \ + *(inst + 1) |= (op_imm); + +#define BINARY_EAX_IMM(op_eax_imm, immw) \ + FAIL_IF(emit_do_imm(compiler, (op_eax_imm), immw)) + +#endif + +static sljit_si emit_cum_binary(struct sljit_compiler *compiler, + sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_ub* inst; + + if (dst == SLJIT_UNUSED) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + if (src2 & SLJIT_IMM) { + BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + } + else { + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + return SLJIT_SUCCESS; + } + + if (dst == src1 && dstw == src1w) { + if (src2 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { +#else + if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128)) { +#endif + BINARY_EAX_IMM(op_eax_imm, src2w); + } + else { + BINARY_IMM(op_imm, op_mr, src2w, dst, dstw); + } + } + else if (dst <= TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, dst, dstw, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + else if (src2 <= TMP_REGISTER) { + /* Special exception for sljit_emit_op_flags. */ + inst = emit_x86_instruction(compiler, 1, src2, src2w, dst, dstw); + FAIL_IF(!inst); + *inst = op_mr; + } + else { + EMIT_MOV(compiler, TMP_REGISTER, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + FAIL_IF(!inst); + *inst = op_mr; + } + return SLJIT_SUCCESS; + } + + /* Only for cumulative operations. */ + if (dst == src2 && dstw == src2w) { + if (src1 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if ((dst == SLJIT_SCRATCH_REG1) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) { +#else + if ((dst == SLJIT_SCRATCH_REG1) && (src1w > 127 || src1w < -128)) { +#endif + BINARY_EAX_IMM(op_eax_imm, src1w); + } + else { + BINARY_IMM(op_imm, op_mr, src1w, dst, dstw); + } + } + else if (dst <= TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, dst, dstw, src1, src1w); + FAIL_IF(!inst); + *inst = op_rm; + } + else if (src1 <= TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, src1, src1w, dst, dstw); + FAIL_IF(!inst); + *inst = op_mr; + } + else { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + FAIL_IF(!inst); + *inst = op_mr; + } + return SLJIT_SUCCESS; + } + + /* General version. */ + if (dst <= TMP_REGISTER) { + EMIT_MOV(compiler, dst, 0, src1, src1w); + if (src2 & SLJIT_IMM) { + BINARY_IMM(op_imm, op_mr, src2w, dst, 0); + } + else { + inst = emit_x86_instruction(compiler, 1, dst, 0, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + } + else { + /* This version requires less memory writing. 
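For reference, the immediate forms selected around BINARY_IMM/BINARY_EAX_IMM trade encoding size: group 0x83 takes a sign-extended 8-bit immediate, group 0x81 a full 32-bit one, and the accumulator short forms (the *_EAX_i32 opcodes) have no ModRM byte at all. A sketch of the resulting instruction lengths in 32-bit mode (illustrative; prefixes and displacements ignored):

   // why dst == SLJIT_SCRATCH_REG1 with a non-byte immediate prefers the EAX form
   static int imm_form_len(int is_eax, long imm)
   {
       if (imm >= -128 && imm <= 127)
           return 3;   // 0x83 group: opcode + ModRM + imm8
       if (is_eax)
           return 5;   // short form, e.g. ADD_EAX_i32: opcode + imm32
       return 6;       // 0x81 group: opcode + ModRM + imm32
   }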
*/ + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + if (src2 & SLJIT_IMM) { + BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + } + else { + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + } + + return SLJIT_SUCCESS; +} + +static sljit_si emit_non_cum_binary(struct sljit_compiler *compiler, + sljit_ub op_rm, sljit_ub op_mr, sljit_ub op_imm, sljit_ub op_eax_imm, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_ub* inst; + + if (dst == SLJIT_UNUSED) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + if (src2 & SLJIT_IMM) { + BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + } + else { + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + return SLJIT_SUCCESS; + } + + if (dst == src1 && dstw == src1w) { + if (src2 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { +#else + if ((dst == SLJIT_SCRATCH_REG1) && (src2w > 127 || src2w < -128)) { +#endif + BINARY_EAX_IMM(op_eax_imm, src2w); + } + else { + BINARY_IMM(op_imm, op_mr, src2w, dst, dstw); + } + } + else if (dst <= TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, dst, dstw, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + else if (src2 <= TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, src2, src2w, dst, dstw); + FAIL_IF(!inst); + *inst = op_mr; + } + else { + EMIT_MOV(compiler, TMP_REGISTER, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, dst, dstw); + FAIL_IF(!inst); + *inst = op_mr; + } + return SLJIT_SUCCESS; + } + + /* General version. */ + if (dst <= TMP_REGISTER && dst != src2) { + EMIT_MOV(compiler, dst, 0, src1, src1w); + if (src2 & SLJIT_IMM) { + BINARY_IMM(op_imm, op_mr, src2w, dst, 0); + } + else { + inst = emit_x86_instruction(compiler, 1, dst, 0, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + } + else { + /* This version requires less memory writing. */ + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + if (src2 & SLJIT_IMM) { + BINARY_IMM(op_imm, op_mr, src2w, TMP_REGISTER, 0); + } + else { + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + FAIL_IF(!inst); + *inst = op_rm; + } + EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + } + + return SLJIT_SUCCESS; +} + +static sljit_si emit_mul(struct sljit_compiler *compiler, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_ub* inst; + sljit_si dst_r; + + dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + + /* Register destination. 
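The code below chooses between the three IMUL encodings defined earlier; roughly, as a sketch of the lowering:

   dst = src * imm, -128 <= imm <= 127  ->  IMUL_r_rm_i8  (imul dst, src, imm8)
   dst = src * imm, wider immediate     ->  IMUL_r_rm_i32 (imul dst, src, imm32)
   dst = dst * src                      ->  IMUL_r_rm     (0F AF, two-operand form)

On 64-bit targets an immediate that does not fit in 32 bits is first loaded into TMP_REG2 and the two-operand form is used instead.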
*/ + if (dst_r == src1 && !(src2 & SLJIT_IMM)) { + inst = emit_x86_instruction(compiler, 2, dst_r, 0, src2, src2w); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = IMUL_r_rm; + } + else if (dst_r == src2 && !(src1 & SLJIT_IMM)) { + inst = emit_x86_instruction(compiler, 2, dst_r, 0, src1, src1w); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = IMUL_r_rm; + } + else if (src1 & SLJIT_IMM) { + if (src2 & SLJIT_IMM) { + EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, src2w); + src2 = dst_r; + src2w = 0; + } + + if (src1w <= 127 && src1w >= -128) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w); + FAIL_IF(!inst); + *inst = IMUL_r_rm_i8; + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + *inst = (sljit_sb)src1w; + } +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + else { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w); + FAIL_IF(!inst); + *inst = IMUL_r_rm_i32; + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + *(sljit_sw*)inst = src1w; + } +#else + else if (IS_HALFWORD(src1w)) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, src2, src2w); + FAIL_IF(!inst); + *inst = IMUL_r_rm_i32; + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + *(sljit_si*)inst = (sljit_si)src1w; + } + else { + EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, src1w); + if (dst_r != src2) + EMIT_MOV(compiler, dst_r, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = IMUL_r_rm; + } +#endif + } + else if (src2 & SLJIT_IMM) { + /* Note: src1 is NOT immediate. */ + + if (src2w <= 127 && src2w >= -128) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w); + FAIL_IF(!inst); + *inst = IMUL_r_rm_i8; + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1); + FAIL_IF(!inst); + INC_SIZE(1); + *inst = (sljit_sb)src2w; + } +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + else { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w); + FAIL_IF(!inst); + *inst = IMUL_r_rm_i32; + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + *(sljit_sw*)inst = src2w; + } +#else + else if (IS_HALFWORD(src2w)) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, src1, src1w); + FAIL_IF(!inst); + *inst = IMUL_r_rm_i32; + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4); + FAIL_IF(!inst); + INC_SIZE(4); + *(sljit_si*)inst = (sljit_si)src2w; + } + else { + EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, src1w); + if (dst_r != src1) + EMIT_MOV(compiler, dst_r, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = IMUL_r_rm; + } +#endif + } + else { + /* Neither argument is immediate. */ + if (ADDRESSING_DEPENDS_ON(src2, dst_r)) + dst_r = TMP_REGISTER; + EMIT_MOV(compiler, dst_r, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 2, dst_r, 0, src2, src2w); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = IMUL_r_rm; + } + + if (dst_r == TMP_REGISTER) + EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + + return SLJIT_SUCCESS; +} + +static sljit_si emit_lea_binary(struct sljit_compiler *compiler, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_ub* inst; + sljit_si dst_r, done = 0; + + /* These cases better be left to handled by normal way. 
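(The reason emit_lea_binary exists: LEA evaluates an address expression without touching EFLAGS, so an SLJIT_ADD that requests no flags can be lowered as pure data flow:

   dst = src1 + src2  ->  lea dst, [src1 + src2]
   dst = src1 + imm   ->  lea dst, [src1 + imm]

The dst == src cases below are rejected because the plain ADD forms encode them at least as compactly.)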
*/ + if (dst == src1 && dstw == src1w) + return SLJIT_ERR_UNSUPPORTED; + if (dst == src2 && dstw == src2w) + return SLJIT_ERR_UNSUPPORTED; + + dst_r = (dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + + if (src1 <= TMP_REGISTER) { + if (src2 <= TMP_REGISTER || src2 == TMP_REGISTER) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM2(src1, src2), 0); + FAIL_IF(!inst); + *inst = LEA_r_m; + done = 1; + } +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if ((src2 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src2w))) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), (sljit_si)src2w); +#else + if (src2 & SLJIT_IMM) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src1), src2w); +#endif + FAIL_IF(!inst); + *inst = LEA_r_m; + done = 1; + } + } + else if (src2 <= TMP_REGISTER) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if ((src1 & SLJIT_IMM) && (compiler->mode32 || IS_HALFWORD(src1w))) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), (sljit_si)src1w); +#else + if (src1 & SLJIT_IMM) { + inst = emit_x86_instruction(compiler, 1, dst_r, 0, SLJIT_MEM1(src2), src1w); +#endif + FAIL_IF(!inst); + *inst = LEA_r_m; + done = 1; + } + } + + if (done) { + if (dst_r == TMP_REGISTER) + return emit_mov(compiler, dst, dstw, TMP_REGISTER, 0); + return SLJIT_SUCCESS; + } + return SLJIT_ERR_UNSUPPORTED; +} + +static sljit_si emit_cmp_binary(struct sljit_compiler *compiler, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_ub* inst; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { +#else + if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) { +#endif + BINARY_EAX_IMM(CMP_EAX_i32, src2w); + return SLJIT_SUCCESS; + } + + if (src1 <= TMP_REGISTER) { + if (src2 & SLJIT_IMM) { + BINARY_IMM(CMP, CMP_rm_r, src2w, src1, 0); + } + else { + inst = emit_x86_instruction(compiler, 1, src1, 0, src2, src2w); + FAIL_IF(!inst); + *inst = CMP_r_rm; + } + return SLJIT_SUCCESS; + } + + if (src2 <= TMP_REGISTER && !(src1 & SLJIT_IMM)) { + inst = emit_x86_instruction(compiler, 1, src2, 0, src1, src1w); + FAIL_IF(!inst); + *inst = CMP_rm_r; + return SLJIT_SUCCESS; + } + + if (src2 & SLJIT_IMM) { + if (src1 & SLJIT_IMM) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + src1 = TMP_REGISTER; + src1w = 0; + } + BINARY_IMM(CMP, CMP_rm_r, src2w, src1, src1w); + } + else { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + FAIL_IF(!inst); + *inst = CMP_r_rm; + } + return SLJIT_SUCCESS; +} + +static sljit_si emit_test_binary(struct sljit_compiler *compiler, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_ub* inst; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128) && (compiler->mode32 || IS_HALFWORD(src2w))) { +#else + if (src1 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src2w > 127 || src2w < -128)) { +#endif + BINARY_EAX_IMM(TEST_EAX_i32, src2w); + return SLJIT_SUCCESS; + } + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (src2 == SLJIT_SCRATCH_REG1 && (src2 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) { +#else + if (src2 == SLJIT_SCRATCH_REG1 && (src1 & SLJIT_IMM) && 
(src1w > 127 || src1w < -128)) { +#endif + BINARY_EAX_IMM(TEST_EAX_i32, src1w); + return SLJIT_SUCCESS; + } + + if (src1 <= TMP_REGISTER) { + if (src2 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (IS_HALFWORD(src2w) || compiler->mode32) { + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, 0); + FAIL_IF(!inst); + *inst = GROUP_F7; + } + else { + FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w)); + inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, src1, 0); + FAIL_IF(!inst); + *inst = TEST_rm_r; + } +#else + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, src1, 0); + FAIL_IF(!inst); + *inst = GROUP_F7; +#endif + } + else { + inst = emit_x86_instruction(compiler, 1, src1, 0, src2, src2w); + FAIL_IF(!inst); + *inst = TEST_rm_r; + } + return SLJIT_SUCCESS; + } + + if (src2 <= TMP_REGISTER) { + if (src1 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (IS_HALFWORD(src1w) || compiler->mode32) { + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src1w, src2, 0); + FAIL_IF(!inst); + *inst = GROUP_F7; + } + else { + FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src1w)); + inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, src2, 0); + FAIL_IF(!inst); + *inst = TEST_rm_r; + } +#else + inst = emit_x86_instruction(compiler, 1, src1, src1w, src2, 0); + FAIL_IF(!inst); + *inst = GROUP_F7; +#endif + } + else { + inst = emit_x86_instruction(compiler, 1, src2, 0, src1, src1w); + FAIL_IF(!inst); + *inst = TEST_rm_r; + } + return SLJIT_SUCCESS; + } + + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + if (src2 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (IS_HALFWORD(src2w) || compiler->mode32) { + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst = GROUP_F7; + } + else { + FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w)); + inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst = TEST_rm_r; + } +#else + inst = emit_x86_instruction(compiler, 1, SLJIT_IMM, src2w, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst = GROUP_F7; +#endif + } + else { + inst = emit_x86_instruction(compiler, 1, TMP_REGISTER, 0, src2, src2w); + FAIL_IF(!inst); + *inst = TEST_rm_r; + } + return SLJIT_SUCCESS; +} + +static sljit_si emit_shift(struct sljit_compiler *compiler, + sljit_ub mode, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_ub* inst; + + if ((src2 & SLJIT_IMM) || (src2 == SLJIT_PREF_SHIFT_REG)) { + if (dst == src1 && dstw == src1w) { + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, dst, dstw); + FAIL_IF(!inst); + *inst |= mode; + return SLJIT_SUCCESS; + } + if (dst == SLJIT_UNUSED) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst |= mode; + return SLJIT_SUCCESS; + } + if (dst == SLJIT_PREF_SHIFT_REG && src2 == SLJIT_PREF_SHIFT_REG) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst |= mode; + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + return SLJIT_SUCCESS; + } + if (dst <= TMP_REGISTER) { + EMIT_MOV(compiler, dst, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, dst, 0); + FAIL_IF(!inst); + *inst |= mode; + 
return SLJIT_SUCCESS; + } + + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, src2, src2w, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst |= mode; + EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + return SLJIT_SUCCESS; + } + + if (dst == SLJIT_PREF_SHIFT_REG) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst |= mode; + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + } + else if (dst <= TMP_REGISTER && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) { + if (src1 != dst) + EMIT_MOV(compiler, dst, 0, src1, src1w); + EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_PREF_SHIFT_REG, 0); + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, dst, 0); + FAIL_IF(!inst); + *inst |= mode; + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + } + else { + /* This case is really difficult, since ecx itself may be used for + addressing, and we must ensure it works even in that case. */ + EMIT_MOV(compiler, TMP_REGISTER, 0, src1, src1w); +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0); +#else + /* [esp+0] contains the flags. */ + EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_LOCALS_REG), sizeof(sljit_sw), SLJIT_PREF_SHIFT_REG, 0); +#endif + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w); + inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REGISTER, 0); + FAIL_IF(!inst); + *inst |= mode; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0); +#else + EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_LOCALS_REG), sizeof(sljit_sw)); +#endif + EMIT_MOV(compiler, dst, dstw, TMP_REGISTER, 0); + } + + return SLJIT_SUCCESS; +} + +static sljit_si emit_shift_with_flags(struct sljit_compiler *compiler, + sljit_ub mode, sljit_si set_flags, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + /* The CPU does not set flags if the shift count is 0.
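For example, with cl == 0 a 'shl eax, cl' leaves every arithmetic flag exactly as it was, so a conditional branch after the shift would test stale values. The wrapper below therefore compensates when flags are requested: an immediate count can be decided statically (only the masked count, 0x1f or 0x3f, matters), and an immediate count of zero degenerates into a move plus an 'or dst, 0' purely to set the flags; a variable count is bracketed with an explicit 'cmp ..., 0' so that the zero and sign flags always describe the shifted value.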
*/ + if (src2 & SLJIT_IMM) { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if ((src2w & 0x3f) != 0 || (compiler->mode32 && (src2w & 0x1f) != 0)) + return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w); +#else + if ((src2w & 0x1f) != 0) + return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w); +#endif + if (!set_flags) + return emit_mov(compiler, dst, dstw, src1, src1w); + /* OR dst, src, 0 */ + return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32, + dst, dstw, src1, src1w, SLJIT_IMM, 0); + } + + if (!set_flags) + return emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w); + + if (!(dst <= TMP_REGISTER)) + FAIL_IF(emit_cmp_binary(compiler, src1, src1w, SLJIT_IMM, 0)); + + FAIL_IF(emit_shift(compiler,mode, dst, dstw, src1, src1w, src2, src2w)); + + if (dst <= TMP_REGISTER) + return emit_cmp_binary(compiler, dst, dstw, SLJIT_IMM, 0); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + CHECK_ERROR(); + check_sljit_emit_op2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + ADJUST_LOCAL_OFFSET(dst, dstw); + ADJUST_LOCAL_OFFSET(src1, src1w); + ADJUST_LOCAL_OFFSET(src2, src2w); + + CHECK_EXTRA_REGS(dst, dstw, (void)0); + CHECK_EXTRA_REGS(src1, src1w, (void)0); + CHECK_EXTRA_REGS(src2, src2w, (void)0); +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = op & SLJIT_INT_OP; +#endif + + if (GET_OPCODE(op) >= SLJIT_MUL) { + if (SLJIT_UNLIKELY(GET_FLAGS(op))) + compiler->flags_saved = 0; + else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) + FAIL_IF(emit_save_flags(compiler)); + } + + switch (GET_OPCODE(op)) { + case SLJIT_ADD: + if (!GET_FLAGS(op)) { + if (emit_lea_binary(compiler, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED) + return compiler->error; + } + else + compiler->flags_saved = 0; + if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) + FAIL_IF(emit_save_flags(compiler)); + return emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32, + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_ADDC: + if (SLJIT_UNLIKELY(compiler->flags_saved)) /* C flag must be restored. */ + FAIL_IF(emit_restore_flags(compiler, 1)); + else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS)) + FAIL_IF(emit_save_flags(compiler)); + if (SLJIT_UNLIKELY(GET_FLAGS(op))) + compiler->flags_saved = 0; + return emit_cum_binary(compiler, ADC_r_rm, ADC_rm_r, ADC, ADC_EAX_i32, + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_SUB: + if (!GET_FLAGS(op)) { + if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED) + return compiler->error; + } + else + compiler->flags_saved = 0; + if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved) + FAIL_IF(emit_save_flags(compiler)); + if (dst == SLJIT_UNUSED) + return emit_cmp_binary(compiler, src1, src1w, src2, src2w); + return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32, + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_SUBC: + if (SLJIT_UNLIKELY(compiler->flags_saved)) /* C flag must be restored. 
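The carry kept alive here is what makes multi-word arithmetic work: an SLJIT_ADD produces the carry and a following SLJIT_ADDC consumes it (likewise SUB/SUBC for borrow). A minimal sketch of the pattern at the C level, assuming the GCC/Clang __builtin_add_overflow builtin:

   #include <stdint.h>

   static void add_2x64(uint64_t a[2], const uint64_t b[2])
   {
       unsigned carry = __builtin_add_overflow(a[0], b[0], &a[0]);   // like ADD: sets C
       a[1] = a[1] + b[1] + carry;                                   // like ADC: consumes C
   }

Nothing emitted between the two halves may clobber the carry, hence the flag restore/save bookkeeping in these cases.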
*/ + FAIL_IF(emit_restore_flags(compiler, 1)); + else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS)) + FAIL_IF(emit_save_flags(compiler)); + if (SLJIT_UNLIKELY(GET_FLAGS(op))) + compiler->flags_saved = 0; + return emit_non_cum_binary(compiler, SBB_r_rm, SBB_rm_r, SBB, SBB_EAX_i32, + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_MUL: + return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w); + case SLJIT_AND: + if (dst == SLJIT_UNUSED) + return emit_test_binary(compiler, src1, src1w, src2, src2w); + return emit_cum_binary(compiler, AND_r_rm, AND_rm_r, AND, AND_EAX_i32, + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_OR: + return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32, + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_XOR: + return emit_cum_binary(compiler, XOR_r_rm, XOR_rm_r, XOR, XOR_EAX_i32, + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_SHL: + return emit_shift_with_flags(compiler, SHL, GET_FLAGS(op), + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_LSHR: + return emit_shift_with_flags(compiler, SHR, GET_FLAGS(op), + dst, dstw, src1, src1w, src2, src2w); + case SLJIT_ASHR: + return emit_shift_with_flags(compiler, SAR, GET_FLAGS(op), + dst, dstw, src1, src1w, src2, src2w); + } + + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_register_index(sljit_si reg) +{ + check_sljit_get_register_index(reg); +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + if (reg == SLJIT_TEMPORARY_EREG1 || reg == SLJIT_TEMPORARY_EREG2 + || reg == SLJIT_SAVED_EREG1 || reg == SLJIT_SAVED_EREG2) + return -1; +#endif + return reg_map[reg]; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_custom(struct sljit_compiler *compiler, + void *instruction, sljit_si size) +{ + sljit_ub *inst; + + CHECK_ERROR(); + check_sljit_emit_op_custom(compiler, instruction, size); + SLJIT_ASSERT(size > 0 && size < 16); + + inst = (sljit_ub*)ensure_buf(compiler, 1 + size); + FAIL_IF(!inst); + INC_SIZE(size); + SLJIT_MEMMOVE(inst, instruction, size); + return SLJIT_SUCCESS; +} + +/* --------------------------------------------------------------------- */ +/* Floating point operators */ +/* --------------------------------------------------------------------- */ + +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + +/* Alignment + 2 * 16 bytes. */ +static sljit_si sse2_data[3 + (4 + 4) * 2]; +static sljit_si *sse2_buffer; + +static void init_compiler(void) +{ + sse2_buffer = (sljit_si*)(((sljit_uw)sse2_data + 15) & ~0xf); + /* Single precision constants. */ + sse2_buffer[0] = 0x80000000; + sse2_buffer[4] = 0x7fffffff; + /* Double precision constants. */ + sse2_buffer[8] = 0; + sse2_buffer[9] = 0x80000000; + sse2_buffer[12] = 0xffffffff; + sse2_buffer[13] = 0x7fffffff; +} + +#endif + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_is_fpu_available(void) +{ +#if (defined SLJIT_SSE2 && SLJIT_SSE2) +#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2) + if (cpu_has_sse2 == -1) + get_cpu_features(); + return cpu_has_sse2; +#else /* SLJIT_DETECT_SSE2 */ + return 1; +#endif /* SLJIT_DETECT_SSE2 */ +#else /* SLJIT_SSE2 */ + return 0; +#endif +} + +#if (defined SLJIT_SSE2 && SLJIT_SSE2) + +static sljit_si emit_sse2(struct sljit_compiler *compiler, sljit_ub opcode, + sljit_si single, sljit_si xmm1, sljit_si xmm2, sljit_sw xmm2w) +{ + sljit_ub *inst; + + inst = emit_x86_instruction(compiler, 2 | (single ? 
EX86_PREF_F3 : EX86_PREF_F2) | EX86_SSE2, xmm1, 0, xmm2, xmm2w); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = opcode; + return SLJIT_SUCCESS; +} + +static sljit_si emit_sse2_logic(struct sljit_compiler *compiler, sljit_ub opcode, + sljit_si pref66, sljit_si xmm1, sljit_si xmm2, sljit_sw xmm2w) +{ + sljit_ub *inst; + + inst = emit_x86_instruction(compiler, 2 | (pref66 ? EX86_PREF_66 : 0) | EX86_SSE2, xmm1, 0, xmm2, xmm2w); + FAIL_IF(!inst); + *inst++ = GROUP_0F; + *inst = opcode; + return SLJIT_SUCCESS; +} + +static SLJIT_INLINE sljit_si emit_sse2_load(struct sljit_compiler *compiler, + sljit_si single, sljit_si dst, sljit_si src, sljit_sw srcw) +{ + return emit_sse2(compiler, MOVSD_x_xm, single, dst, src, srcw); +} + +static SLJIT_INLINE sljit_si emit_sse2_store(struct sljit_compiler *compiler, + sljit_si single, sljit_si dst, sljit_sw dstw, sljit_si src) +{ + return emit_sse2(compiler, MOVSD_xm_x, single, src, dst, dstw); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + sljit_si dst_r; + + CHECK_ERROR(); + check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 1; +#endif + + if (GET_OPCODE(op) == SLJIT_CMPD) { + compiler->flags_saved = 0; + if (dst <= SLJIT_FLOAT_REG6) + dst_r = dst; + else { + dst_r = TMP_FREG; + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, dst, dstw)); + } + return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_SINGLE_OP), dst_r, src, srcw); + } + + if (op == SLJIT_MOVD) { + if (dst <= SLJIT_FLOAT_REG6) + return emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst, src, srcw); + if (src <= SLJIT_FLOAT_REG6) + return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, src); + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src, srcw)); + return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG); + } + + if (dst >= SLJIT_FLOAT_REG1 && dst <= SLJIT_FLOAT_REG6) { + dst_r = dst; + if (dst != src) + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw)); + } + else { + dst_r = TMP_FREG; + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src, srcw)); + } + + switch (GET_OPCODE(op)) { + case SLJIT_NEGD: + FAIL_IF(emit_sse2_logic(compiler, XORPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? sse2_buffer : sse2_buffer + 8))); + break; + + case SLJIT_ABSD: + FAIL_IF(emit_sse2_logic(compiler, ANDPD_x_xm, 1, dst_r, SLJIT_MEM0(), (sljit_sw)(op & SLJIT_SINGLE_OP ? sse2_buffer + 4 : sse2_buffer + 12))); + break; + } + + if (dst_r == TMP_FREG) + return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG); + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + sljit_si dst_r; + + CHECK_ERROR(); + check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 1; +#endif + + if (dst <= SLJIT_FLOAT_REG6) { + dst_r = dst; + if (dst == src1) + ; /* Do nothing here. */ + else if (dst == src2 && (op == SLJIT_ADDD || op == SLJIT_MULD)) { + /* Swap arguments. 
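ADDD and MULD are commutative, so when dst aliases src2 we can simply use src1 as the second operand and avoid a temporary load.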
*/ + src2 = src1; + src2w = src1w; + } + else if (dst != src2) + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, dst_r, src1, src1w)); + else { + dst_r = TMP_FREG; + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w)); + } + } + else { + dst_r = TMP_FREG; + FAIL_IF(emit_sse2_load(compiler, op & SLJIT_SINGLE_OP, TMP_FREG, src1, src1w)); + } + + switch (GET_OPCODE(op)) { + case SLJIT_ADDD: + FAIL_IF(emit_sse2(compiler, ADDSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + break; + + case SLJIT_SUBD: + FAIL_IF(emit_sse2(compiler, SUBSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + break; + + case SLJIT_MULD: + FAIL_IF(emit_sse2(compiler, MULSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + break; + + case SLJIT_DIVD: + FAIL_IF(emit_sse2(compiler, DIVSD_x_xm, op & SLJIT_SINGLE_OP, dst_r, src2, src2w)); + break; + } + + if (dst_r == TMP_FREG) + return emit_sse2_store(compiler, op & SLJIT_SINGLE_OP, dst, dstw, TMP_FREG); + return SLJIT_SUCCESS; +} + +#else + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop1(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw) +{ + CHECK_ERROR(); + /* Should cause an assertion failure. */ + check_sljit_emit_fop1(compiler, op, dst, dstw, src, srcw); + compiler->error = SLJIT_ERR_UNSUPPORTED; + return SLJIT_ERR_UNSUPPORTED; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_fop2(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src1, sljit_sw src1w, + sljit_si src2, sljit_sw src2w) +{ + CHECK_ERROR(); + /* Should cause an assertion failure. */ + check_sljit_emit_fop2(compiler, op, dst, dstw, src1, src1w, src2, src2w); + compiler->error = SLJIT_ERR_UNSUPPORTED; + return SLJIT_ERR_UNSUPPORTED; +} + +#endif + +/* --------------------------------------------------------------------- */ +/* Conditional instructions */ +/* --------------------------------------------------------------------- */ + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler) +{ + sljit_ub *inst; + struct sljit_label *label; + + CHECK_ERROR_PTR(); + check_sljit_emit_label(compiler); + + /* We should restore the flags before the label, + since other taken jumps have their own flags as well. */ + if (SLJIT_UNLIKELY(compiler->flags_saved)) + PTR_FAIL_IF(emit_restore_flags(compiler, 0)); + + if (compiler->last_label && compiler->last_label->size == compiler->size) + return compiler->last_label; + + label = (struct sljit_label*)ensure_abuf(compiler, sizeof(struct sljit_label)); + PTR_FAIL_IF(!label); + set_label(label, compiler); + + inst = (sljit_ub*)ensure_buf(compiler, 2); + PTR_FAIL_IF(!inst); + + *inst++ = 0; + *inst++ = 0; + + return label; +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_si type) +{ + sljit_ub *inst; + struct sljit_jump *jump; + + CHECK_ERROR_PTR(); + check_sljit_emit_jump(compiler, type); + + if (SLJIT_UNLIKELY(compiler->flags_saved)) { + if ((type & 0xff) <= SLJIT_JUMP) + PTR_FAIL_IF(emit_restore_flags(compiler, 0)); + compiler->flags_saved = 0; + } + + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + PTR_FAIL_IF_NULL(jump); + set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP); + type &= 0xff; + + if (type >= SLJIT_CALL1) + PTR_FAIL_IF(call_with_args(compiler, type)); + + /* Worst case size. */ +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + compiler->size += (type >= SLJIT_JUMP) ?
5 : 6; +#else + compiler->size += (type >= SLJIT_JUMP) ? (10 + 3) : (2 + 10 + 3); +#endif + + inst = (sljit_ub*)ensure_buf(compiler, 2); + PTR_FAIL_IF_NULL(inst); + + *inst++ = 0; + *inst++ = type + 4; + return jump; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_ijump(struct sljit_compiler *compiler, sljit_si type, sljit_si src, sljit_sw srcw) +{ + sljit_ub *inst; + struct sljit_jump *jump; + + CHECK_ERROR(); + check_sljit_emit_ijump(compiler, type, src, srcw); + ADJUST_LOCAL_OFFSET(src, srcw); + + CHECK_EXTRA_REGS(src, srcw, (void)0); + + if (SLJIT_UNLIKELY(compiler->flags_saved)) { + if (type <= SLJIT_JUMP) + FAIL_IF(emit_restore_flags(compiler, 0)); + compiler->flags_saved = 0; + } + + if (type >= SLJIT_CALL1) { +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) +#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL) + if (src == SLJIT_SCRATCH_REG3) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src, 0); + src = TMP_REGISTER; + } + if (src == SLJIT_MEM1(SLJIT_LOCALS_REG) && type >= SLJIT_CALL3) + srcw += sizeof(sljit_sw); +#endif +#endif +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && defined(_WIN64) + if (src == SLJIT_SCRATCH_REG3) { + EMIT_MOV(compiler, TMP_REGISTER, 0, src, 0); + src = TMP_REGISTER; + } +#endif + FAIL_IF(call_with_args(compiler, type)); + } + + if (src == SLJIT_IMM) { + jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump)); + FAIL_IF_NULL(jump); + set_jump(jump, compiler, JUMP_ADDR); + jump->u.target = srcw; + + /* Worst case size. */ +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + compiler->size += 5; +#else + compiler->size += 10 + 3; +#endif + + inst = (sljit_ub*)ensure_buf(compiler, 2); + FAIL_IF_NULL(inst); + + *inst++ = 0; + *inst++ = type + 4; + } + else { +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + /* REX_W is not necessary (src is not immediate). */ + compiler->mode32 = 1; +#endif + inst = emit_x86_instruction(compiler, 1, 0, 0, src, srcw); + FAIL_IF(!inst); + *inst++ = GROUP_FF; + *inst |= (type >= SLJIT_FAST_CALL) ? CALL_rm : JMP_rm; + } + return SLJIT_SUCCESS; +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_si op, + sljit_si dst, sljit_sw dstw, + sljit_si src, sljit_sw srcw, + sljit_si type) +{ + sljit_ub *inst; + sljit_ub cond_set = 0; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + sljit_si reg; +#else + /* CHECK_EXTRA_REGS might overwrite these values. */ + sljit_si dst_save = dst; + sljit_sw dstw_save = dstw; +#endif + + CHECK_ERROR(); + check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type); + + if (dst == SLJIT_UNUSED) + return SLJIT_SUCCESS; + + ADJUST_LOCAL_OFFSET(dst, dstw); + CHECK_EXTRA_REGS(dst, dstw, (void)0); + if (SLJIT_UNLIKELY(compiler->flags_saved)) + FAIL_IF(emit_restore_flags(compiler, op & SLJIT_KEEP_FLAGS)); + + /* setcc = jcc + 0x10. */ + cond_set = get_jump_code(type) + 0x10; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + reg = (op == SLJIT_MOV && dst <= TMP_REGISTER) ? dst : TMP_REGISTER; + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 4 + 4); + FAIL_IF(!inst); + INC_SIZE(4 + 4); + /* Set low register to conditional flag. */ + *inst++ = (reg_map[reg] <= 7) ? REX : REX_B; + *inst++ = GROUP_0F; + *inst++ = cond_set; + *inst++ = MOD_REG | reg_lmap[reg]; + *inst++ = REX_W | (reg_map[reg] <= 7 ?
0 : (REX_B | REX_R)); + *inst++ = GROUP_0F; + *inst++ = MOVZX_r_rm8; + *inst = MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg]; + + if (reg != TMP_REGISTER) + return SLJIT_SUCCESS; + + if (GET_OPCODE(op) < SLJIT_ADD) { + compiler->mode32 = GET_OPCODE(op) != SLJIT_MOV; + return emit_mov(compiler, dst, dstw, TMP_REGISTER, 0); + } +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->skip_checks = 1; +#endif + return sljit_emit_op2(compiler, op, dst, dstw, dst, dstw, TMP_REGISTER, 0); +#else /* SLJIT_CONFIG_X86_64 */ + if (GET_OPCODE(op) < SLJIT_ADD && dst <= TMP_REGISTER) { + if (reg_map[dst] <= 4) { + /* Low byte is accessible. */ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 3 + 3); + FAIL_IF(!inst); + INC_SIZE(3 + 3); + /* Set low byte to conditional flag. */ + *inst++ = GROUP_0F; + *inst++ = cond_set; + *inst++ = MOD_REG | reg_map[dst]; + + *inst++ = GROUP_0F; + *inst++ = MOVZX_r_rm8; + *inst = MOD_REG | (reg_map[dst] << 3) | reg_map[dst]; + return SLJIT_SUCCESS; + } + + /* Low byte is not accessible. */ + if (cpu_has_cmov == -1) + get_cpu_features(); + + if (cpu_has_cmov) { + EMIT_MOV(compiler, TMP_REGISTER, 0, SLJIT_IMM, 1); + /* a xor reg, reg operation would overwrite the flags. */ + EMIT_MOV(compiler, dst, 0, SLJIT_IMM, 0); + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 3); + FAIL_IF(!inst); + INC_SIZE(3); + + *inst++ = GROUP_0F; + /* cmovcc = setcc - 0x50. */ + *inst++ = cond_set - 0x50; + *inst++ = MOD_REG | (reg_map[dst] << 3) | reg_map[TMP_REGISTER]; + return SLJIT_SUCCESS; + } + + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1); + FAIL_IF(!inst); + INC_SIZE(1 + 3 + 3 + 1); + *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + /* Set al to conditional flag. */ + *inst++ = GROUP_0F; + *inst++ = cond_set; + *inst++ = MOD_REG | 0 /* eax */; + + *inst++ = GROUP_0F; + *inst++ = MOVZX_r_rm8; + *inst++ = MOD_REG | (reg_map[dst] << 3) | 0 /* eax */; + *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + return SLJIT_SUCCESS; + } + + /* Set TMP_REGISTER to the bit. */ + inst = (sljit_ub*)ensure_buf(compiler, 1 + 1 + 3 + 3 + 1); + FAIL_IF(!inst); + INC_SIZE(1 + 3 + 3 + 1); + *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + /* Set al to conditional flag. 
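SETcc can only write a byte-addressable register, so eax is borrowed via XCHG here and swapped back after the flag byte has been widened with MOVZX.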
*/ + *inst++ = GROUP_0F; + *inst++ = cond_set; + *inst++ = MOD_REG | 0 /* eax */; + + *inst++ = GROUP_0F; + *inst++ = MOVZX_r_rm8; + *inst++ = MOD_REG | (0 << 3) /* eax */ | 0 /* eax */; + + *inst++ = XCHG_EAX_r + reg_map[TMP_REGISTER]; + + if (GET_OPCODE(op) < SLJIT_ADD) + return emit_mov(compiler, dst, dstw, TMP_REGISTER, 0); + +#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) || (defined SLJIT_DEBUG && SLJIT_DEBUG) + compiler->skip_checks = 1; +#endif + return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REGISTER, 0); +#endif /* SLJIT_CONFIG_X86_64 */ +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_si sljit_get_local_base(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw offset) +{ + CHECK_ERROR(); + check_sljit_get_local_base(compiler, dst, dstw, offset); + ADJUST_LOCAL_OFFSET(dst, dstw); + + CHECK_EXTRA_REGS(dst, dstw, (void)0); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 0; +#endif + + ADJUST_LOCAL_OFFSET(SLJIT_MEM1(SLJIT_LOCALS_REG), offset); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (NOT_HALFWORD(offset)) { + FAIL_IF(emit_load_imm64(compiler, TMP_REGISTER, offset)); +#if (defined SLJIT_DEBUG && SLJIT_DEBUG) + SLJIT_ASSERT(emit_lea_binary(compiler, dst, dstw, SLJIT_LOCALS_REG, 0, TMP_REGISTER, 0) != SLJIT_ERR_UNSUPPORTED); + return compiler->error; +#else + return emit_lea_binary(compiler, dst, dstw, SLJIT_LOCALS_REG, 0, TMP_REGISTER, 0); +#endif + } +#endif + + if (offset != 0) + return emit_lea_binary(compiler, dst, dstw, SLJIT_LOCALS_REG, 0, SLJIT_IMM, offset); + return emit_mov(compiler, dst, dstw, SLJIT_LOCALS_REG, 0); +} + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_si dst, sljit_sw dstw, sljit_sw init_value) +{ + sljit_ub *inst; + struct sljit_const *const_; +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + sljit_si reg; +#endif + + CHECK_ERROR_PTR(); + check_sljit_emit_const(compiler, dst, dstw, init_value); + ADJUST_LOCAL_OFFSET(dst, dstw); + + CHECK_EXTRA_REGS(dst, dstw, (void)0); + + const_ = (struct sljit_const*)ensure_abuf(compiler, sizeof(struct sljit_const)); + PTR_FAIL_IF(!const_); + set_const(const_, compiler); + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + compiler->mode32 = 0; + reg = (dst <= TMP_REGISTER) ? 
dst : TMP_REGISTER; + + if (emit_load_imm64(compiler, reg, init_value)) + return NULL; +#else + if (dst == SLJIT_UNUSED) + dst = TMP_REGISTER; + + if (emit_mov(compiler, dst, dstw, SLJIT_IMM, init_value)) + return NULL; +#endif + + inst = (sljit_ub*)ensure_buf(compiler, 2); + PTR_FAIL_IF(!inst); + + *inst++ = 0; + *inst++ = 1; + +#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) + if (reg == TMP_REGISTER && dst != SLJIT_UNUSED) + if (emit_mov(compiler, dst, dstw, TMP_REGISTER, 0)) + return NULL; +#endif + + return const_; +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_addr) +{ +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + *(sljit_sw*)addr = new_addr - (addr + 4); +#else + *(sljit_uw*)addr = new_addr; +#endif +} + +SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant) +{ + *(sljit_sw*)addr = new_constant; +} Property changes on: sys/contrib/sljit/sljitNativeX86_common.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/contrib/sljit/sljitUtils.c =================================================================== --- sys/contrib/sljit/sljitUtils.c (revision 0) +++ sys/contrib/sljit/sljitUtils.c (working copy) @@ -0,0 +1,332 @@ +/* + * Stack-less Just-In-Time compiler + * + * Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL THE COPYRIGHT HOLDER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* ------------------------------------------------------------------------ */ +/* Locks */ +/* ------------------------------------------------------------------------ */ + +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) || (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) + +#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) + +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) + +static SLJIT_INLINE void allocator_grab_lock(void) +{ + /* Always successful. 
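With SLJIT_SINGLE_THREADED there is nothing to synchronize, so the lock primitives compile to no-ops.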
*/ +} + +static SLJIT_INLINE void allocator_release_lock(void) +{ + /* Always successful. */ +} + +#endif /* SLJIT_EXECUTABLE_ALLOCATOR */ + +#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) + +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void) +{ + /* Always successful. */ +} + +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) +{ + /* Always successful. */ +} + +#endif /* SLJIT_UTIL_GLOBAL_LOCK */ + +#elif defined(_WIN32) /* SLJIT_SINGLE_THREADED */ + +#include "windows.h" + +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) + +static HANDLE allocator_mutex = 0; + +static SLJIT_INLINE void allocator_grab_lock(void) +{ + /* No idea what to do if an error occurs. Static mutexes should never fail... */ + if (!allocator_mutex) + allocator_mutex = CreateMutex(NULL, TRUE, NULL); + else + WaitForSingleObject(allocator_mutex, INFINITE); +} + +static SLJIT_INLINE void allocator_release_lock(void) +{ + ReleaseMutex(allocator_mutex); +} + +#endif /* SLJIT_EXECUTABLE_ALLOCATOR */ + +#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) + +static HANDLE global_mutex = 0; + +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void) +{ + /* No idea what to do if an error occurs. Static mutexes should never fail... */ + if (!global_mutex) + global_mutex = CreateMutex(NULL, TRUE, NULL); + else + WaitForSingleObject(global_mutex, INFINITE); +} + +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) +{ + ReleaseMutex(global_mutex); +} + +#endif /* SLJIT_UTIL_GLOBAL_LOCK */ + +#else /* _WIN32 */ + +#if (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) + +#include <pthread.h> + +static pthread_mutex_t allocator_mutex = PTHREAD_MUTEX_INITIALIZER; + +static SLJIT_INLINE void allocator_grab_lock(void) +{ + pthread_mutex_lock(&allocator_mutex); +} + +static SLJIT_INLINE void allocator_release_lock(void) +{ + pthread_mutex_unlock(&allocator_mutex); +} + +#endif /* SLJIT_EXECUTABLE_ALLOCATOR */ + +#if (defined SLJIT_UTIL_GLOBAL_LOCK && SLJIT_UTIL_GLOBAL_LOCK) + +#include <pthread.h> + +static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER; + +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_grab_lock(void) +{ + pthread_mutex_lock(&global_mutex); +} + +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_release_lock(void) +{ + pthread_mutex_unlock(&global_mutex); +} + +#endif /* SLJIT_UTIL_GLOBAL_LOCK */ + +#endif /* _WIN32 */ + +/* ------------------------------------------------------------------------ */ +/* Stack */ +/* ------------------------------------------------------------------------ */ + +#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) || (defined SLJIT_EXECUTABLE_ALLOCATOR && SLJIT_EXECUTABLE_ALLOCATOR) + +#ifdef _WIN32 +#include "windows.h" +#else +/* Provides mmap function. */ +#include <sys/mman.h> +/* For detecting the page size. */ +#include <unistd.h> + +#ifndef MAP_ANON + +#include <fcntl.h> + +/* Some old systems do not have MAP_ANON.
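Those systems get anonymous memory by mmap()ing /dev/zero instead; the descriptor is opened lazily below, under a mutex when threading is enabled.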
*/ +static sljit_si dev_zero = -1; + +#if (defined SLJIT_SINGLE_THREADED && SLJIT_SINGLE_THREADED) + +static SLJIT_INLINE sljit_si open_dev_zero(void) +{ + dev_zero = open("/dev/zero", O_RDWR); + return dev_zero < 0; +} + +#else /* SLJIT_SINGLE_THREADED */ + +#include <pthread.h> + +static pthread_mutex_t dev_zero_mutex = PTHREAD_MUTEX_INITIALIZER; + +static SLJIT_INLINE sljit_si open_dev_zero(void) +{ + pthread_mutex_lock(&dev_zero_mutex); + dev_zero = open("/dev/zero", O_RDWR); + pthread_mutex_unlock(&dev_zero_mutex); + return dev_zero < 0; +} + +#endif /* SLJIT_SINGLE_THREADED */ + +#endif + +#endif + +#endif /* SLJIT_UTIL_STACK || SLJIT_EXECUTABLE_ALLOCATOR */ + +#if (defined SLJIT_UTIL_STACK && SLJIT_UTIL_STACK) + +/* Planning to make it even more clever in the future. */ +static sljit_sw sljit_page_align = 0; + +SLJIT_API_FUNC_ATTRIBUTE struct sljit_stack* SLJIT_CALL sljit_allocate_stack(sljit_uw limit, sljit_uw max_limit) +{ + struct sljit_stack *stack; + union { + void *ptr; + sljit_uw uw; + } base; +#ifdef _WIN32 + SYSTEM_INFO si; +#endif + + if (limit > max_limit || limit < 1) + return NULL; + +#ifdef _WIN32 + if (!sljit_page_align) { + GetSystemInfo(&si); + sljit_page_align = si.dwPageSize - 1; + } +#else + if (!sljit_page_align) { + sljit_page_align = sysconf(_SC_PAGESIZE); + /* Should never happen. */ + if (sljit_page_align < 0) + sljit_page_align = 4096; + sljit_page_align--; + } +#endif + + /* Align limit and max_limit. */ + max_limit = (max_limit + sljit_page_align) & ~sljit_page_align; + + stack = (struct sljit_stack*)SLJIT_MALLOC(sizeof(struct sljit_stack)); + if (!stack) + return NULL; + +#ifdef _WIN32 + base.ptr = VirtualAlloc(NULL, max_limit, MEM_RESERVE, PAGE_READWRITE); + if (!base.ptr) { + SLJIT_FREE(stack); + return NULL; + } + stack->base = base.uw; + stack->limit = stack->base; + stack->max_limit = stack->base + max_limit; + if (sljit_stack_resize(stack, stack->base + limit)) { + sljit_free_stack(stack); + return NULL; + } +#else +#ifdef MAP_ANON + base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); +#else + if (dev_zero < 0) { + if (open_dev_zero()) { + SLJIT_FREE(stack); + return NULL; + } + } + base.ptr = mmap(NULL, max_limit, PROT_READ | PROT_WRITE, MAP_PRIVATE, dev_zero, 0); +#endif + if (base.ptr == MAP_FAILED) { + SLJIT_FREE(stack); + return NULL; + } + stack->base = base.uw; + stack->limit = stack->base + limit; + stack->max_limit = stack->base + max_limit; +#endif + stack->top = stack->base; + return stack; +} + +#undef PAGE_ALIGN + +SLJIT_API_FUNC_ATTRIBUTE void SLJIT_CALL sljit_free_stack(struct sljit_stack* stack) +{ +#ifdef _WIN32 + VirtualFree((void*)stack->base, 0, MEM_RELEASE); +#else + munmap((void*)stack->base, stack->max_limit - stack->base); +#endif + SLJIT_FREE(stack); +} + +SLJIT_API_FUNC_ATTRIBUTE sljit_sw SLJIT_CALL sljit_stack_resize(struct sljit_stack* stack, sljit_uw new_limit) +{ + sljit_uw aligned_old_limit; + sljit_uw aligned_new_limit; + + if ((new_limit > stack->max_limit) || (new_limit < stack->base)) + return -1; +#ifdef _WIN32 + aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align; + aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align; + if (aligned_new_limit != aligned_old_limit) { + if (aligned_new_limit > aligned_old_limit) { + if (!VirtualAlloc((void*)aligned_old_limit, aligned_new_limit - aligned_old_limit, MEM_COMMIT, PAGE_READWRITE)) + return -1; + } + else { + if (!VirtualFree((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MEM_DECOMMIT)) +
return -1; + } + } + stack->limit = new_limit; + return 0; +#else + if (new_limit >= stack->limit) { + stack->limit = new_limit; + return 0; + } + aligned_new_limit = (new_limit + sljit_page_align) & ~sljit_page_align; + aligned_old_limit = (stack->limit + sljit_page_align) & ~sljit_page_align; + /* If madvise is available, we release the unnecessary space. */ +#if defined(POSIX_MADV_DONTNEED) + if (aligned_new_limit < aligned_old_limit) + posix_madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, POSIX_MADV_DONTNEED); +#elif defined(MADV_DONTNEED) + if (aligned_new_limit < aligned_old_limit) + madvise((void*)aligned_new_limit, aligned_old_limit - aligned_new_limit, MADV_DONTNEED); +#endif + stack->limit = new_limit; + return 0; +#endif +} + +#endif /* SLJIT_UTIL_STACK */ + +#endif Property changes on: sys/contrib/sljit/sljitUtils.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Added: fbsd:nokeywords ## -0,0 +1 ## +yes \ No newline at end of property Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Index: sys/i386/conf/GENERIC =================================================================== --- sys/i386/conf/GENERIC (revision 243911) +++ sys/i386/conf/GENERIC (working copy) @@ -320,6 +320,7 @@ device firmware # firmware assist module # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. device bpf # Berkeley packet filter +options BPFJIT # enable BPF just-in-time compiler # USB support options USB_DEBUG # enable debug msgs Index: sys/i386/conf/NOTES =================================================================== --- sys/i386/conf/NOTES (revision 243911) +++ sys/i386/conf/NOTES (working copy) @@ -256,10 +256,6 @@ device xboxfb options DEVICE_POLLING -# BPF_JITTER adds support for BPF just-in-time compiler. - -options BPF_JITTER - # OpenFabrics Enterprise Distribution (Infiniband). options OFED options OFED_DEBUG_INIT Index: sys/net/bpf.c =================================================================== --- sys/net/bpf.c (revision 243911) +++ sys/net/bpf.c (working copy) @@ -71,8 +71,8 @@ __FBSDID("$FreeBSD$"); #define BPF_INTERNAL #include #include -#ifdef BPF_JITTER -#include <net/bpf_jitter.h> +#ifdef BPFJIT +#include <net/bpfjit.h> #endif #include #include @@ -1724,8 +1724,8 @@ bpf_setf(struct bpf_d *d, struct bpf_program *fp, struct bpf_program32 *fp32; #endif struct bpf_insn *fcode, *old; -#ifdef BPF_JITTER - bpf_jit_filter *jfunc, *ofunc; +#ifdef BPFJIT + bpfjit_function_t jfunc, ofunc; #endif size_t size; u_int flen; @@ -1753,7 +1753,7 @@ bpf_setf(struct bpf_d *d, struct bpf_program *fp, #endif fcode = NULL; -#ifdef BPF_JITTER +#ifdef BPFJIT jfunc = ofunc = NULL; #endif need_upgrade = 0; @@ -1774,9 +1774,12 @@ bpf_setf(struct bpf_d *d, struct bpf_program *fp, free(fcode, M_BPF); return (EINVAL); } -#ifdef BPF_JITTER +#ifdef BPFJIT /* Filter is copied inside fcode and is perfectly valid.
*/ - jfunc = bpf_jitter(fcode, flen); + jfunc = bpfjit_generate_code(fcode, flen); + + if (jfunc == NULL && bootverbose) + printf("bpf_setf: failed to compile filter\n"); #endif } @@ -1796,7 +1799,7 @@ bpf_setf(struct bpf_d *d, struct bpf_program *fp, } else { old = d->bd_rfilter; d->bd_rfilter = fcode; -#ifdef BPF_JITTER +#ifdef BPFJIT ofunc = d->bd_bfilter; d->bd_bfilter = jfunc; #endif @@ -1820,9 +1823,9 @@ bpf_setf(struct bpf_d *d, struct bpf_program *fp, BPFIF_WUNLOCK(d->bd_bif); if (old != NULL) free(old, M_BPF); -#ifdef BPF_JITTER +#ifdef BPFJIT if (ofunc != NULL) - bpf_destroy_jit_filter(ofunc); + bpfjit_free_code(ofunc); #endif /* Move d to active readers list. */ @@ -2031,8 +2034,8 @@ bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktl { struct bintime bt; struct bpf_d *d; -#ifdef BPF_JITTER - bpf_jit_filter *bf; +#ifdef BPFJIT + bpfjit_function_t bf; #endif u_int slen; int gottime; @@ -2058,10 +2061,10 @@ bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktl * is inbound or outbound. In the bpf_mtap() routines, we use * the interface pointers on the mbuf to figure it out. */ -#ifdef BPF_JITTER - bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL; +#ifdef BPFJIT + bf = bpfjit_disable == 0 ? d->bd_bfilter : NULL; if (bf != NULL) - slen = (*(bf->func))(pkt, pktlen, pktlen); + slen = bf(pkt, pktlen, pktlen); else #endif slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen); @@ -2098,8 +2101,8 @@ bpf_mtap(struct bpf_if *bp, struct mbuf *m) { struct bintime bt; struct bpf_d *d; -#ifdef BPF_JITTER - bpf_jit_filter *bf; +#ifdef BPFJIT + bpfjit_function_t bf; #endif u_int pktlen, slen; int gottime; @@ -2119,11 +2122,10 @@ bpf_mtap(struct bpf_if *bp, struct mbuf *m) if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp)) continue; ++d->bd_rcount; -#ifdef BPF_JITTER - bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL; - /* XXX We cannot handle multiple mbufs. */ - if (bf != NULL && m->m_next == NULL) - slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen); +#ifdef BPFJIT + bf = bpfjit_disable == 0 ? d->bd_bfilter : NULL; + if (bf != NULL) + slen = bf((u_char *)m, pktlen, 0); else #endif slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0); @@ -2154,6 +2156,9 @@ bpf_mtap2(struct bpf_if *bp, void *data, u_int dle struct bintime bt; struct mbuf mb; struct bpf_d *d; +#ifdef BPFJIT + bpfjit_function_t bf; +#endif u_int pktlen, slen; int gottime; @@ -2182,6 +2187,12 @@ bpf_mtap2(struct bpf_if *bp, void *data, u_int dle if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp)) continue; ++d->bd_rcount; +#ifdef BPFJIT + bf = bpfjit_disable == 0 ? 
d->bd_bfilter : NULL; + if (bf != NULL) + slen = bf((u_char *)&mb, pktlen, 0); + else +#endif slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0); if (slen != 0) { BPFD_LOCK(d); @@ -2429,9 +2440,9 @@ bpf_freed(struct bpf_d *d) bpf_free(d); if (d->bd_rfilter != NULL) { free((caddr_t)d->bd_rfilter, M_BPF); -#ifdef BPF_JITTER +#ifdef BPFJIT if (d->bd_bfilter != NULL) - bpf_destroy_jit_filter(d->bd_bfilter); + bpfjit_free_code(d->bd_bfilter); #endif } if (d->bd_wfilter != NULL) Index: sys/net/bpf_filter.c =================================================================== --- sys/net/bpf_filter.c (revision 243911) +++ sys/net/bpf_filter.c (working copy) @@ -66,6 +66,7 @@ __FBSDID("$FreeBSD$"); #include #else #include +#include #endif #include #ifdef _KERNEL @@ -82,11 +83,12 @@ __FBSDID("$FreeBSD$"); } \ } -static u_int16_t m_xhalf(struct mbuf *m, bpf_u_int32 k, int *err); -static u_int32_t m_xword(struct mbuf *m, bpf_u_int32 k, int *err); +uint32_t m_xhalf(const struct mbuf *, uint32_t, int *); +uint32_t m_xword(const struct mbuf *, uint32_t, int *); +uint32_t m_xbyte(const struct mbuf *, uint32_t, int *); -static u_int32_t -m_xword(struct mbuf *m, bpf_u_int32 k, int *err) +uint32_t +m_xword(const struct mbuf *m, uint32_t k, int *err) { size_t len; u_char *cp, *np; @@ -134,8 +136,8 @@ __FBSDID("$FreeBSD$"); return (0); } -static u_int16_t -m_xhalf(struct mbuf *m, bpf_u_int32 k, int *err) +uint32_t +m_xhalf(const struct mbuf *m, uint32_t k, int *err) { size_t len; u_char *cp; @@ -163,6 +165,15 @@ __FBSDID("$FreeBSD$"); *err = 1; return (0); } + +uint32_t +m_xbyte(const struct mbuf *m, uint32_t k, int *err) +{ + + *err = 0; + MINDEX(m, k); + return (mtod(m, u_char *)[k]); +} #endif /* Index: sys/net/bpfjit.c =================================================================== --- sys/net/bpfjit.c (revision 0) +++ sys/net/bpfjit.c (working copy) @@ -0,0 +1,1755 @@ +/*- + * Copyright (c) 2011-2012 Alexander Nasonov. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#ifdef _KERNEL +#include +#include +#include +#include +#include +#include + +#include +#include + +#if defined(SLJIT_CONFIG_UNSUPPORTED) && (SLJIT_CONFIG_UNSUPPORTED != 0) +#error "sljit does not support this platform" +#endif + +static MALLOC_DEFINE(M_BPFJIT, "BPFJIT", "BPF JIT compiler"); + +#define BPFJIT_ASSERT(c) KASSERT(c, ("%s: %d", __func__, __LINE__)) +#define BPFJIT_FREE(p) free(p, M_BPFJIT) +#define BPFJIT_MALLOC(sz) malloc(sz, M_BPFJIT, M_NOWAIT) + +uint32_t m_xword(const struct mbuf *, uint32_t, int *); +uint32_t m_xhalf(const struct mbuf *, uint32_t, int *); +uint32_t m_xbyte(const struct mbuf *, uint32_t, int *); + +int bpfjit_disable = 0; +SYSCTL_NODE(_net, OID_AUTO, bpfjit, CTLFLAG_RW, 0, "BPF just-in-time compiler"); +SYSCTL_INT(_net_bpfjit, OID_AUTO, disable, CTLFLAG_RW, &bpfjit_disable, 0, + "disable BPF just-in-time compiler"); + +#else + +#include +#include +#include +#include +#include +#include +#include + +#include "sljitLir.h" +#include "bpfjit.h" + +#define BPFJIT_ASSERT(c) assert(c) +#define BPFJIT_MALLOC(sz) malloc(sz) +#define BPFJIT_FREE(p) free(p) +#endif + +#include + +#define BPFJIT_A SLJIT_SCRATCH_REG1 +#define BPFJIT_X SLJIT_TEMPORARY_EREG1 +#define BPFJIT_TMP1 SLJIT_SCRATCH_REG2 +#define BPFJIT_TMP2 SLJIT_SCRATCH_REG3 +#define BPFJIT_BUF SLJIT_SAVED_REG1 +#define BPFJIT_WIRELEN SLJIT_SAVED_REG2 +#define BPFJIT_BUFLEN SLJIT_SAVED_REG3 +#define BPFJIT_KERN_TMP SLJIT_TEMPORARY_EREG2 + +/* + * Flags for bpfjit_optimization_hints(). + */ +#define BPFJIT_INIT_X 0x10000 +#define BPFJIT_INIT_A 0x20000 + +/* + * Node of bj_jumps list. + */ +struct bpfjit_jump +{ + struct sljit_jump *bj_jump; + SLIST_ENTRY(bpfjit_jump) bj_entries; + uint32_t bj_safe_length; +}; + +/* + * Data for BPF_JMP instruction. + */ +struct bpfjit_jump_data +{ + /* + * These entries make up bj_jumps list: + * bj_jtf[0] - when coming from jt path, + * bj_jtf[1] - when coming from jf path. + */ + struct bpfjit_jump bj_jtf[2]; +}; + +/* + * Data for "read from packet" instructions. + * See also read_pkt_insn() function below. + */ +struct bpfjit_read_pkt_data +{ + /* + * If positive, emit "if (buflen < bj_check_length) return (0)". + * We assume that buflen is never equal to UINT32_MAX (otherwise, + * we need a special bool variable to emit unconditional "return (0)"). + */ + uint32_t bj_check_length; +}; + +/* + * Additional (optimization-related) data for bpf_insn. + */ +struct bpfjit_insn_data +{ + /* List of jumps to this insn. */ + SLIST_HEAD(, bpfjit_jump) bj_jumps; + + union { + struct bpfjit_jump_data bj_jdata; + struct bpfjit_read_pkt_data bj_rdata; + } bj_aux; + + bool bj_unreachable; +}; + +static uint32_t +read_width(struct bpf_insn *pc) +{ + + switch (BPF_SIZE(pc->code)) { + case BPF_W: + return (4); + case BPF_H: + return (2); + case BPF_B: + return (1); + default: + BPFJIT_ASSERT(false); + return (0); + } +} + +/* + * Get offset of M[k] on the stack. + */ +static size_t +mem_local_offset(uint32_t k, unsigned int minm) +{ + size_t moff = (k - minm) * sizeof(uint32_t); + +#ifdef _KERNEL + /* + * 4 bytes for the third argument of m_xword/m_xhalf/m_xbyte. + */ + return (sizeof(uint32_t) + moff); +#else + return (moff); +#endif +} + +/* + * Generate code for BPF_LD+BPF_B+BPF_ABS A <- P[k:1]. + */ +static int +emit_read8(struct sljit_compiler* compiler, uint32_t k) +{ + + return (sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_A, 0, + SLJIT_MEM1(BPFJIT_BUF), k)); +} + +/* + * Generate code for BPF_LD+BPF_H+BPF_ABS A <- P[k:2]. 
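+ * The two byte loads below avoid an unaligned 16-bit access and combine + * the bytes in network byte order, i.e. A = (P[k] << 8) | P[k+1].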
+ */ +static int +emit_read16(struct sljit_compiler* compiler, uint32_t k) +{ + int status; + + /* tmp1 = buf[k]; */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_TMP1, 0, + SLJIT_MEM1(BPFJIT_BUF), k); + if (status != SLJIT_SUCCESS) + return (status); + + /* A = buf[k+1]; */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_A, 0, + SLJIT_MEM1(BPFJIT_BUF), k+1); + if (status != SLJIT_SUCCESS) + return (status); + + /* tmp1 = tmp1 << 8; */ + status = sljit_emit_op2(compiler, + SLJIT_SHL, + BPFJIT_TMP1, 0, + BPFJIT_TMP1, 0, + SLJIT_IMM, 8); + if (status != SLJIT_SUCCESS) + return (status); + + /* A = A + tmp1; */ + status = sljit_emit_op2(compiler, + SLJIT_ADD, + BPFJIT_A, 0, + BPFJIT_A, 0, + BPFJIT_TMP1, 0); + return (status); +} + +/* + * Generate code for BPF_LD+BPF_W+BPF_ABS A <- P[k:4]. + */ +static int +emit_read32(struct sljit_compiler* compiler, uint32_t k) +{ + int status; + + /* tmp1 = buf[k]; */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_TMP1, 0, + SLJIT_MEM1(BPFJIT_BUF), k); + if (status != SLJIT_SUCCESS) + return (status); + + /* tmp2 = buf[k+1]; */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_TMP2, 0, + SLJIT_MEM1(BPFJIT_BUF), k+1); + if (status != SLJIT_SUCCESS) + return (status); + + /* A = buf[k+3]; */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_A, 0, + SLJIT_MEM1(BPFJIT_BUF), k+3); + if (status != SLJIT_SUCCESS) + return (status); + + /* tmp1 = tmp1 << 24; */ + status = sljit_emit_op2(compiler, + SLJIT_SHL, + BPFJIT_TMP1, 0, + BPFJIT_TMP1, 0, + SLJIT_IMM, 24); + if (status != SLJIT_SUCCESS) + return (status); + + /* A = A + tmp1; */ + status = sljit_emit_op2(compiler, + SLJIT_ADD, + BPFJIT_A, 0, + BPFJIT_A, 0, + BPFJIT_TMP1, 0); + if (status != SLJIT_SUCCESS) + return (status); + + /* tmp1 = buf[k+2]; */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_TMP1, 0, + SLJIT_MEM1(BPFJIT_BUF), k+2); + if (status != SLJIT_SUCCESS) + return (status); + + /* tmp2 = tmp2 << 16; */ + status = sljit_emit_op2(compiler, + SLJIT_SHL, + BPFJIT_TMP2, 0, + BPFJIT_TMP2, 0, + SLJIT_IMM, 16); + if (status != SLJIT_SUCCESS) + return (status); + + /* A = A + tmp2; */ + status = sljit_emit_op2(compiler, + SLJIT_ADD, + BPFJIT_A, 0, + BPFJIT_A, 0, + BPFJIT_TMP2, 0); + if (status != SLJIT_SUCCESS) + return (status); + + /* tmp1 = tmp1 << 8; */ + status = sljit_emit_op2(compiler, + SLJIT_SHL, + BPFJIT_TMP1, 0, + BPFJIT_TMP1, 0, + SLJIT_IMM, 8); + if (status != SLJIT_SUCCESS) + return (status); + + /* A = A + tmp1; */ + status = sljit_emit_op2(compiler, + SLJIT_ADD, + BPFJIT_A, 0, + BPFJIT_A, 0, + BPFJIT_TMP1, 0); + return (status); +} + +#ifdef _KERNEL +/* + * Generate m_xword/m_xhalf/m_xbyte call. + * + * pc is one of: + * BPF_LD+BPF_W+BPF_ABS A <- P[k:4] + * BPF_LD+BPF_H+BPF_ABS A <- P[k:2] + * BPF_LD+BPF_B+BPF_ABS A <- P[k:1] + * BPF_LD+BPF_W+BPF_IND A <- P[X+k:4] + * BPF_LD+BPF_H+BPF_IND A <- P[X+k:2] + * BPF_LD+BPF_B+BPF_IND A <- P[X+k:1] + * BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf) + * + * dst must be BPFJIT_A for BPF_LD instructions and BPFJIT_X + * or any of BPFJIT_TMP* registers for BPF_MSH instruction. + */ +static int +emit_xcall(struct sljit_compiler* compiler, struct bpf_insn *pc, + int dst, sljit_sw dstw, struct sljit_jump **ret0_jump, + uint32_t (*fn)(const struct mbuf *, uint32_t, int *)) +{ +#if BPFJIT_X != SLJIT_TEMPORARY_EREG1 || \ + BPFJIT_X == SLJIT_RETURN_REG +#error "Not supported assignment of registers." +#endif + int status; + + /* + * The third argument of fn is an address on stack.
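+ * m_xword/m_xhalf/m_xbyte report failure through that pointer, so a + * local slot is reserved, passed as the third argument and tested + * after the call; a non-zero value rejects the packet by returning 0.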
+ */ + const int arg3_offset = 0; + + if (BPF_CLASS(pc->code) == BPF_LDX) { + /* save A */ + status = sljit_emit_op1(compiler, + SLJIT_MOV, + BPFJIT_KERN_TMP, 0, + BPFJIT_A, 0); + if (status != SLJIT_SUCCESS) + return (status); + } + + /* + * Prepare registers for fn(buf, k, &err) call. + */ + status = sljit_emit_op1(compiler, + SLJIT_MOV, + SLJIT_SCRATCH_REG1, 0, + BPFJIT_BUF, 0); + if (status != SLJIT_SUCCESS) + return (status); + + if (BPF_CLASS(pc->code) == BPF_LD && BPF_MODE(pc->code) == BPF_IND) { + status = sljit_emit_op2(compiler, + SLJIT_ADD, + SLJIT_SCRATCH_REG2, 0, + BPFJIT_X, 0, + SLJIT_IMM, (uint32_t)pc->k); + } else { + status = sljit_emit_op1(compiler, + SLJIT_MOV, + SLJIT_SCRATCH_REG2, 0, + SLJIT_IMM, (uint32_t)pc->k); + } + + if (status != SLJIT_SUCCESS) + return (status); + + status = sljit_get_local_base(compiler, + SLJIT_SCRATCH_REG3, 0, arg3_offset); + if (status != SLJIT_SUCCESS) + return (status); + + /* fn(buf, k, &err); */ + status = sljit_emit_ijump(compiler, + SLJIT_CALL3, + SLJIT_IMM, SLJIT_FUNC_OFFSET(fn)); + + if (BPF_CLASS(pc->code) == BPF_LDX) { + + /* move return value to dst */ + BPFJIT_ASSERT(dst != SLJIT_RETURN_REG); + status = sljit_emit_op1(compiler, + SLJIT_MOV, + dst, dstw, + SLJIT_RETURN_REG, 0); + if (status != SLJIT_SUCCESS) + return (status); + + /* restore A */ + status = sljit_emit_op1(compiler, + SLJIT_MOV, + BPFJIT_A, 0, + BPFJIT_KERN_TMP, 0); + if (status != SLJIT_SUCCESS) + return (status); + + } else if (dst != SLJIT_RETURN_REG) { + status = sljit_emit_op1(compiler, + SLJIT_MOV, + dst, dstw, + SLJIT_RETURN_REG, 0); + if (status != SLJIT_SUCCESS) + return (status); + } + + /* tmp3 = *err; */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UI, + SLJIT_SCRATCH_REG3, 0, + SLJIT_MEM1(SLJIT_LOCALS_REG), arg3_offset); + if (status != SLJIT_SUCCESS) + return (status); + + /* if (tmp3 != 0) return (0); */ + *ret0_jump = sljit_emit_cmp(compiler, + SLJIT_C_NOT_EQUAL, + SLJIT_SCRATCH_REG3, 0, + SLJIT_IMM, 0); + if (*ret0_jump == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + + return (status); +} +#endif + +/* + * Generate code for + * BPF_LD+BPF_W+BPF_ABS A <- P[k:4] + * BPF_LD+BPF_H+BPF_ABS A <- P[k:2] + * BPF_LD+BPF_B+BPF_ABS A <- P[k:1] + * BPF_LD+BPF_W+BPF_IND A <- P[X+k:4] + * BPF_LD+BPF_H+BPF_IND A <- P[X+k:2] + * BPF_LD+BPF_B+BPF_IND A <- P[X+k:1] + */ +static int +emit_pkt_read(struct sljit_compiler* compiler, + struct bpf_insn *pc, struct sljit_jump *to_mchain_jump, + struct sljit_jump **ret0, size_t *ret0_size) +{ + int status = SLJIT_SUCCESS; + uint32_t width; + struct sljit_jump *jump; +#ifdef _KERNEL + struct sljit_label *label; + struct sljit_jump *over_mchain_jump; + const bool check_zero_buflen = (to_mchain_jump != NULL); +#endif + const uint32_t k = pc->k; + +#ifdef _KERNEL + if (to_mchain_jump == NULL) { + to_mchain_jump = sljit_emit_cmp(compiler, + SLJIT_C_EQUAL, + BPFJIT_BUFLEN, 0, + SLJIT_IMM, 0); + if (to_mchain_jump == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + } +#endif + + width = read_width(pc); + + if (BPF_MODE(pc->code) == BPF_IND) { + /* tmp1 = buflen - (pc->k + width); */ + status = sljit_emit_op2(compiler, + SLJIT_SUB, + BPFJIT_TMP1, 0, + BPFJIT_BUFLEN, 0, + SLJIT_IMM, k + width); + if (status != SLJIT_SUCCESS) + return (status); + + /* buf += X; */ + status = sljit_emit_op2(compiler, + SLJIT_ADD, + BPFJIT_BUF, 0, + BPFJIT_BUF, 0, + BPFJIT_X, 0); + if (status != SLJIT_SUCCESS) + return (status); + + /* if (tmp1 < X) return (0); */ + jump = sljit_emit_cmp(compiler, + SLJIT_C_LESS, + BPFJIT_TMP1, 0, + BPFJIT_X, 0); + if (jump 
== NULL) + return (SLJIT_ERR_ALLOC_FAILED); + ret0[(*ret0_size)++] = jump; + } + + switch (width) { + case 4: + status = emit_read32(compiler, k); + break; + case 2: + status = emit_read16(compiler, k); + break; + case 1: + status = emit_read8(compiler, k); + break; + } + + if (status != SLJIT_SUCCESS) + return (status); + + if (BPF_MODE(pc->code) == BPF_IND) { + /* buf -= X; */ + status = sljit_emit_op2(compiler, + SLJIT_SUB, + BPFJIT_BUF, 0, + BPFJIT_BUF, 0, + BPFJIT_X, 0); + if (status != SLJIT_SUCCESS) + return (status); + } + +#ifdef _KERNEL + over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP); + if (over_mchain_jump == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + + /* entry point to mchain handler */ + label = sljit_emit_label(compiler); + if (label == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + sljit_set_label(to_mchain_jump, label); + + if (check_zero_buflen) { + /* if (buflen != 0) return (0); */ + jump = sljit_emit_cmp(compiler, + SLJIT_C_NOT_EQUAL, + BPFJIT_BUFLEN, 0, + SLJIT_IMM, 0); + if (jump == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + ret0[(*ret0_size)++] = jump; + } + + switch (width) { + case 4: + status = emit_xcall(compiler, pc, BPFJIT_A, 0, &jump, &m_xword); + break; + case 2: + status = emit_xcall(compiler, pc, BPFJIT_A, 0, &jump, &m_xhalf); + break; + case 1: + status = emit_xcall(compiler, pc, BPFJIT_A, 0, &jump, &m_xbyte); + break; + } + + if (status != SLJIT_SUCCESS) + return (status); + + ret0[(*ret0_size)++] = jump; + + label = sljit_emit_label(compiler); + if (label == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + sljit_set_label(over_mchain_jump, label); +#endif + + return (status); +} + +/* + * Generate code for BPF_LDX+BPF_B+BPF_MSH X <- 4*(P[k:1]&0xf). + */ +static int +emit_msh(struct sljit_compiler* compiler, + struct bpf_insn *pc, struct sljit_jump *to_mchain_jump, + struct sljit_jump **ret0, size_t *ret0_size) +{ + int status; +#ifdef _KERNEL + struct sljit_label *label; + struct sljit_jump *jump, *over_mchain_jump; + const bool check_zero_buflen = (to_mchain_jump != NULL); +#endif + const uint32_t k = pc->k; + +#ifdef _KERNEL + if (to_mchain_jump == NULL) { + to_mchain_jump = sljit_emit_cmp(compiler, + SLJIT_C_EQUAL, + BPFJIT_BUFLEN, 0, + SLJIT_IMM, 0); + if (to_mchain_jump == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + } +#endif + + /* tmp1 = buf[k] */ + status = sljit_emit_op1(compiler, + SLJIT_MOV_UB, + BPFJIT_TMP1, 0, + SLJIT_MEM1(BPFJIT_BUF), k); + if (status != SLJIT_SUCCESS) + return (status); + + /* tmp1 &= 0xf */ + status = sljit_emit_op2(compiler, + SLJIT_AND, + BPFJIT_TMP1, 0, + BPFJIT_TMP1, 0, + SLJIT_IMM, 0xf); + if (status != SLJIT_SUCCESS) + return (status); + + /* X = tmp1 << 2 */ + status = sljit_emit_op2(compiler, + SLJIT_SHL, + BPFJIT_X, 0, + BPFJIT_TMP1, 0, + SLJIT_IMM, 2); + if (status != SLJIT_SUCCESS) + return (status); + +#ifdef _KERNEL + over_mchain_jump = sljit_emit_jump(compiler, SLJIT_JUMP); + if (over_mchain_jump == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + + /* entry point to mchain handler */ + label = sljit_emit_label(compiler); + if (label == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + sljit_set_label(to_mchain_jump, label); + + if (check_zero_buflen) { + /* if (buflen != 0) return (0); */ + jump = sljit_emit_cmp(compiler, + SLJIT_C_NOT_EQUAL, + BPFJIT_BUFLEN, 0, + SLJIT_IMM, 0); + if (jump == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + ret0[(*ret0_size)++] = jump; + } + + status = emit_xcall(compiler, pc, BPFJIT_TMP1, 0, &jump, &m_xbyte); + if (status != SLJIT_SUCCESS) + return (status); + ret0[(*ret0_size)++] =
jump; + + /* tmp1 &= 0xf */ + status = sljit_emit_op2(compiler, + SLJIT_AND, + BPFJIT_TMP1, 0, + BPFJIT_TMP1, 0, + SLJIT_IMM, 0xf); + if (status != SLJIT_SUCCESS) + return (status); + + /* X = tmp1 << 2 */ + status = sljit_emit_op2(compiler, + SLJIT_SHL, + BPFJIT_X, 0, + BPFJIT_TMP1, 0, + SLJIT_IMM, 2); + if (status != SLJIT_SUCCESS) + return (status); + + label = sljit_emit_label(compiler); + if (label == NULL) + return (SLJIT_ERR_ALLOC_FAILED); + sljit_set_label(over_mchain_jump, label); +#endif + + return (status); +} + +static int +emit_pow2_division(struct sljit_compiler* compiler, uint32_t k) +{ + int shift = 0; + int status = SLJIT_SUCCESS; + + while (k > 1) { + k >>= 1; + shift++; + } + + BPFJIT_ASSERT(k == 1 && shift < 32); + + if (shift != 0) { + status = sljit_emit_op2(compiler, + SLJIT_LSHR|SLJIT_INT_OP, + BPFJIT_A, 0, + BPFJIT_A, 0, + SLJIT_IMM, shift); + } + + return (status); +} + +#if !defined(BPFJIT_USE_UDIV) +static sljit_uw +divide(sljit_uw x, sljit_uw y) +{ + + return ((uint32_t)x / (uint32_t)y); +} +#endif + +/* + * Generate A = A / div. + * divt,divw are either SLJIT_IMM,pc->k or BPFJIT_X,0. + */ +static int +emit_division(struct sljit_compiler* compiler, int divt, sljit_sw divw) +{ + int status; + +#if BPFJIT_X == SLJIT_SCRATCH_REG1 || \ + BPFJIT_X == SLJIT_RETURN_REG || \ + BPFJIT_X == SLJIT_SCRATCH_REG2 || \ + BPFJIT_A == SLJIT_SCRATCH_REG2 +#error "Not supported assignment of registers." +#endif + +#if BPFJIT_A != SLJIT_SCRATCH_REG1 + status = sljit_emit_op1(compiler, + SLJIT_MOV, + SLJIT_SCRATCH_REG1, 0, + BPFJIT_A, 0); + if (status != SLJIT_SUCCESS) + return (status); +#endif + + status = sljit_emit_op1(compiler, + SLJIT_MOV, + SLJIT_SCRATCH_REG2, 0, + divt, divw); + if (status != SLJIT_SUCCESS) + return (status); + +#if defined(BPFJIT_USE_UDIV) + status = sljit_emit_op0(compiler, SLJIT_UDIV|SLJIT_INT_OP); + +#if BPFJIT_A != SLJIT_SCRATCH_REG1 + status = sljit_emit_op1(compiler, + SLJIT_MOV, + BPFJIT_A, 0, + SLJIT_SCRATCH_REG1, 0); + if (status != SLJIT_SUCCESS) + return (status); +#endif +#else + status = sljit_emit_ijump(compiler, + SLJIT_CALL2, + SLJIT_IMM, SLJIT_FUNC_OFFSET(divide)); + +#if BPFJIT_A != SLJIT_RETURN_REG + status = sljit_emit_op1(compiler, + SLJIT_MOV, + BPFJIT_A, 0, + SLJIT_RETURN_REG, 0); + if (status != SLJIT_SUCCESS) + return (status); +#endif +#endif + + return (status); +} + +/* + * Count BPF_RET instructions. + */ +static size_t +count_returns(struct bpf_insn *insns, size_t insn_count) +{ + size_t i; + size_t rv; + + rv = 0; + for (i = 0; i < insn_count; i++) { + if (BPF_CLASS(insns[i].code) == BPF_RET) + rv++; + } + + return (rv); +} + +/* + * Return true if pc is a "read from packet" instruction. + * If length is not NULL and return value is true, *length will + * be set to a safe length required to read a packet. + */ +static bool +read_pkt_insn(struct bpf_insn *pc, uint32_t *length) +{ + bool rv; + uint32_t width; + + switch (BPF_CLASS(pc->code)) { + default: + rv = false; + break; + + case BPF_LD: + rv = BPF_MODE(pc->code) == BPF_ABS || + BPF_MODE(pc->code) == BPF_IND; + if (rv) + width = read_width(pc); + break; + + case BPF_LDX: + rv = pc->code == (BPF_LDX|BPF_B|BPF_MSH); + width = 1; + break; + } + + if (rv && length != NULL) { + *length = (pc->k > UINT32_MAX - width) ? + UINT32_MAX : pc->k + width; + } + + return (rv); +} + +/* + * Set bj_check_length for all "read from packet" instructions + * in a linear block of instructions [from, to).
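+ * Only the first read in a block keeps a non-zero check: a single test + * against the block's maximal safe length (computed by optimize() below) + * covers every later read, so the remaining entries are cleared to 0.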
+ */ +static void +set_check_length(struct bpf_insn *insns, struct bpfjit_insn_data *insn_dat, + size_t from, size_t to, uint32_t length) +{ + + for (; from < to; from++) { + if (read_pkt_insn(&insns[from], NULL)) { + insn_dat[from].bj_aux.bj_rdata.bj_check_length = length; + length = 0; + } + } +} + +/* + * The function divides instructions into blocks. Destination of a jump + * instruction starts a new block. BPF_RET and BPF_JMP instructions + * terminate a block. Blocks are linear, that is, there are no jumps out + * from the middle of a block and there are no jumps into the middle of + * a block. + * If a block has one or more "read from packet" instructions, + * bj_check_length will be set to one value for the whole block and that + * value will be equal to the greatest value of safe lengths of "read from + * packet" instructions inside the block. + */ +static int +optimize(struct bpf_insn *insns, + struct bpfjit_insn_data *insn_dat, size_t insn_count) +{ + size_t i; + size_t first_read; + bool unreachable; + uint32_t jt, jf; + uint32_t length, safe_length; + struct bpfjit_jump *jmp, *jtf; + + for (i = 0; i < insn_count; i++) + SLIST_INIT(&insn_dat[i].bj_jumps); + + safe_length = 0; + unreachable = false; + first_read = SIZE_MAX; + + for (i = 0; i < insn_count; i++) { + + if (!SLIST_EMPTY(&insn_dat[i].bj_jumps)) { + unreachable = false; + + set_check_length(insns, insn_dat, + first_read, i, safe_length); + first_read = SIZE_MAX; + + safe_length = UINT32_MAX; + SLIST_FOREACH(jmp, &insn_dat[i].bj_jumps, bj_entries) { + if (jmp->bj_safe_length < safe_length) + safe_length = jmp->bj_safe_length; + } + } + + insn_dat[i].bj_unreachable = unreachable; + if (unreachable) + continue; + + if (read_pkt_insn(&insns[i], &length)) { + if (first_read == SIZE_MAX) + first_read = i; + if (length > safe_length) + safe_length = length; + } + + switch (BPF_CLASS(insns[i].code)) { + case BPF_RET: + unreachable = true; + continue; + + case BPF_JMP: + if (insns[i].code == (BPF_JMP|BPF_JA)) { + jt = jf = insns[i].k; + } else { + jt = insns[i].jt; + jf = insns[i].jf; + } + + if (jt >= insn_count - (i + 1) || + jf >= insn_count - (i + 1)) { + return (-1); + } + + if (jt > 0 && jf > 0) + unreachable = true; + + jtf = insn_dat[i].bj_aux.bj_jdata.bj_jtf; + + jtf[0].bj_jump = NULL; + jtf[0].bj_safe_length = safe_length; + SLIST_INSERT_HEAD(&insn_dat[i + 1 + jt].bj_jumps, + &jtf[0], bj_entries); + + if (jf != jt) { + jtf[1].bj_jump = NULL; + jtf[1].bj_safe_length = safe_length; + SLIST_INSERT_HEAD(&insn_dat[i + 1 + jf].bj_jumps, + &jtf[1], bj_entries); + } + + continue; + } + } + + set_check_length(insns, insn_dat, first_read, insn_count, safe_length); + + return (0); +} + +/* + * Count out-of-bounds and division by zero jumps. + * + * insn_dat should be initialized by optimize(). + */ +static size_t +get_ret0_size(struct bpf_insn *insns, struct bpfjit_insn_data *insn_dat, + size_t insn_count) +{ + size_t rv = 0; + size_t i; + + for (i = 0; i < insn_count; i++) { + + if (read_pkt_insn(&insns[i], NULL)) { + if (insn_dat[i].bj_aux.bj_rdata.bj_check_length > 0) + rv++; +#ifdef _KERNEL + rv++; +#endif + } + + if (insns[i].code == (BPF_LD|BPF_IND|BPF_B) || + insns[i].code == (BPF_LD|BPF_IND|BPF_H) || + insns[i].code == (BPF_LD|BPF_IND|BPF_W)) { + rv++; + } + + if (insns[i].code == (BPF_ALU|BPF_DIV|BPF_X)) + rv++; + + if (insns[i].code == (BPF_ALU|BPF_DIV|BPF_K) && + insns[i].k == 0) { + rv++; + } + } + + return (rv); +} + +/* + * Convert BPF_ALU operations except BPF_NEG and BPF_DIV to sljit operation.
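+ * BPF_NEG and BPF_DIV do not map to a single binary sljit op: NEG needs + * a unary operation and DIV is handled by the division helpers above, so + * both are emitted separately.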
+
+/*
+ * Count out-of-bounds and division by zero jumps.
+ *
+ * insn_dat should be initialized by optimize().
+ */
+static size_t
+get_ret0_size(struct bpf_insn *insns, struct bpfjit_insn_data *insn_dat,
+    size_t insn_count)
+{
+	size_t rv = 0;
+	size_t i;
+
+	for (i = 0; i < insn_count; i++) {
+
+		if (read_pkt_insn(&insns[i], NULL)) {
+			if (insn_dat[i].bj_aux.bj_rdata.bj_check_length > 0)
+				rv++;
+#ifdef _KERNEL
+			rv++;
+#endif
+		}
+
+		if (insns[i].code == (BPF_LD|BPF_IND|BPF_B) ||
+		    insns[i].code == (BPF_LD|BPF_IND|BPF_H) ||
+		    insns[i].code == (BPF_LD|BPF_IND|BPF_W)) {
+			rv++;
+		}
+
+		if (insns[i].code == (BPF_ALU|BPF_DIV|BPF_X))
+			rv++;
+
+		if (insns[i].code == (BPF_ALU|BPF_DIV|BPF_K) &&
+		    insns[i].k == 0) {
+			rv++;
+		}
+	}
+
+	return (rv);
+}
+
+/*
+ * Convert BPF_ALU operations except BPF_NEG and BPF_DIV to a sljit operation.
+ */
+static int
+bpf_alu_to_sljit_op(struct bpf_insn *pc)
+{
+
+	/*
+	 * Note: all supported 64bit arches have a 32bit multiply
+	 * instruction, so SLJIT_INT_OP doesn't have any overhead.
+	 */
+	switch (BPF_OP(pc->code)) {
+	case BPF_ADD:
+		return (SLJIT_ADD);
+	case BPF_SUB:
+		return (SLJIT_SUB);
+	case BPF_MUL:
+		return (SLJIT_MUL|SLJIT_INT_OP);
+	case BPF_OR:
+		return (SLJIT_OR);
+	case BPF_AND:
+		return (SLJIT_AND);
+	case BPF_LSH:
+		return (SLJIT_SHL);
+	case BPF_RSH:
+		return (SLJIT_LSHR|SLJIT_INT_OP);
+	default:
+		BPFJIT_ASSERT(false);
+		return (0);
+	}
+}
+
+/*
+ * Convert BPF_JMP operations except BPF_JA to a sljit condition.
+ */
+static int
+bpf_jmp_to_sljit_cond(struct bpf_insn *pc, bool negate)
+{
+	/*
+	 * Note: all supported 64bit arches have 32bit comparison
+	 * instructions, so SLJIT_INT_OP doesn't have any overhead.
+	 */
+	int rv = SLJIT_INT_OP;
+
+	switch (BPF_OP(pc->code)) {
+	case BPF_JGT:
+		rv |= negate ? SLJIT_C_LESS_EQUAL : SLJIT_C_GREATER;
+		break;
+	case BPF_JGE:
+		rv |= negate ? SLJIT_C_LESS : SLJIT_C_GREATER_EQUAL;
+		break;
+	case BPF_JEQ:
+		rv |= negate ? SLJIT_C_NOT_EQUAL : SLJIT_C_EQUAL;
+		break;
+	case BPF_JSET:
+		rv |= negate ? SLJIT_C_EQUAL : SLJIT_C_NOT_EQUAL;
+		break;
+	default:
+		BPFJIT_ASSERT(false);
+		return (0);
+	}
+
+	return (rv);
+}
+
+static unsigned int
+bpfjit_optimization_hints(struct bpf_insn *insns, size_t insn_count)
+{
+	unsigned int rv = BPFJIT_INIT_A;
+	struct bpf_insn *pc;
+	unsigned int minm, maxm;
+
+	BPFJIT_ASSERT(BPF_MEMWORDS - 1 <= 0xff);
+
+	maxm = 0;
+	minm = BPF_MEMWORDS - 1;
+
+	for (pc = insns; pc != insns + insn_count; pc++) {
+		switch (BPF_CLASS(pc->code)) {
+		case BPF_LD:
+			if (BPF_MODE(pc->code) == BPF_IND)
+				rv |= BPFJIT_INIT_X;
+			if (BPF_MODE(pc->code) == BPF_MEM &&
+			    (uint32_t)pc->k < BPF_MEMWORDS) {
+				if (pc->k > maxm)
+					maxm = pc->k;
+				if (pc->k < minm)
+					minm = pc->k;
+			}
+			continue;
+		case BPF_LDX:
+			rv |= BPFJIT_INIT_X;
+			if (BPF_MODE(pc->code) == BPF_MEM &&
+			    (uint32_t)pc->k < BPF_MEMWORDS) {
+				if (pc->k > maxm)
+					maxm = pc->k;
+				if (pc->k < minm)
+					minm = pc->k;
+			}
+			continue;
+		case BPF_ST:
+			if ((uint32_t)pc->k < BPF_MEMWORDS) {
+				if (pc->k > maxm)
+					maxm = pc->k;
+				if (pc->k < minm)
+					minm = pc->k;
+			}
+			continue;
+		case BPF_STX:
+			rv |= BPFJIT_INIT_X;
+			if ((uint32_t)pc->k < BPF_MEMWORDS) {
+				if (pc->k > maxm)
+					maxm = pc->k;
+				if (pc->k < minm)
+					minm = pc->k;
+			}
+			continue;
+		case BPF_ALU:
+			if (pc->code == (BPF_ALU|BPF_NEG))
+				continue;
+			if (BPF_SRC(pc->code) == BPF_X)
+				rv |= BPFJIT_INIT_X;
+			continue;
+		case BPF_JMP:
+			if (pc->code == (BPF_JMP|BPF_JA))
+				continue;
+			if (BPF_SRC(pc->code) == BPF_X)
+				rv |= BPFJIT_INIT_X;
+			continue;
+		case BPF_RET:
+			continue;
+		case BPF_MISC:
+			rv |= BPFJIT_INIT_X;
+			continue;
+		default:
+			BPFJIT_ASSERT(false);
+			return (0);
+		}
+	}
+
+	return (rv | (maxm << 8) | minm);
+}
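
A quick sketch of how the packed hints word is consumed (illustrative only;
show_hints is a made-up name, and the BPFJIT_INIT_* flags are assumed to sit
above the two packed index bytes). This mirrors the decoding done by
bpfjit_generate_code() below: the largest used scratch-memory index lands in
bits 8..15, the smallest in bits 0..7, and minm > maxm means M[] is untouched:

	#include <stdio.h>

	static void
	show_hints(unsigned int opts)
	{
		unsigned int minm = opts & 0xff;	/* smallest k in M[k] */
		unsigned int maxm = (opts >> 8) & 0xff;	/* largest k in M[k] */

		if (minm <= maxm)
			printf("M[%u..%u] used\n", minm, maxm);
		else
			printf("M[] unused\n");
	}
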
+
+/*
+ * Convert BPF_K and BPF_X to a sljit register.
+ */
+static int
+kx_to_reg(struct bpf_insn *pc)
+{
+
+	switch (BPF_SRC(pc->code)) {
+	case BPF_K:
+		return (SLJIT_IMM);
+	case BPF_X:
+		return (BPFJIT_X);
+	default:
+		BPFJIT_ASSERT(false);
+		return (0);
+	}
+}
+
+static sljit_sw
+kx_to_reg_arg(struct bpf_insn *pc)
+{
+
+	switch (BPF_SRC(pc->code)) {
+	case BPF_K:
+		return ((uint32_t)pc->k); /* SLJIT_IMM, pc->k, */
+	case BPF_X:
+		return (0); /* BPFJIT_X, 0, */
+	default:
+		BPFJIT_ASSERT(false);
+		return (0);
+	}
+}
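
These two helpers let a single emitter call serve both operand forms. For
illustration, this is the call shape used by the two-operand ALU case in the
generator below; the same line handles "A op k" (BPF_K) and "A op X" (BPF_X):

	/* Excerpted shape, not additional patch code. */
	status = sljit_emit_op2(compiler,
	    bpf_alu_to_sljit_op(pc),
	    BPFJIT_A, 0,
	    BPFJIT_A, 0,
	    kx_to_reg(pc), kx_to_reg_arg(pc));
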
+
+bpfjit_function_t
+bpfjit_generate_code(struct bpf_insn *insns, size_t insn_count)
+{
+	static struct bpf_insn bpf_accept_all = BPF_STMT(BPF_RET+BPF_K, -1);
+	void *rv;
+	size_t i;
+	int status;
+	int branching, negate;
+	unsigned int rval, mode, src;
+	int ntmp;
+	unsigned int locals_size;
+	unsigned int minm, maxm; /* min/max k for M[k] */
+	size_t mem_locals_start; /* start of M[] array */
+	unsigned int opts;
+	struct bpf_insn *pc;
+	struct sljit_compiler *compiler;
+
+	/* a list of jumps to a normal return from a generated function */
+	struct sljit_jump **returns;
+	size_t returns_size, returns_maxsize;
+
+	/* a list of jumps to out-of-bound return from a generated function */
+	struct sljit_jump **ret0;
+	size_t ret0_size, ret0_maxsize;
+
+	struct bpfjit_insn_data *insn_dat;
+
+	/* for local use */
+	struct sljit_label *label;
+	struct sljit_jump *jump;
+	struct bpfjit_jump *bjump, *jtf;
+
+	struct sljit_jump *to_mchain_jump;
+
+	uint32_t jt, jf;
+
+	rv = NULL;
+	compiler = NULL;
+	insn_dat = NULL;
+	returns = NULL;
+	ret0 = NULL;
+
+	/* An empty filter means accept all. */
+	if (insn_count == 0) {
+		insns = &bpf_accept_all;
+		insn_count++;
+	}
+
+	opts = bpfjit_optimization_hints(insns, insn_count);
+	minm = opts & 0xff;
+	maxm = (opts >> 8) & 0xff;
+	mem_locals_start = mem_local_offset(0, 0);
+	locals_size = (minm <= maxm) ?
+	    mem_local_offset(maxm + 1, minm) : mem_locals_start;
+
+	ntmp = 4;
+#ifdef _KERNEL
+	ntmp += 1; /* for BPFJIT_KERN_TMP */
+#endif
+
+	returns_maxsize = count_returns(insns, insn_count);
+	if (returns_maxsize == 0)
+		goto fail;
+
+	insn_dat = BPFJIT_MALLOC(insn_count * sizeof(insn_dat[0]));
+	if (insn_dat == NULL)
+		goto fail;
+
+	if (optimize(insns, insn_dat, insn_count) < 0)
+		goto fail;
+
+	ret0_size = 0;
+	ret0_maxsize = get_ret0_size(insns, insn_dat, insn_count);
+	if (ret0_maxsize > 0) {
+		ret0 = BPFJIT_MALLOC(ret0_maxsize * sizeof(ret0[0]));
+		if (ret0 == NULL)
+			goto fail;
+	}
+
+	returns_size = 0;
+	returns = BPFJIT_MALLOC(returns_maxsize * sizeof(returns[0]));
+	if (returns == NULL)
+		goto fail;
+
+	compiler = sljit_create_compiler();
+	if (compiler == NULL)
+		goto fail;
+
+#if !defined(_KERNEL) && defined(SLJIT_VERBOSE) && SLJIT_VERBOSE
+	sljit_compiler_verbose(compiler, stderr);
+#endif
+
+	status = sljit_emit_enter(compiler, 3, ntmp, 3, locals_size);
+	if (status != SLJIT_SUCCESS)
+		goto fail;
+
+	for (i = mem_locals_start; i < locals_size; i += sizeof(uint32_t)) {
+		status = sljit_emit_op1(compiler,
+		    SLJIT_MOV_UI,
+		    SLJIT_MEM1(SLJIT_LOCALS_REG), i,
+		    SLJIT_IMM, 0);
+		if (status != SLJIT_SUCCESS)
+			goto fail;
+	}
+
+	if (opts & BPFJIT_INIT_A) {
+		/* A = 0; */
+		status = sljit_emit_op1(compiler,
+		    SLJIT_MOV,
+		    BPFJIT_A, 0,
+		    SLJIT_IMM, 0);
+		if (status != SLJIT_SUCCESS)
+			goto fail;
+	}
+
+	if (opts & BPFJIT_INIT_X) {
+		/* X = 0; */
+		status = sljit_emit_op1(compiler,
+		    SLJIT_MOV,
+		    BPFJIT_X, 0,
+		    SLJIT_IMM, 0);
+		if (status != SLJIT_SUCCESS)
+			goto fail;
+	}
+
+	for (i = 0; i < insn_count; i++) {
+		if (insn_dat[i].bj_unreachable)
+			continue;
+
+		to_mchain_jump = NULL;
+
+		/*
+		 * Resolve jumps to the current insn.
+		 */
+		label = NULL;
+		SLIST_FOREACH(bjump, &insn_dat[i].bj_jumps, bj_entries) {
+			if (bjump->bj_jump != NULL) {
+				if (label == NULL)
+					label = sljit_emit_label(compiler);
+				if (label == NULL)
+					goto fail;
+				sljit_set_label(bjump->bj_jump, label);
+			}
+		}
+
+		if (read_pkt_insn(&insns[i], NULL) &&
+		    insn_dat[i].bj_aux.bj_rdata.bj_check_length > 0) {
+			/* if (buflen < bj_check_length) return (0); */
+			jump = sljit_emit_cmp(compiler,
+			    SLJIT_C_LESS,
+			    BPFJIT_BUFLEN, 0,
+			    SLJIT_IMM,
+			    insn_dat[i].bj_aux.bj_rdata.bj_check_length);
+			if (jump == NULL)
+				goto fail;
+#ifdef _KERNEL
+			to_mchain_jump = jump;
+#else
+			ret0[ret0_size++] = jump;
+#endif
+		}
+
+		pc = &insns[i];
+		switch (BPF_CLASS(pc->code)) {
+
+		default:
+			goto fail;
+
+		case BPF_LD:
+			/* BPF_LD+BPF_IMM          A <- k */
+			if (pc->code == (BPF_LD|BPF_IMM)) {
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV,
+				    BPFJIT_A, 0,
+				    SLJIT_IMM, (uint32_t)pc->k);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			/* BPF_LD+BPF_MEM          A <- M[k] */
+			if (pc->code == (BPF_LD|BPF_MEM)) {
+				if (pc->k < minm || pc->k > maxm)
+					goto fail;
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV_UI,
+				    BPFJIT_A, 0,
+				    SLJIT_MEM1(SLJIT_LOCALS_REG),
+				    mem_local_offset(pc->k, minm));
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			/* BPF_LD+BPF_W+BPF_LEN    A <- len */
+			if (pc->code == (BPF_LD|BPF_W|BPF_LEN)) {
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV,
+				    BPFJIT_A, 0,
+				    BPFJIT_WIRELEN, 0);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			mode = BPF_MODE(pc->code);
+			if (mode != BPF_ABS && mode != BPF_IND)
+				goto fail;
+
+			status = emit_pkt_read(compiler, pc,
+			    to_mchain_jump, ret0, &ret0_size);
+			if (status != SLJIT_SUCCESS)
+				goto fail;
+
+			continue;
+
+		case BPF_LDX:
+			mode = BPF_MODE(pc->code);
+
+			/* BPF_LDX+BPF_W+BPF_IMM  X <- k */
+			if (mode == BPF_IMM) {
+				if (BPF_SIZE(pc->code) != BPF_W)
+					goto fail;
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV,
+				    BPFJIT_X, 0,
+				    SLJIT_IMM, (uint32_t)pc->k);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			/* BPF_LDX+BPF_W+BPF_LEN  X <- len */
+			if (mode == BPF_LEN) {
+				if (BPF_SIZE(pc->code) != BPF_W)
+					goto fail;
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV,
+				    BPFJIT_X, 0,
+				    BPFJIT_WIRELEN, 0);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			/* BPF_LDX+BPF_W+BPF_MEM  X <- M[k] */
+			if (mode == BPF_MEM) {
+				if (BPF_SIZE(pc->code) != BPF_W)
+					goto fail;
+				if (pc->k < minm || pc->k > maxm)
+					goto fail;
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV_UI,
+				    BPFJIT_X, 0,
+				    SLJIT_MEM1(SLJIT_LOCALS_REG),
+				    mem_local_offset(pc->k, minm));
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			/* BPF_LDX+BPF_B+BPF_MSH  X <- 4*(P[k:1]&0xf) */
+			if (mode != BPF_MSH || BPF_SIZE(pc->code) != BPF_B)
+				goto fail;
+
+			status = emit_msh(compiler, pc,
+			    to_mchain_jump, ret0, &ret0_size);
+			if (status != SLJIT_SUCCESS)
+				goto fail;
+
+			continue;
+
+		case BPF_ST:
+			if (pc->code != BPF_ST || pc->k < minm || pc->k > maxm)
+				goto fail;
+
+			status = sljit_emit_op1(compiler,
+			    SLJIT_MOV_UI,
+			    SLJIT_MEM1(SLJIT_LOCALS_REG),
+			    mem_local_offset(pc->k, minm),
+			    BPFJIT_A, 0);
+			if (status != SLJIT_SUCCESS)
+				goto fail;
+
+			continue;
+
+		case BPF_STX:
+			if (pc->code != BPF_STX || pc->k < minm || pc->k > maxm)
+				goto fail;
+
+			status = sljit_emit_op1(compiler,
+			    SLJIT_MOV_UI,
+			    SLJIT_MEM1(SLJIT_LOCALS_REG),
+			    mem_local_offset(pc->k, minm),
+			    BPFJIT_X, 0);
+			if (status != SLJIT_SUCCESS)
+				goto fail;
+
+			continue;
+
+		case BPF_ALU:
+
+			if (pc->code == (BPF_ALU|BPF_NEG)) {
+				status = sljit_emit_op1(compiler,
+				    SLJIT_NEG,
+				    BPFJIT_A, 0,
+				    BPFJIT_A, 0);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			if (BPF_OP(pc->code) != BPF_DIV) {
+				status = sljit_emit_op2(compiler,
+				    bpf_alu_to_sljit_op(pc),
+				    BPFJIT_A, 0,
+				    BPFJIT_A, 0,
+				    kx_to_reg(pc), kx_to_reg_arg(pc));
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			/* BPF_DIV */
+
+			src = BPF_SRC(pc->code);
+			if (src != BPF_X && src != BPF_K)
+				goto fail;
+
+			/* division by zero? */
+			if (src == BPF_X) {
+				jump = sljit_emit_cmp(compiler,
+				    SLJIT_C_EQUAL|SLJIT_INT_OP,
+				    BPFJIT_X, 0,
+				    SLJIT_IMM, 0);
+				if (jump == NULL)
+					goto fail;
+				ret0[ret0_size++] = jump;
+			} else if (pc->k == 0) {
+				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
+				if (jump == NULL)
+					goto fail;
+				ret0[ret0_size++] = jump;
+			}
+
+			if (src == BPF_X) {
+				status = emit_division(compiler, BPFJIT_X, 0);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+			} else if (pc->k != 0) {
+				if (pc->k & (pc->k - 1)) {
+					status = emit_division(compiler,
+					    SLJIT_IMM, (uint32_t)pc->k);
+				} else {
+					status = emit_pow2_division(compiler,
+					    (uint32_t)pc->k);
+				}
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+			}
+
+			continue;
+
+		case BPF_JMP:
+
+			switch (BPF_OP(pc->code)) {
+			case BPF_JA:
+				jt = jf = pc->k;
+				break;
+			case BPF_JEQ:
+			case BPF_JGT:
+			case BPF_JGE:
+			case BPF_JSET:
+				jt = pc->jt;
+				jf = pc->jf;
+				break;
+			default:
+				goto fail;
+			}
+
+			negate = (jt == 0) ? 1 : 0;
+			branching = (jt == jf) ? 0 : 1;
+			jtf = insn_dat[i].bj_aux.bj_jdata.bj_jtf;
+
+			if (branching) {
+				if (BPF_OP(pc->code) != BPF_JSET) {
+					jump = sljit_emit_cmp(compiler,
+					    bpf_jmp_to_sljit_cond(pc, negate),
+					    BPFJIT_A, 0,
+					    kx_to_reg(pc), kx_to_reg_arg(pc));
+				} else {
+					status = sljit_emit_op2(compiler,
+					    SLJIT_AND,
+					    BPFJIT_TMP1, 0,
+					    BPFJIT_A, 0,
+					    kx_to_reg(pc), kx_to_reg_arg(pc));
+					if (status != SLJIT_SUCCESS)
+						goto fail;
+
+					jump = sljit_emit_cmp(compiler,
+					    bpf_jmp_to_sljit_cond(pc, negate),
+					    BPFJIT_TMP1, 0,
+					    SLJIT_IMM, 0);
+				}
+
+				if (jump == NULL)
+					goto fail;
+
+				BPFJIT_ASSERT(jtf[negate].bj_jump == NULL);
+				jtf[negate].bj_jump = jump;
+			}
+
+			if (!branching || (jt != 0 && jf != 0)) {
+				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
+				if (jump == NULL)
+					goto fail;
+
+				BPFJIT_ASSERT(jtf[branching].bj_jump == NULL);
+				jtf[branching].bj_jump = jump;
+			}
+
+			continue;
+
+		case BPF_RET:
+
+			rval = BPF_RVAL(pc->code);
+			if (rval == BPF_X)
+				goto fail;
+
+			/* BPF_RET+BPF_K    accept k bytes */
+			if (rval == BPF_K) {
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV,
+				    BPFJIT_A, 0,
+				    SLJIT_IMM, (uint32_t)pc->k);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+			}
+
+			/* BPF_RET+BPF_A    accept A bytes */
+			if (rval == BPF_A) {
+#if BPFJIT_A != SLJIT_RETURN_REG
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV,
+				    SLJIT_RETURN_REG, 0,
+				    BPFJIT_A, 0);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+#endif
+			}
+
+			/*
+			 * Save a jump to a normal return. If the program
+			 * ends with BPF_RET, no jump is needed because
+			 * the normal return is generated right after the
+			 * last instruction.
+			 */
+			if (i != insn_count - 1) {
+				jump = sljit_emit_jump(compiler, SLJIT_JUMP);
+				if (jump == NULL)
+					goto fail;
+				returns[returns_size++] = jump;
+			}
+
+			continue;
+
+		case BPF_MISC:
+
+			if (pc->code == (BPF_MISC|BPF_TAX)) {
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV_UI,
+				    BPFJIT_X, 0,
+				    BPFJIT_A, 0);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			if (pc->code == (BPF_MISC|BPF_TXA)) {
+				status = sljit_emit_op1(compiler,
+				    SLJIT_MOV,
+				    BPFJIT_A, 0,
+				    BPFJIT_X, 0);
+				if (status != SLJIT_SUCCESS)
+					goto fail;
+
+				continue;
+			}
+
+			goto fail;
+		} /* switch */
+	} /* main loop */
+
+	BPFJIT_ASSERT(ret0_size == ret0_maxsize);
+	BPFJIT_ASSERT(returns_size <= returns_maxsize);
+
+	if (returns_size > 0) {
+		label = sljit_emit_label(compiler);
+		if (label == NULL)
+			goto fail;
+		for (i = 0; i < returns_size; i++)
+			sljit_set_label(returns[i], label);
+	}
+
+	status = sljit_emit_return(compiler,
+	    SLJIT_MOV_UI,
+	    BPFJIT_A, 0);
+	if (status != SLJIT_SUCCESS)
+		goto fail;
+
+	if (ret0_size > 0) {
+		label = sljit_emit_label(compiler);
+		if (label == NULL)
+			goto fail;
+
+		for (i = 0; i < ret0_size; i++)
+			sljit_set_label(ret0[i], label);
+
+		status = sljit_emit_op1(compiler,
+		    SLJIT_MOV,
+		    SLJIT_RETURN_REG, 0,
+		    SLJIT_IMM, 0);
+		if (status != SLJIT_SUCCESS)
+			goto fail;
+
+		status = sljit_emit_return(compiler,
+		    SLJIT_MOV_UI,
+		    SLJIT_RETURN_REG, 0);
+		if (status != SLJIT_SUCCESS)
+			goto fail;
+	}
+
+	rv = sljit_generate_code(compiler);
+
+fail:
+	if (compiler != NULL)
+		sljit_free_compiler(compiler);
+
+	if (insn_dat != NULL)
+		BPFJIT_FREE(insn_dat);
+
+	if (returns != NULL)
+		BPFJIT_FREE(returns);
+
+	if (ret0 != NULL)
+		BPFJIT_FREE(ret0);
+
+	return ((bpfjit_function_t)rv);
+}
+
+void
+bpfjit_free_code(bpfjit_function_t code)
+{
+
+	sljit_free_code((void *)code);
+}
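
The generated function therefore has two exits: all "ret0" jumps (failed
bj_check_length guards in userland builds, division by zero, and the
out-of-bounds cases collected by get_ret0_size()) share one zero-returning
epilogue, while ordinary BPF_RET paths return A. (In the kernel a failed
buflen guard instead takes the to_mchain_jump path into the mbuf-chain
reader.) A rough C equivalent of the generated shape, illustrative only with
a made-up name and constant:

	#include <stdint.h>

	static unsigned int
	generated_shape(const uint8_t *p, unsigned int wirelen,
	    unsigned int buflen)
	{
		uint32_t A = 0;

		(void)p;
		(void)wirelen;

		if (buflen < 14)	/* a bj_check_length guard; 14 is made up */
			return (0);	/* shared ret0 epilogue */

		/* ... the loads, ALU ops and jumps of the filter body ... */

		return (A);		/* normal return: accept A bytes */
	}
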
Property changes on: sys/net/bpfjit.c
___________________________________________________________________
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: sys/net/bpfjit.h
===================================================================
--- sys/net/bpfjit.h	(revision 0)
+++ sys/net/bpfjit.h	(working copy)
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2011-2012 Alexander Nasonov.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NET_BPFJIT_H_
+#define _NET_BPFJIT_H_
+
+#ifdef _KERNEL
+#include <sys/types.h>
+
+extern int bpfjit_disable;
+#else
+#include <stddef.h>
+#include <stdint.h>
+#endif
+
+#include <net/bpf.h>
+
+/*
+ * The return value and arguments of a function generated by sljit have
+ * sljit_uw type, which can be wider than the argument types below. In
+ * such cases we rely on the fact that calling conventions use the same
+ * registers for smaller types.
+ * SLJIT_MOV_UI is passed to sljit_emit_return() to make sure that the
+ * return value is truncated to unsigned int.
+ */
+typedef unsigned int (*bpfjit_function_t)(const uint8_t *p,
+    unsigned int wirelen, unsigned int buflen);
+
+bpfjit_function_t bpfjit_generate_code(struct bpf_insn *insns,
+    size_t insn_count);
+
+void bpfjit_free_code(bpfjit_function_t code);
+
+#endif /* !_NET_BPFJIT_H_ */

Property changes on: sys/net/bpfjit.h
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
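
The API is the same in the kernel and in the userland regression harness. A
minimal userland caller looks roughly like this (illustrative sketch only:
the filter, packet bytes and names are made up, error handling is trimmed,
and it assumes the include paths the regression Makefile sets up):

	#include <sys/types.h>
	#include <stdint.h>
	#include <stdio.h>

	#include <net/bpf.h>
	#include <net/bpfjit.h>

	int
	main(void)
	{
		/* Accept frames whose first byte is 0x45 (made-up example). */
		struct bpf_insn insns[] = {
			BPF_STMT(BPF_LD|BPF_B|BPF_ABS, 0),
			BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x45, 0, 1),
			BPF_STMT(BPF_RET|BPF_K, 0xffffffffU),
			BPF_STMT(BPF_RET|BPF_K, 0),
		};
		uint8_t pkt[] = { 0x45, 0x00 };
		bpfjit_function_t fn;

		fn = bpfjit_generate_code(insns, sizeof(insns) / sizeof(insns[0]));
		if (fn == NULL)
			return (1);
		/* arguments: packet, wirelen, buflen */
		printf("filter returned %u\n", fn(pkt, sizeof(pkt), sizeof(pkt)));
		bpfjit_free_code(fn);
		return (0);
	}
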
Index: sys/netgraph/ng_bpf.c
===================================================================
--- sys/netgraph/ng_bpf.c	(revision 243911)
+++ sys/netgraph/ng_bpf.c	(working copy)
@@ -64,8 +64,8 @@
 #include
 
-#ifdef BPF_JITTER
-#include <net/bpf_jitter.h>
+#ifdef BPFJIT
+#include <net/bpfjit.h>
 #endif
 
 #include
@@ -89,8 +89,8 @@ struct ng_bpf_hookinfo {
 	hook_p	match;
 	hook_p	nomatch;
 	struct ng_bpf_hookprog *prog;
-#ifdef BPF_JITTER
-	bpf_jit_filter	*jit_prog;
+#ifdef BPFJIT
+	bpfjit_function_t jit_prog;
 #endif
 	struct ng_bpf_hookstat	stats;
 };
@@ -437,8 +437,8 @@ ng_bpf_rcvdata(hook_p hook, item_p item)
 		goto ready;
 	}
 
-#ifdef BPF_JITTER
-	if (bpf_jitter_enable != 0 && hip->jit_prog != NULL)
+#ifdef BPFJIT
+	if (bpfjit_disable == 0 && hip->jit_prog != NULL)
 		usejit = 1;
 #endif
 
@@ -465,9 +465,9 @@ ng_bpf_rcvdata(hook_p hook, item_p item)
 	}
 
 	/* Run packet through filter */
-#ifdef BPF_JITTER
+#ifdef BPFJIT
 	if (usejit)
-		len = (*(hip->jit_prog->func))(data, totlen, totlen);
+		len = (hip->jit_prog)(data, totlen, totlen);
 	else
 #endif
 	if (data)
@@ -533,9 +533,9 @@ ng_bpf_disconnect(hook_p hook)
 		NG_NODE_FOREACH_HOOK(node, ng_bpf_remrefs, hook, tmp);
 
 	free(hip->prog, M_NETGRAPH_BPF);
-#ifdef BPF_JITTER
+#ifdef BPFJIT
 	if (hip->jit_prog != NULL)
-		bpf_destroy_jit_filter(hip->jit_prog);
+		bpfjit_free_code(hip->jit_prog);
 #endif
 	free(hip, M_NETGRAPH_BPF);
 	if ((NG_NODE_NUMHOOKS(node) == 0) &&
@@ -557,8 +557,8 @@ ng_bpf_setprog(hook_p hook, const struct ng_bpf_ho
 {
 	const hinfo_p hip = NG_HOOK_PRIVATE(hook);
 	struct ng_bpf_hookprog *hp;
-#ifdef BPF_JITTER
-	bpf_jit_filter	*jit_prog;
+#ifdef BPFJIT
+	bpfjit_function_t jit_prog;
 #endif
 	int size;
 
@@ -573,17 +573,17 @@ ng_bpf_setprog(hook_p hook, const struct ng_bpf_ho
 	if (hp == NULL)
 		return (ENOMEM);
 	bcopy(hp0, hp, size);
-#ifdef BPF_JITTER
-	jit_prog = bpf_jitter(hp->bpf_prog, hp->bpf_prog_len);
+#ifdef BPFJIT
+	jit_prog = bpfjit_generate_code(hp->bpf_prog, hp->bpf_prog_len);
 #endif
 
 	/* Free previous program, if any, and assign new one */
 	if (hip->prog != NULL)
 		free(hip->prog, M_NETGRAPH_BPF);
 	hip->prog = hp;
-#ifdef BPF_JITTER
+#ifdef BPFJIT
 	if (hip->jit_prog != NULL)
-		bpf_destroy_jit_filter(hip->jit_prog);
+		bpfjit_free_code(hip->jit_prog);
 	hip->jit_prog = jit_prog;
 #endif
 
Index: sys/pc98/conf/GENERIC
===================================================================
--- sys/pc98/conf/GENERIC	(revision 243911)
+++ sys/pc98/conf/GENERIC	(working copy)
@@ -231,6 +231,7 @@ device		firmware	# firmware assist module
 # Be aware of the administrative consequences of enabling this!
 # Note that 'bpf' is required for DHCP.
 device		bpf		# Berkeley packet filter
+options 	BPFJIT		# enable BPF just-in-time compiler
 
 # USB support
 #options 	USB_DEBUG	# enable debug msgs
Index: sys/pc98/conf/NOTES
===================================================================
--- sys/pc98/conf/NOTES	(revision 243911)
+++ sys/pc98/conf/NOTES	(working copy)
@@ -187,10 +187,6 @@
 options 	PERFMON
 options 	DEVICE_POLLING
 
-# BPF_JITTER adds support for BPF just-in-time compiler.
-
-options 	BPF_JITTER
-
 
 #####################################################################
 # MISCELLANEOUS DEVICES AND OPTIONS
Index: sys/powerpc/conf/GENERIC
===================================================================
--- sys/powerpc/conf/GENERIC	(revision 243911)
+++ sys/powerpc/conf/GENERIC	(working copy)
@@ -157,6 +157,7 @@ device		firmware	# firmware assist module
 # Be aware of the administrative consequences of enabling this!
 # Note that 'bpf' is required for DHCP.
 device		bpf		#Berkeley packet filter
+options 	BPFJIT		# enable BPF just-in-time compiler
 
 # USB support
 options 	USB_DEBUG	# enable debug msgs
Index: sys/sparc64/conf/NOTES
===================================================================
--- sys/sparc64/conf/NOTES	(revision 243911)
+++ sys/sparc64/conf/NOTES	(working copy)
@@ -145,6 +145,7 @@ nooption	SC_NORM_ATTR
 nooption	SC_DFLT_FONT
 nooption	SC_ALT_MOUSE_IMAGE
 nooption	EXT2FS
+nooption	BPFJIT
 
 
 #####################################################################
Index: tools/regression/bpf/bpf_filter/Makefile
===================================================================
--- tools/regression/bpf/bpf_filter/Makefile	(revision 243911)
+++ tools/regression/bpf/bpf_filter/Makefile	(working copy)
@@ -24,7 +24,7 @@ TEST_CASES?=	test0001 test0002 test0003 test0004 \
 
 SYSDIR?=	${.CURDIR}/../../../../sys
 
-SRCS=		${.CURDIR}/bpf_test.c
+SRCS=		${.CURDIR}/bpf_test.c ${SYSDIR}/net/bpf_filter.c
 
 CFLAGS+=	-g -I${SYSDIR} -I${.CURDIR}/tests
 
@@ -38,17 +38,17 @@ CFLAGS+=	-DLOG_LEVEL="${LOG_LEVEL}"
 CFLAGS+=	-DBPF_VALIDATE
 .endif
 
-.if defined(BPF_JIT) && \
-    (${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386")
-SRCS+=		${SYSDIR}/net/bpf_jitter.c \
-		${SYSDIR}/${MACHINE_ARCH}/${MACHINE_ARCH}/bpf_jit_machdep.c
-CFLAGS+=	-DBPF_JIT_COMPILER
-LIBS+=		-lutil
-WARNS?=	6
+.if defined(BPF_JIT)
+SRCS+=		${SYSDIR}/net/bpfjit.c ${SYSDIR}/contrib/sljit/sljitLir.c
+CFLAGS+=	-DBPF_JIT_COMPILER -I${SYSDIR}/contrib/sljit
+CFLAGS+=	-DBPFJIT_USE_UDIV -DSLJIT_CONFIG_AUTO=1 -DSLJIT_DEBUG=0
+.if !defined(BPF_BENCHMARK) && defined(LOG_LEVEL) && (${LOG_LEVEL} > 2)
+CFLAGS+=	-DSLJIT_VERBOSE=1
 .else
-SRCS+=		${SYSDIR}/net/bpf_filter.c
+CFLAGS+=	-DSLJIT_VERBOSE=0
+.endif
+.endif
 WARNS?=	2
-.endif
 
 .for TEST in ${TEST_CASES}
 ${TEST}: ${.CURDIR}/tests/${TEST}.h ${SRCS}
Index: tools/regression/bpf/bpf_filter/bpf_test.c
===================================================================
--- tools/regression/bpf/bpf_filter/bpf_test.c	(revision 243911)
+++ tools/regression/bpf/bpf_filter/bpf_test.c	(working copy)
@@ -59,31 +59,28 @@ static int	verbose = LOG_LEVEL;
 
 #include
 
-#include <net/bpf_jitter.h>
+#include <net/bpfjit.h>
 
 static u_int
 bpf_compile_and_filter(void)
 {
-	bpf_jit_filter *filter;
+	bpfjit_function_t filter;
 	u_int i, ret;
 
 	/* Compile the BPF filter program and generate native code.
 	 */
-	if ((filter = bpf_jitter(pc, nins)) == NULL) {
+	filter = bpfjit_generate_code(pc, nins);
+	if (filter == NULL && invalid == 0) {
 		if (verbose > 1)
 			printf("Failed to allocate memory:\t");
 		if (verbose > 0)
			printf("FATAL\n");
 		exit(FATAL);
 	}
-	if (verbose > 2) {
-		printf("\n");
-		hexdump(filter->func, filter->size, NULL, HD_OMIT_CHARS);
-	}
 
 	for (i = 0; i < BPF_NRUNS; i++)
-		ret = (*(filter->func))(pkt, wirelen, buflen);
+		ret = filter(pkt, wirelen, buflen);
 
-	bpf_destroy_jit_filter(filter);
+	bpfjit_free_code(filter);
 
 	return (ret);
 }
Index: tools/regression/bpf/bpf_filter/tests/test0001.h
===================================================================
--- tools/regression/bpf/bpf_filter/tests/test0001.h	(revision 243911)
+++ tools/regression/bpf/bpf_filter/tests/test0001.h	(working copy)
@@ -28,4 +28,8 @@ int	invalid = 1;
 u_int	expect = 0;
 
 /* Expected signal */
+#ifdef BPF_JIT_COMPILER
+int	expect_signal = SIGSEGV;
+#else
 int	expect_signal = SIGABRT;
+#endif
Index: tools/regression/bpf/bpf_filter/tests/test0075.h
===================================================================
--- tools/regression/bpf/bpf_filter/tests/test0075.h	(revision 243911)
+++ tools/regression/bpf/bpf_filter/tests/test0075.h	(working copy)
@@ -29,8 +29,8 @@ int	invalid = 1;
 u_int	expect = 0xdeadc0de;
 
 /* Expected signal */
-#ifdef __amd64__
+#ifdef BPF_JIT_COMPILER
+int	expect_signal = SIGSEGV;
+#else
 int	expect_signal = SIGBUS;
-#else
-int	expect_signal = SIGSEGV;
 #endif
Index: tools/regression/bpf/bpf_filter/tests/test0076.h
===================================================================
--- tools/regression/bpf/bpf_filter/tests/test0076.h	(revision 243911)
+++ tools/regression/bpf/bpf_filter/tests/test0076.h	(working copy)
@@ -30,8 +30,8 @@ int	invalid = 1;
 u_int	expect = 0xdeadc0de;
 
 /* Expected signal */
-#ifdef __amd64__
+#ifdef BPF_JIT_COMPILER
+int	expect_signal = SIGSEGV;
+#else
 int	expect_signal = SIGBUS;
-#else
-int	expect_signal = SIGSEGV;
 #endif
Index: tools/regression/bpf/bpf_filter/tests/test0077.h
===================================================================
--- tools/regression/bpf/bpf_filter/tests/test0077.h	(revision 243911)
+++ tools/regression/bpf/bpf_filter/tests/test0077.h	(working copy)
@@ -29,8 +29,8 @@ int	invalid = 1;
 u_int	expect = 0xdeadc0de;
 
 /* Expected signal */
-#ifdef __amd64__
+#ifdef BPF_JIT_COMPILER
+int	expect_signal = SIGSEGV;
+#else
 int	expect_signal = SIGBUS;
-#else
-int	expect_signal = SIGSEGV;
 #endif
Index: tools/regression/bpf/bpf_filter/tests/test0078.h
===================================================================
--- tools/regression/bpf/bpf_filter/tests/test0078.h	(revision 243911)
+++ tools/regression/bpf/bpf_filter/tests/test0078.h	(working copy)
@@ -30,8 +30,8 @@ int	invalid = 1;
 u_int	expect = 0xdeadc0de;
 
 /* Expected signal */
-#ifdef __amd64__
+#ifdef BPF_JIT_COMPILER
+int	expect_signal = SIGSEGV;
+#else
 int	expect_signal = SIGBUS;
-#else
-int	expect_signal = SIGSEGV;
 #endif