Index: lib/csu/Makefile.inc =================================================================== --- lib/csu/Makefile.inc (revision 298097) +++ lib/csu/Makefile.inc (working copy) @@ -2,6 +2,10 @@ SSP_CFLAGS= +.if !empty(CFLAGS:M-fsafe-stack) +SSP_CFLAGS=-fno-safe-stack +.endif + SED_FIX_NOTE = -i "" -e '/\.note\.tag/s/progbits/note/' NO_WMISSING_VARIABLE_DECLARATIONS= Index: lib/csu/amd64/crt1.c =================================================================== --- lib/csu/amd64/crt1.c (revision 298097) +++ lib/csu/amd64/crt1.c (working copy) @@ -65,8 +65,15 @@ if (&_DYNAMIC != NULL) atexit(cleanup); - else + else { _init_tls(); + /* + * Initialize the unsafe stack, and store its pointer + * into the tcb. + */ + void *unsafestack = init_unsafestack(); + __builtin_safestack_set_usp(unsafestack); + } #ifdef GCRT atexit(_mcleanup); Index: lib/csu/common/ignore_init.c =================================================================== --- lib/csu/common/ignore_init.c (revision 298097) +++ lib/csu/common/ignore_init.c (working copy) @@ -23,11 +23,25 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include __FBSDID("$FreeBSD$"); +#include +#include +#include +#include +#include +#include +#include +#include + #include "notes.h" +#ifdef PIC +#define SAFESTACK_PROT _rtld_get_stack_prot() +#else +#define SAFESTACK_PROT (PROT_READ | PROT_WRITE | PROT_EXEC) +#endif /* PIC */ + extern int main(int, char **, char **); extern void (*__preinit_array_start[])(int, char **, char **) __hidden; @@ -60,6 +74,40 @@ _fini(); } +static inline void * +init_unsafestack(void) +{ + struct rlimit rl; + size_t stack_size; + size_t guard_size; + void *memory; + + /* getrlimit to know the stack size */ + if (!syscall(SYS_getrlimit, RLIMIT_STACK, &rl) && + rl.rlim_cur != RLIM_INFINITY) + stack_size = rl.rlim_cur; + else /* pointer size * 1MB */ + stack_size = sizeof (void *) * (1 << 20); + + /* + * Allocate memory. 
+ * The stack protection should use rtld_get_stack_prot() for + * dynamically linked binaries. + * XXX: How to know when it is the case? + */ + guard_size = PAGE_SIZE > 4096 ? PAGE_SIZE : 4096; + memory = ((void *(*)(int, void *, size_t, int, int, int, + off_t))syscall)(SYS_mmap, NULL, stack_size + guard_size, + SAFESTACK_PROT, MAP_STACK, -1, 0); + if (memory == MAP_FAILED) + exit(1); + + /* setup the stack guard */ + syscall(SYS_mprotect, memory, guard_size, PROT_NONE); + + return ((char *)memory) + guard_size + stack_size; +} + static inline void handle_static_init(int argc, char **argv, char **env) { Index: lib/csu/i386-elf/crt1_c.c =================================================================== --- lib/csu/i386-elf/crt1_c.c (revision 298097) +++ lib/csu/i386-elf/crt1_c.c (working copy) @@ -64,8 +64,15 @@ handle_argv(argc, argv, env); if (&_DYNAMIC != NULL) atexit(cleanup); - else + else { _init_tls(); + /* + * Initialize the unsafe stack and store its pointer + * into the tcb. + */ + void *unsafestack = init_unsafestack(); + __builtin_safestack_set_usp(unsafestack); + } #ifdef GCRT atexit(_mcleanup); Index: lib/libc/amd64/Makefile.inc =================================================================== --- lib/libc/amd64/Makefile.inc (revision 298097) +++ lib/libc/amd64/Makefile.inc (working copy) @@ -4,6 +4,7 @@ # # Long double is 80 bits +CFLAGS+=-fno-safe-stack GDTOASRCS+=strtorx.c MDSRCS+=machdep_ldisx.c SYM_MAPS+=${.CURDIR}/amd64/Symbol.map Index: lib/libc/amd64/gen/Makefile.inc =================================================================== --- lib/libc/amd64/gen/Makefile.inc (revision 298097) +++ lib/libc/amd64/gen/Makefile.inc (working copy) @@ -1,6 +1,7 @@ # @(#)Makefile.inc 8.1 (Berkeley) 6/4/93 # $FreeBSD$ +CFLAGS+= -fno-safe-stack SRCS+= _setjmp.S _set_tp.c rfork_thread.S setjmp.S sigsetjmp.S \ fabs.S getcontextx.c \ infinity.c ldexp.c makecontext.c signalcontext.c \ Index: lib/libc/amd64/gen/makecontext.c 
=================================================================== --- lib/libc/amd64/gen/makecontext.c (revision 298097) +++ lib/libc/amd64/gen/makecontext.c (working copy) @@ -31,6 +31,7 @@ #include #include #include +#include typedef void (*func_t)(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); @@ -92,6 +93,9 @@ ucp->uc_mcontext.mc_rbx = (register_t)sp; ucp->uc_mcontext.mc_rsp = (register_t)sp; ucp->uc_mcontext.mc_rip = (register_t)makectx_wrapper; + + /* Clear the spare field for safestack. */ + memset(ucp->__spare__, 0, sizeof (ucp->__spare__)); } static void Index: lib/libc/gen/Makefile.inc =================================================================== --- lib/libc/gen/Makefile.inc (revision 298097) +++ lib/libc/gen/Makefile.inc (working copy) @@ -4,6 +4,7 @@ # machine-independent gen sources .PATH: ${.CURDIR}/${LIBC_ARCH}/gen ${.CURDIR}/gen +CFLAGS+= -fno-safe-stack SRCS+= __getosreldate.c \ __pthread_mutex_init_calloc_cb_stub.c \ __xuname.c \ Index: lib/libc/i386/Makefile.inc =================================================================== --- lib/libc/i386/Makefile.inc (revision 298097) +++ lib/libc/i386/Makefile.inc (working copy) @@ -4,3 +4,4 @@ GDTOASRCS+=strtorx.c MDSRCS+=machdep_ldisx.c SYM_MAPS+=${.CURDIR}/i386/Symbol.map +CFLAGS+=-fno-safe-stack Index: lib/libc/i386/gen/Makefile.inc =================================================================== --- lib/libc/i386/gen/Makefile.inc (revision 298097) +++ lib/libc/i386/gen/Makefile.inc (working copy) @@ -4,3 +4,4 @@ SRCS+= _ctx_start.S _setjmp.S _set_tp.c fabs.S \ flt_rounds.c getcontextx.c infinity.c ldexp.c makecontext.c \ rfork_thread.S setjmp.S signalcontext.c sigsetjmp.S +CFLAGS+=-fno-safe-stack Index: lib/libc/i386/gen/makecontext.c =================================================================== --- lib/libc/i386/gen/makecontext.c (revision 298097) +++ lib/libc/i386/gen/makecontext.c (working copy) @@ -35,6 +35,7 @@ #include #include #include +#include /* Prototypes 
*/ extern void _ctx_start(ucontext_t *, int argc, ...); @@ -160,4 +161,7 @@ ucp->uc_mcontext.mc_esp = (int)stack_top + sizeof(caddr_t); ucp->uc_mcontext.mc_eip = (int)_ctx_start; } + + /* Clear the spare field for safestack. */ + memset(ucp->__spare__, 0, sizeof (ucp->__spare__)); } Index: lib/libc/include/libc_private.h =================================================================== --- lib/libc/include/libc_private.h (revision 298097) +++ lib/libc/include/libc_private.h (working copy) @@ -286,6 +286,11 @@ #include #include +struct __ucontext; +extern int __sys_getcontext(struct __ucontext *); +extern int __sys_setcontext(const struct __ucontext *); +extern int __sys_swapcontext(struct __ucontext *, const struct __ucontext *); + /* With pad */ __off_t __sys_freebsd6_lseek(int, int, __off_t, int); int __sys_freebsd6_ftruncate(int, int, __off_t); @@ -326,6 +331,7 @@ int, const struct timespec *); __off_t __sys_lseek(int, __off_t, int); void *__sys_mmap(void *, __size_t, int, int, int, __off_t); +int __sys_munmap(void *, __size_t); int __sys_msync(void *, __size_t, int); int __sys_nanosleep(const struct timespec *, struct timespec *); int __sys_open(const char *, int, ...); @@ -396,4 +402,9 @@ void _pthread_cancel_enter(int); void _pthread_cancel_leave(int); +int safestack_munmap(void *, __size_t); +int safestack_setcontext(const struct __ucontext *); +int safestack_swapcontext(struct __ucontext *, const struct __ucontext *); +int safestack_getcontext(struct __ucontext *); + #endif /* _LIBC_PRIVATE_H_ */ Index: lib/libc/sys/Makefile.inc =================================================================== --- lib/libc/sys/Makefile.inc (revision 298097) +++ lib/libc/sys/Makefile.inc (working copy) @@ -24,7 +24,8 @@ stack_protector.c \ stack_protector_compat.c \ __error.c \ - interposing_table.c + interposing_table.c \ + safestack.c .if !defined(WITHOUT_SYSCALL_COMPAT) SYSCALL_COMPAT_SRCS= \ Index: lib/libc/sys/interposing_table.c 
=================================================================== --- lib/libc/sys/interposing_table.c (revision 298097) +++ lib/libc/sys/interposing_table.c (working copy) @@ -59,7 +59,7 @@ SLOT(select, __sys_select), SLOT(sendmsg, __sys_sendmsg), SLOT(sendto, __sys_sendto), - SLOT(setcontext, __sys_setcontext), + SLOT(setcontext, safestack_setcontext), SLOT(sigaction, __sys_sigaction), SLOT(sigprocmask, __sys_sigprocmask), SLOT(sigsuspend, __sys_sigsuspend), @@ -66,7 +66,7 @@ SLOT(sigwait, __libc_sigwait), SLOT(sigtimedwait, __sys_sigtimedwait), SLOT(sigwaitinfo, __sys_sigwaitinfo), - SLOT(swapcontext, __sys_swapcontext), + SLOT(swapcontext, safestack_swapcontext), SLOT(system, __libc_system), SLOT(tcdrain, __libc_tcdrain), SLOT(wait4, __sys_wait4), Index: lib/libc/sys/safestack.c =================================================================== --- lib/libc/sys/safestack.c (nonexistent) +++ lib/libc/sys/safestack.c (working copy) @@ -0,0 +1,494 @@ +/*- + * Copyright (c) 2015 Alexandre BIQUE + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libc_private.h" + +#define SAFESTACK_ALLOCATOR_BLOCK_SIZE (1 << 14) + +#define safestack_array_size(X) (sizeof (X) / sizeof (X[0])) + +#define safestack_dlist_append(Head, Item) \ + do { \ + if (!(Head)) { \ + (Head) = (Item); \ + (Item)->next = (Item); \ + (Item)->prev = (Item); \ + } else { \ + (Item)->next = (Head); \ + (Item)->prev = (Head)->prev; \ + (Item)->prev->next = (Item); \ + (Item)->next->prev = (Item); \ + } \ + } while (0) + +#define safestack_dlist_remove(Head, Item) \ + do { \ + if ((Item)->next == (Item)) \ + (Head) = NULL; \ + else { \ + (Head) = (Item)->next; \ + (Item)->next->prev = (Item)->prev; \ + (Item)->prev->next = (Item)->next; \ + } \ + (Item)->next = NULL; \ + (Item)->prev = NULL; \ + } while (0) + +struct safestack_uctx_entry { + struct safestack_uctx_entry *prev; + struct safestack_uctx_entry *next; + + void *ptr; + void *unsafe_ptr; /* Unsafe stack associated to this entry. 
*/ + size_t size; +}; + +struct safestack_uctx_allocator_block { + struct safestack_uctx_allocator_block *next; + struct safestack_uctx_entry entries[0]; +}; + +struct safestack_uctx_allocator { + struct safestack_uctx_allocator_block *blocks; + struct safestack_uctx_entry *free_entries; /* dlist */ +}; + +struct safestack_uctx { + pthread_mutex_t mutex; + struct safestack_uctx_allocator allocator; + struct safestack_uctx_entry **entries; + size_t entries_size; + size_t entries_count; +}; + +static struct safestack_uctx g_safestack_uctx = { + PTHREAD_MUTEX_INITIALIZER, + {NULL, NULL}, + NULL, + 0, + 0 +}; + +/* This hash function removes the uninteresting part of the pointer. */ +static inline uint32_t +safestack_hash_stack(void *addr, size_t size) +{ + return (((ptrdiff_t)addr) / size); +} + +/* + * Use some prime numbers from google for the hash table size. + * Also, if we had vectors multiples of PAGE_SIZE, I removed + * the first entries. + */ +static const uint32_t safestack_hash_table_size[] = { + 389, + 769, + 1543, + 3079, + 6151, + 12289, + 24593, + 49157, + 98317, + 196613, + 393241, + 786433, + 1572869, + 3145739, + 6291469, + 12582917, + 25165843, + 50331653, + 100663319, + 201326611, + 402653189, + 805306457, + 1610612741, +}; + +/* + * Finds a good size for the hash table. + * It uses some prime numbers from google to reduce clustering. + */ +static uint32_t +safestack_hash_best_size(void) +{ + uint32_t i; + + for (i = 0; i < safestack_array_size(safestack_hash_table_size); ++i) { + if (g_safestack_uctx.entries_count < safestack_hash_table_size[i]) + return safestack_hash_table_size[i]; + } + return (safestack_hash_table_size[i - 1]); +} + +/* + * Compute a valid size for mmap. + */ +static inline size_t +safestack_hash_mmap_size(size_t size) +{ + return (((size * sizeof(void *)) & ~4095ULL) + 4096); +} + +static void + safestack_hash_rehash(size_t new_size); + +/* + * Adds an entry to the hash table. 
+ * Automaticaly rehash when needed, but only grow, never shrink. + */ +static void +safestack_hash_insert(struct safestack_uctx_entry *entry) +{ + size_t best_size = safestack_hash_best_size(); + + if (best_size > g_safestack_uctx.entries_size) + safestack_hash_rehash(best_size); + + size_t hash = safestack_hash_stack(entry->ptr, entry->size); + size_t index = hash % g_safestack_uctx.entries_size; + + safestack_dlist_append(g_safestack_uctx.entries[index], entry); + ++g_safestack_uctx.entries_count; +} + +/* + * Removes an entry from the hash table. + * Does not free the entry. Does not rehash the hash table. + */ +static void +safestack_hash_remove(struct safestack_uctx_entry *entry) +{ + size_t hash = safestack_hash_stack(entry->ptr, entry->size); + size_t index = hash % g_safestack_uctx.entries_size; + + safestack_dlist_remove(g_safestack_uctx.entries[index], entry); + --g_safestack_uctx.entries_count; +} + +/* Rehash the hash table. */ +static void +safestack_hash_rehash(size_t new_size) +{ + /* Allocate new vector. */ + size_t new_mmap_size = safestack_hash_mmap_size(new_size); + struct safestack_uctx_entry **new_entries = mmap( + NULL, new_mmap_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS, -1, 0); + + if (new_entries == MAP_FAILED) + return; + memset(new_entries, 0, new_mmap_size); + + /* Backup old container. */ + struct safestack_uctx_entry **old_entries = g_safestack_uctx.entries; + size_t old_size = g_safestack_uctx.entries_size; + size_t old_count = g_safestack_uctx.entries_count; + size_t old_mmap_size = safestack_hash_mmap_size(old_size); + + /* Copy new container. */ + g_safestack_uctx.entries = new_entries; + g_safestack_uctx.entries_size = new_size; + g_safestack_uctx.entries_count = 0; + + /* were old container allocated? 
*/ + if (!old_entries) + return; + + /* add old entries */ + for (size_t i = 0; i < old_size; ++i) { + while (old_entries[i]) { + struct safestack_uctx_entry *entry = old_entries[i]; + + safestack_dlist_remove(old_entries[i], entry); + safestack_hash_insert(entry); + } + } + assert(old_count == g_safestack_uctx.entries_count); + + /* Move entries. */ + munmap(old_entries, old_mmap_size); +} + +/* + * Find an entry that matches (addr, size). + */ +static struct safestack_uctx_entry * +safestack_uctx_find(void *addr, size_t size) +{ + if (size == 0 || g_safestack_uctx.entries_count == 0) + return (NULL); + + size_t hash = safestack_hash_stack(addr, size); + size_t index = hash % g_safestack_uctx.entries_size; + struct safestack_uctx_entry *it = g_safestack_uctx.entries[index]; + + if (!it) + return (NULL); + + do { + if (addr == it->ptr && size == it->size) + return (it); + + it = it->next; + } while (it != g_safestack_uctx.entries[index]); + + return (NULL); +} + +/* + * Allocate a new entry, and if needed a new allocator block. + */ +static inline struct safestack_uctx_entry * +safestack_uctx_entry_alloc(void) +{ + struct safestack_uctx_entry *entry = + g_safestack_uctx.allocator.free_entries; + + /* do we have free entries? 
+ if (entry) { + safestack_dlist_remove(g_safestack_uctx.allocator.free_entries, + entry); + return (entry); + } + /* needs to allocate a new block */ + void *ptr = mmap(NULL, SAFESTACK_ALLOCATOR_BLOCK_SIZE, + PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + if (ptr == MAP_FAILED) + return (NULL); + + /* calculating the end of the block */ + struct safestack_uctx_entry *end = ptr + SAFESTACK_ALLOCATOR_BLOCK_SIZE; + struct safestack_uctx_allocator_block *block = ptr; + + block->next = g_safestack_uctx.allocator.blocks; + g_safestack_uctx.allocator.blocks = block; + + /* initialize every entries */ + for (entry = block->entries; entry + 1 <= end; ++entry) + safestack_dlist_append(g_safestack_uctx.allocator.free_entries, + entry); + + /* Take the first entry. */ + entry = g_safestack_uctx.allocator.free_entries; + safestack_dlist_remove(g_safestack_uctx.allocator.free_entries, entry); + return (entry); +} + +/* + * Adds an entry to the free list. + */ +static inline void +safestack_uctx_entry_free(struct safestack_uctx_entry *val) +{ + safestack_dlist_append(g_safestack_uctx.allocator.free_entries, val); +} + +/* + * Allocates a new entry, + * Allocate a new unsafe stack and save the entry into the hash table. + */ +static struct safestack_uctx_entry * +safestack_uctx_add(void *addr, size_t len) +{ + if (len == 0) + return (NULL); + + struct safestack_uctx_entry *entry = safestack_uctx_entry_alloc(); + + if (!entry) + return (NULL); + entry->ptr = addr; + entry->size = len; + entry->unsafe_ptr = mmap( + NULL, len, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_STACK | MAP_ANONYMOUS, + -1, 0); + + /* XXX: add guards? */ + + if (entry->unsafe_ptr == MAP_FAILED) { + safestack_uctx_entry_free(entry); + return NULL; + } + safestack_hash_insert(entry); + return (entry); +} + +/* + * Checks that ucp has a safestack allocated and if not it tries to + * allocate it and save it into the hash table as well. + * On success returns 0, -1 otherwise. 
 */ +static int +safestack_ensure_allocated(ucontext_t *ucp) +{ + if (ucp->uc_usp_ptr) + return 0; + + /* lock the global map and find/allocate an entry */ + pthread_mutex_lock(&g_safestack_uctx.mutex); + struct safestack_uctx_entry *entry = safestack_uctx_find( + ucp->uc_stack.ss_sp, ucp->uc_stack.ss_size); + + if (!entry) { + entry = safestack_uctx_add(ucp->uc_stack.ss_sp, + ucp->uc_stack.ss_size); + if (!entry) { + pthread_mutex_unlock(&g_safestack_uctx.mutex); + errno = ENOMEM; + return -1; + } + } + ucp->uc_usp_ptr = entry->unsafe_ptr + entry->size; + pthread_mutex_unlock(&g_safestack_uctx.mutex); + return (0); +} + +/* + * SafeStack libc wrappers. + */ + +/* + * If we track munmap on a user stack that was used for swapcontex(), + * or setcontext(), then we also munmap the unsafe stack we allocated + * for it. + */ +int +safestack_munmap(void *addr, __size_t len) +{ + /* + * The following test is always correct regardless of the missing + * synchronisation. Also it did prevent a bug during jemalloc + * initialization, which occured in + * pthread_mutex_lock(&g_safestack_uctx.mutex). + */ + if (g_safestack_uctx.entries_count == 0) + return (__sys_munmap(addr, len)); + + pthread_mutex_lock(&g_safestack_uctx.mutex); + struct safestack_uctx_entry *entry = safestack_uctx_find(addr, len); + + if (entry) { + __sys_munmap(entry->unsafe_ptr, entry->size); + safestack_hash_remove(entry); + safestack_uctx_entry_free(entry); + } + pthread_mutex_unlock(&g_safestack_uctx.mutex); + return (__sys_munmap(addr, len)); +} + +/* + * Check if the unsafe stack is allocated for ucp. + * If not, then allocate it ;-). + */ +int +safestack_setcontext(const struct __ucontext *ucp) +{ + /* Do we already have an unsafe stack? */ + if (safestack_ensure_allocated((ucontext_t *)ucp)) + return -1; + + /* Set the new unsafe stack pointer. */ + __builtin_safestack_set_usp(ucp->uc_usp_ptr); + return (__sys_setcontext(ucp)); +} + +/* Save the unsafe stack pointer to oucp. 
*/ +int +safestack_getcontext(struct __ucontext *ucp) +{ + ucp->uc_usp_ptr = __builtin_safestack_get_usp(); + return (__sys_getcontext(ucp)); +} + +/* + * Check if the unsafe stack is allocated for ucp. + * If not, then allocate it ;-). + * Also save the unsafe stack pointer to oucp. + */ +int +safestack_swapcontext(struct __ucontext *oucp, const struct __ucontext *ucp) +{ + oucp->uc_usp_ptr = __builtin_safestack_get_usp(); + + /* do we already have an unsafe stack? */ + if (safestack_ensure_allocated((ucontext_t *)ucp)) + return (-1); + + /* set the new unsafe stack pointer */ + __builtin_safestack_set_usp(ucp->uc_usp_ptr); + return (__sys_swapcontext(oucp, ucp)); +} + + +/********************* + * LIBC entry points * + *********************/ + +int +munmap(void *addr, size_t len) +{ + return (safestack_munmap(addr, len)); +} + +#if 0 +int +setcontext(const ucontext_t *ucp) +{ + return (safestack_setcontext(ucp)); +} + +int +swapcontext(ucontext_t *oucp, const ucontext_t *ucp) +{ + return (safestack_swapcontext(oucp, ucp)); +} + +#endif + +int +getcontext(ucontext_t *ucp) +{ + return (safestack_getcontext(ucp)); +} Property changes on: lib/libc/sys/safestack.c ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: lib/libthr/arch/amd64/include/pthread_md.h =================================================================== --- lib/libthr/arch/amd64/include/pthread_md.h (revision 298097) +++ lib/libthr/arch/amd64/include/pthread_md.h (working copy) @@ -36,6 +36,7 @@ #include #include #include +#include #define CPU_SPINWAIT __asm __volatile("pause") @@ -49,7 +50,7 @@ struct tcb *tcb_self; /* required by rtld */ void *tcb_dtv; /* required by rtld */ struct pthread *tcb_thread; - void *tcb_spare[1]; + void 
*tcb_unsafestack; /* unsafe stack ptr */ }; /* @@ -100,4 +101,7 @@ #define HAS__UMTX_OP_ERR 1 +/* Extra check for unsafe stack entry offset. */ +_Static_assert(__tcb_offset(tcb_unsafestack) == 0x18, "unsafestack TCB ABI break"); + #endif Index: lib/libthr/arch/i386/include/pthread_md.h =================================================================== --- lib/libthr/arch/i386/include/pthread_md.h (revision 298097) +++ lib/libthr/arch/i386/include/pthread_md.h (working copy) @@ -36,6 +36,7 @@ #include #include #include +#include #define CPU_SPINWAIT __asm __volatile("pause") @@ -49,6 +50,7 @@ struct tcb *tcb_self; /* required by rtld */ void *tcb_dtv; /* required by rtld */ struct pthread *tcb_thread; + void *tcb_unsafestack; /* unsafe stack ptr */ }; /* @@ -105,4 +107,7 @@ #define HAS__UMTX_OP_ERR 1 +/* Extra check for unsafe stack entry offset. */ +_Static_assert(__tcb_offset(tcb_unsafestack) == 0xC, "unsafestack TCB ABI break"); + #endif Index: lib/libthr/thread/thr_create.c =================================================================== --- lib/libthr/thread/thr_create.c (revision 298097) +++ lib/libthr/thread/thr_create.c (working copy) @@ -234,17 +234,29 @@ static int create_stack(struct pthread_attr *pattr) { - int ret; - /* Check if a stack was specified in the thread attributes: */ if ((pattr->stackaddr_attr) != NULL) { pattr->guardsize_attr = 0; pattr->flags |= THR_STACK_USER; - ret = 0; } - else - ret = _thr_stack_alloc(pattr); - return (ret); + else { + pattr->flags &= ~THR_STACK_USER; + pattr->stackaddr_attr = _thr_stack_alloc( + pattr->stacksize_attr, pattr->guardsize_attr); + if (!pattr->stackaddr_attr) + return (-1); + } + + /* Allocate the unsafe stack. 
*/ + pattr->unsafe_stackaddr_attr = _thr_stack_alloc( + pattr->stacksize_attr, pattr->guardsize_attr); + if (!pattr->unsafe_stackaddr_attr) { + _thr_stack_free(pattr->stackaddr_attr, + pattr->stacksize_attr, + pattr->guardsize_attr); + return (-1); + } + return (0); } static void @@ -252,6 +264,11 @@ { sigset_t set; + /* Initialize the unsafe stack. */ + _tcb_get()->tcb_unsafestack = + ((char *)curthread->attr.unsafe_stackaddr_attr) + + curthread->attr.stacksize_attr; + if (curthread->attr.suspend == THR_CREATE_SUSPENDED) set = curthread->sigmask; Index: lib/libthr/thread/thr_list.c =================================================================== --- lib/libthr/thread/thr_list.c (revision 298097) +++ lib/libthr/thread/thr_list.c (working copy) @@ -106,7 +106,17 @@ /* make sure we are not still in userland */ continue; } - _thr_stack_free(&td->attr); + if (!(td->attr.flags & THR_STACK_USER)) { + _thr_stack_free(td->attr.stackaddr_attr, + td->attr.stacksize_attr, + td->attr.guardsize_attr); + td->attr.stackaddr_attr = NULL; + } + _thr_stack_free(td->attr.unsafe_stackaddr_attr, + td->attr.stacksize_attr, + td->attr.guardsize_attr); + td->attr.unsafe_stackaddr_attr = NULL; + THR_GCLIST_REMOVE(td); TAILQ_INSERT_HEAD(&worklist, td, gcle); } Index: lib/libthr/thread/thr_private.h =================================================================== --- lib/libthr/thread/thr_private.h (revision 298097) +++ lib/libthr/thread/thr_private.h (working copy) @@ -245,6 +245,7 @@ #define THR_STACK_USER 0x100 /* 0xFF reserved for */ int flags; void *stackaddr_attr; + void *unsafe_stackaddr_attr; size_t stacksize_attr; size_t guardsize_attr; #define pthread_attr_end_copy cpuset @@ -749,8 +750,8 @@ int _thr_find_thread(struct pthread *, struct pthread *, int) __hidden; void _thr_rtld_init(void) __hidden; void _thr_rtld_postfork_child(void) __hidden; -int _thr_stack_alloc(struct pthread_attr *) __hidden; -void _thr_stack_free(struct pthread_attr *) __hidden; +void 
*_thr_stack_alloc(size_t, size_t) __hidden; +void _thr_stack_free(void *, size_t, size_t) __hidden; void _thr_free(struct pthread *, struct pthread *) __hidden; void _thr_gc(struct pthread *) __hidden; void _thread_cleanupspecific(void) __hidden; Index: lib/libthr/thread/thr_stack.c =================================================================== --- lib/libthr/thread/thr_stack.c (revision 298097) +++ lib/libthr/thread/thr_stack.c (working copy) @@ -186,14 +186,12 @@ THREAD_LIST_UNLOCK(curthread); } -int -_thr_stack_alloc(struct pthread_attr *attr) +void * +_thr_stack_alloc(size_t stacksize, size_t guardsize) { struct pthread *curthread = _get_curthread(); - struct stack *spare_stack; - size_t stacksize; - size_t guardsize; - char *stackaddr; + struct stack *spare_stack = NULL; + char *stackaddr = NULL; /* * Round up stack size to nearest multiple of _thr_page_size so @@ -202,12 +200,9 @@ * unused space above the beginning of the stack, so the stack * sits snugly against its guard. */ - stacksize = round_up(attr->stacksize_attr); - guardsize = round_up(attr->guardsize_attr); + stacksize = round_up(stacksize); + guardsize = round_up(guardsize); - attr->stackaddr_attr = NULL; - attr->flags &= ~THR_STACK_USER; - /* * Use the garbage collector lock for synchronization of the * spare stack lists and allocations from usrstack. @@ -222,7 +217,7 @@ if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) { /* Use the spare stack. */ LIST_REMOVE(spare_stack, qe); - attr->stackaddr_attr = spare_stack->stackaddr; + stackaddr = spare_stack->stackaddr; } } /* @@ -235,12 +230,12 @@ if (spare_stack->stacksize == stacksize && spare_stack->guardsize == guardsize) { LIST_REMOVE(spare_stack, qe); - attr->stackaddr_attr = spare_stack->stackaddr; + stackaddr = spare_stack->stackaddr; break; } } } - if (attr->stackaddr_attr != NULL) { + if (stackaddr != NULL) { /* A cached stack was found. Release the lock. 
*/ THREAD_LIST_UNLOCK(curthread); } @@ -281,37 +276,31 @@ munmap(stackaddr, stacksize + guardsize); stackaddr = NULL; } - attr->stackaddr_attr = stackaddr; } - if (attr->stackaddr_attr != NULL) - return (0); - else - return (-1); + return (stackaddr); } /* This function must be called with _thread_list_lock held. */ void -_thr_stack_free(struct pthread_attr *attr) +_thr_stack_free(void *stackaddr, size_t stacksize, size_t guardsize) { struct stack *spare_stack; - if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0) - && (attr->stackaddr_attr != NULL)) { - spare_stack = (struct stack *) - ((char *)attr->stackaddr_attr + - attr->stacksize_attr - sizeof(struct stack)); - spare_stack->stacksize = round_up(attr->stacksize_attr); - spare_stack->guardsize = round_up(attr->guardsize_attr); - spare_stack->stackaddr = attr->stackaddr_attr; + if (!stackaddr) + return; - if (spare_stack->stacksize == THR_STACK_DEFAULT && - spare_stack->guardsize == _thr_guard_default) { - /* Default stack/guard size. */ - LIST_INSERT_HEAD(&dstackq, spare_stack, qe); - } else { - /* Non-default stack/guard size. */ - LIST_INSERT_HEAD(&mstackq, spare_stack, qe); - } - attr->stackaddr_attr = NULL; + spare_stack = (struct stack *) + ((char *)stackaddr + stacksize - sizeof(struct stack)); + spare_stack->stacksize = round_up(stacksize); + spare_stack->guardsize = round_up(guardsize); + spare_stack->stackaddr = stackaddr; + + if (spare_stack->stacksize == THR_STACK_DEFAULT && + spare_stack->guardsize == _thr_guard_default) { + /* Default stack/guard size. */ + LIST_INSERT_HEAD(&dstackq, spare_stack, qe); + } else { + /* Non-default stack/guard size. 
*/ + LIST_INSERT_HEAD(&mstackq, spare_stack, qe); } } Index: libexec/rtld-elf/Makefile =================================================================== --- libexec/rtld-elf/Makefile (revision 298097) +++ libexec/rtld-elf/Makefile (working copy) @@ -10,12 +10,13 @@ PROG?= ld-elf.so.1 SRCS= rtld_start.S \ reloc.c rtld.c rtld_lock.c rtld_printf.c map_object.c \ - malloc.c xmalloc.c debug.c libmap.c + malloc.c xmalloc.c debug.c libmap.c safe_stack.c MAN= rtld.1 CSTD?= gnu99 TOPSRCDIR= ${.CURDIR}/../.. CFLAGS+= -Wall -DFREEBSD_ELF -DIN_RTLD CFLAGS+= -I${TOPSRCDIR}/lib/csu/common +CFLAGS+= -fno-safe-stack .if exists(${.CURDIR}/${MACHINE_ARCH}) RTLD_ARCH= ${MACHINE_ARCH} .else Index: libexec/rtld-elf/Symbol.map =================================================================== --- libexec/rtld-elf/Symbol.map (revision 298097) +++ libexec/rtld-elf/Symbol.map (working copy) @@ -31,5 +31,6 @@ _rtld_addr_phdr; _rtld_get_stack_prot; _rtld_is_dlopened; + _rtld_allocate_unsafe_stack; _r_debug_postinit; }; Index: libexec/rtld-elf/rtld.c =================================================================== --- libexec/rtld-elf/rtld.c (revision 298097) +++ libexec/rtld-elf/rtld.c (working copy) @@ -59,6 +59,7 @@ #include "rtld_tls.h" #include "rtld_printf.h" #include "notes.h" +#include "safe_stack.h" #ifndef COMPAT_32BIT #define PATH_RTLD "/libexec/ld-elf.so.1" @@ -622,6 +623,16 @@ dbg("initializing initial thread local storage"); allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list))); + /* + * Setup the unsafe stack, this should be done as soon as possible, + * after the tls initialization. + */ + dbg("initializing the unsafe stack"); + void *unsafe_stack = _rtld_allocate_unsafe_stack(); + if (!unsafe_stack) + die(); + __builtin_safestack_set_usp(unsafe_stack); + dbg("initializing key program variables"); set_program_var("__progname", argv[0] != NULL ? 
basename(argv[0]) : ""); set_program_var("environ", env); Index: sys/sys/ucontext.h =================================================================== --- sys/sys/ucontext.h (revision 298097) +++ sys/sys/ucontext.h (working copy) @@ -50,7 +50,11 @@ stack_t uc_stack; int uc_flags; #define UCF_SWAPPED 0x00000001 /* Used by swapcontext(3). */ - int __spare__[4]; + + union { + void *uc_usp_ptr; /* Unsafe stack pointer */ + int __spare__[4]; + }; } ucontext_t; #if defined(_KERNEL) && defined(COMPAT_FREEBSD4)