Index: libexec/rtld-elf/rtld.c
===================================================================
--- libexec/rtld-elf/rtld.c	(revision 235120)
+++ libexec/rtld-elf/rtld.c	(working copy)
@@ -4223,6 +4223,56 @@
 	lock_release(rtld_bind_lock, &lockstate);
 }
 
+void *
+_rtld_malloc(size_t size)
+{
+	RtldLockState lockstate;
+	void *ptr;
+
+	wlock_acquire(rtld_bind_lock, &lockstate);
+	ptr = malloc(size);
+	lock_release(rtld_bind_lock, &lockstate);
+	return (ptr);
+}
+
+void *
+_rtld_calloc(size_t num, size_t size)
+{
+	void *ptr;
+
+	if (size != 0 && (num * size) / size != num) {
+		/* size_t overflow. */
+		return (NULL);
+	}
+
+	if ((ptr = _rtld_malloc(num * size)) != NULL)
+		memset(ptr, 0, num * size);
+
+	return (ptr);
+}
+
+void
+_rtld_free(void *ptr)
+{
+	RtldLockState lockstate;
+
+	wlock_acquire(rtld_bind_lock, &lockstate);
+	free(ptr);
+	lock_release(rtld_bind_lock, &lockstate);
+}
+
+void *
+_rtld_realloc(void *ptr, size_t size)
+{
+	RtldLockState lockstate;
+	void *newptr;
+
+	wlock_acquire(rtld_bind_lock, &lockstate);
+	newptr = realloc(ptr, size);
+	lock_release(rtld_bind_lock, &lockstate);
+	return (newptr);
+}
+
 static void
 object_add_name(Obj_Entry *obj, const char *name)
 {
Index: libexec/rtld-elf/rtld.h
===================================================================
--- libexec/rtld-elf/rtld.h	(revision 235120)
+++ libexec/rtld-elf/rtld.h	(working copy)
@@ -383,5 +383,8 @@
 int reloc_iresolve(Obj_Entry *, struct Struct_RtldLockState *);
 int reloc_gnu_ifunc(Obj_Entry *, int flags, struct Struct_RtldLockState *);
 void allocate_initial_tls(Obj_Entry *);
-
+void *_rtld_malloc(size_t);
+void _rtld_free(void *);
+void *_rtld_calloc(size_t, size_t);
+void *_rtld_realloc(void *, size_t);
 #endif /* } */
Index: libexec/rtld-elf/Symbol.map
===================================================================
--- libexec/rtld-elf/Symbol.map	(revision 235120)
+++ libexec/rtld-elf/Symbol.map	(working copy)
@@ -30,4 +30,8 @@
     _rtld_atfork_post;
     _rtld_addr_phdr;
     _rtld_get_stack_prot;
+    _rtld_malloc;
+    _rtld_calloc;
+    _rtld_realloc;
+    _rtld_free;
 };
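A note on the overflow test in _rtld_calloc(): num * size can wrap around in
size_t, and when it does, dividing the product back by size no longer yields
num.  A minimal standalone sketch (not part of the patch; names are
illustrative):

	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		/* 2^(half the size_t bits) + 1: squaring it wraps around. */
		size_t num = ((size_t)1 << (sizeof(size_t) * 4)) + 1;
		size_t size = num;

		if (size != 0 && (num * size) / size != num)
			printf("%zu * %zu overflows size_t\n", num, size);
		return (0);
	}

The same test is repeated verbatim in _lcalloc() below.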
= NULL; ret = 0; } Index: lib/libthr/thread/thr_condattr.c =================================================================== --- lib/libthr/thread/thr_condattr.c (revision 235120) +++ lib/libthr/thread/thr_condattr.c (working copy) @@ -52,7 +52,7 @@ int ret; if ((pattr = (pthread_condattr_t) - malloc(sizeof(struct pthread_cond_attr))) == NULL) { + _lmalloc(sizeof(struct pthread_cond_attr))) == NULL) { ret = ENOMEM; } else { memcpy(pattr, &_pthread_condattr_default, @@ -71,7 +71,7 @@ if (attr == NULL || *attr == NULL) { ret = EINVAL; } else { - free(*attr); + _lfree(*attr); *attr = NULL; ret = 0; } Index: lib/libthr/thread/thr_barrierattr.c =================================================================== --- lib/libthr/thread/thr_barrierattr.c (revision 235120) +++ lib/libthr/thread/thr_barrierattr.c (working copy) @@ -50,7 +50,7 @@ if (attr == NULL || *attr == NULL) return (EINVAL); - free(*attr); + _lfree(*attr); return (0); } @@ -73,7 +73,7 @@ if (attr == NULL) return (EINVAL); - if ((*attr = malloc(sizeof(struct pthread_barrierattr))) == NULL) + if ((*attr = _lmalloc(sizeof(struct pthread_barrierattr))) == NULL) return (ENOMEM); (*attr)->pshared = PTHREAD_PROCESS_PRIVATE; Index: lib/libthr/thread/thr_init.c =================================================================== --- lib/libthr/thread/thr_init.c (revision 235120) +++ lib/libthr/thread/thr_init.c (working copy) @@ -114,6 +114,8 @@ int _thr_yieldloops; int _thr_queuefifo = 4; int _gc_count; + +struct umutex _lmalloc_lock = DEFAULT_UMUTEX; struct umutex _mutex_static_lock = DEFAULT_UMUTEX; struct umutex _cond_static_lock = DEFAULT_UMUTEX; struct umutex _rwlock_static_lock = DEFAULT_UMUTEX; Index: lib/libthr/thread/thr_malloc.c =================================================================== --- lib/libthr/thread/thr_malloc.c (revision 0) +++ lib/libthr/thread/thr_malloc.c (working copy) @@ -0,0 +1,575 @@ +/*- + * Copyright (c) 1983 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
Index: lib/libthr/thread/thr_malloc.c
===================================================================
--- lib/libthr/thread/thr_malloc.c	(revision 0)
+++ lib/libthr/thread/thr_malloc.c	(working copy)
@@ -0,0 +1,575 @@
+/*-
+ * Copyright (c) 1983 Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+/*static char *sccsid = "from: @(#)malloc.c	5.11 (Berkeley) 2/23/91";*/
+static char *rcsid = "$FreeBSD$";
+#endif /* LIBC_SCCS and not lint */
+
+#ifndef PIC
+/*
+ * malloc.c (Caltech) 2/21/82
+ * Chris Kingsley, kingsley@cit-20.
+ *
+ * This is a very fast storage allocator.  It allocates blocks of a small
+ * number of different sizes, and keeps free lists of each size.  Blocks that
+ * don't exactly fit are passed up to the next larger size.  In this
+ * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long.
+ * This is designed for use in a virtual memory environment.
+ */
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/mman.h>
+#include <sys/sysctl.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include "thr_private.h"
+
+/*
+ * Pre-allocate mmap'ed pages
+ */
+#define	NPOOLPAGES	(32*1024/pagesz)
+static caddr_t		pagepool_start, pagepool_end;
+
+/*
+ * The overhead on a block is at least 4 bytes.  When free, this space
+ * contains a pointer to the next free block, and the bottom two bits must
+ * be zero.  When in use, the first byte is set to MAGIC, and the second
+ * byte is the size index.  The remaining bytes are for alignment.
+ * If range checking is enabled then a second word holds the size of the
+ * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC).
+ * The order of elements is critical: ov_magic must overlay the low order
+ * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern.
+ */
+union	overhead {
+	union	overhead *ov_next;	/* when free */
+	struct {
+		u_char	ovu_magic;	/* magic number */
+		u_char	ovu_index;	/* bucket # */
+#ifdef RCHECK
+		u_short	ovu_rmagic;	/* range magic number */
+		u_int	ovu_size;	/* actual block size */
+#endif
+	} ovu;
+#define	ov_magic	ovu.ovu_magic
+#define	ov_index	ovu.ovu_index
+#define	ov_rmagic	ovu.ovu_rmagic
+#define	ov_size		ovu.ovu_size
+};
+
+#define	MAGIC		0xef		/* magic # on accounting info */
+#define	RMAGIC		0x5555		/* magic # on range info */
+
+#ifdef RCHECK
+#define	RSLOP		sizeof (u_short)
+#else
+#define	RSLOP		0
+#endif
+
+/*
+ * nextf[i] is the pointer to the next free block of size 2^(i+3).  The
+ * smallest allocatable block is 8 bytes.  The overhead information
+ * precedes the data area returned to the user.
+ */
+#define	NBUCKETS 30
+static	union overhead *nextf[NBUCKETS];
+
+static	int pagesz;			/* page size */
+static	int pagebucket;			/* page size bucket */
+
+static void	morecore(int);
+static int	morepages(int);
+static int	findbucket(union overhead *, int);
+
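+/*
+ * Illustrative bucket arithmetic: with 8-byte headers (LP64, no RCHECK)
+ * a 100-byte request needs 108 bytes and is served from bucket 4, since
+ * 1 << (4 + 3) == 128 is the first block size that fits; requests larger
+ * than pagesz - sizeof(union overhead) - RSLOP start at pagebucket and
+ * double from there.
+ */
+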
+#ifdef MSTATS
+/*
+ * nmalloc[i] is the difference between the number of mallocs and frees
+ * for a given block size.
+ */
+static	u_int nmalloc[NBUCKETS];
+#include <stdio.h>
+#endif
+
+#if defined(MALLOC_DEBUG) || defined(RCHECK)
+#define	ASSERT(p)   if (!(p)) botch("p")
+#include <stdio.h>
+static void
+botch(s)
+	char *s;
+{
+	fprintf(stderr, "\r\nassertion botched: %s\r\n", s);
+	(void) fflush(stderr);		/* just in case user buffered it */
+	abort();
+}
+#else
+#define	ASSERT(p)
+#endif
+
+/* Debugging stuff */
+#define TRACE()	rtld_printf("TRACE %s:%d\n", __FILE__, __LINE__)
+
+static int pagesize;
+
+static int
+lgetpagesize(void)
+{
+	int mib[2];
+	size_t size;
+
+	if (pagesize != 0)
+		return (pagesize);
+
+	mib[0] = CTL_HW;
+	mib[1] = HW_PAGESIZE;
+	size = sizeof(pagesize);
+	if (sysctl(mib, 2, &pagesize, &size, NULL, 0) == -1)
+		return (-1);
+	return (pagesize);
+}
+
+static void *
+lmalloc_impl(size_t nbytes)
+{
+	register union overhead *op;
+	register int bucket;
+	register long n;
+	register unsigned amt;
+
+	/*
+	 * First time malloc is called, setup page size and
+	 * align break pointer so all data will be page aligned.
+	 */
+	if (pagesz == 0) {
+		pagesz = n = lgetpagesize();
+		if (morepages(NPOOLPAGES) == 0)
+			return NULL;
+		op = (union overhead *)(pagepool_start);
+		n = n - sizeof (*op) - ((long)op & (n - 1));
+		if (n < 0)
+			n += pagesz;
+		if (n) {
+			pagepool_start += n;
+		}
+		bucket = 0;
+		amt = 8;
+		while ((unsigned)pagesz > amt) {
+			amt <<= 1;
+			bucket++;
+		}
+		pagebucket = bucket;
+	}
+	/*
+	 * Convert amount of memory requested into closest block size
+	 * stored in hash buckets which satisfies request.
+	 * Account for space used per block for accounting.
+	 */
+	if (nbytes <= (unsigned long)(n = pagesz - sizeof (*op) - RSLOP)) {
+#ifndef RCHECK
+		amt = 8;	/* size of first bucket */
+		bucket = 0;
+#else
+		amt = 16;	/* size of first bucket */
+		bucket = 1;
+#endif
+		n = -(sizeof (*op) + RSLOP);
+	} else {
+		amt = pagesz;
+		bucket = pagebucket;
+	}
+	while (nbytes > amt + n) {
+		amt <<= 1;
+		if (amt == 0)
+			return (NULL);
+		bucket++;
+	}
+	/*
+	 * If nothing in hash bucket right now,
+	 * request more memory from the system.
+	 */
+	if ((op = nextf[bucket]) == NULL) {
+		morecore(bucket);
+		if ((op = nextf[bucket]) == NULL)
+			return (NULL);
+	}
+	/* remove from linked list */
+	nextf[bucket] = op->ov_next;
+	op->ov_magic = MAGIC;
+	op->ov_index = bucket;
+#ifdef MSTATS
+	nmalloc[bucket]++;
+#endif
+#ifdef RCHECK
+	/*
+	 * Record allocated size of block and
+	 * bound space with magic numbers.
+	 */
+	op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
+	op->ov_rmagic = RMAGIC;
+	*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
+#endif
+	return ((char *)(op + 1));
+}
+
+void *
+_lcalloc(size_t num, size_t size)
+{
+	void *ret;
+
+	if (size != 0 && (num * size) / size != num) {
+		/* size_t overflow. */
+		return (NULL);
+	}
+
+	if ((ret = _lmalloc(num * size)) != NULL)
+		memset(ret, 0, num * size);
+
+	return (ret);
+}
+
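+/*
+ * Illustrative: a morecore(4) call with 4KB pages carves one page into
+ * 4096 / 128 == 32 blocks of 128 bytes and chains them onto nextf[4].
+ */
+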
+/*
+ * Allocate more memory to the indicated bucket.
+ */
+static void
+morecore(int bucket)
+{
+	register union overhead *op;
+	register int sz;		/* size of desired block */
+	int amt;			/* amount to allocate */
+	int nblks;			/* how many blocks we get */
+
+	/*
+	 * sbrk_size <= 0 only for big, FLUFFY, requests (about
+	 * 2^30 bytes on a VAX, I think) or for a negative arg.
+	 */
+	sz = 1 << (bucket + 3);
+#ifdef MALLOC_DEBUG
+	ASSERT(sz > 0);
+#else
+	if (sz <= 0)
+		return;
+#endif
+	if (sz < pagesz) {
+		amt = pagesz;
+		nblks = amt / sz;
+	} else {
+		amt = sz + pagesz;
+		nblks = 1;
+	}
+	if (amt > pagepool_end - pagepool_start)
+		if (morepages(amt/pagesz + NPOOLPAGES) == 0)
+			return;
+	op = (union overhead *)pagepool_start;
+	pagepool_start += amt;
+
+	/*
+	 * Add new memory allocated to that on
+	 * free list for this hash bucket.
+	 */
+	nextf[bucket] = op;
+	while (--nblks > 0) {
+		op->ov_next = (union overhead *)((caddr_t)op + sz);
+		op = (union overhead *)((caddr_t)op + sz);
+	}
+}
+
+static void
+lfree_impl(void *cp)
+{
+	register int size;
+	register union overhead *op;
+
+	if (cp == NULL)
+		return;
+	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
+#ifdef MALLOC_DEBUG
+	ASSERT(op->ov_magic == MAGIC);		/* make sure it was in use */
+#else
+	if (op->ov_magic != MAGIC)
+		return;				/* sanity */
+#endif
+#ifdef RCHECK
+	ASSERT(op->ov_rmagic == RMAGIC);
+	ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC);
+#endif
+	size = op->ov_index;
+	ASSERT(size < NBUCKETS);
+	op->ov_next = nextf[size];	/* also clobbers ov_magic */
+	nextf[size] = op;
+#ifdef MSTATS
+	nmalloc[size]--;
+#endif
+}
+
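+/*
+ * Note that lfree_impl() pushes the block onto the head of its bucket
+ * list, so the free lists behave LIFO: the most recently freed block of
+ * a size class is the next one lmalloc_impl() hands out.
+ */
+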
+/*
+ * When a program attempts "storage compaction" as mentioned in the
+ * old malloc man page, it realloc's an already freed block.  Usually
+ * this is the last block it freed; occasionally it might be farther
+ * back.  We have to search all the free lists for the block in order
+ * to determine its bucket: 1st we make one pass thru the lists
+ * checking only the first block in each; if that fails we search
+ * ``realloc_srchlen'' blocks in each list for a match (the variable
+ * is extern so the caller can modify it).  If that fails we just copy
+ * however many bytes was given to realloc() and hope it's not huge.
+ */
+static int realloc_srchlen = 4;	/* 4 should be plenty, -1 =>'s whole list */
+
+static void *
+lrealloc_impl(void *cp, size_t nbytes)
+{
+	register u_int onb;
+	register int i;
+	union overhead *op;
+	char *res;
+	int was_alloced = 0;
+
+	if (cp == NULL)
+		return (lmalloc_impl(nbytes));
+	op = (union overhead *)((caddr_t)cp - sizeof (union overhead));
+	if (op->ov_magic == MAGIC) {
+		was_alloced++;
+		i = op->ov_index;
+	} else {
+		/*
+		 * Already free, doing "compaction".
+		 *
+		 * Search for the old block of memory on the
+		 * free list.  First, check the most common
+		 * case (last element free'd), then (this failing)
+		 * the last ``realloc_srchlen'' items free'd.
+		 * If all lookups fail, then assume the size of
+		 * the memory block being realloc'd is the
+		 * largest possible (so that all "nbytes" of new
+		 * memory are copied into).  Note that this could cause
+		 * a memory fault if the old area was tiny, and the moon
+		 * is gibbous.  However, that is very unlikely.
+		 */
+		if ((i = findbucket(op, 1)) < 0 &&
+		    (i = findbucket(op, realloc_srchlen)) < 0)
+			i = NBUCKETS;
+	}
+	onb = 1 << (i + 3);
+	if (onb < (u_int)pagesz)
+		onb -= sizeof (*op) + RSLOP;
+	else
+		onb += pagesz - sizeof (*op) - RSLOP;
+	/* avoid the copy if same size block */
+	if (was_alloced) {
+		if (i) {
+			i = 1 << (i + 2);
+			if (i < pagesz)
+				i -= sizeof (*op) + RSLOP;
+			else
+				i += pagesz - sizeof (*op) - RSLOP;
+		}
+		if (nbytes <= onb && nbytes > (size_t)i) {
+#ifdef RCHECK
+			op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1);
+			*(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC;
+#endif
+			return (cp);
+		} else
+			lfree_impl(cp);
+	}
+	if ((res = lmalloc_impl(nbytes)) == NULL)
+		return (NULL);
+	if (cp != res)		/* common optimization if "compacting" */
+		bcopy(cp, res, (nbytes < onb) ? nbytes : onb);
+	return (res);
+}
+
+/*
+ * Search ``srchlen'' elements of each free list for a block whose
+ * header starts at ``freep''.  If srchlen is -1 search the whole list.
+ * Return bucket number, or -1 if not found.
+ */
+static int
+findbucket(union overhead *freep, int srchlen)
+{
+	register union overhead *p;
+	register int i, j;
+
+	for (i = 0; i < NBUCKETS; i++) {
+		j = 0;
+		for (p = nextf[i]; p && j != srchlen; p = p->ov_next) {
+			if (p == freep)
+				return (i);
+			j++;
+		}
+	}
+	return (-1);
+}
+
+#ifdef MSTATS
+/*
+ * mstats - print out statistics about malloc
+ *
+ * Prints two lines of numbers, one showing the length of the free list
+ * for each size category, the second showing the number of mallocs -
+ * frees for each size category.
+ */
+mstats(s)
+	char *s;
+{
+	register int i, j;
+	register union overhead *p;
+	int totfree = 0,
+	totused = 0;
+
+	fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s);
+	for (i = 0; i < NBUCKETS; i++) {
+		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
+			;
+		fprintf(stderr, " %d", j);
+		totfree += j * (1 << (i + 3));
+	}
+	fprintf(stderr, "\nused:\t");
+	for (i = 0; i < NBUCKETS; i++) {
+		fprintf(stderr, " %d", nmalloc[i]);
+		totused += nmalloc[i] * (1 << (i + 3));
+	}
+	fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n",
+	    totused, totfree);
+}
+#endif
+
+static int
+morepages(int n)
+{
+	int	fd = -1;
+	int	offset;
+
+	if (pagepool_end - pagepool_start > pagesz) {
+		caddr_t	addr = (caddr_t)
+			(((long)pagepool_start + pagesz - 1) & ~(pagesz - 1));
+		if (munmap(addr, pagepool_end - addr) != 0)
+			_thread_printf(STDERR_FILENO, "morepages: munmap %p",
+			    addr);
+	}
+
+	offset = (long)pagepool_start - ((long)pagepool_start & ~(pagesz - 1));
+
+	if ((pagepool_start = mmap(0, n * pagesz,
+	    PROT_READ|PROT_WRITE,
+	    MAP_ANON|MAP_COPY, fd, 0)) == (caddr_t)-1) {
+		_thread_printf(STDERR_FILENO, "Cannot map anonymous memory\n");
+		return 0;
+	}
+	pagepool_end = pagepool_start + n * pagesz;
+	pagepool_start += offset;
+
+	return n;
+}
+
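+/*
+ * The exported wrappers below take _lmalloc_lock only when
+ * _get_curthread() returns a thread; before the library has a current
+ * thread set up, execution is still single-threaded and the impl
+ * functions are called unlocked.
+ */
+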
+void *
+_lmalloc(size_t nbytes)
+{
+	struct pthread *curthread;
+	void *p;
+
+	curthread = _get_curthread();
+	if (curthread != NULL) {
+		THR_LOCK_ACQUIRE(curthread, &_lmalloc_lock);
+		p = lmalloc_impl(nbytes);
+		THR_LOCK_RELEASE(curthread, &_lmalloc_lock);
+	} else
+		p = lmalloc_impl(nbytes);
+	return (p);
+}
+
+void *
+_lrealloc(void *cp, size_t nbytes)
+{
+	struct pthread *curthread;
+	void *p;
+
+	curthread = _get_curthread();
+	if (curthread != NULL) {
+		THR_LOCK_ACQUIRE(curthread, &_lmalloc_lock);
+		p = lrealloc_impl(cp, nbytes);
+		THR_LOCK_RELEASE(curthread, &_lmalloc_lock);
+	} else
+		p = lrealloc_impl(cp, nbytes);
+	return (p);
+}
+
+void
+_lfree(void *cp)
+{
+	struct pthread *curthread;
+
+	curthread = _get_curthread();
+	if (curthread != NULL) {
+		THR_LOCK_ACQUIRE(curthread, &_lmalloc_lock);
+		lfree_impl(cp);
+		THR_LOCK_RELEASE(curthread, &_lmalloc_lock);
+	} else
+		lfree_impl(cp);
+}
+
+#else
+
+#include <sys/types.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include "thr_private.h"
+
+/* Allocator entry points exported by rtld (see libexec/rtld-elf/Symbol.map). */
+void	*_rtld_malloc(size_t);
+void	*_rtld_calloc(size_t, size_t);
+void	*_rtld_realloc(void *, size_t);
+void	_rtld_free(void *);
+
+void *
+_lmalloc(size_t nbytes)
+{
+
+	return (_rtld_malloc(nbytes));
+}
+
+void
+_lfree(void *ptr)
+{
+
+	_rtld_free(ptr);
+}
+
+void *
+_lcalloc(size_t num, size_t nbytes)
+{
+
+	return (_rtld_calloc(num, nbytes));
+}
+
+void *
+_lrealloc(void *cp, size_t nbytes)
+{
+
+	return (_rtld_realloc(cp, nbytes));
+}
+#endif
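In the PIC (shared library) build the _l* entry points simply forward to the
allocator exported from the dynamic linker above, so rtld and libthr share
one heap serialized by rtld_bind_lock; the private Kingsley allocator is only
compiled into the static libthr, where no rtld is present.  Roughly, for the
shared case:

	pthread_mutexattr_init()
	    -> _lmalloc()		(libthr.so)
	    -> _rtld_malloc()		(ld-elf.so.1, takes rtld_bind_lock)
	    -> malloc()			(rtld's own internal allocator)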
Index: lib/libthr/thread/thr_clean.c
===================================================================
--- lib/libthr/thread/thr_clean.c	(revision 235120)
+++ lib/libthr/thread/thr_clean.c	(working copy)
@@ -71,7 +71,7 @@
 		if (execute)
 			old->routine(old->routine_arg);
 		if (old->onheap)
-			free(old);
+			_lfree(old);
 	}
 }
@@ -84,7 +84,7 @@
 	curthread->unwind_disabled = 1;
 #endif
 	if ((newbuf = (struct pthread_cleanup *)
-	    malloc(sizeof(struct _pthread_cleanup_info))) != NULL) {
+	    _lmalloc(sizeof(struct _pthread_cleanup_info))) != NULL) {
 		newbuf->routine = routine;
 		newbuf->routine_arg = arg;
 		newbuf->onheap = 1;
Index: lib/libthr/thread/thr_barrier.c
===================================================================
--- lib/libthr/thread/thr_barrier.c	(revision 235120)
+++ lib/libthr/thread/thr_barrier.c	(working copy)
@@ -71,7 +71,7 @@
 	THR_UMUTEX_UNLOCK(curthread, &bar->b_lock);
 	*barrier = NULL;
-	free(bar);
+	_lfree(bar);
 	return (0);
 }
@@ -86,7 +86,7 @@
 	if (barrier == NULL || count <= 0)
 		return (EINVAL);
 
-	bar = malloc(sizeof(struct pthread_barrier));
+	bar = _lmalloc(sizeof(struct pthread_barrier));
 	if (bar == NULL)
 		return (ENOMEM);
Index: lib/libthr/thread/Makefile.inc
===================================================================
--- lib/libthr/thread/Makefile.inc	(revision 235120)
+++ lib/libthr/thread/Makefile.inc	(working copy)
@@ -29,6 +29,7 @@
 	thr_list.c \
 	thr_kern.c \
 	thr_kill.c \
+	thr_malloc.c \
 	thr_main_np.c \
 	thr_multi_np.c \
 	thr_mutex.c \
Index: lib/libthr/thread/thr_attr.c
===================================================================
--- lib/libthr/thread/thr_attr.c	(revision 235120)
+++ lib/libthr/thread/thr_attr.c	(working copy)
@@ -119,9 +119,9 @@
 		ret = EINVAL;
 	else {
 		if ((*attr)->cpuset != NULL)
-			free((*attr)->cpuset);
+			_lfree((*attr)->cpuset);
 		/* Free the memory allocated to the attribute object: */
-		free(*attr);
+		_lfree(*attr);
 
 		/*
 		 * Leave the attribute pointer NULL now that the memory
@@ -343,7 +343,7 @@
 	_thr_check_init();
 
 	/* Allocate memory for the attribute object: */
-	if ((pattr = (pthread_attr_t) malloc(sizeof(struct pthread_attr))) == NULL)
+	if ((pattr = (pthread_attr_t) _lmalloc(sizeof(struct pthread_attr))) == NULL)
 		/* Insufficient memory: */
 		ret = ENOMEM;
 	else {
@@ -592,7 +592,7 @@
 	else {
 		if (cpusetsize == 0 || cpusetp == NULL) {
 			if (attr->cpuset != NULL) {
-				free(attr->cpuset);
+				_lfree(attr->cpuset);
 				attr->cpuset = NULL;
 				attr->cpusetsize = 0;
 			}
Index: lib/libthr/thread/thr_private.h
===================================================================
--- lib/libthr/thread/thr_private.h	(revision 235120)
+++ lib/libthr/thread/thr_private.h	(working copy)
@@ -715,6 +715,7 @@
 /* Garbage thread count. */
 extern int	_gc_count __hidden;
 
+extern struct umutex	_lmalloc_lock __hidden;
 extern struct umutex	_mutex_static_lock __hidden;
 extern struct umutex	_cond_static_lock __hidden;
 extern struct umutex	_rwlock_static_lock __hidden;
@@ -904,6 +905,11 @@
 void	_thr_sigact_unload(struct dl_phdr_info *phdr_info) __hidden;
 void	_thr_stack_fix_protection(struct pthread *thrd);
 
+void	*_lmalloc(size_t);
+void	*_lcalloc(size_t, size_t);
+void	*_lrealloc(void *, size_t);
+void	_lfree(void *);
+
 __END_DECLS
 
 #endif  /* !_THR_PRIVATE_H */
Index: lib/libthr/thread/thr_mutexattr.c
===================================================================
--- lib/libthr/thread/thr_mutexattr.c	(revision 235120)
+++ lib/libthr/thread/thr_mutexattr.c	(working copy)
@@ -89,7 +89,7 @@
 	pthread_mutexattr_t pattr;
 
 	if ((pattr = (pthread_mutexattr_t)
-	    malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
+	    _lmalloc(sizeof(struct pthread_mutex_attr))) == NULL) {
 		ret = ENOMEM;
 	} else {
 		memcpy(pattr, &_pthread_mutexattr_default,
@@ -162,7 +162,7 @@
 	if (attr == NULL || *attr == NULL) {
 		ret = EINVAL;
 	} else {
-		free(*attr);
+		_lfree(*attr);
 		*attr = NULL;
 		ret = 0;
 	}
Index: lib/libthr/thread/thr_fork.c
===================================================================
--- lib/libthr/thread/thr_fork.c	(revision 235120)
+++ lib/libthr/thread/thr_fork.c	(working copy)
@@ -82,7 +82,7 @@
 
 	_thr_check_init();
 
-	if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
+	if ((af = _lmalloc(sizeof(struct pthread_atfork))) == NULL)
 		return (ENOMEM);
 
 	curthread = _get_curthread();
@@ -121,7 +121,7 @@
 	THR_CRITICAL_LEAVE(curthread);
 	while ((af = TAILQ_FIRST(&temp_list)) != NULL) {
 		TAILQ_REMOVE(&temp_list, af, qe);
-		free(af);
+		_lfree(af);
 	}
 	_thr_tsd_unload(phdr_info);
 	_thr_sigact_unload(phdr_info);
@@ -169,6 +169,7 @@
 	if (_thr_isthreaded() != 0) {
 		was_threaded = 1;
 		_malloc_prefork();
+		THR_UMUTEX_LOCK(curthread, &_lmalloc_lock);
 		_rtld_atfork_pre(rtld_locks);
 	} else {
 		was_threaded = 0;
@@ -192,6 +193,7 @@
 
 	/* clear other threads locked us. */
 	_thr_umutex_init(&curthread->lock);
+	_thr_umutex_init(&_lmalloc_lock);
 	_mutex_fork(curthread);
 
 	_thr_signal_postfork_child();
@@ -233,6 +235,7 @@
 
 	if (was_threaded) {
 		_rtld_atfork_post(rtld_locks);
+		THR_UMUTEX_UNLOCK(curthread, &_lmalloc_lock);
 		_malloc_postfork();
 	}
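The thr_fork.c changes bracket the fork with the new lock.  Condensed from
the hunks above (illustrative ordering, not compilable as-is):

	_malloc_prefork();				/* application malloc */
	THR_UMUTEX_LOCK(curthread, &_lmalloc_lock);	/* new: libthr allocator */
	_rtld_atfork_pre(rtld_locks);			/* rtld locks taken last */
	/* ... fork(2) ... */
	/* child: re-initialize instead of unlocking */
	_thr_umutex_init(&_lmalloc_lock);
	/* parent: release in reverse order */
	_rtld_atfork_post(rtld_locks);
	THR_UMUTEX_UNLOCK(curthread, &_lmalloc_lock);
	_malloc_postfork();

Taking _lmalloc_lock before _rtld_atfork_pre() and re-initializing it in the
child keeps a fork() in one thread from leaving the allocator lock held by
another thread that no longer exists in the child.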