Index: lib/libc/stdlib/qr.h =================================================================== --- lib/libc/stdlib/qr.h (revision 234236) +++ lib/libc/stdlib/qr.h (working copy) @@ -1,106 +0,0 @@ -/****************************************************************************** - * - * Copyright (C) 2002 Jason Evans . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice(s), this list of conditions and the following disclaimer - * unmodified other than the allowable addition of one or more - * copyright notices. - * 2. Redistributions in binary form must reproduce the above copyright - * notice(s), this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -#ifndef QR_H_ -#define QR_H_ - -#include -__FBSDID("$FreeBSD$"); - -/* Ring definitions. */ -#define qr(a_type) \ -struct { \ - a_type *qre_next; \ - a_type *qre_prev; \ -} - -/* Ring functions. */ -#define qr_new(a_qr, a_field) do { \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) - -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) - -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qrelm); \ - (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ - (a_qrelm)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ - (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ - (a_qr)->a_field.qre_prev = (a_qrelm); \ - (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ - (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) - -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ - (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ - (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ - t = (a_qr_a)->a_field.qre_prev; \ - (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ - (a_qr_b)->a_field.qre_prev = t; \ -} while (0) - -/* qr_meld() and qr_split() are functionally equivalent, so there's no need to - * have two copies of the code. 
*/ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) - -#define qr_remove(a_qr, a_field) do { \ - (a_qr)->a_field.qre_prev->a_field.qre_next \ - = (a_qr)->a_field.qre_next; \ - (a_qr)->a_field.qre_next->a_field.qre_prev \ - = (a_qr)->a_field.qre_prev; \ - (a_qr)->a_field.qre_next = (a_qr); \ - (a_qr)->a_field.qre_prev = (a_qr); \ -} while (0) - -#define qr_foreach(var, a_qr, a_field) \ - for ((var) = (a_qr); \ - (var) != NULL; \ - (var) = (((var)->a_field.qre_next != (a_qr)) \ - ? (var)->a_field.qre_next : NULL)) - -#define qr_reverse_foreach(var, a_qr, a_field) \ - for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ - (var) != NULL; \ - (var) = (((var) != (a_qr)) \ - ? (var)->a_field.qre_prev : NULL)) - -#endif /* QR_H_ */ Index: lib/libc/stdlib/Symbol.map =================================================================== --- lib/libc/stdlib/Symbol.map (revision 234236) +++ lib/libc/stdlib/Symbol.map (working copy) @@ -47,14 +47,6 @@ lldiv; lsearch; lfind; - _malloc_options; - _malloc_message; - malloc; - posix_memalign; - calloc; - realloc; - free; - malloc_usable_size; mergesort; putenv; qsort_r; @@ -93,7 +85,6 @@ }; FBSD_1.3 { - aligned_alloc; at_quick_exit; atof_l; atoi_l; @@ -114,9 +105,6 @@ }; FBSDprivate_1.0 { - _malloc_thread_cleanup; - _malloc_prefork; - _malloc_postfork; __system; _system; }; Index: lib/libc/stdlib/reallocf.3 =================================================================== --- lib/libc/stdlib/reallocf.3 (working copy) +++ lib/libc/stdlib/reallocf.3 (working copy) @@ -36,85 +36,17 @@ .Dt MALLOC 3 .Os .Sh NAME -.Nm malloc , calloc , realloc , free , reallocf , malloc_usable_size -.Nd general purpose memory allocation functions +.Nm reallocf +.Nd memory reallocation function .Sh LIBRARY .Lb libc .Sh SYNOPSIS .In stdlib.h .Ft void * -.Fn malloc "size_t size" -.Ft void * -.Fn calloc "size_t number" "size_t size" -.Ft void * -.Fn realloc "void *ptr" "size_t size" -.Ft void * .Fn reallocf "void *ptr" "size_t size" -.Ft void -.Fn free "void *ptr" -.Ft const char * -.Va _malloc_options ; -.Ft void -.Fn \*(lp*_malloc_message\*(rp "const char *p1" "const char *p2" "const char *p3" "const char *p4" -.In malloc_np.h -.Ft size_t -.Fn malloc_usable_size "const void *ptr" .Sh DESCRIPTION The -.Fn malloc -function allocates -.Fa size -bytes of uninitialized memory. -The allocated space is suitably aligned (after possible pointer coercion) -for storage of any type of object. -.Pp -The -.Fn calloc -function allocates space for -.Fa number -objects, -each -.Fa size -bytes in length. -The result is identical to calling -.Fn malloc -with an argument of -.Dq "number * size" , -with the exception that the allocated memory is explicitly initialized -to zero bytes. -.Pp -The -.Fn realloc -function changes the size of the previously allocated memory referenced by -.Fa ptr -to -.Fa size -bytes. -The contents of the memory are unchanged up to the lesser of the new and -old sizes. -If the new size is larger, -the contents of the newly allocated portion of the memory are undefined. -Upon success, the memory referenced by -.Fa ptr -is freed and a pointer to the newly allocated memory is returned. -Note that -.Fn realloc -and .Fn reallocf -may move the memory allocation, resulting in a different return value than -.Fa ptr . -If -.Fa ptr -is -.Dv NULL , -the -.Fn realloc -function behaves identically to -.Fn malloc -for the specified size. 
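As an aside on the qr.h hunk above: a minimal, self-contained sketch of how those ring macros were typically consumed, under the assumption that the deleted header is still on the include path. The type and field names (node_t, link) are illustrative and not taken from any in-tree consumer.

/*
 * Each element embeds its own ring linkage; qr_new() makes it a ring of
 * one, qr_after_insert() splices elements together, qr_foreach() walks
 * the ring once, and qr_remove() splices an element back out.
 */
#include <stdio.h>

#include "qr.h"			/* the header deleted by this change */

typedef struct node_s node_t;
struct node_s {
	int		val;
	qr(node_t)	link;	/* embedded ring linkage */
};

int
main(void)
{
	node_t a, b, c, *n;

	a.val = 1; b.val = 2; c.val = 3;
	qr_new(&a, link);
	qr_new(&b, link);
	qr_new(&c, link);

	qr_after_insert(&a, &b, link);	/* ring: a -> b */
	qr_after_insert(&b, &c, link);	/* ring: a -> b -> c */

	qr_foreach(n, &a, link)		/* prints 1, 2, 3 */
		printf("%d\n", n->val);

	qr_remove(&b, link);		/* b is a ring of one again */
	return (0);
}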
-.Pp -The -.Fn reallocf function is identical to the .Fn realloc function, except that it @@ -125,395 +57,10 @@ for .Fn realloc causing memory leaks in libraries. -.Pp -The -.Fn free -function causes the allocated memory referenced by -.Fa ptr -to be made available for future allocations. -If -.Fa ptr -is -.Dv NULL , -no action occurs. -.Pp -The -.Fn malloc_usable_size -function returns the usable size of the allocation pointed to by -.Fa ptr . -The return value may be larger than the size that was requested during -allocation. -The -.Fn malloc_usable_size -function is not a mechanism for in-place -.Fn realloc ; -rather it is provided solely as a tool for introspection purposes. -Any discrepancy between the requested allocation size and the size reported by -.Fn malloc_usable_size -should not be depended on, since such behavior is entirely -implementation-dependent. -.Sh TUNING -Once, when the first call is made to one of these memory allocation -routines, various flags will be set or reset, which affects the -workings of this allocator implementation. -.Pp -The -.Dq name -of the file referenced by the symbolic link named -.Pa /etc/malloc.conf , -the value of the environment variable -.Ev MALLOC_OPTIONS , -and the string pointed to by the global variable -.Va _malloc_options -will be interpreted, in that order, from left to right as flags. -.Pp -Each flag is a single letter, optionally prefixed by a non-negative base 10 -integer repetition count. -For example, -.Dq 3N -is equivalent to -.Dq NNN . -Some flags control parameter magnitudes, where uppercase increases the -magnitude, and lowercase decreases the magnitude. -Other flags control boolean parameters, where uppercase indicates that a -behavior is set, or on, and lowercase means that a behavior is not set, or off. -.Bl -tag -width indent -.It A -All warnings (except for the warning about unknown -flags being set) become fatal. -The process will call -.Xr abort 3 -in these cases. -.It C -Double/halve the size of the maximum size class that is a multiple of the -cacheline size (64). -Above this size, subpage spacing (256 bytes) is used for size classes. -The default value is 512 bytes. -.It D -Use -.Xr sbrk 2 -to acquire memory in the data storage segment (DSS). -This option is enabled by default. -See the -.Dq M -option for related information and interactions. -.It E -Double/halve the size of the maximum medium size class. -The valid range is from one page to one half chunk. -The default value is 32 KiB. -.It F -Halve/double the per-arena minimum ratio of active to dirty pages. -Some dirty unused pages may be allowed to accumulate, within the limit set by -the ratio, before informing the kernel about at least half of those pages via -.Xr madvise 2 . -This provides the kernel with sufficient information to recycle dirty pages if -physical memory becomes scarce and the pages remain unused. -The default minimum ratio is 32:1; -.Ev MALLOC_OPTIONS=6F -will disable dirty page purging. -.It G -Double/halve the approximate interval (counted in terms of -thread-specific cache allocation/deallocation events) between full -thread-specific cache garbage collection sweeps. -Garbage collection is actually performed incrementally, one size -class at a time, in order to avoid large collection pauses. -The default sweep interval is 8192; -.Ev MALLOC_OPTIONS=14g -will disable garbage collection. -.It H -Double/halve the number of thread-specific cache slots per size -class. 
-When there are multiple threads, each thread uses a -thread-specific cache for small and medium objects. -Thread-specific caching allows many allocations to be satisfied -without performing any thread synchronization, at the cost of -increased memory use. -See the -.Dq G -option for related tuning information. -The default number of cache slots is 128; -.Ev MALLOC_OPTIONS=7h -will disable thread-specific caching. -Note that one cache slot per size class is not a valid -configuration due to implementation details. -.It J -Each byte of new memory allocated by -.Fn malloc , -.Fn realloc , -or -.Fn reallocf -will be initialized to 0xa5. -All memory returned by -.Fn free , -.Fn realloc , -or -.Fn reallocf -will be initialized to 0x5a. -This is intended for debugging and will impact performance negatively. -.It K -Double/halve the virtual memory chunk size. -The default chunk size is 4 MiB. -.It M -Use -.Xr mmap 2 -to acquire anonymously mapped memory. -This option is enabled by default. -If both the -.Dq D -and -.Dq M -options are enabled, the allocator prefers anonymous mappings over the DSS, -but allocation only fails if memory cannot be acquired via either method. -If neither option is enabled, then the -.Dq M -option is implicitly enabled in order to assure that there is a method for -acquiring memory. -.It N -Double/halve the number of arenas. -The default number of arenas is two times the number of CPUs, or one if there -is a single CPU. -.It P -Various statistics are printed at program exit via an -.Xr atexit 3 -function. -This has the potential to cause deadlock for a multi-threaded process that exits -while one or more threads are executing in the memory allocation functions. -Therefore, this option should only be used with care; it is primarily intended -as a performance tuning aid during application development. -.It Q -Double/halve the size of the maximum size class that is a multiple of the -quantum (8 or 16 bytes, depending on architecture). -Above this size, cacheline spacing is used for size classes. -The default value is 128 bytes. -.It U -Generate -.Dq utrace -entries for -.Xr ktrace 1 , -for all operations. -Consult the source for details on this option. -.It V -Attempting to allocate zero bytes will return a -.Dv NULL -pointer instead of a valid pointer. -(The default behavior is to make a minimal allocation and return a -pointer to it.) -This option is provided for System V compatibility. -This option is incompatible with the -.Dq X -option. -.It X -Rather than return failure for any allocation function, display a diagnostic -message on -.Dv STDERR_FILENO -and cause the program to drop core (using -.Xr abort 3 ) . -This option should be set at compile time by including the following in the -source code: -.Bd -literal -offset indent -_malloc_options = "X"; -.Ed -.It Z -Each byte of new memory allocated by -.Fn malloc , -.Fn realloc , -or -.Fn reallocf -will be initialized to 0. -Note that this initialization only happens once for each byte, so -.Fn realloc -and -.Fn reallocf -calls do not zero memory that was previously allocated. -This is intended for debugging and will impact performance negatively. -.El -.Pp -The -.Dq J -and -.Dq Z -options are intended for testing and debugging. -An application which changes its behavior when these options are used -is flawed. 
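The removed TUNING text above shows _malloc_options = "X"; as the way to hard-wire flags at build time. A slightly fuller sketch, assuming the allocator being removed here is still in place and that defining the variable in the program interposes on the libc copy before the first allocation (linkage details are glossed over); the flag string "AJ" combines the documented abort-on-warning and junk-fill behaviors for a debug build.

/*
 * Hard-wire allocator flags in source, per the removed malloc(3) text:
 * "A" makes warnings fatal, "J" fills new memory with 0xa5 and freed
 * memory with 0x5a.  Only meaningful with the allocator this change
 * removes.
 */
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

const char *_malloc_options = "AJ";

int
main(void)
{
	unsigned char *p;

	if ((p = malloc(64)) == NULL)
		err(1, "malloc");
	printf("first byte: 0x%02x\n", p[0]);	/* 0xa5 with "J" set */
	free(p);
	return (0);
}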
-.Sh IMPLEMENTATION NOTES -Traditionally, allocators have used -.Xr sbrk 2 -to obtain memory, which is suboptimal for several reasons, including race -conditions, increased fragmentation, and artificial limitations on maximum -usable memory. -This allocator uses both -.Xr sbrk 2 -and -.Xr mmap 2 -by default, but it can be configured at run time to use only one or the other. -If resource limits are not a primary concern, the preferred configuration is -.Ev MALLOC_OPTIONS=dM -or -.Ev MALLOC_OPTIONS=DM . -When so configured, the -.Ar datasize -resource limit has little practical effect for typical applications; use -.Ev MALLOC_OPTIONS=Dm -if that is a concern. -Regardless of allocator configuration, the -.Ar vmemoryuse -resource limit can be used to bound the total virtual memory used by a -process, as described in -.Xr limits 1 . -.Pp -This allocator uses multiple arenas in order to reduce lock contention for -threaded programs on multi-processor systems. -This works well with regard to threading scalability, but incurs some costs. -There is a small fixed per-arena overhead, and additionally, arenas manage -memory completely independently of each other, which means a small fixed -increase in overall memory fragmentation. -These overheads are not generally an issue, given the number of arenas normally -used. -Note that using substantially more arenas than the default is not likely to -improve performance, mainly due to reduced cache performance. -However, it may make sense to reduce the number of arenas if an application -does not make much use of the allocation functions. -.Pp -In addition to multiple arenas, this allocator supports thread-specific caching -for small and medium objects, in order to make it possible to completely avoid -synchronization for most small and medium allocation requests. -Such caching allows very fast allocation in the common case, but it increases -memory usage and fragmentation, since a bounded number of objects can remain -allocated in each thread cache. -.Pp -Memory is conceptually broken into equal-sized chunks, where the chunk size is -a power of two that is greater than the page size. -Chunks are always aligned to multiples of the chunk size. -This alignment makes it possible to find metadata for user objects very -quickly. -.Pp -User objects are broken into four categories according to size: small, medium, -large, and huge. -Small objects are smaller than one page. -Medium objects range from one page to an upper limit determined at run time (see -the -.Dq E -option). -Large objects are smaller than the chunk size. -Huge objects are a multiple of the chunk size. -Small, medium, and large objects are managed by arenas; huge objects are managed -separately in a single data structure that is shared by all threads. -Huge objects are used by applications infrequently enough that this single -data structure is not a scalability issue. -.Pp -Each chunk that is managed by an arena tracks its contents as runs of -contiguous pages (unused, backing a set of small or medium objects, or backing -one large object). -The combination of chunk alignment and chunk page maps makes it possible to -determine all metadata regarding small and large allocations in constant time. -.Pp -Small and medium objects are managed in groups by page runs. -Each run maintains a bitmap that tracks which regions are in use. -Allocation requests that are no more than half the quantum (8 or 16, depending -on architecture) are rounded up to the nearest power of two. 
-Allocation requests that are more than half the quantum, but no more than the -minimum cacheline-multiple size class (see the -.Dq Q -option) are rounded up to the nearest multiple of the quantum. -Allocation requests that are more than the minimum cacheline-multiple size -class, but no more than the minimum subpage-multiple size class (see the -.Dq C -option) are rounded up to the nearest multiple of the cacheline size (64). -Allocation requests that are more than the minimum subpage-multiple size class, -but no more than the maximum subpage-multiple size class are rounded up to the -nearest multiple of the subpage size (256). -Allocation requests that are more than the maximum subpage-multiple size class, -but no more than the maximum medium size class (see the -.Dq M -option) are rounded up to the nearest medium size class; spacing is an -automatically determined power of two and ranges from the subpage size to the -page size. -Allocation requests that are more than the maximum medium size class, but small -enough to fit in an arena-managed chunk (see the -.Dq K -option), are rounded up to the nearest run size. -Allocation requests that are too large to fit in an arena-managed chunk are -rounded up to the nearest multiple of the chunk size. -.Pp -Allocations are packed tightly together, which can be an issue for -multi-threaded applications. -If you need to assure that allocations do not suffer from cacheline sharing, -round your allocation requests up to the nearest multiple of the cacheline -size. -.Sh DEBUGGING MALLOC PROBLEMS -The first thing to do is to set the -.Dq A -option. -This option forces a coredump (if possible) at the first sign of trouble, -rather than the normal policy of trying to continue if at all possible. -.Pp -It is probably also a good idea to recompile the program with suitable -options and symbols for debugger support. -.Pp -If the program starts to give unusual results, coredump or generally behave -differently without emitting any of the messages mentioned in the next -section, it is likely because it depends on the storage being filled with -zero bytes. -Try running it with the -.Dq Z -option set; -if that improves the situation, this diagnosis has been confirmed. -If the program still misbehaves, -the likely problem is accessing memory outside the allocated area. -.Pp -Alternatively, if the symptoms are not easy to reproduce, setting the -.Dq J -option may help provoke the problem. -.Pp -In truly difficult cases, the -.Dq U -option, if supported by the kernel, can provide a detailed trace of -all calls made to these functions. -.Pp -Unfortunately this implementation does not provide much detail about -the problems it detects; the performance impact for storing such information -would be prohibitive. -There are a number of allocator implementations available on the Internet -which focus on detecting and pinpointing problems by trading performance for -extra sanity checks and detailed diagnostics. -.Sh DIAGNOSTIC MESSAGES -If any of the memory allocation/deallocation functions detect an error or -warning condition, a message will be printed to file descriptor -.Dv STDERR_FILENO . -Errors will result in the process dumping core. -If the -.Dq A -option is set, all warnings are treated as errors. -.Pp -The -.Va _malloc_message -variable allows the programmer to override the function which emits the text -strings forming the errors and warnings if for some reason the -.Dv STDERR_FILENO -file descriptor is not suitable for this. 
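A sketch of such an override, using only the four-string callback signature given in the SYNOPSIS above; the log path and handler name are hypothetical, and the handler deliberately avoids allocating, for the reason given in the sentence that follows.

/*
 * Hypothetical replacement for the default _malloc_message handler,
 * writing diagnostics to a pre-opened descriptor instead of
 * STDERR_FILENO.  It must not allocate, so it sticks to write(2).
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

extern void (*_malloc_message)(const char *, const char *, const char *,
    const char *);

static int log_fd = -1;

static void
log_malloc_message(const char *p1, const char *p2, const char *p3,
    const char *p4)
{
	const char *p[4] = { p1, p2, p3, p4 };
	int i;

	for (i = 0; i < 4; i++)
		if (p[i] != NULL && log_fd != -1)
			(void)write(log_fd, p[i], strlen(p[i]));
}

int
main(void)
{
	/* "/tmp/malloc.log" is a placeholder path for this sketch. */
	log_fd = open("/tmp/malloc.log", O_WRONLY | O_CREAT | O_APPEND, 0600);
	_malloc_message = log_malloc_message;
	/* ... allocator diagnostics now go to the log instead of stderr. */
	return (0);
}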
-Please note that doing anything which tries to allocate memory in this function -is likely to result in a crash or deadlock. -.Pp -All messages are prefixed by -.Dq Ao Ar progname Ac Ns Li : (malloc) . .Sh RETURN VALUES The -.Fn malloc -and -.Fn calloc -functions return a pointer to the allocated memory if successful; otherwise -a -.Dv NULL -pointer is returned and -.Va errno -is set to -.Er ENOMEM . -.Pp -The -.Fn realloc -and .Fn reallocf -functions return a pointer, possibly identical to +function returns a pointer, possibly identical to .Fa ptr , to the allocated memory if successful; otherwise a @@ -524,68 +71,12 @@ .Er ENOMEM if the error was the result of an allocation failure. The -.Fn realloc -function always leaves the original buffer intact -when an error occurs, whereas .Fn reallocf -deallocates it in this case. -.Pp -The -.Fn free -function returns no value. -.Pp -The -.Fn malloc_usable_size -function returns the usable size of the allocation pointed to by -.Fa ptr . -.Sh ENVIRONMENT -The following environment variables affect the execution of the allocation -functions: -.Bl -tag -width ".Ev MALLOC_OPTIONS" -.It Ev MALLOC_OPTIONS -If the environment variable -.Ev MALLOC_OPTIONS -is set, the characters it contains will be interpreted as flags to the -allocation functions. -.El -.Sh EXAMPLES -To dump core whenever a problem occurs: -.Bd -literal -offset indent -ln -s 'A' /etc/malloc.conf -.Ed -.Pp -To specify in the source that a program does no return value checking -on calls to these functions: -.Bd -literal -offset indent -_malloc_options = "X"; -.Ed +function deletes the original buffer when an error occurs. .Sh SEE ALSO -.Xr limits 1 , -.Xr madvise 2 , -.Xr mmap 2 , -.Xr sbrk 2 , -.Xr alloca 3 , -.Xr atexit 3 , -.Xr getpagesize 3 , -.Xr getpagesizes 3 , -.Xr memory 3 , -.Xr posix_memalign 3 -.Sh STANDARDS -The -.Fn malloc , -.Fn calloc , -.Fn realloc -and -.Fn free -functions conform to -.St -isoC . +.Xr realloc 3 .Sh HISTORY The .Fn reallocf function first appeared in .Fx 3.0 . -.Pp -The -.Fn malloc_usable_size -function first appeared in -.Fx 7.0 . 
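Since this page is being pared down to document only reallocf, a short sketch of the leak it exists to prevent may help: with plain realloc(), the common idiom p = realloc(p, n) overwrites the only copy of the old pointer with NULL on failure, leaking the original buffer, whereas reallocf() frees the passed pointer itself in that case.

/*
 * reallocf(3) makes the "grow through the same variable" idiom safe:
 * on failure the old buffer has already been freed, so bailing out
 * leaks nothing.  The same lines written with realloc() would leak.
 */
#include <stdlib.h>

int
main(void)
{
	int *buf;

	/* A NULL pointer makes reallocf() behave like malloc(). */
	if ((buf = reallocf(NULL, 16 * sizeof(*buf))) == NULL)
		return (1);

	/* Grow in place through the same variable; no leak on failure. */
	if ((buf = reallocf(buf, 1024 * sizeof(*buf))) == NULL)
		return (1);

	free(buf);
	return (0);
}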
Index: lib/libc/stdlib/Makefile.inc =================================================================== --- lib/libc/stdlib/Makefile.inc (revision 234236) +++ lib/libc/stdlib/Makefile.inc (working copy) @@ -7,7 +7,7 @@ MISRCS+=_Exit.c a64l.c abort.c abs.c atexit.c atof.c atoi.c atol.c atoll.c \ bsearch.c div.c exit.c getenv.c getopt.c getopt_long.c \ getsubopt.c hcreate.c heapsort.c imaxabs.c imaxdiv.c \ - insque.c l64a.c labs.c ldiv.c llabs.c lldiv.c lsearch.c malloc.c \ + insque.c l64a.c labs.c ldiv.c llabs.c lldiv.c lsearch.c \ merge.c ptsname.c qsort.c qsort_r.c quick_exit.c radixsort.c rand.c \ random.c reallocf.c realpath.c remque.c strfmon.c strtoimax.c \ strtol.c strtoll.c strtoq.c strtoul.c strtonum.c strtoull.c \ @@ -18,18 +18,17 @@ # machine-dependent stdlib sources .sinclude "${.CURDIR}/${LIBC_ARCH}/stdlib/Makefile.inc" -MAN+= a64l.3 abort.3 abs.3 aligned_alloc.3 alloca.3 atexit.3 atof.3 \ +MAN+= a64l.3 abort.3 abs.3 alloca.3 atexit.3 atof.3 \ atoi.3 atol.3 at_quick_exit.3 bsearch.3 \ div.3 exit.3 getenv.3 getopt.3 getopt_long.3 getsubopt.3 \ hcreate.3 imaxabs.3 imaxdiv.3 insque.3 labs.3 ldiv.3 llabs.3 lldiv.3 \ - lsearch.3 malloc.3 memory.3 ptsname.3 qsort.3 \ + lsearch.3 memory.3 ptsname.3 qsort.3 \ quick_exit.3 \ - radixsort.3 rand.3 random.3 \ + radixsort.3 rand.3 random.3 reallocf.3 \ realpath.3 strfmon.3 strtod.3 strtol.3 strtonum.3 strtoul.3 system.3 \ tsearch.3 MLINKS+=a64l.3 l64a.3 a64l.3 l64a_r.3 -MLINKS+=aligned_alloc.3 posix_memalign.3 MLINKS+=atol.3 atoll.3 MLINKS+=exit.3 _Exit.3 MLINKS+=getenv.3 putenv.3 getenv.3 setenv.3 getenv.3 unsetenv.3 @@ -46,11 +45,4 @@ MLINKS+=strtod.3 strtof.3 strtod.3 strtold.3 MLINKS+=strtol.3 strtoll.3 strtol.3 strtoq.3 strtol.3 strtoimax.3 MLINKS+=strtoul.3 strtoull.3 strtoul.3 strtouq.3 strtoul.3 strtoumax.3 -MLINKS+=malloc.3 calloc.3 malloc.3 free.3 malloc.3 malloc.conf.5 \ - malloc.3 realloc.3 malloc.3 reallocf.3 malloc.3 malloc_usable_size.3 MLINKS+=tsearch.3 tdelete.3 tsearch.3 tfind.3 tsearch.3 twalk.3 - -.if defined(MALLOC_PRODUCTION) -CFLAGS+= -DMALLOC_PRODUCTION -.endif - Index: lib/libc/stdlib/rb.h =================================================================== --- lib/libc/stdlib/rb.h (revision 234236) +++ lib/libc/stdlib/rb.h (working copy) @@ -1,1002 +0,0 @@ -/*- - ******************************************************************************* - * - * Copyright (C) 2008-2010 Jason Evans . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice(s), this list of conditions and the following disclaimer - * unmodified other than the allowable addition of one or more - * copyright notices. - * 2. Redistributions in binary form must reproduce the above copyright - * notice(s), this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ****************************************************************************** - * - * cpp macro implementation of left-leaning 2-3 red-black trees. Parent - * pointers are not used, and color bits are stored in the least significant - * bit of right-child pointers (if RB_COMPACT is defined), thus making node - * linkage as compact as is possible for red-black trees. - * - * Usage: - * - * #include - * #include - * #define NDEBUG // (Optional, see assert(3).) - * #include - * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) - * #include - * ... - * - ******************************************************************************* - */ - -#ifndef RB_H_ -#define RB_H_ - -#include -__FBSDID("$FreeBSD$"); - -#ifdef RB_COMPACT -/* Node structure. */ -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right_red; \ -} -#else -#define rb_node(a_type) \ -struct { \ - a_type *rbn_left; \ - a_type *rbn_right; \ - bool rbn_red; \ -} -#endif - -/* Root structure. */ -#define rb_tree(a_type) \ -struct { \ - a_type *rbt_root; \ - a_type rbt_nil; \ -} - -/* Left accessors. */ -#define rbtn_left_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ - (a_node)->a_field.rbn_left = a_left; \ -} while (0) - -#ifdef RB_COMPACT -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ - & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ - | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ -} while (0) - -/* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ - & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ - | ((ssize_t)a_red)); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ - (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ - (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ -} while (0) -#else -/* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ - (a_node)->a_field.rbn_right = a_right; \ -} while (0) - -/* Color accessors. 
*/ -#define rbtn_red_get(a_type, a_field, a_node) \ - ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ - (a_node)->a_field.rbn_red = (a_red); \ -} while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = true; \ -} while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ - (a_node)->a_field.rbn_red = false; \ -} while (0) -#endif - -/* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_red_set(a_type, a_field, (a_node)); \ -} while (0) - -/* Tree initializer. */ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ - rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ - rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ -} while (0) - -/* Internal utility macros. */ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ - (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ - (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != \ - &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ - (r_node))) { \ - } \ - } \ -} while (0) - -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ - rbtn_right_set(a_type, a_field, (a_node), \ - rbtn_left_get(a_type, a_field, (r_node))); \ - rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ - (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ - rbtn_left_set(a_type, a_field, (a_node), \ - rbtn_right_get(a_type, a_field, (r_node))); \ - rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ -} while (0) - -/* - * The rb_proto() macro generates function prototypes that correspond to the - * functions generated by an equivalently parameterized call to rb_gen(). - */ - -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree); \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key); \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg); \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); - -/* - * The rb_gen() macro generates a type-specific red-black tree implementation, - * based on the above cpp macros. - * - * Arguments: - * - * a_attr : Function attribute for generated functions (ex: static). 
- * a_prefix : Prefix for generated functions (ex: extree_). - * a_rb_type : Type for red-black tree data structure (ex: extree_t). - * a_type : Type for red-black tree node data structure (ex: - * extree_node_t). - * a_field : Name of red-black tree node linkage (ex: extree_link). - * a_cmp : Node comparison function name, with the following prototype: - * int (a_cmp *)(a_type *a_node, a_type *a_other); - * ^^^^^^ - * or a_key - * Interpretation of comparision function return values: - * -1 : a_node < a_other - * 0 : a_node == a_other - * 1 : a_node > a_other - * In all cases, the a_node or a_key macro argument is the first - * argument to the comparison function, which makes it possible - * to write comparison functions that treat the first argument - * specially. - * - * Assuming the following setup: - * - * typedef struct ex_node_s ex_node_t; - * struct ex_node_s { - * rb_node(ex_node_t) ex_link; - * }; - * typedef rb(ex_node_t) ex_t; - * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp, 1297, 1301) - * - * The following API is generated: - * - * static void - * ex_new(ex_t *extree); - * Description: Initialize a red-black tree structure. - * Args: - * extree: Pointer to an uninitialized red-black tree object. - * - * static ex_node_t * - * ex_first(ex_t *extree); - * static ex_node_t * - * ex_last(ex_t *extree); - * Description: Get the first/last node in extree. - * Args: - * extree: Pointer to an initialized red-black tree object. - * Ret: First/last node in extree, or NULL if extree is empty. - * - * static ex_node_t * - * ex_next(ex_t *extree, ex_node_t *node); - * static ex_node_t * - * ex_prev(ex_t *extree, ex_node_t *node); - * Description: Get node's successor/predecessor. - * Args: - * extree: Pointer to an initialized red-black tree object. - * node : A node in extree. - * Ret: node's successor/predecessor in extree, or NULL if node is - * last/first. - * - * static ex_node_t * - * ex_search(ex_t *extree, ex_node_t *key); - * Description: Search for node that matches key. - * Args: - * extree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in extree that matches key, or NULL if no match. - * - * static ex_node_t * - * ex_nsearch(ex_t *extree, ex_node_t *key); - * static ex_node_t * - * ex_psearch(ex_t *extree, ex_node_t *key); - * Description: Search for node that matches key. If no match is found, - * return what would be key's successor/predecessor, were - * key in extree. - * Args: - * extree: Pointer to an initialized red-black tree object. - * key : Search key. - * Ret: Node in extree that matches key, or if no match, hypothetical - * node's successor/predecessor (NULL if no successor/predecessor). - * - * static void - * ex_insert(ex_t *extree, ex_node_t *node); - * Description: Insert node into extree. - * Args: - * extree: Pointer to an initialized red-black tree object. - * node : Node to be inserted into extree. - * - * static void - * ex_remove(ex_t *extree, ex_node_t *node); - * Description: Remove node from extree. - * Args: - * extree: Pointer to an initialized red-black tree object. - * node : Node in extree to be removed. - * - * static ex_node_t * - * ex_iter(ex_t *extree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * static ex_node_t * - * ex_reverse_iter(ex_t *extree, ex_node_t *start, ex_node *(*cb)(ex_t *, - * ex_node_t *, void *), void *arg); - * Description: Iterate forward/backward over extree, starting at node. 
- * If extree is modified, iteration must be immediately - * terminated by the callback function that causes the - * modification. - * Args: - * extree: Pointer to an initialized red-black tree object. - * start : Node at which to start iteration, or NULL to start at - * first/last node. - * cb : Callback function, which is called for each node during - * iteration. Under normal circumstances the callback function - * should return NULL, which causes iteration to continue. If a - * callback function returns non-NULL, iteration is immediately - * terminated and the non-NULL return value is returned by the - * iterator. This is useful for re-starting iteration after - * modifying extree. - * arg : Opaque pointer passed to cb(). - * Ret: NULL if iteration completed, or the non-NULL callback return value - * that caused termination of the iteration. - */ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ -a_attr void \ -a_prefix##new(a_rbt_type *rbtree) { \ - rb_new(a_type, a_field, rbtree); \ -} \ -a_attr a_type * \ -a_prefix##first(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##last(a_rbt_type *rbtree) { \ - a_type *ret; \ - rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ - rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != &rbtree->rbt_nil); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ - a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ - rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ - a_field, node), ret); \ - } else { \ - a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ - while (true) { \ - int cmp = (a_cmp)(node, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - break; \ - } \ - assert(tnode != &rbtree->rbt_nil); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - int cmp; \ - ret = rbtree->rbt_root; \ - while (ret != &rbtree->rbt_nil \ - && (cmp = (a_cmp)(key, ret)) != 0) { \ - if (cmp < 0) { \ - ret = rbtn_left_get(a_type, a_field, ret); \ - } else { \ - ret = rbtn_right_get(a_type, a_field, ret); \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - 
while (tnode != &rbtree->rbt_nil) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - ret = tnode; \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ - a_type *ret; \ - a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ - int cmp = (a_cmp)(key, tnode); \ - if (cmp < 0) { \ - tnode = rbtn_left_get(a_type, a_field, tnode); \ - } else if (cmp > 0) { \ - ret = tnode; \ - tnode = rbtn_right_get(a_type, a_field, tnode); \ - } else { \ - ret = tnode; \ - break; \ - } \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ -} \ -a_attr void \ -a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } path[sizeof(void *) << 4], *pathp; \ - rbt_node_new(a_type, a_field, rbtree, node); \ - /* Wind. */ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - assert(cmp != 0); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - } \ - } \ - pathp->node = node; \ - /* Unwind. */ \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - a_type *cnode = pathp->node; \ - if (pathp->cmp < 0) { \ - a_type *left = pathp[1].node; \ - rbtn_left_set(a_type, a_field, cnode, left); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* Fix up 4-node. */ \ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, cnode, tnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } else { \ - a_type *right = pathp[1].node; \ - rbtn_right_set(a_type, a_field, cnode, right); \ - if (rbtn_red_get(a_type, a_field, right)) { \ - a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - /* Split 4-node. */ \ - rbtn_black_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, right); \ - rbtn_red_set(a_type, a_field, cnode); \ - } else { \ - /* Lean left. */ \ - a_type *tnode; \ - bool tred = rbtn_red_get(a_type, a_field, cnode); \ - rbtn_rotate_left(a_type, a_field, cnode, tnode); \ - rbtn_color_set(a_type, a_field, tnode, tred); \ - rbtn_red_set(a_type, a_field, cnode); \ - cnode = tnode; \ - } \ - } else { \ - return; \ - } \ - } \ - pathp->node = cnode; \ - } \ - /* Set root, and make it black. */ \ - rbtree->rbt_root = path->node; \ - rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ -} \ -a_attr void \ -a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ - struct { \ - a_type *node; \ - int cmp; \ - } *pathp, *nodep, path[sizeof(void *) << 4]; \ - /* Wind. */ \ - nodep = NULL; /* Silence compiler warning. 
*/ \ - path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ - int cmp = pathp->cmp = a_cmp(node, pathp->node); \ - if (cmp < 0) { \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } else { \ - pathp[1].node = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - if (cmp == 0) { \ - /* Find node's successor, in preparation for swap. */ \ - pathp->cmp = 1; \ - nodep = pathp; \ - for (pathp++; pathp->node != &rbtree->rbt_nil; \ - pathp++) { \ - pathp->cmp = -1; \ - pathp[1].node = rbtn_left_get(a_type, a_field, \ - pathp->node); \ - } \ - break; \ - } \ - } \ - } \ - assert(nodep->node == node); \ - pathp--; \ - if (pathp->node != node) { \ - /* Swap node with its successor. */ \ - bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ - rbtn_color_set(a_type, a_field, pathp->node, \ - rbtn_red_get(a_type, a_field, node)); \ - rbtn_left_set(a_type, a_field, pathp->node, \ - rbtn_left_get(a_type, a_field, node)); \ - /* If node's successor is its right child, the following code */\ - /* will do the wrong thing for the right child pointer. */\ - /* However, it doesn't matter, because the pointer will be */\ - /* properly set when the successor is pruned. */\ - rbtn_right_set(a_type, a_field, pathp->node, \ - rbtn_right_get(a_type, a_field, node)); \ - rbtn_color_set(a_type, a_field, node, tred); \ - /* The pruned leaf node's child pointers are never accessed */\ - /* again, so don't bother setting them to nil. */\ - nodep->node = pathp->node; \ - pathp->node = node; \ - if (nodep == path) { \ - rbtree->rbt_root = nodep->node; \ - } else { \ - if (nodep[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } else { \ - rbtn_right_set(a_type, a_field, nodep[-1].node, \ - nodep->node); \ - } \ - } \ - } else { \ - a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != &rbtree->rbt_nil) { \ - /* node has no successor, but it has a left child. */\ - /* Splice node out, without losing the left child. */\ - assert(rbtn_red_get(a_type, a_field, node) == false); \ - assert(rbtn_red_get(a_type, a_field, left)); \ - rbtn_black_set(a_type, a_field, left); \ - if (pathp == path) { \ - rbtree->rbt_root = left; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - left); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - left); \ - } \ - } \ - return; \ - } else if (pathp == path) { \ - /* The tree only contained one node. */ \ - rbtree->rbt_root = &rbtree->rbt_nil; \ - return; \ - } \ - } \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - /* Prune red node, which requires no fixup. */ \ - assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - &rbtree->rbt_nil); \ - return; \ - } \ - /* The node to be pruned is black, so unwind until balance is */\ - /* restored. 
*/\ - pathp->node = &rbtree->rbt_nil; \ - for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ - assert(pathp->cmp != 0); \ - if (pathp->cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - assert(rbtn_red_get(a_type, a_field, pathp[1].node) \ - == false); \ - if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - a_type *tnode; \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ - /* In the following diagrams, ||, //, and \\ */\ - /* indicate the path to the removed node. */\ - /* */\ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - /* */\ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - /* */\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - a_type *right = rbtn_right_get(a_type, a_field, \ - pathp->node); \ - a_type *rightleft = rbtn_left_get(a_type, a_field, \ - right); \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, rightleft); \ - rbtn_rotate_right(a_type, a_field, right, tnode); \ - rbtn_right_set(a_type, a_field, pathp->node, tnode);\ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subree root, which may actually be the tree */\ - /* root. */\ - if (pathp == path) { \ - /* Set root. 
*/ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* // \ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - a_type *tnode; \ - rbtn_red_set(a_type, a_field, pathp->node); \ - rbtn_rotate_left(a_type, a_field, pathp->node, \ - tnode); \ - pathp->node = tnode; \ - } \ - } \ - } else { \ - a_type *left; \ - rbtn_right_set(a_type, a_field, pathp->node, \ - pathp[1].node); \ - left = rbtn_left_get(a_type, a_field, pathp->node); \ - if (rbtn_red_get(a_type, a_field, left)) { \ - a_type *tnode; \ - a_type *leftright = rbtn_right_get(a_type, a_field, \ - left); \ - a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ - leftright); \ - if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (r) */\ - a_type *unode; \ - rbtn_black_set(a_type, a_field, leftrightleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - unode); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_right_set(a_type, a_field, unode, tnode); \ - rbtn_rotate_left(a_type, a_field, unode, tnode); \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (r) (b) */\ - /* \ */\ - /* (b) */\ - /* / */\ - /* (b) */\ - assert(leftright != &rbtree->rbt_nil); \ - rbtn_red_set(a_type, a_field, leftright); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - rbtn_black_set(a_type, a_field, tnode); \ - } \ - /* Balance restored, but rotation modified subtree */\ - /* root, which may actually be the tree root. */\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - } \ - return; \ - } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, pathp->node); \ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root. */\ - assert((uintptr_t)pathp > (uintptr_t)path); \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, pathp[-1].node, \ - tnode); \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(r) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - rbtn_black_set(a_type, a_field, pathp->node); \ - /* Balance restored. */ \ - return; \ - } \ - } else { \ - a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (r) */\ - a_type *tnode; \ - rbtn_black_set(a_type, a_field, leftleft); \ - rbtn_rotate_right(a_type, a_field, pathp->node, \ - tnode); \ - /* Balance restored, but rotation modified */\ - /* subtree root, which may actually be the tree */\ - /* root. 
*/\ - if (pathp == path) { \ - /* Set root. */ \ - rbtree->rbt_root = tnode; \ - } else { \ - if (pathp[-1].cmp < 0) { \ - rbtn_left_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } else { \ - rbtn_right_set(a_type, a_field, \ - pathp[-1].node, tnode); \ - } \ - } \ - return; \ - } else { \ - /* || */\ - /* pathp(b) */\ - /* / \\ */\ - /* (b) (b) */\ - /* / */\ - /* (b) */\ - rbtn_red_set(a_type, a_field, left); \ - } \ - } \ - } \ - } \ - /* Set root. */ \ - rbtree->rbt_root = path->node; \ - assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ -} \ -a_attr a_type * \ -a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != &rbtree->rbt_nil \ - || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp < 0) { \ - a_type *ret; \ - if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ - a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ - cb, arg); \ - } else { \ - ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ - } else { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ - a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ - void *arg) { \ - int cmp = a_cmp(start, node); \ - if (cmp > 0) { \ - a_type *ret; \ - if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else if (cmp < 0) { \ - return 
(a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } else { \ - a_type *ret; \ - if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ - } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ - } \ -} \ -a_attr a_type * \ -a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - a_type *ret; \ - if (start != NULL) { \ - ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtree->rbt_root, cb, arg); \ - } else { \ - ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ - cb, arg); \ - } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ -} - -#endif /* RB_H_ */ Index: lib/libc/stdlib/ql.h =================================================================== --- lib/libc/stdlib/ql.h (revision 234236) +++ lib/libc/stdlib/ql.h (working copy) @@ -1,122 +0,0 @@ -/****************************************************************************** - * - * Copyright (C) 2002 Jason Evans . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice(s), this list of conditions and the following disclaimer - * unmodified other than the allowable addition of one or more - * copyright notices. - * 2. Redistributions in binary form must reproduce the above copyright - * notice(s), this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************/ - -#ifndef QL_H_ -#define QL_H_ - -#include -__FBSDID("$FreeBSD$"); - -/* - * List definitions. - */ -#define ql_head(a_type) \ -struct { \ - a_type *qlh_first; \ -} - -#define ql_head_initializer(a_head) {NULL} - -#define ql_elm(a_type) qr(a_type) - -/* List functions. */ -#define ql_new(a_head) do { \ - (a_head)->qlh_first = NULL; \ -} while (0) - -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) - -#define ql_first(a_head) ((a_head)->qlh_first) - -#define ql_last(a_head, a_field) \ - ((ql_first(a_head) != NULL) \ - ? qr_prev(ql_first(a_head), a_field) : NULL) - -#define ql_next(a_head, a_elm, a_field) \ - ((ql_last(a_head, a_field) != (a_elm)) \ - ? qr_next((a_elm), a_field) : NULL) - -#define ql_prev(a_head, a_elm, a_field) \ - ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ - : NULL) - -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ - qr_before_insert((a_qlelm), (a_elm), a_field); \ - if (ql_first(a_head) == (a_qlelm)) { \ - ql_first(a_head) = (a_elm); \ - } \ -} while (0) - -#define ql_after_insert(a_qlelm, a_elm, a_field) \ - qr_after_insert((a_qlelm), (a_elm), a_field) - -#define ql_head_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = (a_elm); \ -} while (0) - -#define ql_tail_insert(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) != NULL) { \ - qr_before_insert(ql_first(a_head), (a_elm), a_field); \ - } \ - ql_first(a_head) = qr_next((a_elm), a_field); \ -} while (0) - -#define ql_remove(a_head, a_elm, a_field) do { \ - if (ql_first(a_head) == (a_elm)) { \ - ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ - } \ - if (ql_first(a_head) != (a_elm)) { \ - qr_remove((a_elm), a_field); \ - } else { \ - ql_first(a_head) = NULL; \ - } \ -} while (0) - -#define ql_head_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_first(a_head); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_tail_remove(a_head, a_type, a_field) do { \ - a_type *t = ql_last(a_head, a_field); \ - ql_remove((a_head), t, a_field); \ -} while (0) - -#define ql_foreach(a_var, a_head, a_field) \ - qr_foreach((a_var), ql_first(a_head), a_field) - -#define ql_reverse_foreach(a_var, a_head, a_field) \ - qr_reverse_foreach((a_var), ql_first(a_head), a_field) - -#endif /* QL_H_ */ Index: lib/libc/stdlib/malloc.3 =================================================================== --- lib/libc/stdlib/malloc.3 (revision 234236) +++ lib/libc/stdlib/malloc.3 (working copy) @@ -1,591 +0,0 @@ -.\" Copyright (c) 1980, 1991, 1993 -.\" The Regents of the University of California. All rights reserved. -.\" -.\" This code is derived from software contributed to Berkeley by -.\" the American National Standards Committee X3, on Information -.\" Processing Systems. -.\" -.\" Redistribution and use in source and binary forms, with or without -.\" modification, are permitted provided that the following conditions -.\" are met: -.\" 1. Redistributions of source code must retain the above copyright -.\" notice, this list of conditions and the following disclaimer. -.\" 2. Redistributions in binary form must reproduce the above copyright -.\" notice, this list of conditions and the following disclaimer in the -.\" documentation and/or other materials provided with the distribution. -.\" 3. Neither the name of the University nor the names of its contributors -.\" may be used to endorse or promote products derived from this software -.\" without specific prior written permission. -.\" -.\" THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND -.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -.\" ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE -.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -.\" SUCH DAMAGE. -.\" -.\" @(#)malloc.3 8.1 (Berkeley) 6/4/93 -.\" $FreeBSD$ -.\" -.Dd January 31, 2010 -.Dt MALLOC 3 -.Os -.Sh NAME -.Nm malloc , calloc , realloc , free , reallocf , malloc_usable_size -.Nd general purpose memory allocation functions -.Sh LIBRARY -.Lb libc -.Sh SYNOPSIS -.In stdlib.h -.Ft void * -.Fn malloc "size_t size" -.Ft void * -.Fn calloc "size_t number" "size_t size" -.Ft void * -.Fn realloc "void *ptr" "size_t size" -.Ft void * -.Fn reallocf "void *ptr" "size_t size" -.Ft void -.Fn free "void *ptr" -.Ft const char * -.Va _malloc_options ; -.Ft void -.Fn \*(lp*_malloc_message\*(rp "const char *p1" "const char *p2" "const char *p3" "const char *p4" -.In malloc_np.h -.Ft size_t -.Fn malloc_usable_size "const void *ptr" -.Sh DESCRIPTION -The -.Fn malloc -function allocates -.Fa size -bytes of uninitialized memory. -The allocated space is suitably aligned (after possible pointer coercion) -for storage of any type of object. -.Pp -The -.Fn calloc -function allocates space for -.Fa number -objects, -each -.Fa size -bytes in length. -The result is identical to calling -.Fn malloc -with an argument of -.Dq "number * size" , -with the exception that the allocated memory is explicitly initialized -to zero bytes. -.Pp -The -.Fn realloc -function changes the size of the previously allocated memory referenced by -.Fa ptr -to -.Fa size -bytes. -The contents of the memory are unchanged up to the lesser of the new and -old sizes. -If the new size is larger, -the contents of the newly allocated portion of the memory are undefined. -Upon success, the memory referenced by -.Fa ptr -is freed and a pointer to the newly allocated memory is returned. -Note that -.Fn realloc -and -.Fn reallocf -may move the memory allocation, resulting in a different return value than -.Fa ptr . -If -.Fa ptr -is -.Dv NULL , -the -.Fn realloc -function behaves identically to -.Fn malloc -for the specified size. -.Pp -The -.Fn reallocf -function is identical to the -.Fn realloc -function, except that it -will free the passed pointer when the requested memory cannot be allocated. -This is a -.Fx -specific API designed to ease the problems with traditional coding styles -for -.Fn realloc -causing memory leaks in libraries. -.Pp -The -.Fn free -function causes the allocated memory referenced by -.Fa ptr -to be made available for future allocations. -If -.Fa ptr -is -.Dv NULL , -no action occurs. -.Pp -The -.Fn malloc_usable_size -function returns the usable size of the allocation pointed to by -.Fa ptr . -The return value may be larger than the size that was requested during -allocation. -The -.Fn malloc_usable_size -function is not a mechanism for in-place -.Fn realloc ; -rather it is provided solely as a tool for introspection purposes. -Any discrepancy between the requested allocation size and the size reported by -.Fn malloc_usable_size -should not be depended on, since such behavior is entirely -implementation-dependent. 
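
As a quick illustration of the introspection-only contract described above, a minimal sketch (not part of this patch, using only the interfaces listed in the SYNOPSIS) might look like the following; the surplus beyond the 100 requested bytes is implementation-dependent and must not be relied upon:

	/*
	 * Illustrative sketch: query the usable size of an allocation.
	 * malloc_usable_size() may report more than was requested.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <malloc_np.h>

	int
	main(void)
	{
		void *p = malloc(100);

		if (p == NULL)
			return (1);
		printf("requested 100, usable %zu\n", malloc_usable_size(p));
		free(p);
		return (0);
	}
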
-.Sh TUNING -Once, when the first call is made to one of these memory allocation -routines, various flags will be set or reset, which affects the -workings of this allocator implementation. -.Pp -The -.Dq name -of the file referenced by the symbolic link named -.Pa /etc/malloc.conf , -the value of the environment variable -.Ev MALLOC_OPTIONS , -and the string pointed to by the global variable -.Va _malloc_options -will be interpreted, in that order, from left to right as flags. -.Pp -Each flag is a single letter, optionally prefixed by a non-negative base 10 -integer repetition count. -For example, -.Dq 3N -is equivalent to -.Dq NNN . -Some flags control parameter magnitudes, where uppercase increases the -magnitude, and lowercase decreases the magnitude. -Other flags control boolean parameters, where uppercase indicates that a -behavior is set, or on, and lowercase means that a behavior is not set, or off. -.Bl -tag -width indent -.It A -All warnings (except for the warning about unknown -flags being set) become fatal. -The process will call -.Xr abort 3 -in these cases. -.It C -Double/halve the size of the maximum size class that is a multiple of the -cacheline size (64). -Above this size, subpage spacing (256 bytes) is used for size classes. -The default value is 512 bytes. -.It D -Use -.Xr sbrk 2 -to acquire memory in the data storage segment (DSS). -This option is enabled by default. -See the -.Dq M -option for related information and interactions. -.It E -Double/halve the size of the maximum medium size class. -The valid range is from one page to one half chunk. -The default value is 32 KiB. -.It F -Halve/double the per-arena minimum ratio of active to dirty pages. -Some dirty unused pages may be allowed to accumulate, within the limit set by -the ratio, before informing the kernel about at least half of those pages via -.Xr madvise 2 . -This provides the kernel with sufficient information to recycle dirty pages if -physical memory becomes scarce and the pages remain unused. -The default minimum ratio is 32:1; -.Ev MALLOC_OPTIONS=6F -will disable dirty page purging. -.It G -Double/halve the approximate interval (counted in terms of -thread-specific cache allocation/deallocation events) between full -thread-specific cache garbage collection sweeps. -Garbage collection is actually performed incrementally, one size -class at a time, in order to avoid large collection pauses. -The default sweep interval is 8192; -.Ev MALLOC_OPTIONS=14g -will disable garbage collection. -.It H -Double/halve the number of thread-specific cache slots per size -class. -When there are multiple threads, each thread uses a -thread-specific cache for small and medium objects. -Thread-specific caching allows many allocations to be satisfied -without performing any thread synchronization, at the cost of -increased memory use. -See the -.Dq G -option for related tuning information. -The default number of cache slots is 128; -.Ev MALLOC_OPTIONS=7h -will disable thread-specific caching. -Note that one cache slot per size class is not a valid -configuration due to implementation details. -.It J -Each byte of new memory allocated by -.Fn malloc , -.Fn realloc , -or -.Fn reallocf -will be initialized to 0xa5. -All memory returned by -.Fn free , -.Fn realloc , -or -.Fn reallocf -will be initialized to 0x5a. -This is intended for debugging and will impact performance negatively. -.It K -Double/halve the virtual memory chunk size. -The default chunk size is 4 MiB. 
-.It M -Use -.Xr mmap 2 -to acquire anonymously mapped memory. -This option is enabled by default. -If both the -.Dq D -and -.Dq M -options are enabled, the allocator prefers anonymous mappings over the DSS, -but allocation only fails if memory cannot be acquired via either method. -If neither option is enabled, then the -.Dq M -option is implicitly enabled in order to assure that there is a method for -acquiring memory. -.It N -Double/halve the number of arenas. -The default number of arenas is two times the number of CPUs, or one if there -is a single CPU. -.It P -Various statistics are printed at program exit via an -.Xr atexit 3 -function. -This has the potential to cause deadlock for a multi-threaded process that exits -while one or more threads are executing in the memory allocation functions. -Therefore, this option should only be used with care; it is primarily intended -as a performance tuning aid during application development. -.It Q -Double/halve the size of the maximum size class that is a multiple of the -quantum (8 or 16 bytes, depending on architecture). -Above this size, cacheline spacing is used for size classes. -The default value is 128 bytes. -.It U -Generate -.Dq utrace -entries for -.Xr ktrace 1 , -for all operations. -Consult the source for details on this option. -.It V -Attempting to allocate zero bytes will return a -.Dv NULL -pointer instead of a valid pointer. -(The default behavior is to make a minimal allocation and return a -pointer to it.) -This option is provided for System V compatibility. -This option is incompatible with the -.Dq X -option. -.It X -Rather than return failure for any allocation function, display a diagnostic -message on -.Dv STDERR_FILENO -and cause the program to drop core (using -.Xr abort 3 ) . -This option should be set at compile time by including the following in the -source code: -.Bd -literal -offset indent -_malloc_options = "X"; -.Ed -.It Z -Each byte of new memory allocated by -.Fn malloc , -.Fn realloc , -or -.Fn reallocf -will be initialized to 0. -Note that this initialization only happens once for each byte, so -.Fn realloc -and -.Fn reallocf -calls do not zero memory that was previously allocated. -This is intended for debugging and will impact performance negatively. -.El -.Pp -The -.Dq J -and -.Dq Z -options are intended for testing and debugging. -An application which changes its behavior when these options are used -is flawed. -.Sh IMPLEMENTATION NOTES -Traditionally, allocators have used -.Xr sbrk 2 -to obtain memory, which is suboptimal for several reasons, including race -conditions, increased fragmentation, and artificial limitations on maximum -usable memory. -This allocator uses both -.Xr sbrk 2 -and -.Xr mmap 2 -by default, but it can be configured at run time to use only one or the other. -If resource limits are not a primary concern, the preferred configuration is -.Ev MALLOC_OPTIONS=dM -or -.Ev MALLOC_OPTIONS=DM . -When so configured, the -.Ar datasize -resource limit has little practical effect for typical applications; use -.Ev MALLOC_OPTIONS=Dm -if that is a concern. -Regardless of allocator configuration, the -.Ar vmemoryuse -resource limit can be used to bound the total virtual memory used by a -process, as described in -.Xr limits 1 . -.Pp -This allocator uses multiple arenas in order to reduce lock contention for -threaded programs on multi-processor systems. -This works well with regard to threading scalability, but incurs some costs. 
-There is a small fixed per-arena overhead, and additionally, arenas manage -memory completely independently of each other, which means a small fixed -increase in overall memory fragmentation. -These overheads are not generally an issue, given the number of arenas normally -used. -Note that using substantially more arenas than the default is not likely to -improve performance, mainly due to reduced cache performance. -However, it may make sense to reduce the number of arenas if an application -does not make much use of the allocation functions. -.Pp -In addition to multiple arenas, this allocator supports thread-specific caching -for small and medium objects, in order to make it possible to completely avoid -synchronization for most small and medium allocation requests. -Such caching allows very fast allocation in the common case, but it increases -memory usage and fragmentation, since a bounded number of objects can remain -allocated in each thread cache. -.Pp -Memory is conceptually broken into equal-sized chunks, where the chunk size is -a power of two that is greater than the page size. -Chunks are always aligned to multiples of the chunk size. -This alignment makes it possible to find metadata for user objects very -quickly. -.Pp -User objects are broken into four categories according to size: small, medium, -large, and huge. -Small objects are smaller than one page. -Medium objects range from one page to an upper limit determined at run time (see -the -.Dq E -option). -Large objects are smaller than the chunk size. -Huge objects are a multiple of the chunk size. -Small, medium, and large objects are managed by arenas; huge objects are managed -separately in a single data structure that is shared by all threads. -Huge objects are used by applications infrequently enough that this single -data structure is not a scalability issue. -.Pp -Each chunk that is managed by an arena tracks its contents as runs of -contiguous pages (unused, backing a set of small or medium objects, or backing -one large object). -The combination of chunk alignment and chunk page maps makes it possible to -determine all metadata regarding small and large allocations in constant time. -.Pp -Small and medium objects are managed in groups by page runs. -Each run maintains a bitmap that tracks which regions are in use. -Allocation requests that are no more than half the quantum (8 or 16, depending -on architecture) are rounded up to the nearest power of two. -Allocation requests that are more than half the quantum, but no more than the -minimum cacheline-multiple size class (see the -.Dq Q -option) are rounded up to the nearest multiple of the quantum. -Allocation requests that are more than the minimum cacheline-multiple size -class, but no more than the minimum subpage-multiple size class (see the -.Dq C -option) are rounded up to the nearest multiple of the cacheline size (64). -Allocation requests that are more than the minimum subpage-multiple size class, -but no more than the maximum subpage-multiple size class are rounded up to the -nearest multiple of the subpage size (256). -Allocation requests that are more than the maximum subpage-multiple size class, -but no more than the maximum medium size class (see the -.Dq M -option) are rounded up to the nearest medium size class; spacing is an -automatically determined power of two and ranges from the subpage size to the -page size. 
-Allocation requests that are more than the maximum medium size class, but small -enough to fit in an arena-managed chunk (see the -.Dq K -option), are rounded up to the nearest run size. -Allocation requests that are too large to fit in an arena-managed chunk are -rounded up to the nearest multiple of the chunk size. -.Pp -Allocations are packed tightly together, which can be an issue for -multi-threaded applications. -If you need to assure that allocations do not suffer from cacheline sharing, -round your allocation requests up to the nearest multiple of the cacheline -size. -.Sh DEBUGGING MALLOC PROBLEMS -The first thing to do is to set the -.Dq A -option. -This option forces a coredump (if possible) at the first sign of trouble, -rather than the normal policy of trying to continue if at all possible. -.Pp -It is probably also a good idea to recompile the program with suitable -options and symbols for debugger support. -.Pp -If the program starts to give unusual results, coredump or generally behave -differently without emitting any of the messages mentioned in the next -section, it is likely because it depends on the storage being filled with -zero bytes. -Try running it with the -.Dq Z -option set; -if that improves the situation, this diagnosis has been confirmed. -If the program still misbehaves, -the likely problem is accessing memory outside the allocated area. -.Pp -Alternatively, if the symptoms are not easy to reproduce, setting the -.Dq J -option may help provoke the problem. -.Pp -In truly difficult cases, the -.Dq U -option, if supported by the kernel, can provide a detailed trace of -all calls made to these functions. -.Pp -Unfortunately this implementation does not provide much detail about -the problems it detects; the performance impact for storing such information -would be prohibitive. -There are a number of allocator implementations available on the Internet -which focus on detecting and pinpointing problems by trading performance for -extra sanity checks and detailed diagnostics. -.Sh DIAGNOSTIC MESSAGES -If any of the memory allocation/deallocation functions detect an error or -warning condition, a message will be printed to file descriptor -.Dv STDERR_FILENO . -Errors will result in the process dumping core. -If the -.Dq A -option is set, all warnings are treated as errors. -.Pp -The -.Va _malloc_message -variable allows the programmer to override the function which emits the text -strings forming the errors and warnings if for some reason the -.Dv STDERR_FILENO -file descriptor is not suitable for this. -Please note that doing anything which tries to allocate memory in this function -is likely to result in a crash or deadlock. -.Pp -All messages are prefixed by -.Dq Ao Ar progname Ac Ns Li : (malloc) . -.Sh RETURN VALUES -The -.Fn malloc -and -.Fn calloc -functions return a pointer to the allocated memory if successful; otherwise -a -.Dv NULL -pointer is returned and -.Va errno -is set to -.Er ENOMEM . -.Pp -The -.Fn realloc -and -.Fn reallocf -functions return a pointer, possibly identical to -.Fa ptr , -to the allocated memory -if successful; otherwise a -.Dv NULL -pointer is returned, and -.Va errno -is set to -.Er ENOMEM -if the error was the result of an allocation failure. -The -.Fn realloc -function always leaves the original buffer intact -when an error occurs, whereas -.Fn reallocf -deallocates it in this case. -.Pp -The -.Fn free -function returns no value. 
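
A minimal sketch of the two error-handling patterns implied by these return values (the helper names grow_realloc() and grow_reallocf() are hypothetical, introduced only for illustration): on failure, realloc() leaves the original block allocated, so the caller must release it, whereas reallocf() has already done so.

	#include <stdlib.h>

	static int
	grow_realloc(char **bufp, size_t nsize)
	{
		char *tmp = realloc(*bufp, nsize);

		if (tmp == NULL) {
			free(*bufp);	/* Original block is still allocated. */
			*bufp = NULL;
			return (-1);
		}
		*bufp = tmp;
		return (0);
	}

	static int
	grow_reallocf(char **bufp, size_t nsize)
	{
		char *tmp = reallocf(*bufp, nsize);

		if (tmp == NULL) {
			*bufp = NULL;	/* reallocf() already freed the old block. */
			return (-1);
		}
		*bufp = tmp;
		return (0);
	}

	int
	main(void)
	{
		char *buf = malloc(16);

		if (buf == NULL)
			return (1);
		if (grow_realloc(&buf, 1024) != 0 ||
		    grow_reallocf(&buf, 4096) != 0)
			return (1);
		free(buf);
		return (0);
	}
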
-.Pp -The -.Fn malloc_usable_size -function returns the usable size of the allocation pointed to by -.Fa ptr . -.Sh ENVIRONMENT -The following environment variables affect the execution of the allocation -functions: -.Bl -tag -width ".Ev MALLOC_OPTIONS" -.It Ev MALLOC_OPTIONS -If the environment variable -.Ev MALLOC_OPTIONS -is set, the characters it contains will be interpreted as flags to the -allocation functions. -.El -.Sh EXAMPLES -To dump core whenever a problem occurs: -.Bd -literal -offset indent -ln -s 'A' /etc/malloc.conf -.Ed -.Pp -To specify in the source that a program does no return value checking -on calls to these functions: -.Bd -literal -offset indent -_malloc_options = "X"; -.Ed -.Sh SEE ALSO -.Xr limits 1 , -.Xr madvise 2 , -.Xr mmap 2 , -.Xr sbrk 2 , -.Xr alloca 3 , -.Xr atexit 3 , -.Xr getpagesize 3 , -.Xr getpagesizes 3 , -.Xr memory 3 , -.Xr posix_memalign 3 -.Sh STANDARDS -The -.Fn malloc , -.Fn calloc , -.Fn realloc -and -.Fn free -functions conform to -.St -isoC . -.Sh HISTORY -The -.Fn reallocf -function first appeared in -.Fx 3.0 . -.Pp -The -.Fn malloc_usable_size -function first appeared in -.Fx 7.0 . Index: lib/libc/stdlib/aligned_alloc.3 =================================================================== --- lib/libc/stdlib/aligned_alloc.3 (revision 234236) +++ lib/libc/stdlib/aligned_alloc.3 (working copy) @@ -1,126 +0,0 @@ -.\" Copyright (C) 2006 Jason Evans . -.\" All rights reserved. -.\" -.\" Redistribution and use in source and binary forms, with or without -.\" modification, are permitted provided that the following conditions -.\" are met: -.\" 1. Redistributions of source code must retain the above copyright -.\" notice(s), this list of conditions and the following disclaimer as -.\" the first lines of this file unmodified other than the possible -.\" addition of one or more copyright notices. -.\" 2. Redistributions in binary form must reproduce the above copyright -.\" notice(s), this list of conditions and the following disclaimer in -.\" the documentation and/or other materials provided with the -.\" distribution. -.\" -.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY -.\" EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -.\" PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE -.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR -.\" BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -.\" OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, -.\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -.\" -.\" $FreeBSD$ -.\" -.Dd January 7, 2011 -.Dt ALIGNED_ALLOC 3 -.Os -.Sh NAME -.Nm aligned_alloc , -.Nm posix_memalign -.Nd aligned memory allocation -.Sh LIBRARY -.Lb libc -.Sh SYNOPSIS -.In stdlib.h -.Ft void * -.Fn aligned_alloc "size_t alignment" "size_t size" -.Ft int -.Fn posix_memalign "void **ptr" "size_t alignment" "size_t size" -.Sh DESCRIPTION -The -.Fn aligned_alloc -and -.Fn posix_memalign -functions allocate -.Fa size -bytes of memory such that the allocation's base address is an even multiple of -.Fa alignment . 
-The -.Fn aligned_alloc -function returns the allocation, while the -.Fn posix_memalign -function stores the allocation in the value pointed to by -.Fa ptr . -.Pp -The requested -.Fa alignment -must be a power of 2 at least as large as -.Fn sizeof "void *" . -.Pp -Memory that is allocated via -.Fn aligned_alloc -and -.Fn posix_memalign -can be used as an argument in subsequent calls to -.Xr realloc 3 , -.Xr reallocf 3 , -and -.Xr free 3 . -.Sh RETURN VALUES -The -.Fn aligned_alloc -function returns a pointer to the allocation if successful; otherwise a -NULL pointer is returned and -.Va errno -is set to an error value. -.Pp -The -.Fn posix_memalign -function returns the value 0 if successful; otherwise it returns an error value. -.Sh ERRORS -The -.Fn aligned_alloc -and -.Fn posix_memalign -functions will fail if: -.Bl -tag -width Er -.It Bq Er EINVAL -The -.Fa alignment -parameter is not a power of 2 at least as large as -.Fn sizeof "void *" . -.It Bq Er ENOMEM -Memory allocation error. -.El -.Sh SEE ALSO -.Xr free 3 , -.Xr malloc 3 , -.Xr realloc 3 , -.Xr reallocf 3 , -.Xr valloc 3 -.Sh STANDARDS -The -.Fn aligned_alloc -function conforms to -.St -isoC-2011 . -.Pp -The -.Fn posix_memalign -function conforms to -.St -p1003.1-2001 . -.Sh HISTORY -The -.Fn posix_memalign -function first appeared in -.Fx 7.0 . -.Pp -The -.Fn aligned_alloc -function first appeared in -.Fx 10.0 . Index: lib/libc/stdlib/jemalloc/Symbol.map =================================================================== --- lib/libc/stdlib/jemalloc/Symbol.map (revision 0) +++ lib/libc/stdlib/jemalloc/Symbol.map (working copy) @@ -0,0 +1,35 @@ +/* + * $FreeBSD$ + */ + +FBSD_1.0 { + _malloc_options; + _malloc_message; + malloc; + posix_memalign; + calloc; + realloc; + free; + malloc_usable_size; +}; + +FBSD_1.3 { + malloc_conf; + malloc_message; + aligned_alloc; + malloc_stats_print; + mallctl; + mallctlnametomib; + mallctlbymib; + allocm; + rallocm; + sallocm; + dallocm; + nallocm; +}; + +FBSDprivate_1.0 { + _malloc_thread_cleanup; + _malloc_prefork; + _malloc_postfork; +}; Index: lib/libc/stdlib/jemalloc/Makefile.inc =================================================================== --- lib/libc/stdlib/jemalloc/Makefile.inc (revision 0) +++ lib/libc/stdlib/jemalloc/Makefile.inc (working copy) @@ -0,0 +1,46 @@ +# $FreeBSD$ + +.PATH: ${.CURDIR}/stdlib/jemalloc + +JEMALLOCSRCS:= jemalloc.c arena.c atomic.c base.c bitmap.c chunk.c \ + chunk_dss.c chunk_mmap.c ckh.c ctl.c extent.c hash.c huge.c mb.c \ + mutex.c prof.c quarantine.c rtree.c stats.c tcache.c util.c tsd.c + +SYM_MAPS+=${.CURDIR}/stdlib/jemalloc/Symbol.map + +CFLAGS+=-I${.CURDIR}/../../contrib/jemalloc/include + +.for src in ${JEMALLOCSRCS} +MISRCS+=jemalloc_${src} +CLEANFILES+=jemalloc_${src} +jemalloc_${src}: + ln -sf ${.CURDIR}/../../contrib/jemalloc/src/${src} ${.TARGET} +.endfor + +MAN+=jemalloc.3 +CLEANFILES+=jemalloc.3 +jemalloc.3: + ln -sf ${.CURDIR}/../../contrib/jemalloc/doc/jemalloc.3 ${.TARGET} + +MLINKS+= \ + jemalloc.3 malloc.3 \ + jemalloc.3 calloc.3 \ + jemalloc.3 posix_memalign.3 \ + jemalloc.3 aligned_alloc.3 \ + jemalloc.3 realloc.3 \ + jemalloc.3 free.3 \ + jemalloc.3 malloc_usable_size.3 \ + jemalloc.3 malloc_stats_print.3 \ + jemalloc.3 mallctl.3 \ + jemalloc.3 mallctlnametomib.3 \ + jemalloc.3 mallctlbymib.3 \ + jemalloc.3 allocm.3 \ + jemalloc.3 rallocm.3 \ + jemalloc.3 sallocm.3 \ + jemalloc.3 dallocm.3 \ + jemalloc.3 nallocm.3 \ + jemalloc.3 malloc.conf.5 + +.if defined(MALLOC_PRODUCTION) +CFLAGS+= -DMALLOC_PRODUCTION +.endif Property changes 
on: lib/libc/stdlib/jemalloc/Makefile.inc ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: lib/libc/stdlib/malloc.c =================================================================== --- lib/libc/stdlib/malloc.c (revision 234236) +++ lib/libc/stdlib/malloc.c (working copy) @@ -1,6270 +0,0 @@ -/*- - * Copyright (C) 2006-2010 Jason Evans . - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice(s), this list of conditions and the following disclaimer as - * the first lines of this file unmodified other than the possible - * addition of one or more copyright notices. - * 2. Redistributions in binary form must reproduce the above copyright - * notice(s), this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, - * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - ******************************************************************************* - * - * This allocator implementation is designed to provide scalable performance - * for multi-threaded programs on multi-processor systems. The following - * features are included for this purpose: - * - * + Multiple arenas are used if there are multiple CPUs, which reduces lock - * contention and cache sloshing. - * - * + Thread-specific caching is used if there are multiple threads, which - * reduces the amount of locking. - * - * + Cache line sharing between arenas is avoided for internal data - * structures. - * - * + Memory is managed in chunks and runs (chunks can be split into runs), - * rather than as individual pages. This provides a constant-time - * mechanism for associating allocations with particular arenas. - * - * Allocation requests are rounded up to the nearest size class, and no record - * of the original request size is maintained. Allocations are broken into - * categories according to size class. Assuming runtime defaults, 4 KiB pages - * and a 16 byte quantum on a 32-bit system, the size classes in each category - * are as follows: - * - * |========================================| - * | Category | Subcategory | Size | - * |========================================| - * | Small | Tiny | 2 | - * | | | 4 | - * | | | 8 | - * | |------------------+----------| - * | | Quantum-spaced | 16 | - * | | | 32 | - * | | | 48 | - * | | | ... 
| - * | | | 96 | - * | | | 112 | - * | | | 128 | - * | |------------------+----------| - * | | Cacheline-spaced | 192 | - * | | | 256 | - * | | | 320 | - * | | | 384 | - * | | | 448 | - * | | | 512 | - * | |------------------+----------| - * | | Sub-page | 760 | - * | | | 1024 | - * | | | 1280 | - * | | | ... | - * | | | 3328 | - * | | | 3584 | - * | | | 3840 | - * |========================================| - * | Medium | 4 KiB | - * | | 6 KiB | - * | | 8 KiB | - * | | ... | - * | | 28 KiB | - * | | 30 KiB | - * | | 32 KiB | - * |========================================| - * | Large | 36 KiB | - * | | 40 KiB | - * | | 44 KiB | - * | | ... | - * | | 1012 KiB | - * | | 1016 KiB | - * | | 1020 KiB | - * |========================================| - * | Huge | 1 MiB | - * | | 2 MiB | - * | | 3 MiB | - * | | ... | - * |========================================| - * - * Different mechanisms are used accoding to category: - * - * Small/medium : Each size class is segregated into its own set of runs. - * Each run maintains a bitmap of which regions are - * free/allocated. - * - * Large : Each allocation is backed by a dedicated run. Metadata are stored - * in the associated arena chunk header maps. - * - * Huge : Each allocation is backed by a dedicated contiguous set of chunks. - * Metadata are stored in a separate red-black tree. - * - ******************************************************************************* - */ - -/* - * MALLOC_PRODUCTION disables assertions and statistics gathering. It also - * defaults the A and J runtime options to off. These settings are appropriate - * for production systems. - */ -/* #define MALLOC_PRODUCTION */ - -#ifndef MALLOC_PRODUCTION - /* - * MALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -# define MALLOC_DEBUG - - /* MALLOC_STATS enables statistics calculation. */ -# define MALLOC_STATS -#endif - -/* - * MALLOC_TINY enables support for tiny objects, which are smaller than one - * quantum. - */ -#define MALLOC_TINY - -/* - * MALLOC_TCACHE enables a thread-specific caching layer for small and medium - * objects. This makes it possible to allocate/deallocate objects without any - * locking when the cache is in the steady state. - */ -#define MALLOC_TCACHE - -/* - * MALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage - * segment (DSS). In an ideal world, this functionality would be completely - * unnecessary, but we are burdened by history and the lack of resource limits - * for anonymous mapped memory. - */ -#define MALLOC_DSS - -#include -__FBSDID("$FreeBSD$"); - -#include "libc_private.h" -#ifdef MALLOC_DEBUG -# define _LOCK_DEBUG -#endif -#include "spinlock.h" -#include "namespace.h" -#include -#include -#include -#include -#include -#include -#include /* Must come after several other sys/ includes. */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "un-namespace.h" - -#include "libc_private.h" - -#define RB_COMPACT -#include "rb.h" -#if (defined(MALLOC_TCACHE) && defined(MALLOC_STATS)) -#include "qr.h" -#include "ql.h" -#endif - -#ifdef MALLOC_DEBUG - /* Disable inlining to make debugging easier. */ -# define inline -#endif - -/* Size of stack-allocated buffer passed to strerror_r(). */ -#define STRERROR_BUF 64 - -/* - * Minimum alignment of allocations is 2^LG_QUANTUM bytes. 
- */ -#ifdef __i386__ -# define LG_QUANTUM 4 -# define LG_SIZEOF_PTR 2 -# define CPU_SPINWAIT __asm__ volatile("pause") -# ifdef __clang__ -# define TLS_MODEL /* clang does not support tls_model yet */ -# else -# define TLS_MODEL __attribute__((tls_model("initial-exec"))) -# endif -#endif -#ifdef __ia64__ -# define LG_QUANTUM 4 -# define LG_SIZEOF_PTR 3 -# define TLS_MODEL /* default */ -#endif -#ifdef __alpha__ -# define LG_QUANTUM 4 -# define LG_SIZEOF_PTR 3 -# define NO_TLS -#endif -#ifdef __sparc64__ -# define LG_QUANTUM 4 -# define LG_SIZEOF_PTR 3 -# define TLS_MODEL __attribute__((tls_model("initial-exec"))) -#endif -#ifdef __amd64__ -# define LG_QUANTUM 4 -# define LG_SIZEOF_PTR 3 -# define CPU_SPINWAIT __asm__ volatile("pause") -# ifdef __clang__ -# define TLS_MODEL /* clang does not support tls_model yet */ -# else -# define TLS_MODEL __attribute__((tls_model("initial-exec"))) -# endif -#endif -#ifdef __arm__ -# define LG_QUANTUM 3 -# define LG_SIZEOF_PTR 2 -# define NO_TLS -#endif -#ifdef __mips__ -# define LG_QUANTUM 3 -# define LG_SIZEOF_PTR 2 -# define NO_TLS -#endif -#ifdef __powerpc64__ -# define LG_QUANTUM 4 -# define LG_SIZEOF_PTR 3 -# define TLS_MODEL /* default */ -#elif defined(__powerpc__) -# define LG_QUANTUM 4 -# define LG_SIZEOF_PTR 2 -# define TLS_MODEL /* default */ -#endif -#ifdef __s390x__ -# define LG_QUANTUM 4 -#endif - -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) - -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) - -/* sizeof(int) == (1U << LG_SIZEOF_INT). */ -#ifndef LG_SIZEOF_INT -# define LG_SIZEOF_INT 2 -#endif - -/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */ -#if (!defined(PIC) && !defined(NO_TLS)) -# define NO_TLS -#endif - -#ifdef NO_TLS - /* MALLOC_TCACHE requires TLS. */ -# ifdef MALLOC_TCACHE -# undef MALLOC_TCACHE -# endif -#endif - -/* - * Size and alignment of memory chunks that are allocated by the OS's virtual - * memory system. - */ -#define LG_CHUNK_DEFAULT 22 - -/* - * The minimum ratio of active:dirty pages per arena is computed as: - * - * (nactive >> opt_lg_dirty_mult) >= ndirty - * - * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32 - * times as many active pages as dirty pages. - */ -#define LG_DIRTY_MULT_DEFAULT 5 - -/* - * Maximum size of L1 cache line. This is used to avoid cache line aliasing. - * In addition, this controls the spacing of cacheline-spaced size classes. - */ -#define LG_CACHELINE 6 -#define CACHELINE ((size_t)(1U << LG_CACHELINE)) -#define CACHELINE_MASK (CACHELINE - 1) - -/* - * Subpages are an artificially designated partitioning of pages. Their only - * purpose is to support subpage-spaced size classes. - * - * There must be at least 4 subpages per page, due to the way size classes are - * handled. - */ -#define LG_SUBPAGE 8 -#define SUBPAGE ((size_t)(1U << LG_SUBPAGE)) -#define SUBPAGE_MASK (SUBPAGE - 1) - -#ifdef MALLOC_TINY - /* Smallest size class to support. */ -# define LG_TINY_MIN 1 -#endif - -/* - * Maximum size class that is a multiple of the quantum, but not (necessarily) - * a power of 2. Above this size, allocations are rounded up to the nearest - * power of 2. - */ -#define LG_QSPACE_MAX_DEFAULT 7 - -/* - * Maximum size class that is a multiple of the cacheline, but not (necessarily) - * a power of 2. Above this size, allocations are rounded up to the nearest - * power of 2. - */ -#define LG_CSPACE_MAX_DEFAULT 9 - -/* - * Maximum medium size class. 
This must not be more than 1/4 of a chunk - * (LG_MEDIUM_MAX_DEFAULT <= LG_CHUNK_DEFAULT - 2). - */ -#define LG_MEDIUM_MAX_DEFAULT 15 - -/* - * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized - * as small as possible such that this setting is still honored, without - * violating other constraints. The goal is to make runs as small as possible - * without exceeding a per run external fragmentation threshold. - * - * We use binary fixed point math for overhead computations, where the binary - * point is implicitly RUN_BFP bits to the left. - * - * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be - * honored for some/all object sizes, since there is one bit of header overhead - * per object (plus a constant). This constraint is relaxed (ignored) for runs - * that are so small that the per-region overhead is greater than: - * - * (RUN_MAX_OVRHD / (reg_size << (3+RUN_BFP)) - */ -#define RUN_BFP 12 -/* \/ Implicit binary fixed point. */ -#define RUN_MAX_OVRHD 0x0000003dU -#define RUN_MAX_OVRHD_RELAX 0x00001800U - -/* Put a cap on small object run size. This overrides RUN_MAX_OVRHD. */ -#define RUN_MAX_SMALL \ - (arena_maxclass <= (1U << (CHUNK_MAP_LG_PG_RANGE + PAGE_SHIFT)) \ - ? arena_maxclass : (1U << (CHUNK_MAP_LG_PG_RANGE + \ - PAGE_SHIFT))) - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. If no such instruction is defined - * above, make CPU_SPINWAIT a no-op. - */ -#ifndef CPU_SPINWAIT -# define CPU_SPINWAIT -#endif - -/* - * Adaptive spinning must eventually switch to blocking, in order to avoid the - * potential for priority inversion deadlock. Backing off past a certain point - * can actually waste time. - */ -#define LG_SPIN_LIMIT 11 - -#ifdef MALLOC_TCACHE - /* - * Default number of cache slots for each bin in the thread cache (0: - * disabled). - */ -# define LG_TCACHE_NSLOTS_DEFAULT 7 - /* - * (1U << opt_lg_tcache_gc_sweep) is the approximate number of - * allocation events between full GC sweeps (-1: disabled). Integer - * rounding may cause the actual number to be slightly higher, since GC is - * performed incrementally. - */ -# define LG_TCACHE_GC_SWEEP_DEFAULT 13 -#endif - -/******************************************************************************/ - -/* - * Mutexes based on spinlocks. We can't use normal pthread spinlocks in all - * places, because they require malloc()ed memory, which causes bootstrapping - * issues in some cases. - */ -typedef struct { - spinlock_t lock; -} malloc_mutex_t; - -/* Set to true once the allocator has been initialized. */ -static bool malloc_initialized = false; - -/* Used to avoid initialization races. */ -static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER}; - -/******************************************************************************/ -/* - * Statistics data structures. - */ - -#ifdef MALLOC_STATS - -#ifdef MALLOC_TCACHE -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. - */ - uint64_t nrequests; -}; -#endif - -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -struct malloc_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. - */ - uint64_t nrequests; - -#ifdef MALLOC_TCACHE - /* Number of tcache fills from this bin. */ - uint64_t nfills; - - /* Number of tcache flushes to this bin. 
*/ - uint64_t nflushes; -#endif - - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; - - /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. - */ - uint64_t reruns; - - /* High-water mark for this bin. */ - size_t highruns; - - /* Current number of runs in this bin. */ - size_t curruns; -}; - -typedef struct malloc_large_stats_s malloc_large_stats_t; -struct malloc_large_stats_s { - /* - * Number of allocation requests that corresponded to this size class. - */ - uint64_t nrequests; - - /* High-water mark for this size class. */ - size_t highruns; - - /* Current number of runs of this size class. */ - size_t curruns; -}; - -typedef struct arena_stats_s arena_stats_t; -struct arena_stats_s { - /* Number of bytes currently mapped. */ - size_t mapped; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* Per-size-category statistics. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - - size_t allocated_medium; - uint64_t nmalloc_medium; - uint64_t ndalloc_medium; - - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - - /* - * One element for each possible size class, including sizes that - * overlap with bin size classes. This is necessary because ipalloc() - * sometimes has to use such large objects in order to assure proper - * alignment. - */ - malloc_large_stats_t *lstats; -}; - -typedef struct chunk_stats_s chunk_stats_t; -struct chunk_stats_s { - /* Number of chunks that were allocated. */ - uint64_t nchunks; - - /* High-water mark for number of chunks allocated. */ - size_t highchunks; - - /* - * Current number of chunks allocated. This value isn't maintained for - * any other purpose, so keep track of it in order to be able to set - * highchunks. - */ - size_t curchunks; -}; - -#endif /* #ifdef MALLOC_STATS */ - -/******************************************************************************/ -/* - * Extent data structures. - */ - -/* Tree of extents. */ -typedef struct extent_node_s extent_node_t; -struct extent_node_s { -#ifdef MALLOC_DSS - /* Linkage for the size/address-ordered tree. */ - rb_node(extent_node_t) link_szad; -#endif - - /* Linkage for the address-ordered tree. */ - rb_node(extent_node_t) link_ad; - - /* Pointer to the extent that this tree node is responsible for. */ - void *addr; - - /* Total region size. */ - size_t size; -}; -typedef rb_tree(extent_node_t) extent_tree_t; - -/******************************************************************************/ -/* - * Arena data structures. - */ - -typedef struct arena_s arena_t; -typedef struct arena_bin_s arena_bin_t; - -/* Each element of the chunk map corresponds to one page within the chunk. */ -typedef struct arena_chunk_map_s arena_chunk_map_t; -struct arena_chunk_map_s { - /* - * Linkage for run trees. There are two disjoint uses: - * - * 1) arena_t's runs_avail tree. - * 2) arena_run_t conceptually uses this linkage for in-use non-full - * runs, rather than directly embedding linkage. - */ - rb_node(arena_chunk_map_t) link; - - /* - * Run address (or size) and various flags are stored together. The bit - * layout looks like (assuming 32-bit system): - * - * ???????? ???????? ????cccc ccccdzla - * - * ? : Unallocated: Run address for first/last pages, unset for internal - * pages. 
- * Small/medium: Don't care. - * Large: Run size for first page, unset for trailing pages. - * - : Unused. - * c : refcount (could overflow for PAGE_SIZE >= 128 KiB) - * d : dirty? - * z : zeroed? - * l : large? - * a : allocated? - * - * Following are example bit patterns for the three types of runs. - * - * p : run page offset - * s : run size - * x : don't care - * - : 0 - * [dzla] : bit set - * - * Unallocated: - * ssssssss ssssssss ssss---- -------- - * xxxxxxxx xxxxxxxx xxxx---- ----d--- - * ssssssss ssssssss ssss---- -----z-- - * - * Small/medium: - * pppppppp ppppcccc cccccccc cccc---a - * pppppppp ppppcccc cccccccc cccc---a - * pppppppp ppppcccc cccccccc cccc---a - * - * Large: - * ssssssss ssssssss ssss---- ------la - * -------- -------- -------- ------la - * -------- -------- -------- ------la - */ - size_t bits; -#define CHUNK_MAP_PG_MASK ((size_t)0xfff00000U) -#define CHUNK_MAP_PG_SHIFT 20 -#define CHUNK_MAP_LG_PG_RANGE 12 - -#define CHUNK_MAP_RC_MASK ((size_t)0xffff0U) -#define CHUNK_MAP_RC_ONE ((size_t)0x00010U) - -#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU) -#define CHUNK_MAP_DIRTY ((size_t)0x8U) -#define CHUNK_MAP_ZEROED ((size_t)0x4U) -#define CHUNK_MAP_LARGE ((size_t)0x2U) -#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) -#define CHUNK_MAP_KEY (CHUNK_MAP_DIRTY | CHUNK_MAP_ALLOCATED) -}; -typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; -typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; - -/* Arena chunk header. */ -typedef struct arena_chunk_s arena_chunk_t; -struct arena_chunk_s { - /* Arena that owns the chunk. */ - arena_t *arena; - - /* Linkage for the arena's chunks_dirty tree. */ - rb_node(arena_chunk_t) link_dirty; - - /* - * True if the chunk is currently in the chunks_dirty tree, due to - * having at some point contained one or more dirty pages. Removal - * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible. - */ - bool dirtied; - - /* Number of dirty pages. */ - size_t ndirty; - - /* Map of pages within chunk that keeps track of free/large/small. */ - arena_chunk_map_t map[1]; /* Dynamically sized. */ -}; -typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; - -typedef struct arena_run_s arena_run_t; -struct arena_run_s { -#ifdef MALLOC_DEBUG - uint32_t magic; -# define ARENA_RUN_MAGIC 0x384adf93 -#endif - - /* Bin this run is associated with. */ - arena_bin_t *bin; - - /* Index of first element that might have a free region. */ - unsigned regs_minelm; - - /* Number of free regions in run. */ - unsigned nfree; - - /* Bitmask of in-use regions (0: in use, 1: free). */ - unsigned regs_mask[1]; /* Dynamically sized. */ -}; - -struct arena_bin_s { - /* - * Current run being used to service allocations of this bin's size - * class. - */ - arena_run_t *runcur; - - /* - * Tree of non-full runs. This tree is used when looking for an - * existing run when runcur is no longer usable. We choose the - * non-full run that is lowest in memory; this policy tends to keep - * objects packed well, and it can also help reduce the number of - * almost-empty chunks. - */ - arena_run_tree_t runs; - - /* Size of regions in a run for this bin's size class. */ - size_t reg_size; - - /* Total size of a run for this bin's size class. */ - size_t run_size; - - /* Total number of regions in a run for this bin's size class. */ - uint32_t nregs; - - /* Number of elements in a run's regs_mask for this bin's size class. */ - uint32_t regs_mask_nelms; - - /* Offset of first region in a run for this bin's size class. 
*/ - uint32_t reg0_offset; - -#ifdef MALLOC_STATS - /* Bin statistics. */ - malloc_bin_stats_t stats; -#endif -}; - -#ifdef MALLOC_TCACHE -typedef struct tcache_s tcache_t; -#endif - -struct arena_s { -#ifdef MALLOC_DEBUG - uint32_t magic; -# define ARENA_MAGIC 0x947d3d24 -#endif - - /* All operations on this arena require that lock be locked. */ - pthread_mutex_t lock; - -#ifdef MALLOC_STATS - arena_stats_t stats; -# ifdef MALLOC_TCACHE - /* - * List of tcaches for extant threads associated with this arena. - * Stats from these are merged incrementally, and at exit. - */ - ql_head(tcache_t) tcache_ql; -# endif -#endif - - /* Tree of dirty-page-containing chunks this arena manages. */ - arena_chunk_tree_t chunks_dirty; - - /* - * In order to avoid rapid chunk allocation/deallocation when an arena - * oscillates right on the cusp of needing a new chunk, cache the most - * recently freed chunk. The spare is left in the arena's chunk trees - * until it is deleted. - * - * There is one spare chunk per arena, rather than one spare total, in - * order to avoid interactions between multiple threads that could make - * a single spare inadequate. - */ - arena_chunk_t *spare; - - /* Number of pages in active runs. */ - size_t nactive; - - /* - * Current count of pages within unused runs that are potentially - * dirty, and for which madvise(... MADV_FREE) has not been called. By - * tracking this, we can institute a limit on how much dirty unused - * memory is mapped for each arena. - */ - size_t ndirty; - - /* - * Size/address-ordered tree of this arena's available runs. This tree - * is used for first-best-fit run allocation. - */ - arena_avail_tree_t runs_avail; - - /* - * bins is used to store trees of free regions of the following sizes, - * assuming a 16-byte quantum, 4 KiB page size, and default - * MALLOC_OPTIONS. - * - * bins[i] | size | - * --------+--------+ - * 0 | 2 | - * 1 | 4 | - * 2 | 8 | - * --------+--------+ - * 3 | 16 | - * 4 | 32 | - * 5 | 48 | - * : : - * 8 | 96 | - * 9 | 112 | - * 10 | 128 | - * --------+--------+ - * 11 | 192 | - * 12 | 256 | - * 13 | 320 | - * 14 | 384 | - * 15 | 448 | - * 16 | 512 | - * --------+--------+ - * 17 | 768 | - * 18 | 1024 | - * 19 | 1280 | - * : : - * 27 | 3328 | - * 28 | 3584 | - * 29 | 3840 | - * --------+--------+ - * 30 | 4 KiB | - * 31 | 6 KiB | - * 33 | 8 KiB | - * : : - * 43 | 28 KiB | - * 44 | 30 KiB | - * 45 | 32 KiB | - * --------+--------+ - */ - arena_bin_t bins[1]; /* Dynamically sized. */ -}; - -/******************************************************************************/ -/* - * Thread cache data structures. - */ - -#ifdef MALLOC_TCACHE -typedef struct tcache_bin_s tcache_bin_t; -struct tcache_bin_s { -# ifdef MALLOC_STATS - tcache_bin_stats_t tstats; -# endif - unsigned low_water; /* Min # cached since last GC. */ - unsigned high_water; /* Max # cached since last GC. */ - unsigned ncached; /* # of cached objects. */ - void *slots[1]; /* Dynamically sized. */ -}; - -struct tcache_s { -# ifdef MALLOC_STATS - ql_elm(tcache_t) link; /* Used for aggregating stats. */ -# endif - arena_t *arena; /* This thread's arena. */ - unsigned ev_cnt; /* Event count since incremental GC. */ - unsigned next_gc_bin; /* Next bin to GC. */ - tcache_bin_t *tbins[1]; /* Dynamically sized. */ -}; -#endif - -/******************************************************************************/ -/* - * Data. - */ - -/* Number of CPUs. */ -static unsigned ncpus; - -/* Various bin-related settings. */ -#ifdef MALLOC_TINY /* Number of (2^n)-spaced tiny bins. 
*/ -# define ntbins ((unsigned)(LG_QUANTUM - LG_TINY_MIN)) -#else -# define ntbins 0 -#endif -static unsigned nqbins; /* Number of quantum-spaced bins. */ -static unsigned ncbins; /* Number of cacheline-spaced bins. */ -static unsigned nsbins; /* Number of subpage-spaced bins. */ -static unsigned nmbins; /* Number of medium bins. */ -static unsigned nbins; -static unsigned mbin0; /* mbin offset (nbins - nmbins). */ -#ifdef MALLOC_TINY -# define tspace_max ((size_t)(QUANTUM >> 1)) -#endif -#define qspace_min QUANTUM -static size_t qspace_max; -static size_t cspace_min; -static size_t cspace_max; -static size_t sspace_min; -static size_t sspace_max; -#define small_maxclass sspace_max -#define medium_min PAGE_SIZE -static size_t medium_max; -#define bin_maxclass medium_max - -/* - * Soft limit on the number of medium size classes. Spacing between medium - * size classes never exceeds pagesize, which can force more than NBINS_MAX - * medium size classes. - */ -#define NMBINS_MAX 16 -/* Spacing between medium size classes. */ -static size_t lg_mspace; -static size_t mspace_mask; - -static uint8_t const *small_size2bin; -/* - * const_small_size2bin is a static constant lookup table that in the common - * case can be used as-is for small_size2bin. For dynamically linked programs, - * this avoids a page of memory overhead per process. - */ -#define S2B_1(i) i, -#define S2B_2(i) S2B_1(i) S2B_1(i) -#define S2B_4(i) S2B_2(i) S2B_2(i) -#define S2B_8(i) S2B_4(i) S2B_4(i) -#define S2B_16(i) S2B_8(i) S2B_8(i) -#define S2B_32(i) S2B_16(i) S2B_16(i) -#define S2B_64(i) S2B_32(i) S2B_32(i) -#define S2B_128(i) S2B_64(i) S2B_64(i) -#define S2B_256(i) S2B_128(i) S2B_128(i) -static const uint8_t const_small_size2bin[PAGE_SIZE - 255] = { - S2B_1(0xffU) /* 0 */ -#if (LG_QUANTUM == 4) -/* 64-bit system ************************/ -# ifdef MALLOC_TINY - S2B_2(0) /* 2 */ - S2B_2(1) /* 4 */ - S2B_4(2) /* 8 */ - S2B_8(3) /* 16 */ -# define S2B_QMIN 3 -# else - S2B_16(0) /* 16 */ -# define S2B_QMIN 0 -# endif - S2B_16(S2B_QMIN + 1) /* 32 */ - S2B_16(S2B_QMIN + 2) /* 48 */ - S2B_16(S2B_QMIN + 3) /* 64 */ - S2B_16(S2B_QMIN + 4) /* 80 */ - S2B_16(S2B_QMIN + 5) /* 96 */ - S2B_16(S2B_QMIN + 6) /* 112 */ - S2B_16(S2B_QMIN + 7) /* 128 */ -# define S2B_CMIN (S2B_QMIN + 8) -#else -/* 32-bit system ************************/ -# ifdef MALLOC_TINY - S2B_2(0) /* 2 */ - S2B_2(1) /* 4 */ - S2B_4(2) /* 8 */ -# define S2B_QMIN 2 -# else - S2B_8(0) /* 8 */ -# define S2B_QMIN 0 -# endif - S2B_8(S2B_QMIN + 1) /* 16 */ - S2B_8(S2B_QMIN + 2) /* 24 */ - S2B_8(S2B_QMIN + 3) /* 32 */ - S2B_8(S2B_QMIN + 4) /* 40 */ - S2B_8(S2B_QMIN + 5) /* 48 */ - S2B_8(S2B_QMIN + 6) /* 56 */ - S2B_8(S2B_QMIN + 7) /* 64 */ - S2B_8(S2B_QMIN + 8) /* 72 */ - S2B_8(S2B_QMIN + 9) /* 80 */ - S2B_8(S2B_QMIN + 10) /* 88 */ - S2B_8(S2B_QMIN + 11) /* 96 */ - S2B_8(S2B_QMIN + 12) /* 104 */ - S2B_8(S2B_QMIN + 13) /* 112 */ - S2B_8(S2B_QMIN + 14) /* 120 */ - S2B_8(S2B_QMIN + 15) /* 128 */ -# define S2B_CMIN (S2B_QMIN + 16) -#endif -/****************************************/ - S2B_64(S2B_CMIN + 0) /* 192 */ - S2B_64(S2B_CMIN + 1) /* 256 */ - S2B_64(S2B_CMIN + 2) /* 320 */ - S2B_64(S2B_CMIN + 3) /* 384 */ - S2B_64(S2B_CMIN + 4) /* 448 */ - S2B_64(S2B_CMIN + 5) /* 512 */ -# define S2B_SMIN (S2B_CMIN + 6) - S2B_256(S2B_SMIN + 0) /* 768 */ - S2B_256(S2B_SMIN + 1) /* 1024 */ - S2B_256(S2B_SMIN + 2) /* 1280 */ - S2B_256(S2B_SMIN + 3) /* 1536 */ - S2B_256(S2B_SMIN + 4) /* 1792 */ - S2B_256(S2B_SMIN + 5) /* 2048 */ - S2B_256(S2B_SMIN + 6) /* 2304 */ - S2B_256(S2B_SMIN + 7) /* 2560 */ - 
S2B_256(S2B_SMIN + 8) /* 2816 */ - S2B_256(S2B_SMIN + 9) /* 3072 */ - S2B_256(S2B_SMIN + 10) /* 3328 */ - S2B_256(S2B_SMIN + 11) /* 3584 */ - S2B_256(S2B_SMIN + 12) /* 3840 */ -#if (PAGE_SHIFT == 13) - S2B_256(S2B_SMIN + 13) /* 4096 */ - S2B_256(S2B_SMIN + 14) /* 4352 */ - S2B_256(S2B_SMIN + 15) /* 4608 */ - S2B_256(S2B_SMIN + 16) /* 4864 */ - S2B_256(S2B_SMIN + 17) /* 5120 */ - S2B_256(S2B_SMIN + 18) /* 5376 */ - S2B_256(S2B_SMIN + 19) /* 5632 */ - S2B_256(S2B_SMIN + 20) /* 5888 */ - S2B_256(S2B_SMIN + 21) /* 6144 */ - S2B_256(S2B_SMIN + 22) /* 6400 */ - S2B_256(S2B_SMIN + 23) /* 6656 */ - S2B_256(S2B_SMIN + 24) /* 6912 */ - S2B_256(S2B_SMIN + 25) /* 7168 */ - S2B_256(S2B_SMIN + 26) /* 7424 */ - S2B_256(S2B_SMIN + 27) /* 7680 */ - S2B_256(S2B_SMIN + 28) /* 7936 */ -#endif -}; -#undef S2B_1 -#undef S2B_2 -#undef S2B_4 -#undef S2B_8 -#undef S2B_16 -#undef S2B_32 -#undef S2B_64 -#undef S2B_128 -#undef S2B_256 -#undef S2B_QMIN -#undef S2B_CMIN -#undef S2B_SMIN - -/* Various chunk-related settings. */ -static size_t chunksize; -static size_t chunksize_mask; /* (chunksize - 1). */ -static size_t chunk_npages; -static size_t arena_chunk_header_npages; -static size_t arena_maxclass; /* Max size class for arenas. */ - -/********/ -/* - * Chunks. - */ - -/* Protects chunk-related data structures. */ -static malloc_mutex_t huge_mtx; - -/* Tree of chunks that are stand-alone huge allocations. */ -static extent_tree_t huge; - -#ifdef MALLOC_DSS -/* - * Protects sbrk() calls. This avoids malloc races among threads, though it - * does not protect against races with threads that call sbrk() directly. - */ -static malloc_mutex_t dss_mtx; -/* Base address of the DSS. */ -static void *dss_base; -/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ -static void *dss_prev; -/* Current upper limit on DSS addresses. */ -static void *dss_max; - -/* - * Trees of chunks that were previously allocated (trees differ only in node - * ordering). These are used when allocating chunks, in an attempt to re-use - * address space. Depending on function, different tree orderings are needed, - * which is why there are two trees with the same contents. - */ -static extent_tree_t dss_chunks_szad; -static extent_tree_t dss_chunks_ad; -#endif - -#ifdef MALLOC_STATS -/* Huge allocation statistics. */ -static uint64_t huge_nmalloc; -static uint64_t huge_ndalloc; -static size_t huge_allocated; -#endif - -/****************************/ -/* - * base (internal allocation). - */ - -/* - * Current pages that are being used for internal memory allocations. These - * pages are carved up in cacheline-size quanta, so that there is no chance of - * false cache line sharing. - */ -static void *base_pages; -static void *base_next_addr; -static void *base_past_addr; /* Addr immediately past base_pages. */ -static extent_node_t *base_nodes; -static malloc_mutex_t base_mtx; -#ifdef MALLOC_STATS -static size_t base_mapped; -#endif - -/********/ -/* - * Arenas. - */ - -/* - * Arenas that are used to service external requests. Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - */ -static arena_t **arenas; -static unsigned narenas; -#ifndef NO_TLS -static unsigned next_arena; -#endif -static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */ - -#ifndef NO_TLS -/* - * Map of _pthread_self() --> arenas[???], used for selecting an arena to use - * for allocations. 
- */ -static __thread arena_t *arenas_map TLS_MODEL; -#endif - -#ifdef MALLOC_TCACHE -/* Map of thread-specific caches. */ -static __thread tcache_t *tcache_tls TLS_MODEL; - -/* - * Number of cache slots for each bin in the thread cache, or 0 if tcache is - * disabled. - */ -size_t tcache_nslots; - -/* Number of tcache allocation/deallocation events between incremental GCs. */ -unsigned tcache_gc_incr; -#endif - -/* - * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and - * potentially avoid some system calls. We can get away without TLS here, - * since the state of mmap_unaligned only affects performance, rather than - * correct function. - */ -#ifndef NO_TLS -static __thread bool mmap_unaligned TLS_MODEL; -#else -static bool mmap_unaligned; -#endif - -#ifdef MALLOC_STATS -static malloc_mutex_t chunks_mtx; -/* Chunk statistics. */ -static chunk_stats_t stats_chunks; -#endif - -/*******************************/ -/* - * Runtime configuration options. - */ -const char *_malloc_options; - -#ifndef MALLOC_PRODUCTION -static bool opt_abort = true; -static bool opt_junk = true; -#else -static bool opt_abort = false; -static bool opt_junk = false; -#endif -#ifdef MALLOC_TCACHE -static size_t opt_lg_tcache_nslots = LG_TCACHE_NSLOTS_DEFAULT; -static ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT; -#endif -#ifdef MALLOC_DSS -static bool opt_dss = true; -static bool opt_mmap = true; -#endif -static ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; -static bool opt_stats_print = false; -static size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT; -static size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT; -static size_t opt_lg_medium_max = LG_MEDIUM_MAX_DEFAULT; -static size_t opt_lg_chunk = LG_CHUNK_DEFAULT; -static bool opt_utrace = false; -static bool opt_sysv = false; -static bool opt_xmalloc = false; -static bool opt_zero = false; -static int opt_narenas_lshift = 0; - -typedef struct { - void *p; - size_t s; - void *r; -} malloc_utrace_t; - -#define UTRACE(a, b, c) \ - if (opt_utrace) { \ - malloc_utrace_t ut; \ - ut.p = (a); \ - ut.s = (b); \ - ut.r = (c); \ - utrace(&ut, sizeof(ut)); \ - } - -/******************************************************************************/ -/* - * Begin function prototypes for non-inline static functions. 
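The UTRACE macro above only builds a malloc_utrace_t record and calls utrace(2) when the opt_utrace runtime flag is set. A generic sketch of that gate-on-a-flag pattern, with a hypothetical trace_write() sink standing in for utrace(2); wrapping the body in do/while(0) is the safer macro idiom, not a claim about the original:

/* Sketch of a runtime-gated trace record; trace_write() is hypothetical. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct {
    void   *p;  /* input pointer */
    size_t  s;  /* requested size */
    void   *r;  /* resulting pointer */
} demo_trace_t;

static bool opt_demo_trace = false;

static void
trace_write(const demo_trace_t *t)
{

    fprintf(stderr, "trace: p=%p s=%zu r=%p\n", t->p, t->s, t->r);
}

#define DEMO_TRACE(a, b, c) do {                    \
    if (opt_demo_trace) {                           \
        demo_trace_t ut = { (a), (b), (c) };        \
        trace_write(&ut);                           \
    }                                               \
} while (0)

int
main(void)
{
    int x;

    opt_demo_trace = true;
    DEMO_TRACE(NULL, sizeof(x), &x);
    return (0);
}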
- */ - -static void malloc_mutex_init(malloc_mutex_t *mutex); -static bool malloc_spin_init(pthread_mutex_t *lock); -#ifdef MALLOC_TINY -static size_t pow2_ceil(size_t x); -#endif -static void wrtmessage(const char *p1, const char *p2, const char *p3, - const char *p4); -#ifdef MALLOC_STATS -static void malloc_printf(const char *format, ...); -#endif -static char *umax2s(uintmax_t x, unsigned base, char *s); -#ifdef MALLOC_DSS -static bool base_pages_alloc_dss(size_t minsize); -#endif -static bool base_pages_alloc_mmap(size_t minsize); -static bool base_pages_alloc(size_t minsize); -static void *base_alloc(size_t size); -static void *base_calloc(size_t number, size_t size); -static extent_node_t *base_node_alloc(void); -static void base_node_dealloc(extent_node_t *node); -static void *pages_map(void *addr, size_t size); -static void pages_unmap(void *addr, size_t size); -#ifdef MALLOC_DSS -static void *chunk_alloc_dss(size_t size, bool *zero); -static void *chunk_recycle_dss(size_t size, bool *zero); -#endif -static void *chunk_alloc_mmap_slow(size_t size, bool unaligned); -static void *chunk_alloc_mmap(size_t size); -static void *chunk_alloc(size_t size, bool *zero); -#ifdef MALLOC_DSS -static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size); -static bool chunk_dealloc_dss(void *chunk, size_t size); -#endif -static void chunk_dealloc_mmap(void *chunk, size_t size); -static void chunk_dealloc(void *chunk, size_t size); -#ifndef NO_TLS -static arena_t *choose_arena_hard(void); -#endif -static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size, - bool large, bool zero); -static arena_chunk_t *arena_chunk_alloc(arena_t *arena); -static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); -static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large, - bool zero); -static void arena_purge(arena_t *arena); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty); -static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, size_t oldsize, size_t newsize); -static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, size_t oldsize, size_t newsize, bool dirty); -static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin); -static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin); -static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size); -#ifdef MALLOC_TCACHE -static void tcache_bin_fill(tcache_t *tcache, tcache_bin_t *tbin, - size_t binind); -static void *tcache_alloc_hard(tcache_t *tcache, tcache_bin_t *tbin, - size_t binind); -#endif -static void *arena_malloc_medium(arena_t *arena, size_t size, bool zero); -static void *arena_malloc_large(arena_t *arena, size_t size, bool zero); -static void *arena_palloc(arena_t *arena, size_t alignment, size_t size, - size_t alloc_size); -static bool arena_is_large(const void *ptr); -static size_t arena_salloc(const void *ptr); -static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin); -#ifdef MALLOC_STATS -static void arena_stats_print(arena_t *arena); -#endif -static void stats_print_atexit(void); -#ifdef MALLOC_TCACHE -static void tcache_bin_flush(tcache_bin_t *tbin, size_t binind, - unsigned rem); -#endif -static void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, - void *ptr); -#ifdef MALLOC_TCACHE -static void arena_dalloc_hard(arena_t *arena, arena_chunk_t *chunk, - void *ptr, arena_chunk_map_t 
*mapelm, tcache_t *tcache); -#endif -static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t size, size_t oldsize); -static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, - void *ptr, size_t size, size_t oldsize); -static bool arena_ralloc_large(void *ptr, size_t size, size_t oldsize); -static void *arena_ralloc(void *ptr, size_t size, size_t oldsize); -static bool arena_new(arena_t *arena, unsigned ind); -static arena_t *arenas_extend(unsigned ind); -#ifdef MALLOC_TCACHE -static tcache_bin_t *tcache_bin_create(arena_t *arena); -static void tcache_bin_destroy(tcache_t *tcache, tcache_bin_t *tbin, - unsigned binind); -# ifdef MALLOC_STATS -static void tcache_stats_merge(tcache_t *tcache, arena_t *arena); -# endif -static tcache_t *tcache_create(arena_t *arena); -static void tcache_destroy(tcache_t *tcache); -#endif -static void *huge_malloc(size_t size, bool zero); -static void *huge_palloc(size_t alignment, size_t size); -static void *huge_ralloc(void *ptr, size_t size, size_t oldsize); -static void huge_dalloc(void *ptr); -static void malloc_stats_print(void); -#ifdef MALLOC_DEBUG -static void small_size2bin_validate(void); -#endif -static bool small_size2bin_init(void); -static bool small_size2bin_init_hard(void); -static unsigned malloc_ncpus(void); -static bool malloc_init_hard(void); - -/* - * End function prototypes. - */ -/******************************************************************************/ - -static void -wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4) -{ - - if (_write(STDERR_FILENO, p1, strlen(p1)) < 0 - || _write(STDERR_FILENO, p2, strlen(p2)) < 0 - || _write(STDERR_FILENO, p3, strlen(p3)) < 0 - || _write(STDERR_FILENO, p4, strlen(p4)) < 0) - return; -} - -void (*_malloc_message)(const char *p1, const char *p2, const char *p3, - const char *p4) = wrtmessage; - -/* - * We don't want to depend on vsnprintf() for production builds, since that can - * cause unnecessary bloat for static binaries. umax2s() provides minimal - * integer printing functionality, so that malloc_printf() use can be limited to - * MALLOC_STATS code. - */ -#define UMAX2S_BUFSIZE 65 -static char * -umax2s(uintmax_t x, unsigned base, char *s) -{ - unsigned i; - - i = UMAX2S_BUFSIZE - 1; - s[i] = '\0'; - switch (base) { - case 10: - do { - i--; - s[i] = "0123456789"[x % 10]; - x /= 10; - } while (x > 0); - break; - case 16: - do { - i--; - s[i] = "0123456789abcdef"[x & 0xf]; - x >>= 4; - } while (x > 0); - break; - default: - do { - i--; - s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x % base]; - x /= base; - } while (x > 0); - } - - return (&s[i]); -} - -/* - * Define a custom assert() in order to reduce the chances of deadlock during - * assertion failure. - */ -#ifdef MALLOC_DEBUG -# define assert(e) do { \ - if (!(e)) { \ - char line_buf[UMAX2S_BUFSIZE]; \ - _malloc_message(_getprogname(), ": (malloc) ", \ - __FILE__, ":"); \ - _malloc_message(umax2s(__LINE__, 10, line_buf), \ - ": Failed assertion: ", "\"", #e); \ - _malloc_message("\"\n", "", "", ""); \ - abort(); \ - } \ -} while (0) -#else -#define assert(e) -#endif - -#ifdef MALLOC_STATS -/* - * Print to stderr in such a way as to (hopefully) avoid memory allocation. - */ -static void -malloc_printf(const char *format, ...) 
-{ - char buf[4096]; - va_list ap; - - va_start(ap, format); - vsnprintf(buf, sizeof(buf), format, ap); - va_end(ap); - _malloc_message(buf, "", "", ""); -} -#endif - -/******************************************************************************/ -/* - * Begin mutex. We can't use normal pthread mutexes in all places, because - * they require malloc()ed memory, which causes bootstrapping issues in some - * cases. - */ - -static void -malloc_mutex_init(malloc_mutex_t *mutex) -{ - static const spinlock_t lock = _SPINLOCK_INITIALIZER; - - mutex->lock = lock; -} - -static inline void -malloc_mutex_lock(malloc_mutex_t *mutex) -{ - - if (__isthreaded) - _SPINLOCK(&mutex->lock); -} - -static inline void -malloc_mutex_unlock(malloc_mutex_t *mutex) -{ - - if (__isthreaded) - _SPINUNLOCK(&mutex->lock); -} - -/* - * End mutex. - */ -/******************************************************************************/ -/* - * Begin spin lock. Spin locks here are actually adaptive mutexes that block - * after a period of spinning, because unbounded spinning would allow for - * priority inversion. - */ - -/* - * We use an unpublished interface to initialize pthread mutexes with an - * allocation callback, in order to avoid infinite recursion. - */ -int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)); - -__weak_reference(_pthread_mutex_init_calloc_cb_stub, - _pthread_mutex_init_calloc_cb); - -int -_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex, - void *(calloc_cb)(size_t, size_t)) -{ - - return (0); -} - -static bool -malloc_spin_init(pthread_mutex_t *lock) -{ - - if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0) - return (true); - - return (false); -} - -static inline void -malloc_spin_lock(pthread_mutex_t *lock) -{ - - if (__isthreaded) { - if (_pthread_mutex_trylock(lock) != 0) { - /* Exponentially back off if there are multiple CPUs. */ - if (ncpus > 1) { - unsigned i; - volatile unsigned j; - - for (i = 1; i <= LG_SPIN_LIMIT; i++) { - for (j = 0; j < (1U << i); j++) { - CPU_SPINWAIT; - } - - if (_pthread_mutex_trylock(lock) == 0) - return; - } - } - - /* - * Spinning failed. Block until the lock becomes - * available, in order to avoid indefinite priority - * inversion. - */ - _pthread_mutex_lock(lock); - } - } -} - -static inline void -malloc_spin_unlock(pthread_mutex_t *lock) -{ - - if (__isthreaded) - _pthread_mutex_unlock(lock); -} - -/* - * End spin lock. - */ -/******************************************************************************/ -/* - * Begin Utility functions/macros. - */ - -/* Return the chunk address for allocation address a. */ -#define CHUNK_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~chunksize_mask)) - -/* Return the chunk offset of address a. */ -#define CHUNK_ADDR2OFFSET(a) \ - ((size_t)((uintptr_t)(a) & chunksize_mask)) - -/* Return the smallest chunk multiple that is >= s. */ -#define CHUNK_CEILING(s) \ - (((s) + chunksize_mask) & ~chunksize_mask) - -/* Return the smallest quantum multiple that is >= a. */ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) - -/* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) - -/* Return the smallest subpage multiple that is >= s. */ -#define SUBPAGE_CEILING(s) \ - (((s) + SUBPAGE_MASK) & ~SUBPAGE_MASK) - -/* Return the smallest medium size class that is >= s. 
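malloc_spin_lock() above first try-locks, then (on multi-CPU systems) busy-waits in exponentially growing rounds before falling back to a blocking lock so that spinning cannot cause unbounded priority inversion. A rough standalone sketch of that back-off shape using plain pthreads; the round limit and the empty pause loop are stand-ins for LG_SPIN_LIMIT and CPU_SPINWAIT:

/* Sketch: try-lock with exponential backoff, then block.  Illustrative. */
#include <pthread.h>

#define DEMO_LG_SPIN_LIMIT  11

static void
demo_spin_lock(pthread_mutex_t *lock)
{
    unsigned i;
    volatile unsigned j;

    if (pthread_mutex_trylock(lock) == 0)
        return;
    /* Back off exponentially: 2, 4, 8, ... busy iterations per round. */
    for (i = 1; i <= DEMO_LG_SPIN_LIMIT; i++) {
        for (j = 0; j < (1U << i); j++)
            ;   /* a CPU pause instruction would go here */
        if (pthread_mutex_trylock(lock) == 0)
            return;
    }
    /* Spinning failed; block to avoid indefinite priority inversion. */
    pthread_mutex_lock(lock);
}

int
main(void)
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    demo_spin_lock(&m);
    pthread_mutex_unlock(&m);
    return (0);
}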
*/ -#define MEDIUM_CEILING(s) \ - (((s) + mspace_mask) & ~mspace_mask) - -/* Return the smallest pagesize multiple that is >= s. */ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) - -#ifdef MALLOC_TINY -/* Compute the smallest power of 2 that is >= x. */ -static size_t -pow2_ceil(size_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; -#if (SIZEOF_PTR == 8) - x |= x >> 32; -#endif - x++; - return (x); -} -#endif - -/******************************************************************************/ - -#ifdef MALLOC_DSS -static bool -base_pages_alloc_dss(size_t minsize) -{ - - /* - * Do special DSS allocation here, since base allocations don't need to - * be chunk-aligned. - */ - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - intptr_t incr; - size_t csize = CHUNK_CEILING(minsize); - - do { - /* Get the current end of the DSS. */ - dss_max = sbrk(0); - - /* - * Calculate how much padding is necessary to - * chunk-align the end of the DSS. Don't worry about - * dss_max not being chunk-aligned though. - */ - incr = (intptr_t)chunksize - - (intptr_t)CHUNK_ADDR2OFFSET(dss_max); - assert(incr >= 0); - if ((size_t)incr < minsize) - incr += csize; - - dss_prev = sbrk(incr); - if (dss_prev == dss_max) { - /* Success. */ - dss_max = (void *)((intptr_t)dss_prev + incr); - base_pages = dss_prev; - base_next_addr = base_pages; - base_past_addr = dss_max; -#ifdef MALLOC_STATS - base_mapped += incr; -#endif - malloc_mutex_unlock(&dss_mtx); - return (false); - } - } while (dss_prev != (void *)-1); - } - malloc_mutex_unlock(&dss_mtx); - - return (true); -} -#endif - -static bool -base_pages_alloc_mmap(size_t minsize) -{ - size_t csize; - - assert(minsize != 0); - csize = PAGE_CEILING(minsize); - base_pages = pages_map(NULL, csize); - if (base_pages == NULL) - return (true); - base_next_addr = base_pages; - base_past_addr = (void *)((uintptr_t)base_pages + csize); -#ifdef MALLOC_STATS - base_mapped += csize; -#endif - - return (false); -} - -static bool -base_pages_alloc(size_t minsize) -{ - -#ifdef MALLOC_DSS - if (opt_mmap && minsize != 0) -#endif - { - if (base_pages_alloc_mmap(minsize) == false) - return (false); - } - -#ifdef MALLOC_DSS - if (opt_dss) { - if (base_pages_alloc_dss(minsize) == false) - return (false); - } - -#endif - - return (true); -} - -static void * -base_alloc(size_t size) -{ - void *ret; - size_t csize; - - /* Round size up to nearest multiple of the cacheline size. */ - csize = CACHELINE_CEILING(size); - - malloc_mutex_lock(&base_mtx); - /* Make sure there's enough space for the allocation. */ - if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { - if (base_pages_alloc(csize)) { - malloc_mutex_unlock(&base_mtx); - return (NULL); - } - } - /* Allocate. 
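The *_CEILING() macros and pow2_ceil() above rely on the usual power-of-two rounding identities: (s + mask) & ~mask rounds up to a multiple of a power of two, and the shift-or cascade rounds up to the next power of two. A quick standalone check of both; the test values are arbitrary:

/* Sketch: round up to a power-of-two multiple, and to the next power of 2. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define DEMO_CEILING(s, align)  (((s) + ((align) - 1)) & ~((size_t)(align) - 1))

static size_t
demo_pow2_ceil(size_t x)
{

    x--;
    x |= x >> 1;
    x |= x >> 2;
    x |= x >> 4;
    x |= x >> 8;
    x |= x >> 16;
#if (SIZE_MAX > 0xffffffffU)
    x |= x >> 32;   /* only meaningful for 64-bit size_t */
#endif
    x++;
    return (x);
}

int
main(void)
{

    assert(DEMO_CEILING(1, 16) == 16);
    assert(DEMO_CEILING(4096, 4096) == 4096);
    assert(DEMO_CEILING(4097, 4096) == 8192);
    assert(demo_pow2_ceil(3) == 4);
    assert(demo_pow2_ceil(4) == 4);
    assert(demo_pow2_ceil(1025) == 2048);
    return (0);
}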
*/ - ret = base_next_addr; - base_next_addr = (void *)((uintptr_t)base_next_addr + csize); - malloc_mutex_unlock(&base_mtx); - - return (ret); -} - -static void * -base_calloc(size_t number, size_t size) -{ - void *ret; - - ret = base_alloc(number * size); - if (ret != NULL) - memset(ret, 0, number * size); - - return (ret); -} - -static extent_node_t * -base_node_alloc(void) -{ - extent_node_t *ret; - - malloc_mutex_lock(&base_mtx); - if (base_nodes != NULL) { - ret = base_nodes; - base_nodes = *(extent_node_t **)ret; - malloc_mutex_unlock(&base_mtx); - } else { - malloc_mutex_unlock(&base_mtx); - ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); - } - - return (ret); -} - -static void -base_node_dealloc(extent_node_t *node) -{ - - malloc_mutex_lock(&base_mtx); - *(extent_node_t **)node = base_nodes; - base_nodes = node; - malloc_mutex_unlock(&base_mtx); -} - -/* - * End Utility functions/macros. - */ -/******************************************************************************/ -/* - * Begin extent tree code. - */ - -#ifdef MALLOC_DSS -static inline int -extent_szad_comp(extent_node_t *a, extent_node_t *b) -{ - int ret; - size_t a_size = a->size; - size_t b_size = b->size; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; - - ret = (a_addr > b_addr) - (a_addr < b_addr); - } - - return (ret); -} - -/* Wrap red-black tree macros in functions. */ -rb_gen(__unused static, extent_tree_szad_, extent_tree_t, extent_node_t, - link_szad, extent_szad_comp) -#endif - -static inline int -extent_ad_comp(extent_node_t *a, extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; - - return ((a_addr > b_addr) - (a_addr < b_addr)); -} - -/* Wrap red-black tree macros in functions. */ -rb_gen(__unused static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, - extent_ad_comp) - -/* - * End extent tree code. - */ -/******************************************************************************/ -/* - * Begin chunk management functions. - */ - -static void * -pages_map(void *addr, size_t size) -{ - void *ret; - - /* - * We don't use MAP_FIXED here, because it can cause the *replacement* - * of existing mappings, and we only want to create new mappings. - */ - ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, - -1, 0); - assert(ret != NULL); - - if (ret == MAP_FAILED) - ret = NULL; - else if (addr != NULL && ret != addr) { - /* - * We succeeded in mapping memory, but not in the right place. - */ - if (munmap(ret, size) == -1) { - char buf[STRERROR_BUF]; - - strerror_r(errno, buf, sizeof(buf)); - _malloc_message(_getprogname(), - ": (malloc) Error in munmap(): ", buf, "\n"); - if (opt_abort) - abort(); - } - ret = NULL; - } - - assert(ret == NULL || (addr == NULL && ret != addr) - || (addr != NULL && ret == addr)); - return (ret); -} - -static void -pages_unmap(void *addr, size_t size) -{ - - if (munmap(addr, size) == -1) { - char buf[STRERROR_BUF]; - - strerror_r(errno, buf, sizeof(buf)); - _malloc_message(_getprogname(), - ": (malloc) Error in munmap(): ", buf, "\n"); - if (opt_abort) - abort(); - } -} - -#ifdef MALLOC_DSS -static void * -chunk_alloc_dss(size_t size, bool *zero) -{ - void *ret; - - ret = chunk_recycle_dss(size, zero); - if (ret != NULL) - return (ret); - - /* - * sbrk() uses a signed increment argument, so take care not to - * interpret a huge allocation request as a negative increment. 
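base_alloc() above is a simple bump allocator over mapped pages, rounding each request to a cacheline multiple, while base_node_alloc()/base_node_dealloc() recycle fixed-size nodes through an intrusive free list threaded through the nodes' first word. A stripped-down, single-threaded sketch of that bump-plus-freelist shape; the region size and demo_* names are placeholders:

/* Sketch: bump allocation from one mapped region + an intrusive free list. */
#include <stddef.h>
#include <sys/mman.h>

#define DEMO_CACHELINE  64
#define DEMO_REGION     (1 << 20)

static char *demo_base, *demo_next, *demo_past;
static void *demo_free_nodes;   /* singly linked via each node's first word */

static void *
demo_base_alloc(size_t size)
{
    size_t csize = (size + DEMO_CACHELINE - 1) & ~(size_t)(DEMO_CACHELINE - 1);
    void *ret;

    if (demo_base == NULL) {
        demo_base = mmap(NULL, DEMO_REGION, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (demo_base == MAP_FAILED) {
            demo_base = NULL;
            return (NULL);
        }
        demo_next = demo_base;
        demo_past = demo_base + DEMO_REGION;
    }
    if (demo_next + csize > demo_past)
        return (NULL);      /* a real allocator would map more pages here */
    ret = demo_next;
    demo_next += csize;
    return (ret);
}

static void *
demo_node_alloc(size_t size)
{
    void *ret;

    if (demo_free_nodes != NULL) {
        ret = demo_free_nodes;
        demo_free_nodes = *(void **)ret;    /* pop from free list */
        return (ret);
    }
    return (demo_base_alloc(size));
}

static void
demo_node_dealloc(void *node)
{

    *(void **)node = demo_free_nodes;       /* push onto free list */
    demo_free_nodes = node;
}

int
main(void)
{
    void *a = demo_node_alloc(sizeof(void *) * 4);

    demo_node_dealloc(a);
    /* Recycling should hand back the same node. */
    return (demo_node_alloc(sizeof(void *) * 4) == a ? 0 : 1);
}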
- */ - if ((intptr_t)size < 0) - return (NULL); - - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - intptr_t incr; - - /* - * The loop is necessary to recover from races with other - * threads that are using the DSS for something other than - * malloc. - */ - do { - /* Get the current end of the DSS. */ - dss_max = sbrk(0); - - /* - * Calculate how much padding is necessary to - * chunk-align the end of the DSS. - */ - incr = (intptr_t)size - - (intptr_t)CHUNK_ADDR2OFFSET(dss_max); - if (incr == (intptr_t)size) - ret = dss_max; - else { - ret = (void *)((intptr_t)dss_max + incr); - incr += size; - } - - dss_prev = sbrk(incr); - if (dss_prev == dss_max) { - /* Success. */ - dss_max = (void *)((intptr_t)dss_prev + incr); - malloc_mutex_unlock(&dss_mtx); - *zero = true; - return (ret); - } - } while (dss_prev != (void *)-1); - } - malloc_mutex_unlock(&dss_mtx); - - return (NULL); -} - -static void * -chunk_recycle_dss(size_t size, bool *zero) -{ - extent_node_t *node, key; - - key.addr = NULL; - key.size = size; - malloc_mutex_lock(&dss_mtx); - node = extent_tree_szad_nsearch(&dss_chunks_szad, &key); - if (node != NULL) { - void *ret = node->addr; - - /* Remove node from the tree. */ - extent_tree_szad_remove(&dss_chunks_szad, node); - if (node->size == size) { - extent_tree_ad_remove(&dss_chunks_ad, node); - base_node_dealloc(node); - } else { - /* - * Insert the remainder of node's address range as a - * smaller chunk. Its position within dss_chunks_ad - * does not change. - */ - assert(node->size > size); - node->addr = (void *)((uintptr_t)node->addr + size); - node->size -= size; - extent_tree_szad_insert(&dss_chunks_szad, node); - } - malloc_mutex_unlock(&dss_mtx); - - if (*zero) - memset(ret, 0, size); - return (ret); - } - malloc_mutex_unlock(&dss_mtx); - - return (NULL); -} -#endif - -static void * -chunk_alloc_mmap_slow(size_t size, bool unaligned) -{ - void *ret; - size_t offset; - - /* Beware size_t wrap-around. */ - if (size + chunksize <= size) - return (NULL); - - ret = pages_map(NULL, size + chunksize); - if (ret == NULL) - return (NULL); - - /* Clean up unneeded leading/trailing space. */ - offset = CHUNK_ADDR2OFFSET(ret); - if (offset != 0) { - /* Note that mmap() returned an unaligned mapping. */ - unaligned = true; - - /* Leading space. */ - pages_unmap(ret, chunksize - offset); - - ret = (void *)((uintptr_t)ret + - (chunksize - offset)); - - /* Trailing space. */ - pages_unmap((void *)((uintptr_t)ret + size), - offset); - } else { - /* Trailing space only. */ - pages_unmap((void *)((uintptr_t)ret + size), - chunksize); - } - - /* - * If mmap() returned an aligned mapping, reset mmap_unaligned so that - * the next chunk_alloc_mmap() execution tries the fast allocation - * method. - */ - if (unaligned == false) - mmap_unaligned = false; - - return (ret); -} - -static void * -chunk_alloc_mmap(size_t size) -{ - void *ret; - - /* - * Ideally, there would be a way to specify alignment to mmap() (like - * NetBSD has), but in the absence of such a feature, we have to work - * hard to efficiently create aligned mappings. The reliable, but - * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in at least one call to - * pages_unmap(). - * - * A more optimistic approach is to try mapping precisely the right - * amount, then try to append another mapping if alignment is off. In - * practice, this works out well as long as the application is not - * interleaving mappings via direct mmap() calls. 
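chunk_alloc_mmap_slow() above obtains a chunk-aligned mapping the reliable way: map size + chunksize bytes, then munmap() the misaligned leading slop and the unneeded trailing slop. A standalone sketch of that over-map-and-trim technique for an arbitrary power-of-two alignment:

/* Sketch: obtain an align-aligned mapping by over-mapping and trimming. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

static void *
demo_map_aligned(size_t size, size_t align)  /* align: power of 2, >= page */
{
    char *p, *ret;
    size_t offset;

    p = mmap(NULL, size + align, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)
        return (NULL);
    offset = (uintptr_t)p & (align - 1);
    if (offset != 0) {
        munmap(p, align - offset);      /* leading slop */
        ret = p + (align - offset);
        munmap(ret + size, offset);     /* trailing slop */
    } else {
        ret = p;
        munmap(ret + size, align);      /* trailing slop only */
    }
    return (ret);
}

int
main(void)
{
    void *p = demo_map_aligned(1 << 20, 1 << 20);

    printf("%p aligned: %d\n", p,
        p != NULL && ((uintptr_t)p & ((1 << 20) - 1)) == 0);
    return (0);
}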
If we do run into a - * situation where there is an interleaved mapping and we are unable to - * extend an unaligned mapping, our best option is to switch to the - * slow method until mmap() returns another aligned mapping. This will - * tend to leave a gap in the memory map that is too small to cause - * later problems for the optimistic method. - * - * Another possible confounding factor is address space layout - * randomization (ASLR), which causes mmap(2) to disregard the - * requested address. mmap_unaligned tracks whether the previous - * chunk_alloc_mmap() execution received any unaligned or relocated - * mappings, and if so, the current execution will immediately fall - * back to the slow method. However, we keep track of whether the fast - * method would have succeeded, and if so, we make a note to try the - * fast method next time. - */ - - if (mmap_unaligned == false) { - size_t offset; - - ret = pages_map(NULL, size); - if (ret == NULL) - return (NULL); - - offset = CHUNK_ADDR2OFFSET(ret); - if (offset != 0) { - mmap_unaligned = true; - /* Try to extend chunk boundary. */ - if (pages_map((void *)((uintptr_t)ret + size), - chunksize - offset) == NULL) { - /* - * Extension failed. Clean up, then revert to - * the reliable-but-expensive method. - */ - pages_unmap(ret, size); - ret = chunk_alloc_mmap_slow(size, true); - } else { - /* Clean up unneeded leading space. */ - pages_unmap(ret, chunksize - offset); - ret = (void *)((uintptr_t)ret + (chunksize - - offset)); - } - } - } else - ret = chunk_alloc_mmap_slow(size, false); - - return (ret); -} - -/* - * If the caller specifies (*zero == false), it is still possible to receive - * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc() - * takes advantage of this to avoid demanding zeroed chunks, but taking - * advantage of them if they are returned. - */ -static void * -chunk_alloc(size_t size, bool *zero) -{ - void *ret; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - -#ifdef MALLOC_DSS - if (opt_mmap) -#endif - { - ret = chunk_alloc_mmap(size); - if (ret != NULL) { - *zero = true; - goto RETURN; - } - } - -#ifdef MALLOC_DSS - if (opt_dss) { - ret = chunk_alloc_dss(size, zero); - if (ret != NULL) - goto RETURN; - } -#endif - - /* All strategies for allocation failed. */ - ret = NULL; -RETURN: -#ifdef MALLOC_STATS - if (ret != NULL) { - malloc_mutex_lock(&chunks_mtx); - stats_chunks.nchunks += (size / chunksize); - stats_chunks.curchunks += (size / chunksize); - if (stats_chunks.curchunks > stats_chunks.highchunks) - stats_chunks.highchunks = stats_chunks.curchunks; - malloc_mutex_unlock(&chunks_mtx); - } -#endif - - assert(CHUNK_ADDR2BASE(ret) == ret); - return (ret); -} - -#ifdef MALLOC_DSS -static extent_node_t * -chunk_dealloc_dss_record(void *chunk, size_t size) -{ - extent_node_t *node, *prev, key; - - key.addr = (void *)((uintptr_t)chunk + size); - node = extent_tree_ad_nsearch(&dss_chunks_ad, &key); - /* Try to coalesce forward. */ - if (node != NULL && node->addr == key.addr) { - /* - * Coalesce chunk with the following address range. This does - * not change the position within dss_chunks_ad, so only - * remove/insert from/into dss_chunks_szad. - */ - extent_tree_szad_remove(&dss_chunks_szad, node); - node->addr = chunk; - node->size += size; - extent_tree_szad_insert(&dss_chunks_szad, node); - } else { - /* - * Coalescing forward failed, so insert a new node. Drop - * dss_mtx during node allocation, since it is possible that a - * new base chunk will be allocated. 
- */ - malloc_mutex_unlock(&dss_mtx); - node = base_node_alloc(); - malloc_mutex_lock(&dss_mtx); - if (node == NULL) - return (NULL); - node->addr = chunk; - node->size = size; - extent_tree_ad_insert(&dss_chunks_ad, node); - extent_tree_szad_insert(&dss_chunks_szad, node); - } - - /* Try to coalesce backward. */ - prev = extent_tree_ad_prev(&dss_chunks_ad, node); - if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == - chunk) { - /* - * Coalesce chunk with the previous address range. This does - * not change the position within dss_chunks_ad, so only - * remove/insert node from/into dss_chunks_szad. - */ - extent_tree_szad_remove(&dss_chunks_szad, prev); - extent_tree_ad_remove(&dss_chunks_ad, prev); - - extent_tree_szad_remove(&dss_chunks_szad, node); - node->addr = prev->addr; - node->size += prev->size; - extent_tree_szad_insert(&dss_chunks_szad, node); - - base_node_dealloc(prev); - } - - return (node); -} - -static bool -chunk_dealloc_dss(void *chunk, size_t size) -{ - bool ret; - - malloc_mutex_lock(&dss_mtx); - if ((uintptr_t)chunk >= (uintptr_t)dss_base - && (uintptr_t)chunk < (uintptr_t)dss_max) { - extent_node_t *node; - - /* Try to coalesce with other unused chunks. */ - node = chunk_dealloc_dss_record(chunk, size); - if (node != NULL) { - chunk = node->addr; - size = node->size; - } - - /* Get the current end of the DSS. */ - dss_max = sbrk(0); - - /* - * Try to shrink the DSS if this chunk is at the end of the - * DSS. The sbrk() call here is subject to a race condition - * with threads that use brk(2) or sbrk(2) directly, but the - * alternative would be to leak memory for the sake of poorly - * designed multi-threaded programs. - */ - if ((void *)((uintptr_t)chunk + size) == dss_max - && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) { - /* Success. */ - dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size); - - if (node != NULL) { - extent_tree_szad_remove(&dss_chunks_szad, node); - extent_tree_ad_remove(&dss_chunks_ad, node); - base_node_dealloc(node); - } - } else - madvise(chunk, size, MADV_FREE); - - ret = false; - goto RETURN; - } - - ret = true; -RETURN: - malloc_mutex_unlock(&dss_mtx); - return (ret); -} -#endif - -static void -chunk_dealloc_mmap(void *chunk, size_t size) -{ - - pages_unmap(chunk, size); -} - -static void -chunk_dealloc(void *chunk, size_t size) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - -#ifdef MALLOC_STATS - malloc_mutex_lock(&chunks_mtx); - stats_chunks.curchunks -= (size / chunksize); - malloc_mutex_unlock(&chunks_mtx); -#endif - -#ifdef MALLOC_DSS - if (opt_dss) { - if (chunk_dealloc_dss(chunk, size) == false) - return; - } - - if (opt_mmap) -#endif - chunk_dealloc_mmap(chunk, size); -} - -/* - * End chunk management functions. - */ -/******************************************************************************/ -/* - * Begin arena. - */ - -/* - * Choose an arena based on a per-thread value (fast-path code, calls slow-path - * code if necessary). - */ -static inline arena_t * -choose_arena(void) -{ - arena_t *ret; - - /* - * We can only use TLS if this is a PIC library, since for the static - * library version, libc's malloc is used by TLS allocation, which - * introduces a bootstrapping issue. - */ -#ifndef NO_TLS - if (__isthreaded == false) { - /* Avoid the overhead of TLS for single-threaded operation. 
*/ - return (arenas[0]); - } - - ret = arenas_map; - if (ret == NULL) { - ret = choose_arena_hard(); - assert(ret != NULL); - } -#else - if (__isthreaded && narenas > 1) { - unsigned long ind; - - /* - * Hash _pthread_self() to one of the arenas. There is a prime - * number of arenas, so this has a reasonable chance of - * working. Even so, the hashing can be easily thwarted by - * inconvenient _pthread_self() values. Without specific - * knowledge of how _pthread_self() calculates values, we can't - * easily do much better than this. - */ - ind = (unsigned long) _pthread_self() % narenas; - - /* - * Optimistially assume that arenas[ind] has been initialized. - * At worst, we find out that some other thread has already - * done so, after acquiring the lock in preparation. Note that - * this lazy locking also has the effect of lazily forcing - * cache coherency; without the lock acquisition, there's no - * guarantee that modification of arenas[ind] by another thread - * would be seen on this CPU for an arbitrary amount of time. - * - * In general, this approach to modifying a synchronized value - * isn't a good idea, but in this case we only ever modify the - * value once, so things work out well. - */ - ret = arenas[ind]; - if (ret == NULL) { - /* - * Avoid races with another thread that may have already - * initialized arenas[ind]. - */ - malloc_spin_lock(&arenas_lock); - if (arenas[ind] == NULL) - ret = arenas_extend((unsigned)ind); - else - ret = arenas[ind]; - malloc_spin_unlock(&arenas_lock); - } - } else - ret = arenas[0]; -#endif - - assert(ret != NULL); - return (ret); -} - -#ifndef NO_TLS -/* - * Choose an arena based on a per-thread value (slow-path code only, called - * only by choose_arena()). - */ -static arena_t * -choose_arena_hard(void) -{ - arena_t *ret; - - assert(__isthreaded); - - if (narenas > 1) { - malloc_spin_lock(&arenas_lock); - if ((ret = arenas[next_arena]) == NULL) - ret = arenas_extend(next_arena); - next_arena = (next_arena + 1) % narenas; - malloc_spin_unlock(&arenas_lock); - } else - ret = arenas[0]; - - arenas_map = ret; - - return (ret); -} -#endif - -static inline int -arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b) -{ - uintptr_t a_chunk = (uintptr_t)a; - uintptr_t b_chunk = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_chunk > b_chunk) - (a_chunk < b_chunk)); -} - -/* Wrap red-black tree macros in functions. */ -rb_gen(__unused static, arena_chunk_tree_dirty_, arena_chunk_tree_t, - arena_chunk_t, link_dirty, arena_chunk_comp) - -static inline int -arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - uintptr_t a_mapelm = (uintptr_t)a; - uintptr_t b_mapelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); -} - -/* Wrap red-black tree macros in functions. */ -rb_gen(__unused static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, - link, arena_run_comp) - -static inline int -arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - int ret; - size_t a_size = a->bits & ~PAGE_MASK; - size_t b_size = b->bits & ~PAGE_MASK; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_mapelm, b_mapelm; - - if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) - a_mapelm = (uintptr_t)a; - else { - /* - * Treat keys as though they are lower than anything - * else. 
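choose_arena()/choose_arena_hard() above spread threads across a small arena array: with TLS, each thread caches a round-robin-assigned arena; without TLS, the thread id is hashed modulo the (deliberately prime) arena count on every call. A toy sketch of the modulo-hash flavor; casting pthread_self() to an integer is an assumption about its representation, as the original comment also concedes:

/* Sketch: pick one of N lock domains by hashing the calling thread's id. */
#include <pthread.h>
#include <stdio.h>

#define DEMO_NARENAS    3   /* prime, to spread hash values */

static int demo_arena_ids[DEMO_NARENAS] = { 0, 1, 2 };

static int *
demo_choose_arena(void)
{
    unsigned long ind;

    /* Assumes pthread_t converts meaningfully to an integer. */
    ind = (unsigned long)pthread_self() % DEMO_NARENAS;
    return (&demo_arena_ids[ind]);
}

int
main(void)
{

    printf("this thread uses arena %d\n", *demo_choose_arena());
    return (0);
}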
- */ - a_mapelm = 0; - } - b_mapelm = (uintptr_t)b; - - ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); - } - - return (ret); -} - -/* Wrap red-black tree macros in functions. */ -rb_gen(__unused static, arena_avail_tree_, arena_avail_tree_t, - arena_chunk_map_t, link, arena_avail_comp) - -static inline void -arena_run_rc_incr(arena_run_t *run, arena_bin_t *bin, const void *ptr) -{ - arena_chunk_t *chunk; - arena_t *arena; - size_t pagebeg, pageend, i; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - pagebeg = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; - pageend = ((uintptr_t)ptr + (uintptr_t)(bin->reg_size - 1) - - (uintptr_t)chunk) >> PAGE_SHIFT; - - for (i = pagebeg; i <= pageend; i++) { - size_t mapbits = chunk->map[i].bits; - - if (mapbits & CHUNK_MAP_DIRTY) { - assert((mapbits & CHUNK_MAP_RC_MASK) == 0); - chunk->ndirty--; - arena->ndirty--; - mapbits ^= CHUNK_MAP_DIRTY; - } - assert((mapbits & CHUNK_MAP_RC_MASK) != CHUNK_MAP_RC_MASK); - mapbits += CHUNK_MAP_RC_ONE; - chunk->map[i].bits = mapbits; - } -} - -static inline void -arena_run_rc_decr(arena_run_t *run, arena_bin_t *bin, const void *ptr) -{ - arena_chunk_t *chunk; - arena_t *arena; - size_t pagebeg, pageend, mapbits, i; - bool dirtier = false; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - pagebeg = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; - pageend = ((uintptr_t)ptr + (uintptr_t)(bin->reg_size - 1) - - (uintptr_t)chunk) >> PAGE_SHIFT; - - /* First page. */ - mapbits = chunk->map[pagebeg].bits; - mapbits -= CHUNK_MAP_RC_ONE; - if ((mapbits & CHUNK_MAP_RC_MASK) == 0) { - dirtier = true; - assert((mapbits & CHUNK_MAP_DIRTY) == 0); - mapbits |= CHUNK_MAP_DIRTY; - chunk->ndirty++; - arena->ndirty++; - } - chunk->map[pagebeg].bits = mapbits; - - if (pageend - pagebeg >= 1) { - /* - * Interior pages are completely consumed by the object being - * deallocated, which means that the pages can be - * unconditionally marked dirty. - */ - for (i = pagebeg + 1; i < pageend; i++) { - mapbits = chunk->map[i].bits; - mapbits -= CHUNK_MAP_RC_ONE; - assert((mapbits & CHUNK_MAP_RC_MASK) == 0); - dirtier = true; - assert((mapbits & CHUNK_MAP_DIRTY) == 0); - mapbits |= CHUNK_MAP_DIRTY; - chunk->ndirty++; - arena->ndirty++; - chunk->map[i].bits = mapbits; - } - - /* Last page. */ - mapbits = chunk->map[pageend].bits; - mapbits -= CHUNK_MAP_RC_ONE; - if ((mapbits & CHUNK_MAP_RC_MASK) == 0) { - dirtier = true; - assert((mapbits & CHUNK_MAP_DIRTY) == 0); - mapbits |= CHUNK_MAP_DIRTY; - chunk->ndirty++; - arena->ndirty++; - } - chunk->map[pageend].bits = mapbits; - } - - if (dirtier) { - if (chunk->dirtied == false) { - arena_chunk_tree_dirty_insert(&arena->chunks_dirty, - chunk); - chunk->dirtied = true; - } - - /* Enforce opt_lg_dirty_mult. */ - if (opt_lg_dirty_mult >= 0 && (arena->nactive >> - opt_lg_dirty_mult) < arena->ndirty) - arena_purge(arena); - } -} - -static inline void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin) -{ - void *ret; - unsigned i, mask, bit, regind; - - assert(run->magic == ARENA_RUN_MAGIC); - assert(run->regs_minelm < bin->regs_mask_nelms); - - /* - * Move the first check outside the loop, so that run->regs_minelm can - * be updated unconditionally, without the possibility of updating it - * multiple times. - */ - i = run->regs_minelm; - mask = run->regs_mask[i]; - if (mask != 0) { - /* Usable allocation found. 
*/ - bit = ffs((int)mask) - 1; - - regind = ((i << (LG_SIZEOF_INT + 3)) + bit); - assert(regind < bin->nregs); - ret = (void *)(((uintptr_t)run) + bin->reg0_offset - + (bin->reg_size * regind)); - - /* Clear bit. */ - mask ^= (1U << bit); - run->regs_mask[i] = mask; - - arena_run_rc_incr(run, bin, ret); - - return (ret); - } - - for (i++; i < bin->regs_mask_nelms; i++) { - mask = run->regs_mask[i]; - if (mask != 0) { - /* Usable allocation found. */ - bit = ffs((int)mask) - 1; - - regind = ((i << (LG_SIZEOF_INT + 3)) + bit); - assert(regind < bin->nregs); - ret = (void *)(((uintptr_t)run) + bin->reg0_offset - + (bin->reg_size * regind)); - - /* Clear bit. */ - mask ^= (1U << bit); - run->regs_mask[i] = mask; - - /* - * Make a note that nothing before this element - * contains a free region. - */ - run->regs_minelm = i; /* Low payoff: + (mask == 0); */ - - arena_run_rc_incr(run, bin, ret); - - return (ret); - } - } - /* Not reached. */ - assert(0); - return (NULL); -} - -static inline void -arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size) -{ - unsigned shift, diff, regind, elm, bit; - - assert(run->magic == ARENA_RUN_MAGIC); - - /* - * Avoid doing division with a variable divisor if possible. Using - * actual division here can reduce allocator throughput by over 20%! - */ - diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - bin->reg0_offset); - - /* Rescale (factor powers of 2 out of the numerator and denominator). */ - shift = ffs(size) - 1; - diff >>= shift; - size >>= shift; - - if (size == 1) { - /* The divisor was a power of 2. */ - regind = diff; - } else { - /* - * To divide by a number D that is not a power of two we - * multiply by (2^21 / D) and then right shift by 21 positions. - * - * X / D - * - * becomes - * - * (X * size_invs[D - 3]) >> SIZE_INV_SHIFT - * - * We can omit the first three elements, because we never - * divide by 0, and 1 and 2 are both powers of two, which are - * handled above. 
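arena_run_reg_alloc() above finds a free region by scanning an array of bitmask words, using ffs() to locate the lowest set bit and regs_minelm to skip words already known to be exhausted. A compact sketch of the ffs-on-a-bitmap scan with two words (64 slots); demo_* names are illustrative:

/* Sketch: allocate the lowest free slot tracked by a bitmap, using ffs(). */
#include <assert.h>
#include <stdio.h>
#include <strings.h>

#define DEMO_NWORDS 2   /* 2 * 32 = 64 slots */

static unsigned demo_mask[DEMO_NWORDS] = { 0xffffffffU, 0xffffffffU };

static int
demo_slot_alloc(void)
{
    unsigned i, bit;

    for (i = 0; i < DEMO_NWORDS; i++) {
        if (demo_mask[i] != 0) {
            bit = (unsigned)ffs((int)demo_mask[i]) - 1;
            demo_mask[i] &= ~(1U << bit);   /* mark the slot allocated */
            return ((int)(i * 32 + bit));
        }
    }
    return (-1);    /* run is full */
}

int
main(void)
{

    assert(demo_slot_alloc() == 0);
    assert(demo_slot_alloc() == 1);
    demo_mask[0] = 0;               /* pretend word 0 is exhausted */
    assert(demo_slot_alloc() == 32);
    printf("ok\n");
    return (0);
}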
- */ -#define SIZE_INV_SHIFT 21 -#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) - static const unsigned size_invs[] = { - SIZE_INV(3), - SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), - SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), - SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), - SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), - SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), - SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), - SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) - }; - - if (size <= ((sizeof(size_invs) / sizeof(unsigned)) + 2)) - regind = (diff * size_invs[size - 3]) >> SIZE_INV_SHIFT; - else - regind = diff / size; -#undef SIZE_INV -#undef SIZE_INV_SHIFT - } - assert(diff == regind * size); - assert(regind < bin->nregs); - - elm = regind >> (LG_SIZEOF_INT + 3); - if (elm < run->regs_minelm) - run->regs_minelm = elm; - bit = regind - (elm << (LG_SIZEOF_INT + 3)); - assert((run->regs_mask[elm] & (1U << bit)) == 0); - run->regs_mask[elm] |= (1U << bit); - - arena_run_rc_decr(run, bin, ptr); -} - -static void -arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, - bool zero) -{ - arena_chunk_t *chunk; - size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - old_ndirty = chunk->ndirty; - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) - >> PAGE_SHIFT); - total_pages = (chunk->map[run_ind].bits & ~PAGE_MASK) >> - PAGE_SHIFT; - need_pages = (size >> PAGE_SHIFT); - assert(need_pages > 0); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]); - arena->nactive += need_pages; - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - chunk->map[run_ind+need_pages].bits = (rem_pages << - PAGE_SHIFT) | (chunk->map[run_ind+need_pages].bits & - CHUNK_MAP_FLAGS_MASK); - chunk->map[run_ind+total_pages-1].bits = (rem_pages << - PAGE_SHIFT) | (chunk->map[run_ind+total_pages-1].bits & - CHUNK_MAP_FLAGS_MASK); - arena_avail_tree_insert(&arena->runs_avail, - &chunk->map[run_ind+need_pages]); - } - - for (i = 0; i < need_pages; i++) { - /* Zero if necessary. */ - if (zero) { - if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) - == 0) { - memset((void *)((uintptr_t)chunk + ((run_ind - + i) << PAGE_SHIFT)), 0, PAGE_SIZE); - /* CHUNK_MAP_ZEROED is cleared below. */ - } - } - - /* Update dirty page accounting. */ - if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) { - chunk->ndirty--; - arena->ndirty--; - /* CHUNK_MAP_DIRTY is cleared below. */ - } - - /* Initialize the chunk map. */ - if (large) { - chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE - | CHUNK_MAP_ALLOCATED; - } else { - chunk->map[run_ind + i].bits = (i << CHUNK_MAP_PG_SHIFT) - | CHUNK_MAP_ALLOCATED; - } - } - - if (large) { - /* - * Set the run size only in the first element for large runs. - * This is primarily a debugging aid, since the lack of size - * info for trailing pages only matters if the application - * tries to operate on an interior pointer. - */ - chunk->map[run_ind].bits |= size; - } else { - /* - * Initialize the first page's refcount to 1, so that the run - * header is protected from dirty page purging. 
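The size_invs[] table above replaces `diff / size` with a multiply by a precomputed fixed-point reciprocal, (X * (2^21/D + 1)) >> 21, which is exact for the operand ranges a run can produce (diff is always a multiple of size). A standalone check of that identity over a representative range; the test bounds are arbitrary, not the allocator's:

/* Sketch: division by a small constant via a 2^21 fixed-point reciprocal. */
#include <assert.h>
#include <stdio.h>

#define SIZE_INV_SHIFT  21
#define SIZE_INV(s)     (((1U << SIZE_INV_SHIFT) / (s)) + 1)

int
main(void)
{
    unsigned d, x;

    for (d = 3; d <= 31; d++) {
        unsigned inv = SIZE_INV(d);

        /* Exact here: x is a multiple of d and the accumulated error
         * term stays below 2^21 for these ranges. */
        for (x = 0; x <= 2048; x += d)
            assert((x * inv) >> SIZE_INV_SHIFT == x / d);
    }
    printf("reciprocal division matches for all tested values\n");
    return (0);
}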
- */ - chunk->map[run_ind].bits += CHUNK_MAP_RC_ONE; - } -} - -static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) -{ - arena_chunk_t *chunk; - size_t i; - - if (arena->spare != NULL) { - chunk = arena->spare; - arena->spare = NULL; - } else { - bool zero; - size_t zeroed; - - zero = false; - chunk = (arena_chunk_t *)chunk_alloc(chunksize, &zero); - if (chunk == NULL) - return (NULL); -#ifdef MALLOC_STATS - arena->stats.mapped += chunksize; -#endif - - chunk->arena = arena; - chunk->dirtied = false; - - /* - * Claim that no pages are in use, since the header is merely - * overhead. - */ - chunk->ndirty = 0; - - /* - * Initialize the map to contain one maximal free untouched run. - * Mark the pages as zeroed iff chunk_alloc() returned a zeroed - * chunk. - */ - zeroed = zero ? CHUNK_MAP_ZEROED : 0; - for (i = 0; i < arena_chunk_header_npages; i++) - chunk->map[i].bits = 0; - chunk->map[i].bits = arena_maxclass | zeroed; - for (i++; i < chunk_npages-1; i++) - chunk->map[i].bits = zeroed; - chunk->map[chunk_npages-1].bits = arena_maxclass | zeroed; - } - - /* Insert the run into the runs_avail tree. */ - arena_avail_tree_insert(&arena->runs_avail, - &chunk->map[arena_chunk_header_npages]); - - return (chunk); -} - -static void -arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) -{ - - if (arena->spare != NULL) { - if (arena->spare->dirtied) { - arena_chunk_tree_dirty_remove( - &chunk->arena->chunks_dirty, arena->spare); - arena->ndirty -= arena->spare->ndirty; - } - chunk_dealloc((void *)arena->spare, chunksize); -#ifdef MALLOC_STATS - arena->stats.mapped -= chunksize; -#endif - } - - /* - * Remove run from runs_avail, regardless of whether this chunk - * will be cached, so that the arena does not use it. Dirty page - * flushing only uses the chunks_dirty tree, so leaving this chunk in - * the chunks_* trees is sufficient for that purpose. - */ - arena_avail_tree_remove(&arena->runs_avail, - &chunk->map[arena_chunk_header_npages]); - - arena->spare = chunk; -} - -static arena_run_t * -arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; - arena_chunk_map_t *mapelm, key; - - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); - - /* Search the arena's chunks for the lowest best fit. */ - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = ((uintptr_t)mapelm - (uintptr_t)run_chunk->map) - / sizeof(arena_chunk_map_t); - - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind - << PAGE_SHIFT)); - arena_run_split(arena, run, size, large, zero); - return (run); - } - - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk == NULL) - return (NULL); - run = (arena_run_t *)((uintptr_t)chunk + (arena_chunk_header_npages << - PAGE_SHIFT)); - /* Update page map. 
*/ - arena_run_split(arena, run, size, large, zero); - return (run); -} - -#ifdef MALLOC_DEBUG -static arena_chunk_t * -chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) -{ - size_t *ndirty = (size_t *)arg; - - assert(chunk->dirtied); - *ndirty += chunk->ndirty; - return (NULL); -} -#endif - -static void -arena_purge(arena_t *arena) -{ - arena_chunk_t *chunk; - size_t i, npages; -#ifdef MALLOC_DEBUG - size_t ndirty = 0; - - arena_chunk_tree_dirty_iter(&arena->chunks_dirty, NULL, - chunks_dirty_iter_cb, (void *)&ndirty); - assert(ndirty == arena->ndirty); -#endif - assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty); - -#ifdef MALLOC_STATS - arena->stats.npurge++; -#endif - - /* - * Iterate downward through chunks until enough dirty memory has been - * purged. Terminate as soon as possible in order to minimize the - * number of system calls, even if a chunk has only been partially - * purged. - */ - - while ((arena->nactive >> (opt_lg_dirty_mult + 1)) < arena->ndirty) { - chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty); - assert(chunk != NULL); - - for (i = chunk_npages - 1; chunk->ndirty > 0; i--) { - assert(i >= arena_chunk_header_npages); - if (chunk->map[i].bits & CHUNK_MAP_DIRTY) { - chunk->map[i].bits ^= CHUNK_MAP_DIRTY; - /* Find adjacent dirty run(s). */ - for (npages = 1; i > arena_chunk_header_npages - && (chunk->map[i - 1].bits & - CHUNK_MAP_DIRTY); npages++) { - i--; - chunk->map[i].bits ^= CHUNK_MAP_DIRTY; - } - chunk->ndirty -= npages; - arena->ndirty -= npages; - - madvise((void *)((uintptr_t)chunk + (i << - PAGE_SHIFT)), (npages << PAGE_SHIFT), - MADV_FREE); -#ifdef MALLOC_STATS - arena->stats.nmadvise++; - arena->stats.purged += npages; -#endif - if ((arena->nactive >> (opt_lg_dirty_mult + 1)) - >= arena->ndirty) - break; - } - } - - if (chunk->ndirty == 0) { - arena_chunk_tree_dirty_remove(&arena->chunks_dirty, - chunk); - chunk->dirtied = false; - } - } -} - -static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty) -{ - arena_chunk_t *chunk; - size_t size, run_ind, run_pages; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) - >> PAGE_SHIFT); - assert(run_ind >= arena_chunk_header_npages); - assert(run_ind < chunk_npages); - if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) - size = chunk->map[run_ind].bits & ~PAGE_MASK; - else - size = run->bin->run_size; - run_pages = (size >> PAGE_SHIFT); - arena->nactive -= run_pages; - - /* Mark pages as unallocated in the chunk map. */ - if (dirty) { - size_t i; - - for (i = 0; i < run_pages; i++) { - /* - * When (dirty == true), *all* pages within the run - * need to have their dirty bits set, because only - * small runs can create a mixture of clean/dirty - * pages, but such runs are passed to this function - * with (dirty == false). - */ - assert((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) - == 0); - chunk->ndirty++; - arena->ndirty++; - chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY; - } - } else { - size_t i; - - for (i = 0; i < run_pages; i++) { - chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED); - } - } - chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & - CHUNK_MAP_FLAGS_MASK); - chunk->map[run_ind+run_pages-1].bits = size | - (chunk->map[run_ind+run_pages-1].bits & CHUNK_MAP_FLAGS_MASK); - - /* Try to coalesce forward. 
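arena_purge() above walks dirty pages from the end of a chunk, coalesces adjacent dirty runs, and hands them to madvise(..., MADV_FREE), which lets the kernel reclaim the physical pages while the mapping stays valid. A minimal sketch of that call as used on FreeBSD; MADV_FREE also exists on newer Linux kernels, with MADV_DONTNEED as the closest fallback elsewhere:

/* Sketch: release page contents back to the kernel without unmapping. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
    size_t len = (size_t)sysconf(_SC_PAGESIZE) * 4;
    char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);

    if (p == MAP_FAILED)
        return (1);
    memset(p, 0xa5, len);                   /* dirty the pages */
    if (madvise(p, len, MADV_FREE) != 0)    /* contents may now be dropped */
        perror("madvise");
    p[0] = 1;                               /* mapping is still usable */
    munmap(p, len);
    return (0);
}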
*/ - if (run_ind + run_pages < chunk_npages && - (chunk->map[run_ind+run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) { - size_t nrun_size = chunk->map[run_ind+run_pages].bits & - ~PAGE_MASK; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. - */ - arena_avail_tree_remove(&arena->runs_avail, - &chunk->map[run_ind+run_pages]); - - size += nrun_size; - run_pages = size >> PAGE_SHIFT; - - assert((chunk->map[run_ind+run_pages-1].bits & ~PAGE_MASK) - == nrun_size); - chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & - CHUNK_MAP_FLAGS_MASK); - chunk->map[run_ind+run_pages-1].bits = size | - (chunk->map[run_ind+run_pages-1].bits & - CHUNK_MAP_FLAGS_MASK); - } - - /* Try to coalesce backward. */ - if (run_ind > arena_chunk_header_npages && (chunk->map[run_ind-1].bits & - CHUNK_MAP_ALLOCATED) == 0) { - size_t prun_size = chunk->map[run_ind-1].bits & ~PAGE_MASK; - - run_ind -= prun_size >> PAGE_SHIFT; - - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. - */ - arena_avail_tree_remove(&arena->runs_avail, - &chunk->map[run_ind]); - - size += prun_size; - run_pages = size >> PAGE_SHIFT; - - assert((chunk->map[run_ind].bits & ~PAGE_MASK) == prun_size); - chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & - CHUNK_MAP_FLAGS_MASK); - chunk->map[run_ind+run_pages-1].bits = size | - (chunk->map[run_ind+run_pages-1].bits & - CHUNK_MAP_FLAGS_MASK); - } - - /* Insert into runs_avail, now that coalescing is complete. */ - arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]); - - /* - * Deallocate chunk if it is now completely unused. The bit - * manipulation checks whether the first run is unallocated and extends - * to the end of the chunk. - */ - if ((chunk->map[arena_chunk_header_npages].bits & (~PAGE_MASK | - CHUNK_MAP_ALLOCATED)) == arena_maxclass) - arena_chunk_dealloc(arena, chunk); - - /* - * It is okay to do dirty page processing even if the chunk was - * deallocated above, since in that case it is the spare. Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. - */ - if (dirty) { - if (chunk->dirtied == false) { - arena_chunk_tree_dirty_insert(&arena->chunks_dirty, - chunk); - chunk->dirtied = true; - } - - /* Enforce opt_lg_dirty_mult. */ - if (opt_lg_dirty_mult >= 0 && (arena->nactive >> - opt_lg_dirty_mult) < arena->ndirty) - arena_purge(arena); - } -} - -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT; - size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT; - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. 
- */ - assert((chunk->map[pageind].bits & CHUNK_MAP_DIRTY) == 0); - chunk->map[pageind].bits = (oldsize - newsize) | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED; - assert((chunk->map[pageind+head_npages].bits & CHUNK_MAP_DIRTY) == 0); - chunk->map[pageind+head_npages].bits = newsize | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED; - - arena_run_dalloc(arena, run, false); -} - -static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT; - size_t npages = newsize >> PAGE_SHIFT; - - assert(oldsize > newsize); - - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. - */ - assert((chunk->map[pageind].bits & CHUNK_MAP_DIRTY) == 0); - chunk->map[pageind].bits = newsize | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED; - assert((chunk->map[pageind+npages].bits & CHUNK_MAP_DIRTY) == 0); - chunk->map[pageind+npages].bits = (oldsize - newsize) | CHUNK_MAP_LARGE - | CHUNK_MAP_ALLOCATED; - - arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), - dirty); -} - -static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) -{ - arena_chunk_map_t *mapelm; - arena_run_t *run; - unsigned i, remainder; - - /* Look for a usable run. */ - mapelm = arena_run_tree_first(&bin->runs); - if (mapelm != NULL) { - arena_chunk_t *chunk; - size_t pageind; - - /* run is guaranteed to have available space. */ - arena_run_tree_remove(&bin->runs, mapelm); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)); - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - ((mapelm->bits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT)) - << PAGE_SHIFT)); -#ifdef MALLOC_STATS - bin->stats.reruns++; -#endif - return (run); - } - /* No existing runs have any space available. */ - - /* Allocate a new run. */ - run = arena_run_alloc(arena, bin->run_size, false, false); - if (run == NULL) - return (NULL); - - /* Initialize run internals. */ - run->bin = bin; - - for (i = 0; i < bin->regs_mask_nelms - 1; i++) - run->regs_mask[i] = UINT_MAX; - remainder = bin->nregs & ((1U << (LG_SIZEOF_INT + 3)) - 1); - if (remainder == 0) - run->regs_mask[i] = UINT_MAX; - else { - /* The last element has spare bits that need to be unset. */ - run->regs_mask[i] = (UINT_MAX >> ((1U << (LG_SIZEOF_INT + 3)) - - remainder)); - } - - run->regs_minelm = 0; - - run->nfree = bin->nregs; -#ifdef MALLOC_DEBUG - run->magic = ARENA_RUN_MAGIC; -#endif - -#ifdef MALLOC_STATS - bin->stats.nruns++; - bin->stats.curruns++; - if (bin->stats.curruns > bin->stats.highruns) - bin->stats.highruns = bin->stats.curruns; -#endif - return (run); -} - -/* bin->runcur must have space available before this function is called. */ -static inline void * -arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run) -{ - void *ret; - - assert(run->magic == ARENA_RUN_MAGIC); - assert(run->nfree > 0); - - ret = arena_run_reg_alloc(run, bin); - assert(ret != NULL); - run->nfree--; - - return (ret); -} - -/* Re-fill bin->runcur, then call arena_bin_malloc_easy(). 
*/ -static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) -{ - - bin->runcur = arena_bin_nonfull_run_get(arena, bin); - if (bin->runcur == NULL) - return (NULL); - assert(bin->runcur->magic == ARENA_RUN_MAGIC); - assert(bin->runcur->nfree > 0); - - return (arena_bin_malloc_easy(arena, bin, bin->runcur)); -} - -/* - * Calculate bin->run_size such that it meets the following constraints: - * - * *) bin->run_size >= min_run_size - * *) bin->run_size <= arena_maxclass - * *) bin->run_size <= RUN_MAX_SMALL - * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). - * *) run header size < PAGE_SIZE - * - * bin->nregs, bin->regs_mask_nelms, and bin->reg0_offset are - * also calculated here, since these settings are all interdependent. - */ -static size_t -arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size) -{ - size_t try_run_size, good_run_size; - unsigned good_nregs, good_mask_nelms, good_reg0_offset; - unsigned try_nregs, try_mask_nelms, try_reg0_offset; - - assert(min_run_size >= PAGE_SIZE); - assert(min_run_size <= arena_maxclass); - assert(min_run_size <= RUN_MAX_SMALL); - - /* - * Calculate known-valid settings before entering the run_size - * expansion loop, so that the first part of the loop always copies - * valid settings. - * - * The do..while loop iteratively reduces the number of regions until - * the run header and the regions no longer overlap. A closed formula - * would be quite messy, since there is an interdependency between the - * header's mask length and the number of regions. - */ - try_run_size = min_run_size; - try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size) - + 1; /* Counter-act try_nregs-- in loop. */ - do { - try_nregs--; - try_mask_nelms = (try_nregs >> (LG_SIZEOF_INT + 3)) + - ((try_nregs & ((1U << (LG_SIZEOF_INT + 3)) - 1)) ? 1 : 0); - try_reg0_offset = try_run_size - (try_nregs * bin->reg_size); - } while (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1)) - > try_reg0_offset); - - /* run_size expansion loop. */ - do { - /* - * Copy valid settings before trying more aggressive settings. - */ - good_run_size = try_run_size; - good_nregs = try_nregs; - good_mask_nelms = try_mask_nelms; - good_reg0_offset = try_reg0_offset; - - /* Try more aggressive settings. */ - try_run_size += PAGE_SIZE; - try_nregs = ((try_run_size - sizeof(arena_run_t)) / - bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */ - do { - try_nregs--; - try_mask_nelms = (try_nregs >> (LG_SIZEOF_INT + 3)) + - ((try_nregs & ((1U << (LG_SIZEOF_INT + 3)) - 1)) ? - 1 : 0); - try_reg0_offset = try_run_size - (try_nregs * - bin->reg_size); - } while (sizeof(arena_run_t) + (sizeof(unsigned) * - (try_mask_nelms - 1)) > try_reg0_offset); - } while (try_run_size <= arena_maxclass && try_run_size <= RUN_MAX_SMALL - && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX - && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size - && (sizeof(arena_run_t) + (sizeof(unsigned) * (try_mask_nelms - 1))) - < PAGE_SIZE); - - assert(sizeof(arena_run_t) + (sizeof(unsigned) * (good_mask_nelms - 1)) - <= good_reg0_offset); - assert((good_mask_nelms << (LG_SIZEOF_INT + 3)) >= good_nregs); - - /* Copy final settings. 
*/ - bin->run_size = good_run_size; - bin->nregs = good_nregs; - bin->regs_mask_nelms = good_mask_nelms; - bin->reg0_offset = good_reg0_offset; - - return (good_run_size); -} - -#ifdef MALLOC_TCACHE -static inline void -tcache_event(tcache_t *tcache) -{ - - if (tcache_gc_incr == 0) - return; - - tcache->ev_cnt++; - assert(tcache->ev_cnt <= tcache_gc_incr); - if (tcache->ev_cnt >= tcache_gc_incr) { - size_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = tcache->tbins[binind]; - - if (tbin != NULL) { - if (tbin->high_water == 0) { - /* - * This bin went completely unused for an - * entire GC cycle, so throw away the tbin. - */ - assert(tbin->ncached == 0); - tcache_bin_destroy(tcache, tbin, binind); - tcache->tbins[binind] = NULL; - } else { - if (tbin->low_water > 0) { - /* - * Flush (ceiling) half of the objects - * below the low water mark. - */ - tcache_bin_flush(tbin, binind, - tbin->ncached - (tbin->low_water >> - 1) - (tbin->low_water & 1)); - } - tbin->low_water = tbin->ncached; - tbin->high_water = tbin->ncached; - } - } - - tcache->next_gc_bin++; - if (tcache->next_gc_bin == nbins) - tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; - } -} - -static inline void * -tcache_bin_alloc(tcache_bin_t *tbin) -{ - - if (tbin->ncached == 0) - return (NULL); - tbin->ncached--; - if (tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; - return (tbin->slots[tbin->ncached]); -} - -static void -tcache_bin_fill(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) -{ - arena_t *arena; - arena_bin_t *bin; - arena_run_t *run; - void *ptr; - unsigned i; - - assert(tbin->ncached == 0); - - arena = tcache->arena; - bin = &arena->bins[binind]; - malloc_spin_lock(&arena->lock); - for (i = 0; i < (tcache_nslots >> 1); i++) { - if ((run = bin->runcur) != NULL && run->nfree > 0) - ptr = arena_bin_malloc_easy(arena, bin, run); - else - ptr = arena_bin_malloc_hard(arena, bin); - if (ptr == NULL) - break; - /* - * Fill tbin such that the objects lowest in memory are used - * first. 
- */ - tbin->slots[(tcache_nslots >> 1) - 1 - i] = ptr; - } -#ifdef MALLOC_STATS - bin->stats.nfills++; - bin->stats.nrequests += tbin->tstats.nrequests; - if (bin->reg_size <= small_maxclass) { - arena->stats.nmalloc_small += (i - tbin->ncached); - arena->stats.allocated_small += (i - tbin->ncached) * - bin->reg_size; - arena->stats.nmalloc_small += tbin->tstats.nrequests; - } else { - arena->stats.nmalloc_medium += (i - tbin->ncached); - arena->stats.allocated_medium += (i - tbin->ncached) * - bin->reg_size; - arena->stats.nmalloc_medium += tbin->tstats.nrequests; - } - tbin->tstats.nrequests = 0; -#endif - malloc_spin_unlock(&arena->lock); - tbin->ncached = i; - if (tbin->ncached > tbin->high_water) - tbin->high_water = tbin->ncached; -} - -static inline void * -tcache_alloc(tcache_t *tcache, size_t size, bool zero) -{ - void *ret; - tcache_bin_t *tbin; - size_t binind; - - if (size <= small_maxclass) - binind = small_size2bin[size]; - else { - binind = mbin0 + ((MEDIUM_CEILING(size) - medium_min) >> - lg_mspace); - } - assert(binind < nbins); - tbin = tcache->tbins[binind]; - if (tbin == NULL) { - tbin = tcache_bin_create(tcache->arena); - if (tbin == NULL) - return (NULL); - tcache->tbins[binind] = tbin; - } - - ret = tcache_bin_alloc(tbin); - if (ret == NULL) { - ret = tcache_alloc_hard(tcache, tbin, binind); - if (ret == NULL) - return (NULL); - } - - if (zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } else - memset(ret, 0, size); - -#ifdef MALLOC_STATS - tbin->tstats.nrequests++; -#endif - tcache_event(tcache); - return (ret); -} - -static void * -tcache_alloc_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) -{ - void *ret; - - tcache_bin_fill(tcache, tbin, binind); - ret = tcache_bin_alloc(tbin); - - return (ret); -} -#endif - -static inline void * -arena_malloc_small(arena_t *arena, size_t size, bool zero) -{ - void *ret; - arena_bin_t *bin; - arena_run_t *run; - size_t binind; - - binind = small_size2bin[size]; - assert(binind < mbin0); - bin = &arena->bins[binind]; - size = bin->reg_size; - - malloc_spin_lock(&arena->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_bin_malloc_easy(arena, bin, run); - else - ret = arena_bin_malloc_hard(arena, bin); - - if (ret == NULL) { - malloc_spin_unlock(&arena->lock); - return (NULL); - } - -#ifdef MALLOC_STATS -# ifdef MALLOC_TCACHE - if (__isthreaded == false) { -# endif - bin->stats.nrequests++; - arena->stats.nmalloc_small++; -# ifdef MALLOC_TCACHE - } -# endif - arena->stats.allocated_small += size; -#endif - malloc_spin_unlock(&arena->lock); - - if (zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } else - memset(ret, 0, size); - - return (ret); -} - -static void * -arena_malloc_medium(arena_t *arena, size_t size, bool zero) -{ - void *ret; - arena_bin_t *bin; - arena_run_t *run; - size_t binind; - - size = MEDIUM_CEILING(size); - binind = mbin0 + ((size - medium_min) >> lg_mspace); - assert(binind < nbins); - bin = &arena->bins[binind]; - assert(bin->reg_size == size); - - malloc_spin_lock(&arena->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_bin_malloc_easy(arena, bin, run); - else - ret = arena_bin_malloc_hard(arena, bin); - - if (ret == NULL) { - malloc_spin_unlock(&arena->lock); - return (NULL); - } - -#ifdef MALLOC_STATS -# ifdef MALLOC_TCACHE - if (__isthreaded == false) { -# endif - bin->stats.nrequests++; - arena->stats.nmalloc_medium++; -# ifdef 
MALLOC_TCACHE - } -# endif - arena->stats.allocated_medium += size; -#endif - malloc_spin_unlock(&arena->lock); - - if (zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } else - memset(ret, 0, size); - - return (ret); -} - -static void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) -{ - void *ret; - - /* Large allocation. */ - size = PAGE_CEILING(size); - malloc_spin_lock(&arena->lock); - ret = (void *)arena_run_alloc(arena, size, true, zero); - if (ret == NULL) { - malloc_spin_unlock(&arena->lock); - return (NULL); - } -#ifdef MALLOC_STATS - arena->stats.nmalloc_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; - if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; - } -#endif - malloc_spin_unlock(&arena->lock); - - if (zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - - return (ret); -} - -static inline void * -arena_malloc(size_t size, bool zero) -{ - - assert(size != 0); - assert(QUANTUM_CEILING(size) <= arena_maxclass); - - if (size <= bin_maxclass) { -#ifdef MALLOC_TCACHE - if (__isthreaded && tcache_nslots) { - tcache_t *tcache = tcache_tls; - if ((uintptr_t)tcache > (uintptr_t)1) - return (tcache_alloc(tcache, size, zero)); - else if (tcache == NULL) { - tcache = tcache_create(choose_arena()); - if (tcache == NULL) - return (NULL); - return (tcache_alloc(tcache, size, zero)); - } - } -#endif - if (size <= small_maxclass) { - return (arena_malloc_small(choose_arena(), size, - zero)); - } else { - return (arena_malloc_medium(choose_arena(), - size, zero)); - } - } else - return (arena_malloc_large(choose_arena(), size, zero)); -} - -static inline void * -imalloc(size_t size) -{ - - assert(size != 0); - - if (size <= arena_maxclass) - return (arena_malloc(size, false)); - else - return (huge_malloc(size, false)); -} - -static inline void * -icalloc(size_t size) -{ - - if (size <= arena_maxclass) - return (arena_malloc(size, true)); - else - return (huge_malloc(size, true)); -} - -/* Only handles large allocations that require more than page alignment. */ -static void * -arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size) -{ - void *ret; - size_t offset; - arena_chunk_t *chunk; - - assert((size & PAGE_MASK) == 0); - assert((alignment & PAGE_MASK) == 0); - - malloc_spin_lock(&arena->lock); - ret = (void *)arena_run_alloc(arena, alloc_size, true, false); - if (ret == NULL) { - malloc_spin_unlock(&arena->lock); - return (NULL); - } - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret); - - offset = (uintptr_t)ret & (alignment - 1); - assert((offset & PAGE_MASK) == 0); - assert(offset < alloc_size); - if (offset == 0) - arena_run_trim_tail(arena, chunk, ret, alloc_size, size, false); - else { - size_t leadsize, trailsize; - - leadsize = alignment - offset; - if (leadsize > 0) { - arena_run_trim_head(arena, chunk, ret, alloc_size, - alloc_size - leadsize); - ret = (void *)((uintptr_t)ret + leadsize); - } - - trailsize = alloc_size - leadsize - size; - if (trailsize != 0) { - /* Trim trailing space. 
*/ - assert(trailsize < alloc_size); - arena_run_trim_tail(arena, chunk, ret, size + trailsize, - size, false); - } - } - -#ifdef MALLOC_STATS - arena->stats.nmalloc_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; - if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; - } -#endif - malloc_spin_unlock(&arena->lock); - - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - return (ret); -} - -static inline void * -ipalloc(size_t alignment, size_t size) -{ - void *ret; - size_t ceil_size; - - /* - * Round size up to the nearest multiple of alignment. - * - * This done, we can take advantage of the fact that for each small - * size class, every object is aligned at the smallest power of two - * that is non-zero in the base two representation of the size. For - * example: - * - * Size | Base 2 | Minimum alignment - * -----+----------+------------------ - * 96 | 1100000 | 32 - * 144 | 10100000 | 32 - * 192 | 11000000 | 64 - * - * Depending on runtime settings, it is possible that arena_malloc() - * will further round up to a power of two, but that never causes - * correctness issues. - */ - ceil_size = (size + (alignment - 1)) & (-alignment); - /* - * (ceil_size < size) protects against the combination of maximal - * alignment and size greater than maximal alignment. - */ - if (ceil_size < size) { - /* size_t overflow. */ - return (NULL); - } - - if (ceil_size <= PAGE_SIZE || (alignment <= PAGE_SIZE - && ceil_size <= arena_maxclass)) - ret = arena_malloc(ceil_size, false); - else { - size_t run_size; - - /* - * We can't achieve subpage alignment, so round up alignment - * permanently; it makes later calculations simpler. - */ - alignment = PAGE_CEILING(alignment); - ceil_size = PAGE_CEILING(size); - /* - * (ceil_size < size) protects against very large sizes within - * PAGE_SIZE of SIZE_T_MAX. - * - * (ceil_size + alignment < ceil_size) protects against the - * combination of maximal alignment and ceil_size large enough - * to cause overflow. This is similar to the first overflow - * check above, but it needs to be repeated due to the new - * ceil_size value, which may now be *equal* to maximal - * alignment, whereas before we only detected overflow if the - * original size was *greater* than maximal alignment. - */ - if (ceil_size < size || ceil_size + alignment < ceil_size) { - /* size_t overflow. */ - return (NULL); - } - - /* - * Calculate the size of the over-size run that arena_palloc() - * would need to allocate in order to guarantee the alignment. - */ - if (ceil_size >= alignment) - run_size = ceil_size + alignment - PAGE_SIZE; - else { - /* - * It is possible that (alignment << 1) will cause - * overflow, but it doesn't matter because we also - * subtract PAGE_SIZE, which in the case of overflow - * leaves us with a very large run_size. That causes - * the first conditional below to fail, which means - * that the bogus run_size value never gets used for - * anything important. 
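The two size computations used in ipalloc() are easy to verify standalone: the power-of-two rounding with its overflow check, and the over-sized run that is guaranteed to contain an aligned region. EX_PAGE stands in for PAGE_SIZE; both helpers assume a power-of-two alignment. This is a sketch, not the code being removed.

#include <stddef.h>
#include <stdio.h>

#define EX_PAGE	4096u

/* Round size up to a multiple of alignment, detecting size_t overflow. */
static size_t
align_ceiling(size_t size, size_t alignment)
{
	size_t ceil_size = (size + (alignment - 1)) & (-alignment);

	return (ceil_size < size ? 0 : ceil_size);
}

/*
 * Smallest run guaranteed to contain an aligned, ceil_size-byte region: the
 * run starts page-aligned, so the gap to the next alignment boundary is at
 * most alignment - EX_PAGE bytes.
 */
static size_t
oversize_run(size_t ceil_size, size_t alignment)
{

	if (ceil_size >= alignment)
		return (ceil_size + alignment - EX_PAGE);
	return ((alignment << 1) - EX_PAGE);
}

int
main(void)
{

	printf("%zu\n", align_ceiling(100, 64));	/* 128 */
	printf("%zu\n", oversize_run(8192, 8192));	/* 12288 */
	return (0);
}
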
- */ - run_size = (alignment << 1) - PAGE_SIZE; - } - - if (run_size <= arena_maxclass) { - ret = arena_palloc(choose_arena(), alignment, ceil_size, - run_size); - } else if (alignment <= chunksize) - ret = huge_malloc(ceil_size, false); - else - ret = huge_palloc(alignment, ceil_size); - } - - assert(((uintptr_t)ret & (alignment - 1)) == 0); - return (ret); -} - -static bool -arena_is_large(const void *ptr) -{ - arena_chunk_t *chunk; - size_t pageind, mapbits; - - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT); - mapbits = chunk->map[pageind].bits; - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - return ((mapbits & CHUNK_MAP_LARGE) != 0); -} - -/* Return the size of the allocation pointed to by ptr. */ -static size_t -arena_salloc(const void *ptr) -{ - size_t ret; - arena_chunk_t *chunk; - size_t pageind, mapbits; - - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT); - mapbits = chunk->map[pageind].bits; - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - ((mapbits & CHUNK_MAP_PG_MASK) >> - CHUNK_MAP_PG_SHIFT)) << PAGE_SHIFT)); - assert(run->magic == ARENA_RUN_MAGIC); - ret = run->bin->reg_size; - } else { - ret = mapbits & ~PAGE_MASK; - assert(ret != 0); - } - - return (ret); -} - -static inline size_t -isalloc(const void *ptr) -{ - size_t ret; - arena_chunk_t *chunk; - - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) { - /* Region. */ - assert(chunk->arena->magic == ARENA_MAGIC); - - ret = arena_salloc(ptr); - } else { - extent_node_t *node, key; - - /* Chunk (huge allocation). */ - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - ret = node->size; - - malloc_mutex_unlock(&huge_mtx); - } - - return (ret); -} - -static inline void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm) -{ - size_t pageind; - arena_run_t *run; - arena_bin_t *bin; - size_t size; - - pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT); - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - ((mapelm->bits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT)) << - PAGE_SHIFT)); - assert(run->magic == ARENA_RUN_MAGIC); - bin = run->bin; - size = bin->reg_size; - - if (opt_junk) - memset(ptr, 0x5a, size); - - arena_run_reg_dalloc(run, bin, ptr, size); - run->nfree++; - - if (run->nfree == bin->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else if (run->nfree == 1 && run != bin->runcur) { - /* - * Make sure that bin->runcur always refers to the lowest - * non-full run, if one exists. - */ - if (bin->runcur == NULL) - bin->runcur = run; - else if ((uintptr_t)run < (uintptr_t)bin->runcur) { - /* Switch runcur. */ - if (bin->runcur->nfree > 0) { - arena_chunk_t *runcur_chunk = - CHUNK_ADDR2BASE(bin->runcur); - size_t runcur_pageind = - (((uintptr_t)bin->runcur - - (uintptr_t)runcur_chunk)) >> PAGE_SHIFT; - arena_chunk_map_t *runcur_mapelm = - &runcur_chunk->map[runcur_pageind]; - - /* Insert runcur. 
*/ - arena_run_tree_insert(&bin->runs, - runcur_mapelm); - } - bin->runcur = run; - } else { - size_t run_pageind = (((uintptr_t)run - - (uintptr_t)chunk)) >> PAGE_SHIFT; - arena_chunk_map_t *run_mapelm = - &chunk->map[run_pageind]; - - assert(arena_run_tree_search(&bin->runs, run_mapelm) == - NULL); - arena_run_tree_insert(&bin->runs, run_mapelm); - } - } - -#ifdef MALLOC_STATS - if (size <= small_maxclass) { - arena->stats.allocated_small -= size; - arena->stats.ndalloc_small++; - } else { - arena->stats.allocated_medium -= size; - arena->stats.ndalloc_medium++; - } -#endif -} - -static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - size_t run_ind; - - /* Deallocate run. */ - if (run == bin->runcur) - bin->runcur = NULL; - else if (bin->nregs != 1) { - size_t run_pageind = (((uintptr_t)run - - (uintptr_t)chunk)) >> PAGE_SHIFT; - arena_chunk_map_t *run_mapelm = - &chunk->map[run_pageind]; - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. - */ - arena_run_tree_remove(&bin->runs, run_mapelm); - } - /* - * Mark the first page as dirty. The dirty bit for every other page in - * the run is already properly set, which means we can call - * arena_run_dalloc(..., false), thus potentially avoiding the needless - * creation of many dirty pages. - */ - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT); - assert((chunk->map[run_ind].bits & CHUNK_MAP_DIRTY) == 0); - chunk->map[run_ind].bits |= CHUNK_MAP_DIRTY; - chunk->ndirty++; - arena->ndirty++; - -#ifdef MALLOC_DEBUG - run->magic = 0; -#endif - arena_run_dalloc(arena, run, false); -#ifdef MALLOC_STATS - bin->stats.curruns--; -#endif - - if (chunk->dirtied == false) { - arena_chunk_tree_dirty_insert(&arena->chunks_dirty, chunk); - chunk->dirtied = true; - } - /* Enforce opt_lg_dirty_mult. */ - if (opt_lg_dirty_mult >= 0 && (arena->nactive >> opt_lg_dirty_mult) < - arena->ndirty) - arena_purge(arena); -} - -#ifdef MALLOC_STATS -static void -arena_stats_print(arena_t *arena) -{ - - malloc_printf("dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," - " %"PRIu64" madvise%s, %"PRIu64" purged\n", - arena->nactive, arena->ndirty, - arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s", - arena->stats.nmadvise, arena->stats.nmadvise == 1 ? 
"" : "s", - arena->stats.purged); - - malloc_printf(" allocated nmalloc ndalloc\n"); - malloc_printf("small: %12zu %12"PRIu64" %12"PRIu64"\n", - arena->stats.allocated_small, arena->stats.nmalloc_small, - arena->stats.ndalloc_small); - malloc_printf("medium: %12zu %12"PRIu64" %12"PRIu64"\n", - arena->stats.allocated_medium, arena->stats.nmalloc_medium, - arena->stats.ndalloc_medium); - malloc_printf("large: %12zu %12"PRIu64" %12"PRIu64"\n", - arena->stats.allocated_large, arena->stats.nmalloc_large, - arena->stats.ndalloc_large); - malloc_printf("total: %12zu %12"PRIu64" %12"PRIu64"\n", - arena->stats.allocated_small + arena->stats.allocated_medium + - arena->stats.allocated_large, arena->stats.nmalloc_small + - arena->stats.nmalloc_medium + arena->stats.nmalloc_large, - arena->stats.ndalloc_small + arena->stats.ndalloc_medium + - arena->stats.ndalloc_large); - malloc_printf("mapped: %12zu\n", arena->stats.mapped); - - if (arena->stats.nmalloc_small + arena->stats.nmalloc_medium > 0) { - unsigned i, gap_start; -#ifdef MALLOC_TCACHE - malloc_printf("bins: bin size regs pgs requests " - "nfills nflushes newruns reruns maxruns curruns\n"); -#else - malloc_printf("bins: bin size regs pgs requests " - "newruns reruns maxruns curruns\n"); -#endif - for (i = 0, gap_start = UINT_MAX; i < nbins; i++) { - if (arena->bins[i].stats.nruns == 0) { - if (gap_start == UINT_MAX) - gap_start = i; - } else { - if (gap_start != UINT_MAX) { - if (i > gap_start + 1) { - /* - * Gap of more than one size - * class. - */ - malloc_printf("[%u..%u]\n", - gap_start, i - 1); - } else { - /* Gap of one size class. */ - malloc_printf("[%u]\n", - gap_start); - } - gap_start = UINT_MAX; - } - malloc_printf( - "%13u %1s %5u %4u %3u %9"PRIu64" %9"PRIu64 -#ifdef MALLOC_TCACHE - " %9"PRIu64" %9"PRIu64 -#endif - " %9"PRIu64" %7zu %7zu\n", - i, - i < ntbins ? "T" : i < ntbins + nqbins ? - "Q" : i < ntbins + nqbins + ncbins ? "C" : - i < ntbins + nqbins + ncbins + nsbins ? "S" - : "M", - arena->bins[i].reg_size, - arena->bins[i].nregs, - arena->bins[i].run_size >> PAGE_SHIFT, - arena->bins[i].stats.nrequests, -#ifdef MALLOC_TCACHE - arena->bins[i].stats.nfills, - arena->bins[i].stats.nflushes, -#endif - arena->bins[i].stats.nruns, - arena->bins[i].stats.reruns, - arena->bins[i].stats.highruns, - arena->bins[i].stats.curruns); - } - } - if (gap_start != UINT_MAX) { - if (i > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_printf("[%u..%u]\n", gap_start, i - 1); - } else { - /* Gap of one size class. */ - malloc_printf("[%u]\n", gap_start); - } - } - } - - if (arena->stats.nmalloc_large > 0) { - size_t i; - ssize_t gap_start; - size_t nlclasses = (chunksize - PAGE_SIZE) >> PAGE_SHIFT; - - malloc_printf( - "large: size pages nrequests maxruns curruns\n"); - - for (i = 0, gap_start = -1; i < nlclasses; i++) { - if (arena->stats.lstats[i].nrequests == 0) { - if (gap_start == -1) - gap_start = i; - } else { - if (gap_start != -1) { - malloc_printf("[%zu]\n", i - gap_start); - gap_start = -1; - } - malloc_printf( - "%13zu %5zu %9"PRIu64" %9zu %9zu\n", - (i+1) << PAGE_SHIFT, i+1, - arena->stats.lstats[i].nrequests, - arena->stats.lstats[i].highruns, - arena->stats.lstats[i].curruns); - } - } - if (gap_start != -1) - malloc_printf("[%zu]\n", i - gap_start); - } -} -#endif - -static void -stats_print_atexit(void) -{ - -#if (defined(MALLOC_TCACHE) && defined(MALLOC_STATS)) - unsigned i; - - /* - * Merge stats from extant threads. This is racy, since individual - * threads do not lock when recording tcache stats events. 
As a - * consequence, the final stats may be slightly out of date by the time - * they are reported, if other threads continue to allocate. - */ - for (i = 0; i < narenas; i++) { - arena_t *arena = arenas[i]; - if (arena != NULL) { - tcache_t *tcache; - - malloc_spin_lock(&arena->lock); - ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); - } - malloc_spin_unlock(&arena->lock); - } - } -#endif - malloc_stats_print(); -} - -#ifdef MALLOC_TCACHE -static void -tcache_bin_flush(tcache_bin_t *tbin, size_t binind, unsigned rem) -{ - arena_chunk_t *chunk; - arena_t *arena; - void *ptr; - unsigned i, ndeferred, ncached; - - for (ndeferred = tbin->ncached - rem; ndeferred > 0;) { - ncached = ndeferred; - /* Lock the arena associated with the first object. */ - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(tbin->slots[0]); - arena = chunk->arena; - malloc_spin_lock(&arena->lock); - /* Deallocate every object that belongs to the locked arena. */ - for (i = ndeferred = 0; i < ncached; i++) { - ptr = tbin->slots[i]; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) { - size_t pageind = (((uintptr_t)ptr - - (uintptr_t)chunk) >> PAGE_SHIFT); - arena_chunk_map_t *mapelm = - &chunk->map[pageind]; - arena_dalloc_bin(arena, chunk, ptr, mapelm); - } else { - /* - * This object was allocated via a different - * arena than the one that is currently locked. - * Stash the object, so that it can be handled - * in a future pass. - */ - tbin->slots[ndeferred] = ptr; - ndeferred++; - } - } -#ifdef MALLOC_STATS - arena->bins[binind].stats.nflushes++; - { - arena_bin_t *bin = &arena->bins[binind]; - bin->stats.nrequests += tbin->tstats.nrequests; - if (bin->reg_size <= small_maxclass) { - arena->stats.nmalloc_small += - tbin->tstats.nrequests; - } else { - arena->stats.nmalloc_medium += - tbin->tstats.nrequests; - } - tbin->tstats.nrequests = 0; - } -#endif - malloc_spin_unlock(&arena->lock); - } - - if (rem > 0) { - /* - * Shift the remaining valid pointers to the base of the slots - * array. 
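The flush loop in tcache_bin_flush() above has the following shape when pulled out on its own: repeatedly take the owner of the first cached object, release every object with that owner in one pass, and compact the stragglers to the front for the next pass. owner_of() and release() are hypothetical stand-ins for the CHUNK_ADDR2BASE()/chunk->arena lookup and for arena_dalloc_bin() under the arena lock; this is only a sketch of the control flow.

#include <string.h>

void
flush_cached(void **slots, unsigned *ncached, unsigned rem,
    int (*owner_of)(void *), void (*release)(int, void *))
{
	unsigned i, ndeferred, nscan;
	int owner;

	for (ndeferred = *ncached - rem; ndeferred > 0;) {
		/* Drain every object owned by the first object's owner. */
		nscan = ndeferred;
		owner = owner_of(slots[0]);
		for (i = ndeferred = 0; i < nscan; i++) {
			if (owner_of(slots[i]) == owner)
				release(owner, slots[i]);
			else {
				/* Different owner; keep it for a later pass. */
				slots[ndeferred++] = slots[i];
			}
		}
	}
	if (rem > 0) {
		/* Keep the newest rem objects, shifted to the array base. */
		memmove(&slots[0], &slots[*ncached - rem],
		    rem * sizeof(void *));
	}
	*ncached = rem;
}
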
- */ - memmove(&tbin->slots[0], &tbin->slots[tbin->ncached - rem], - rem * sizeof(void *)); - } - tbin->ncached = rem; -} - -static inline void -tcache_dalloc(tcache_t *tcache, void *ptr) -{ - arena_t *arena; - arena_chunk_t *chunk; - arena_run_t *run; - arena_bin_t *bin; - tcache_bin_t *tbin; - size_t pageind, binind; - arena_chunk_map_t *mapelm; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT); - mapelm = &chunk->map[pageind]; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - ((mapelm->bits & CHUNK_MAP_PG_MASK) >> CHUNK_MAP_PG_SHIFT)) << - PAGE_SHIFT)); - assert(run->magic == ARENA_RUN_MAGIC); - bin = run->bin; - binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) / - sizeof(arena_bin_t); - assert(binind < nbins); - - if (opt_junk) - memset(ptr, 0x5a, arena->bins[binind].reg_size); - - tbin = tcache->tbins[binind]; - if (tbin == NULL) { - tbin = tcache_bin_create(choose_arena()); - if (tbin == NULL) { - malloc_spin_lock(&arena->lock); - arena_dalloc_bin(arena, chunk, ptr, mapelm); - malloc_spin_unlock(&arena->lock); - return; - } - tcache->tbins[binind] = tbin; - } - - if (tbin->ncached == tcache_nslots) - tcache_bin_flush(tbin, binind, (tcache_nslots >> 1)); - assert(tbin->ncached < tcache_nslots); - tbin->slots[tbin->ncached] = ptr; - tbin->ncached++; - if (tbin->ncached > tbin->high_water) - tbin->high_water = tbin->ncached; - - tcache_event(tcache); -} -#endif - -static void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ - - /* Large allocation. */ - malloc_spin_lock(&arena->lock); - -#ifndef MALLOC_STATS - if (opt_junk) -#endif - { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> - PAGE_SHIFT; - size_t size = chunk->map[pageind].bits & ~PAGE_MASK; - -#ifdef MALLOC_STATS - if (opt_junk) -#endif - memset(ptr, 0x5a, size); -#ifdef MALLOC_STATS - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= size; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--; -#endif - } - - arena_run_dalloc(arena, (arena_run_t *)ptr, true); - malloc_spin_unlock(&arena->lock); -} - -static inline void -arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ - size_t pageind; - arena_chunk_map_t *mapelm; - - assert(arena != NULL); - assert(arena->magic == ARENA_MAGIC); - assert(chunk->arena == arena); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - pageind = (((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT); - mapelm = &chunk->map[pageind]; - assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) { - /* Small allocation. 
*/ -#ifdef MALLOC_TCACHE - if (__isthreaded && tcache_nslots) { - tcache_t *tcache = tcache_tls; - if ((uintptr_t)tcache > (uintptr_t)1) - tcache_dalloc(tcache, ptr); - else { - arena_dalloc_hard(arena, chunk, ptr, mapelm, - tcache); - } - } else { -#endif - malloc_spin_lock(&arena->lock); - arena_dalloc_bin(arena, chunk, ptr, mapelm); - malloc_spin_unlock(&arena->lock); -#ifdef MALLOC_TCACHE - } -#endif - } else - arena_dalloc_large(arena, chunk, ptr); -} - -#ifdef MALLOC_TCACHE -static void -arena_dalloc_hard(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm, tcache_t *tcache) -{ - - if (tcache == NULL) { - tcache = tcache_create(arena); - if (tcache == NULL) { - malloc_spin_lock(&arena->lock); - arena_dalloc_bin(arena, chunk, ptr, mapelm); - malloc_spin_unlock(&arena->lock); - } else - tcache_dalloc(tcache, ptr); - } else { - /* This thread is currently exiting, so directly deallocate. */ - assert(tcache == (void *)(uintptr_t)1); - malloc_spin_lock(&arena->lock); - arena_dalloc_bin(arena, chunk, ptr, mapelm); - malloc_spin_unlock(&arena->lock); - } -} -#endif - -static inline void -idalloc(void *ptr) -{ - arena_chunk_t *chunk; - - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr); - else - huge_dalloc(ptr); -} - -static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t size, size_t oldsize) -{ - - assert(size < oldsize); - - /* - * Shrink the run, and make trailing pages available for other - * allocations. - */ - malloc_spin_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, - true); -#ifdef MALLOC_STATS - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; - if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; - } -#endif - malloc_spin_unlock(&arena->lock); -} - -static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t size, size_t oldsize) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT; - size_t npages = oldsize >> PAGE_SHIFT; - - assert(oldsize == (chunk->map[pageind].bits & ~PAGE_MASK)); - - /* Try to extend the run. */ - assert(size > oldsize); - malloc_spin_lock(&arena->lock); - if (pageind + npages < chunk_npages && (chunk->map[pageind+npages].bits - & CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[pageind+npages].bits & - ~PAGE_MASK) >= size - oldsize) { - /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. 
- */ - arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << PAGE_SHIFT)), size - oldsize, true, - false); - - chunk->map[pageind].bits = size | CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED; - chunk->map[pageind+npages].bits = CHUNK_MAP_LARGE | - CHUNK_MAP_ALLOCATED; - -#ifdef MALLOC_STATS - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++; - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++; - if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns > - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) { - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns = - arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns; - } -#endif - malloc_spin_unlock(&arena->lock); - return (false); - } - malloc_spin_unlock(&arena->lock); - - return (true); -} - -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. - */ -static bool -arena_ralloc_large(void *ptr, size_t size, size_t oldsize) -{ - size_t psize; - - psize = PAGE_CEILING(size); - if (psize == oldsize) { - /* Same size class. */ - if (opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - - size); - } - return (false); - } else { - arena_chunk_t *chunk; - arena_t *arena; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - assert(arena->magic == ARENA_MAGIC); - - if (psize < oldsize) { - /* Fill before shrinking in order avoid a race. */ - if (opt_junk) { - memset((void *)((uintptr_t)ptr + size), 0x5a, - oldsize - size); - } - arena_ralloc_large_shrink(arena, chunk, ptr, psize, - oldsize); - return (false); - } else { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, - psize, oldsize); - if (ret == false && opt_zero) { - memset((void *)((uintptr_t)ptr + oldsize), 0, - size - oldsize); - } - return (ret); - } - } -} - -static void * -arena_ralloc(void *ptr, size_t size, size_t oldsize) -{ - void *ret; - size_t copysize; - - /* - * Try to avoid moving the allocation. - * - * posix_memalign() can cause allocation of "large" objects that are - * smaller than bin_maxclass (in order to meet alignment requirements). - * Therefore, do not assume that (oldsize <= bin_maxclass) indicates - * ptr refers to a bin-allocated object. - */ - if (oldsize <= arena_maxclass) { - if (arena_is_large(ptr) == false ) { - if (size <= small_maxclass) { - if (oldsize <= small_maxclass && - small_size2bin[size] == - small_size2bin[oldsize]) - goto IN_PLACE; - } else if (size <= bin_maxclass) { - if (small_maxclass < oldsize && oldsize <= - bin_maxclass && MEDIUM_CEILING(size) == - MEDIUM_CEILING(oldsize)) - goto IN_PLACE; - } - } else { - assert(size <= arena_maxclass); - if (size > bin_maxclass) { - if (arena_ralloc_large(ptr, size, oldsize) == - false) - return (ptr); - } - } - } - - /* Try to avoid moving the allocation. 
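The "can the reallocation stay where it is?" test used by arena_ralloc() amounts to the predicate below: small requests stay in place when old and new sizes map to the same small bin, medium requests when they share the same rounded-up medium size. bin_of(), the class limits, and mquantum are hypothetical stand-ins for small_size2bin[], small_maxclass/bin_maxclass, and the medium spacing; large requests are handled separately by arena_ralloc_large().

#include <stddef.h>

int
ralloc_fits_in_place(size_t size, size_t oldsize, size_t small_max,
    size_t bin_max, size_t mquantum, unsigned (*bin_of)(size_t))
{

	if (size <= small_max) {
		return (oldsize <= small_max &&
		    bin_of(size) == bin_of(oldsize));
	}
	if (size <= bin_max) {
		return (oldsize > small_max && oldsize <= bin_max &&
		    ((size + mquantum - 1) & ~(mquantum - 1)) ==
		    ((oldsize + mquantum - 1) & ~(mquantum - 1)));
	}
	return (0);	/* large: arena_ralloc_large() decides instead */
}
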
*/ - if (size <= small_maxclass) { - if (oldsize <= small_maxclass && small_size2bin[size] == - small_size2bin[oldsize]) - goto IN_PLACE; - } else if (size <= bin_maxclass) { - if (small_maxclass < oldsize && oldsize <= bin_maxclass && - MEDIUM_CEILING(size) == MEDIUM_CEILING(oldsize)) - goto IN_PLACE; - } else { - if (bin_maxclass < oldsize && oldsize <= arena_maxclass) { - assert(size > bin_maxclass); - if (arena_ralloc_large(ptr, size, oldsize) == false) - return (ptr); - } - } - - /* - * If we get here, then size and oldsize are different enough that we - * need to move the object. In that case, fall back to allocating new - * space and copying. - */ - ret = arena_malloc(size, false); - if (ret == NULL) - return (NULL); - - /* Junk/zero-filling were already done by arena_malloc(). */ - copysize = (size < oldsize) ? size : oldsize; - memcpy(ret, ptr, copysize); - idalloc(ptr); - return (ret); -IN_PLACE: - if (opt_junk && size < oldsize) - memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - size); - else if (opt_zero && size > oldsize) - memset((void *)((uintptr_t)ptr + oldsize), 0, size - oldsize); - return (ptr); -} - -static inline void * -iralloc(void *ptr, size_t size) -{ - size_t oldsize; - - assert(ptr != NULL); - assert(size != 0); - - oldsize = isalloc(ptr); - - if (size <= arena_maxclass) - return (arena_ralloc(ptr, size, oldsize)); - else - return (huge_ralloc(ptr, size, oldsize)); -} - -static bool -arena_new(arena_t *arena, unsigned ind) -{ - unsigned i; - arena_bin_t *bin; - size_t prev_run_size; - - if (malloc_spin_init(&arena->lock)) - return (true); - -#ifdef MALLOC_STATS - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = (malloc_large_stats_t *)base_alloc( - sizeof(malloc_large_stats_t) * ((chunksize - PAGE_SIZE) >> - PAGE_SHIFT)); - if (arena->stats.lstats == NULL) - return (true); - memset(arena->stats.lstats, 0, sizeof(malloc_large_stats_t) * - ((chunksize - PAGE_SIZE) >> PAGE_SHIFT)); -# ifdef MALLOC_TCACHE - ql_new(&arena->tcache_ql); -# endif -#endif - - /* Initialize chunks. */ - arena_chunk_tree_dirty_new(&arena->chunks_dirty); - arena->spare = NULL; - - arena->nactive = 0; - arena->ndirty = 0; - - arena_avail_tree_new(&arena->runs_avail); - - /* Initialize bins. */ - prev_run_size = PAGE_SIZE; - - i = 0; -#ifdef MALLOC_TINY - /* (2^n)-spaced tiny bins. */ - for (; i < ntbins; i++) { - bin = &arena->bins[i]; - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - - bin->reg_size = (1U << (LG_TINY_MIN + i)); - - prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); - -#ifdef MALLOC_STATS - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); -#endif - } -#endif - - /* Quantum-spaced bins. */ - for (; i < ntbins + nqbins; i++) { - bin = &arena->bins[i]; - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - - bin->reg_size = (i - ntbins + 1) << LG_QUANTUM; - - prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); - -#ifdef MALLOC_STATS - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); -#endif - } - - /* Cacheline-spaced bins. */ - for (; i < ntbins + nqbins + ncbins; i++) { - bin = &arena->bins[i]; - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - - bin->reg_size = cspace_min + ((i - (ntbins + nqbins)) << - LG_CACHELINE); - - prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); - -#ifdef MALLOC_STATS - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); -#endif - } - - /* Subpage-spaced bins. 
*/ - for (; i < ntbins + nqbins + ncbins + nsbins; i++) { - bin = &arena->bins[i]; - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - - bin->reg_size = sspace_min + ((i - (ntbins + nqbins + ncbins)) - << LG_SUBPAGE); - - prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); - -#ifdef MALLOC_STATS - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); -#endif - } - - /* Medium bins. */ - for (; i < nbins; i++) { - bin = &arena->bins[i]; - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - - bin->reg_size = medium_min + ((i - (ntbins + nqbins + ncbins + - nsbins)) << lg_mspace); - - prev_run_size = arena_bin_run_size_calc(bin, prev_run_size); - -#ifdef MALLOC_STATS - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); -#endif - } - -#ifdef MALLOC_DEBUG - arena->magic = ARENA_MAGIC; -#endif - - return (false); -} - -/* Create a new arena and insert it into the arenas array at index ind. */ -static arena_t * -arenas_extend(unsigned ind) -{ - arena_t *ret; - - /* Allocate enough space for trailing bins. */ - ret = (arena_t *)base_alloc(sizeof(arena_t) - + (sizeof(arena_bin_t) * (nbins - 1))); - if (ret != NULL && arena_new(ret, ind) == false) { - arenas[ind] = ret; - return (ret); - } - /* Only reached if there is an OOM error. */ - - /* - * OOM here is quite inconvenient to propagate, since dealing with it - * would require a check for failure in the fast path. Instead, punt - * by using arenas[0]. In practice, this is an extremely unlikely - * failure. - */ - _malloc_message(_getprogname(), - ": (malloc) Error initializing arena\n", "", ""); - if (opt_abort) - abort(); - - return (arenas[0]); -} - -#ifdef MALLOC_TCACHE -static tcache_bin_t * -tcache_bin_create(arena_t *arena) -{ - tcache_bin_t *ret; - size_t tsize; - - tsize = sizeof(tcache_bin_t) + (sizeof(void *) * (tcache_nslots - 1)); - if (tsize <= small_maxclass) - ret = (tcache_bin_t *)arena_malloc_small(arena, tsize, false); - else if (tsize <= bin_maxclass) - ret = (tcache_bin_t *)arena_malloc_medium(arena, tsize, false); - else - ret = (tcache_bin_t *)imalloc(tsize); - if (ret == NULL) - return (NULL); -#ifdef MALLOC_STATS - memset(&ret->tstats, 0, sizeof(tcache_bin_stats_t)); -#endif - ret->low_water = 0; - ret->high_water = 0; - ret->ncached = 0; - - return (ret); -} - -static void -tcache_bin_destroy(tcache_t *tcache, tcache_bin_t *tbin, unsigned binind) -{ - arena_t *arena; - arena_chunk_t *chunk; - size_t pageind, tsize; - arena_chunk_map_t *mapelm; - - chunk = CHUNK_ADDR2BASE(tbin); - arena = chunk->arena; - pageind = (((uintptr_t)tbin - (uintptr_t)chunk) >> PAGE_SHIFT); - mapelm = &chunk->map[pageind]; - -#ifdef MALLOC_STATS - if (tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - arena_bin_t *bin = &arena->bins[binind]; - malloc_spin_lock(&arena->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - if (bin->reg_size <= small_maxclass) - arena->stats.nmalloc_small += tbin->tstats.nrequests; - else - arena->stats.nmalloc_medium += tbin->tstats.nrequests; - malloc_spin_unlock(&arena->lock); - } -#endif - - assert(tbin->ncached == 0); - tsize = sizeof(tcache_bin_t) + (sizeof(void *) * (tcache_nslots - 1)); - if (tsize <= bin_maxclass) { - malloc_spin_lock(&arena->lock); - arena_dalloc_bin(arena, chunk, tbin, mapelm); - malloc_spin_unlock(&arena->lock); - } else - idalloc(tbin); -} - -#ifdef MALLOC_STATS -static void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) -{ - unsigned i; - - /* Merge and reset tcache stats. 
*/ - for (i = 0; i < mbin0; i++) { - arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = tcache->tbins[i]; - if (tbin != NULL) { - bin->stats.nrequests += tbin->tstats.nrequests; - arena->stats.nmalloc_small += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - } - for (; i < nbins; i++) { - arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = tcache->tbins[i]; - if (tbin != NULL) { - bin->stats.nrequests += tbin->tstats.nrequests; - arena->stats.nmalloc_medium += tbin->tstats.nrequests; - tbin->tstats.nrequests = 0; - } - } -} -#endif - -static tcache_t * -tcache_create(arena_t *arena) -{ - tcache_t *tcache; - - if (sizeof(tcache_t) + (sizeof(tcache_bin_t *) * (nbins - 1)) <= - small_maxclass) { - tcache = (tcache_t *)arena_malloc_small(arena, sizeof(tcache_t) - + (sizeof(tcache_bin_t *) * (nbins - 1)), true); - } else if (sizeof(tcache_t) + (sizeof(tcache_bin_t *) * (nbins - 1)) <= - bin_maxclass) { - tcache = (tcache_t *)arena_malloc_medium(arena, sizeof(tcache_t) - + (sizeof(tcache_bin_t *) * (nbins - 1)), true); - } else { - tcache = (tcache_t *)icalloc(sizeof(tcache_t) + - (sizeof(tcache_bin_t *) * (nbins - 1))); - } - - if (tcache == NULL) - return (NULL); - -#ifdef MALLOC_STATS - /* Link into list of extant tcaches. */ - malloc_spin_lock(&arena->lock); - ql_elm_new(tcache, link); - ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_spin_unlock(&arena->lock); -#endif - - tcache->arena = arena; - - tcache_tls = tcache; - - return (tcache); -} - -static void -tcache_destroy(tcache_t *tcache) -{ - unsigned i; - -#ifdef MALLOC_STATS - /* Unlink from list of extant tcaches. */ - malloc_spin_lock(&tcache->arena->lock); - ql_remove(&tcache->arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, tcache->arena); - malloc_spin_unlock(&tcache->arena->lock); -#endif - - for (i = 0; i < nbins; i++) { - tcache_bin_t *tbin = tcache->tbins[i]; - if (tbin != NULL) { - tcache_bin_flush(tbin, i, 0); - tcache_bin_destroy(tcache, tbin, i); - } - } - - if (arena_salloc(tcache) <= bin_maxclass) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - size_t pageind = (((uintptr_t)tcache - (uintptr_t)chunk) >> - PAGE_SHIFT); - arena_chunk_map_t *mapelm = &chunk->map[pageind]; - - malloc_spin_lock(&arena->lock); - arena_dalloc_bin(arena, chunk, tcache, mapelm); - malloc_spin_unlock(&arena->lock); - } else - idalloc(tcache); -} -#endif - -/* - * End arena. - */ -/******************************************************************************/ -/* - * Begin general internal functions. - */ - -static void * -huge_malloc(size_t size, bool zero) -{ - void *ret; - size_t csize; - extent_node_t *node; - - /* Allocate one or more contiguous chunks for this request. */ - - csize = CHUNK_CEILING(size); - if (csize == 0) { - /* size is large enough to cause size_t wrap-around. */ - return (NULL); - } - - /* Allocate an extent node with which to track the chunk. */ - node = base_node_alloc(); - if (node == NULL) - return (NULL); - - ret = chunk_alloc(csize, &zero); - if (ret == NULL) { - base_node_dealloc(node); - return (NULL); - } - - /* Insert node into huge. 
*/ - node->addr = ret; - node->size = csize; - - malloc_mutex_lock(&huge_mtx); - extent_tree_ad_insert(&huge, node); -#ifdef MALLOC_STATS - huge_nmalloc++; - huge_allocated += csize; -#endif - malloc_mutex_unlock(&huge_mtx); - - if (zero == false) { - if (opt_junk) - memset(ret, 0xa5, csize); - else if (opt_zero) - memset(ret, 0, csize); - } - - return (ret); -} - -/* Only handles large allocations that require more than chunk alignment. */ -static void * -huge_palloc(size_t alignment, size_t size) -{ - void *ret; - size_t alloc_size, chunk_size, offset; - extent_node_t *node; - bool zero; - - /* - * This allocation requires alignment that is even larger than chunk - * alignment. This means that huge_malloc() isn't good enough. - * - * Allocate almost twice as many chunks as are demanded by the size or - * alignment, in order to assure the alignment can be achieved, then - * unmap leading and trailing chunks. - */ - assert(alignment >= chunksize); - - chunk_size = CHUNK_CEILING(size); - - if (size >= alignment) - alloc_size = chunk_size + alignment - chunksize; - else - alloc_size = (alignment << 1) - chunksize; - - /* Allocate an extent node with which to track the chunk. */ - node = base_node_alloc(); - if (node == NULL) - return (NULL); - - zero = false; - ret = chunk_alloc(alloc_size, &zero); - if (ret == NULL) { - base_node_dealloc(node); - return (NULL); - } - - offset = (uintptr_t)ret & (alignment - 1); - assert((offset & chunksize_mask) == 0); - assert(offset < alloc_size); - if (offset == 0) { - /* Trim trailing space. */ - chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size - - chunk_size); - } else { - size_t trailsize; - - /* Trim leading space. */ - chunk_dealloc(ret, alignment - offset); - - ret = (void *)((uintptr_t)ret + (alignment - offset)); - - trailsize = alloc_size - (alignment - offset) - chunk_size; - if (trailsize != 0) { - /* Trim trailing space. */ - assert(trailsize < alloc_size); - chunk_dealloc((void *)((uintptr_t)ret + chunk_size), - trailsize); - } - } - - /* Insert node into huge. */ - node->addr = ret; - node->size = chunk_size; - - malloc_mutex_lock(&huge_mtx); - extent_tree_ad_insert(&huge, node); -#ifdef MALLOC_STATS - huge_nmalloc++; - huge_allocated += chunk_size; -#endif - malloc_mutex_unlock(&huge_mtx); - - if (opt_junk) - memset(ret, 0xa5, chunk_size); - else if (opt_zero) - memset(ret, 0, chunk_size); - - return (ret); -} - -static void * -huge_ralloc(void *ptr, size_t size, size_t oldsize) -{ - void *ret; - size_t copysize; - - /* Avoid moving the allocation if the size class would not change. */ - if (oldsize > arena_maxclass && - CHUNK_CEILING(size) == CHUNK_CEILING(oldsize)) { - if (opt_junk && size < oldsize) { - memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - - size); - } else if (opt_zero && size > oldsize) { - memset((void *)((uintptr_t)ptr + oldsize), 0, size - - oldsize); - } - return (ptr); - } - - /* - * If we get here, then size and oldsize are different enough that we - * need to use a different size class. In that case, fall back to - * allocating new space and copying. - */ - ret = huge_malloc(size, false); - if (ret == NULL) - return (NULL); - - copysize = (size < oldsize) ? size : oldsize; - memcpy(ret, ptr, copysize); - idalloc(ptr); - return (ret); -} - -static void -huge_dalloc(void *ptr) -{ - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. 
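The over-allocate-and-trim approach taken by huge_palloc() above can be shown directly in terms of mmap(2)/munmap(2), at page rather than chunk granularity and without the extent-node bookkeeping. This sketch assumes size is already rounded to a page multiple and alignment is a power-of-two multiple of the page size.

#include <sys/mman.h>
#include <stdint.h>
#include <stddef.h>

void *
aligned_map(size_t alignment, size_t size)
{
	void *addr;
	char *ret;
	size_t lead;

	/* Map enough that an aligned, size-byte region must fit somewhere. */
	addr = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);

	/* Distance from the mapping to the first aligned address within it. */
	lead = (alignment - ((uintptr_t)addr & (alignment - 1))) &
	    (alignment - 1);
	ret = (char *)addr + lead;

	if (lead != 0)
		munmap(addr, lead);			/* trim leading space */
	munmap(ret + size, alignment - lead);		/* trim trailing space */

	return (ret);
}
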
*/ - key.addr = ptr; - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - assert(node->addr == ptr); - extent_tree_ad_remove(&huge, node); - -#ifdef MALLOC_STATS - huge_ndalloc++; - huge_allocated -= node->size; -#endif - - malloc_mutex_unlock(&huge_mtx); - - /* Unmap chunk. */ -#ifdef MALLOC_DSS - if (opt_dss && opt_junk) - memset(node->addr, 0x5a, node->size); -#endif - chunk_dealloc(node->addr, node->size); - - base_node_dealloc(node); -} - -static void -malloc_stats_print(void) -{ - char s[UMAX2S_BUFSIZE]; - - _malloc_message("___ Begin malloc statistics ___\n", "", "", ""); - _malloc_message("Assertions ", -#ifdef NDEBUG - "disabled", -#else - "enabled", -#endif - "\n", ""); - _malloc_message("Boolean MALLOC_OPTIONS: ", opt_abort ? "A" : "a", "", ""); -#ifdef MALLOC_DSS - _malloc_message(opt_dss ? "D" : "d", "", "", ""); -#endif - _malloc_message(opt_junk ? "J" : "j", "", "", ""); -#ifdef MALLOC_DSS - _malloc_message(opt_mmap ? "M" : "m", "", "", ""); -#endif - _malloc_message("P", "", "", ""); - _malloc_message(opt_utrace ? "U" : "u", "", "", ""); - _malloc_message(opt_sysv ? "V" : "v", "", "", ""); - _malloc_message(opt_xmalloc ? "X" : "x", "", "", ""); - _malloc_message(opt_zero ? "Z" : "z", "", "", ""); - _malloc_message("\n", "", "", ""); - - _malloc_message("CPUs: ", umax2s(ncpus, 10, s), "\n", ""); - _malloc_message("Max arenas: ", umax2s(narenas, 10, s), "\n", ""); - _malloc_message("Pointer size: ", umax2s(sizeof(void *), 10, s), "\n", ""); - _malloc_message("Quantum size: ", umax2s(QUANTUM, 10, s), "\n", ""); - _malloc_message("Cacheline size (assumed): ", - umax2s(CACHELINE, 10, s), "\n", ""); - _malloc_message("Subpage spacing: ", umax2s(SUBPAGE, 10, s), "\n", ""); - _malloc_message("Medium spacing: ", umax2s((1U << lg_mspace), 10, s), "\n", - ""); -#ifdef MALLOC_TINY - _malloc_message("Tiny 2^n-spaced sizes: [", umax2s((1U << LG_TINY_MIN), 10, - s), "..", ""); - _malloc_message(umax2s((qspace_min >> 1), 10, s), "]\n", "", ""); -#endif - _malloc_message("Quantum-spaced sizes: [", umax2s(qspace_min, 10, s), "..", - ""); - _malloc_message(umax2s(qspace_max, 10, s), "]\n", "", ""); - _malloc_message("Cacheline-spaced sizes: [", - umax2s(cspace_min, 10, s), "..", ""); - _malloc_message(umax2s(cspace_max, 10, s), "]\n", "", ""); - _malloc_message("Subpage-spaced sizes: [", umax2s(sspace_min, 10, s), "..", - ""); - _malloc_message(umax2s(sspace_max, 10, s), "]\n", "", ""); - _malloc_message("Medium sizes: [", umax2s(medium_min, 10, s), "..", ""); - _malloc_message(umax2s(medium_max, 10, s), "]\n", "", ""); - if (opt_lg_dirty_mult >= 0) { - _malloc_message("Min active:dirty page ratio per arena: ", - umax2s((1U << opt_lg_dirty_mult), 10, s), ":1\n", ""); - } else { - _malloc_message("Min active:dirty page ratio per arena: N/A\n", "", - "", ""); - } -#ifdef MALLOC_TCACHE - _malloc_message("Thread cache slots per size class: ", - tcache_nslots ? umax2s(tcache_nslots, 10, s) : "N/A", "\n", ""); - _malloc_message("Thread cache GC sweep interval: ", - (tcache_nslots && tcache_gc_incr > 0) ? - umax2s((1U << opt_lg_tcache_gc_sweep), 10, s) : "N/A", "", ""); - _malloc_message(" (increment interval: ", - (tcache_nslots && tcache_gc_incr > 0) ? 
umax2s(tcache_gc_incr, 10, s) - : "N/A", ")\n", ""); -#endif - _malloc_message("Chunk size: ", umax2s(chunksize, 10, s), "", ""); - _malloc_message(" (2^", umax2s(opt_lg_chunk, 10, s), ")\n", ""); - -#ifdef MALLOC_STATS - { - size_t allocated, mapped; - unsigned i; - arena_t *arena; - - /* Calculate and print allocated/mapped stats. */ - - /* arenas. */ - for (i = 0, allocated = 0; i < narenas; i++) { - if (arenas[i] != NULL) { - malloc_spin_lock(&arenas[i]->lock); - allocated += arenas[i]->stats.allocated_small; - allocated += arenas[i]->stats.allocated_large; - malloc_spin_unlock(&arenas[i]->lock); - } - } - - /* huge/base. */ - malloc_mutex_lock(&huge_mtx); - allocated += huge_allocated; - mapped = stats_chunks.curchunks * chunksize; - malloc_mutex_unlock(&huge_mtx); - - malloc_mutex_lock(&base_mtx); - mapped += base_mapped; - malloc_mutex_unlock(&base_mtx); - - malloc_printf("Allocated: %zu, mapped: %zu\n", allocated, - mapped); - - /* Print chunk stats. */ - { - chunk_stats_t chunks_stats; - - malloc_mutex_lock(&huge_mtx); - chunks_stats = stats_chunks; - malloc_mutex_unlock(&huge_mtx); - - malloc_printf("chunks: nchunks " - "highchunks curchunks\n"); - malloc_printf(" %13"PRIu64"%13zu%13zu\n", - chunks_stats.nchunks, chunks_stats.highchunks, - chunks_stats.curchunks); - } - - /* Print chunk stats. */ - malloc_printf( - "huge: nmalloc ndalloc allocated\n"); - malloc_printf(" %12"PRIu64" %12"PRIu64" %12zu\n", huge_nmalloc, - huge_ndalloc, huge_allocated); - - /* Print stats for each arena. */ - for (i = 0; i < narenas; i++) { - arena = arenas[i]; - if (arena != NULL) { - malloc_printf("\narenas[%u]:\n", i); - malloc_spin_lock(&arena->lock); - arena_stats_print(arena); - malloc_spin_unlock(&arena->lock); - } - } - } -#endif /* #ifdef MALLOC_STATS */ - _malloc_message("--- End malloc statistics ---\n", "", "", ""); -} - -#ifdef MALLOC_DEBUG -static void -small_size2bin_validate(void) -{ - size_t i, size, binind; - - assert(small_size2bin[0] == 0xffU); - i = 1; -# ifdef MALLOC_TINY - /* Tiny. */ - for (; i < (1U << LG_TINY_MIN); i++) { - size = pow2_ceil(1U << LG_TINY_MIN); - binind = ffs((int)(size >> (LG_TINY_MIN + 1))); - assert(small_size2bin[i] == binind); - } - for (; i < qspace_min; i++) { - size = pow2_ceil(i); - binind = ffs((int)(size >> (LG_TINY_MIN + 1))); - assert(small_size2bin[i] == binind); - } -# endif - /* Quantum-spaced. */ - for (; i <= qspace_max; i++) { - size = QUANTUM_CEILING(i); - binind = ntbins + (size >> LG_QUANTUM) - 1; - assert(small_size2bin[i] == binind); - } - /* Cacheline-spaced. */ - for (; i <= cspace_max; i++) { - size = CACHELINE_CEILING(i); - binind = ntbins + nqbins + ((size - cspace_min) >> - LG_CACHELINE); - assert(small_size2bin[i] == binind); - } - /* Sub-page. 
*/ - for (; i <= sspace_max; i++) { - size = SUBPAGE_CEILING(i); - binind = ntbins + nqbins + ncbins + ((size - sspace_min) - >> LG_SUBPAGE); - assert(small_size2bin[i] == binind); - } -} -#endif - -static bool -small_size2bin_init(void) -{ - - if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT - || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT - || sizeof(const_small_size2bin) != small_maxclass + 1) - return (small_size2bin_init_hard()); - - small_size2bin = const_small_size2bin; -#ifdef MALLOC_DEBUG - assert(sizeof(const_small_size2bin) == small_maxclass + 1); - small_size2bin_validate(); -#endif - return (false); -} - -static bool -small_size2bin_init_hard(void) -{ - size_t i, size, binind; - uint8_t *custom_small_size2bin; - - assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT - || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT - || sizeof(const_small_size2bin) != small_maxclass + 1); - - custom_small_size2bin = (uint8_t *)base_alloc(small_maxclass + 1); - if (custom_small_size2bin == NULL) - return (true); - - custom_small_size2bin[0] = 0xffU; - i = 1; -#ifdef MALLOC_TINY - /* Tiny. */ - for (; i < (1U << LG_TINY_MIN); i++) { - size = pow2_ceil(1U << LG_TINY_MIN); - binind = ffs((int)(size >> (LG_TINY_MIN + 1))); - custom_small_size2bin[i] = binind; - } - for (; i < qspace_min; i++) { - size = pow2_ceil(i); - binind = ffs((int)(size >> (LG_TINY_MIN + 1))); - custom_small_size2bin[i] = binind; - } -#endif - /* Quantum-spaced. */ - for (; i <= qspace_max; i++) { - size = QUANTUM_CEILING(i); - binind = ntbins + (size >> LG_QUANTUM) - 1; - custom_small_size2bin[i] = binind; - } - /* Cacheline-spaced. */ - for (; i <= cspace_max; i++) { - size = CACHELINE_CEILING(i); - binind = ntbins + nqbins + ((size - cspace_min) >> - LG_CACHELINE); - custom_small_size2bin[i] = binind; - } - /* Sub-page. */ - for (; i <= sspace_max; i++) { - size = SUBPAGE_CEILING(i); - binind = ntbins + nqbins + ncbins + ((size - sspace_min) >> - LG_SUBPAGE); - custom_small_size2bin[i] = binind; - } - - small_size2bin = custom_small_size2bin; -#ifdef MALLOC_DEBUG - small_size2bin_validate(); -#endif - return (false); -} - -static unsigned -malloc_ncpus(void) -{ - int mib[2]; - unsigned ret; - int error; - size_t len; - - error = _elf_aux_info(AT_NCPUS, &ret, sizeof(ret)); - if (error != 0 || ret == 0) { - mib[0] = CTL_HW; - mib[1] = HW_NCPU; - len = sizeof(ret); - if (sysctl(mib, 2, &ret, &len, (void *)NULL, 0) == -1) { - /* Error. */ - ret = 1; - } - } - - return (ret); -} - -/* - * FreeBSD's pthreads implementation calls malloc(3), so the malloc - * implementation has to take pains to avoid infinite recursion during - * initialization. - */ -static inline bool -malloc_init(void) -{ - - if (malloc_initialized == false) - return (malloc_init_hard()); - - return (false); -} - -static bool -malloc_init_hard(void) -{ - unsigned i; - int linklen; - char buf[PATH_MAX + 1]; - const char *opts; - - malloc_mutex_lock(&init_lock); - if (malloc_initialized) { - /* - * Another thread initialized the allocator before this one - * acquired init_lock. - */ - malloc_mutex_unlock(&init_lock); - return (false); - } - - /* Get number of CPUs. */ - ncpus = malloc_ncpus(); - - /* - * Increase the chunk size to the largest page size that is greater - * than the default chunk size and less than or equal to 4MB. 
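A toy rebuild of the size-to-bin table constructed by small_size2bin_init_hard() above, under assumed parameters (16-byte quantum spacing up to 128, 64-byte cacheline spacing up to 256, no tiny bins); the EX_* names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define EX_LG_QUANTUM	4
#define EX_QUANTUM	(1U << EX_LG_QUANTUM)			/* 16 */
#define EX_QSPACE_MAX	128U
#define EX_NQBINS	(EX_QSPACE_MAX >> EX_LG_QUANTUM)	/* 8 */
#define EX_LG_CACHELINE	6
#define EX_CACHELINE	(1U << EX_LG_CACHELINE)			/* 64 */
#define EX_CSPACE_MIN	(EX_QSPACE_MAX + EX_CACHELINE)		/* 192 */
#define EX_CSPACE_MAX	256U

static uint8_t ex_size2bin[EX_CSPACE_MAX + 1];

static void
ex_size2bin_init(void)
{
	unsigned i, size;

	ex_size2bin[0] = 0xffU;				/* size 0 is invalid */
	/* Quantum-spaced classes: 16, 32, ..., 128 -> bins 0..7. */
	for (i = 1; i <= EX_QSPACE_MAX; i++) {
		size = (i + EX_QUANTUM - 1) & ~(EX_QUANTUM - 1);
		ex_size2bin[i] = (size >> EX_LG_QUANTUM) - 1;
	}
	/* Cacheline-spaced classes: 192, 256 -> bins 8..9. */
	for (; i <= EX_CSPACE_MAX; i++) {
		size = (i + EX_CACHELINE - 1) & ~(EX_CACHELINE - 1);
		ex_size2bin[i] = EX_NQBINS +
		    ((size - EX_CSPACE_MIN) >> EX_LG_CACHELINE);
	}
}

int
main(void)
{

	ex_size2bin_init();
	printf("17 -> bin %d, 129 -> bin %d, 256 -> bin %d\n",
	    ex_size2bin[17], ex_size2bin[129], ex_size2bin[256]);
	return (0);
}
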
- */ - { - size_t pagesizes[MAXPAGESIZES]; - int k, nsizes; - - nsizes = getpagesizes(pagesizes, MAXPAGESIZES); - for (k = 0; k < nsizes; k++) - if (pagesizes[k] <= (1LU << 22)) - while ((1LU << opt_lg_chunk) < pagesizes[k]) - opt_lg_chunk++; - } - - for (i = 0; i < 3; i++) { - unsigned j; - - /* Get runtime configuration. */ - switch (i) { - case 0: - if ((linklen = readlink("/etc/malloc.conf", buf, - sizeof(buf) - 1)) != -1) { - /* - * Use the contents of the "/etc/malloc.conf" - * symbolic link's name. - */ - buf[linklen] = '\0'; - opts = buf; - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - case 1: - if (issetugid() == 0 && (opts = - getenv("MALLOC_OPTIONS")) != NULL) { - /* - * Do nothing; opts is already initialized to - * the value of the MALLOC_OPTIONS environment - * variable. - */ - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - case 2: - if (_malloc_options != NULL) { - /* - * Use options that were compiled into the - * program. - */ - opts = _malloc_options; - } else { - /* No configuration specified. */ - buf[0] = '\0'; - opts = buf; - } - break; - default: - /* NOTREACHED */ - assert(false); - buf[0] = '\0'; - opts = buf; - } - - for (j = 0; opts[j] != '\0'; j++) { - unsigned k, nreps; - bool nseen; - - /* Parse repetition count, if any. */ - for (nreps = 0, nseen = false;; j++, nseen = true) { - switch (opts[j]) { - case '0': case '1': case '2': case '3': - case '4': case '5': case '6': case '7': - case '8': case '9': - nreps *= 10; - nreps += opts[j] - '0'; - break; - default: - goto MALLOC_OUT; - } - } -MALLOC_OUT: - if (nseen == false) - nreps = 1; - - for (k = 0; k < nreps; k++) { - switch (opts[j]) { - case 'a': - opt_abort = false; - break; - case 'A': - opt_abort = true; - break; - case 'c': - if (opt_lg_cspace_max - 1 > - opt_lg_qspace_max && - opt_lg_cspace_max > - LG_CACHELINE) - opt_lg_cspace_max--; - break; - case 'C': - if (opt_lg_cspace_max < PAGE_SHIFT - - 1) - opt_lg_cspace_max++; - break; - case 'd': -#ifdef MALLOC_DSS - opt_dss = false; -#endif - break; - case 'D': -#ifdef MALLOC_DSS - opt_dss = true; -#endif - break; - case 'e': - if (opt_lg_medium_max > PAGE_SHIFT) - opt_lg_medium_max--; - break; - case 'E': - if (opt_lg_medium_max + 1 < - opt_lg_chunk) - opt_lg_medium_max++; - break; - case 'f': - if (opt_lg_dirty_mult + 1 < - (sizeof(size_t) << 3)) - opt_lg_dirty_mult++; - break; - case 'F': - if (opt_lg_dirty_mult >= 0) - opt_lg_dirty_mult--; - break; -#ifdef MALLOC_TCACHE - case 'g': - if (opt_lg_tcache_gc_sweep >= 0) - opt_lg_tcache_gc_sweep--; - break; - case 'G': - if (opt_lg_tcache_gc_sweep + 1 < - (sizeof(size_t) << 3)) - opt_lg_tcache_gc_sweep++; - break; - case 'h': - if (opt_lg_tcache_nslots > 0) - opt_lg_tcache_nslots--; - break; - case 'H': - if (opt_lg_tcache_nslots + 1 < - (sizeof(size_t) << 3)) - opt_lg_tcache_nslots++; - break; -#endif - case 'j': - opt_junk = false; - break; - case 'J': - opt_junk = true; - break; - case 'k': - /* - * Chunks always require at least one - * header page, plus enough room to - * hold a run for the largest medium - * size class (one page more than the - * size). 
- */ - if ((1U << (opt_lg_chunk - 1)) >= - (2U << PAGE_SHIFT) + (1U << - opt_lg_medium_max)) - opt_lg_chunk--; - break; - case 'K': - if (opt_lg_chunk + 1 < - (sizeof(size_t) << 3)) - opt_lg_chunk++; - break; - case 'm': -#ifdef MALLOC_DSS - opt_mmap = false; -#endif - break; - case 'M': -#ifdef MALLOC_DSS - opt_mmap = true; -#endif - break; - case 'n': - opt_narenas_lshift--; - break; - case 'N': - opt_narenas_lshift++; - break; - case 'p': - opt_stats_print = false; - break; - case 'P': - opt_stats_print = true; - break; - case 'q': - if (opt_lg_qspace_max > LG_QUANTUM) - opt_lg_qspace_max--; - break; - case 'Q': - if (opt_lg_qspace_max + 1 < - opt_lg_cspace_max) - opt_lg_qspace_max++; - break; - case 'u': - opt_utrace = false; - break; - case 'U': - opt_utrace = true; - break; - case 'v': - opt_sysv = false; - break; - case 'V': - opt_sysv = true; - break; - case 'x': - opt_xmalloc = false; - break; - case 'X': - opt_xmalloc = true; - break; - case 'z': - opt_zero = false; - break; - case 'Z': - opt_zero = true; - break; - default: { - char cbuf[2]; - - cbuf[0] = opts[j]; - cbuf[1] = '\0'; - _malloc_message(_getprogname(), - ": (malloc) Unsupported character " - "in malloc options: '", cbuf, - "'\n"); - } - } - } - } - } - -#ifdef MALLOC_DSS - /* Make sure that there is some method for acquiring memory. */ - if (opt_dss == false && opt_mmap == false) - opt_mmap = true; -#endif - if (opt_stats_print) { - /* Print statistics at exit. */ - atexit(stats_print_atexit); - } - - - /* Set variables according to the value of opt_lg_[qc]space_max. */ - qspace_max = (1U << opt_lg_qspace_max); - cspace_min = CACHELINE_CEILING(qspace_max); - if (cspace_min == qspace_max) - cspace_min += CACHELINE; - cspace_max = (1U << opt_lg_cspace_max); - sspace_min = SUBPAGE_CEILING(cspace_max); - if (sspace_min == cspace_max) - sspace_min += SUBPAGE; - assert(sspace_min < PAGE_SIZE); - sspace_max = PAGE_SIZE - SUBPAGE; - medium_max = (1U << opt_lg_medium_max); - -#ifdef MALLOC_TINY - assert(LG_QUANTUM >= LG_TINY_MIN); -#endif - assert(ntbins <= LG_QUANTUM); - nqbins = qspace_max >> LG_QUANTUM; - ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1; - nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1; - - /* - * Compute medium size class spacing and the number of medium size - * classes. Limit spacing to no more than pagesize, but if possible - * use the smallest spacing that does not exceed NMBINS_MAX medium size - * classes. - */ - lg_mspace = LG_SUBPAGE; - nmbins = ((medium_max - medium_min) >> lg_mspace) + 1; - while (lg_mspace < PAGE_SHIFT && nmbins > NMBINS_MAX) { - lg_mspace = lg_mspace + 1; - nmbins = ((medium_max - medium_min) >> lg_mspace) + 1; - } - mspace_mask = (1U << lg_mspace) - 1U; - - mbin0 = ntbins + nqbins + ncbins + nsbins; - nbins = mbin0 + nmbins; - /* - * The small_size2bin lookup table uses uint8_t to encode each bin - * index, so we cannot support more than 256 small size classes. This - * limit is difficult to exceed (not even possible with 16B quantum and - * 4KiB pages), and such configurations are impractical, but - * nonetheless we need to protect against this case in order to avoid - * undefined behavior. 
- */ - if (mbin0 > 256) { - char line_buf[UMAX2S_BUFSIZE]; - _malloc_message(_getprogname(), - ": (malloc) Too many small size classes (", - umax2s(mbin0, 10, line_buf), " > max 256)\n"); - abort(); - } - - if (small_size2bin_init()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - -#ifdef MALLOC_TCACHE - if (opt_lg_tcache_nslots > 0) { - tcache_nslots = (1U << opt_lg_tcache_nslots); - - /* Compute incremental GC event threshold. */ - if (opt_lg_tcache_gc_sweep >= 0) { - tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) / - nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins == - 0) ? 0 : 1); - } else - tcache_gc_incr = 0; - } else - tcache_nslots = 0; -#endif - - /* Set variables according to the value of opt_lg_chunk. */ - chunksize = (1LU << opt_lg_chunk); - chunksize_mask = chunksize - 1; - chunk_npages = (chunksize >> PAGE_SHIFT); - { - size_t header_size; - - /* - * Compute the header size such that it is large enough to - * contain the page map. - */ - header_size = sizeof(arena_chunk_t) + - (sizeof(arena_chunk_map_t) * (chunk_npages - 1)); - arena_chunk_header_npages = (header_size >> PAGE_SHIFT) + - ((header_size & PAGE_MASK) != 0); - } - arena_maxclass = chunksize - (arena_chunk_header_npages << - PAGE_SHIFT); - - UTRACE((void *)(intptr_t)(-1), 0, 0); - -#ifdef MALLOC_STATS - malloc_mutex_init(&chunks_mtx); - memset(&stats_chunks, 0, sizeof(chunk_stats_t)); -#endif - - /* Various sanity checks that regard configuration. */ - assert(chunksize >= PAGE_SIZE); - - /* Initialize chunks data. */ - malloc_mutex_init(&huge_mtx); - extent_tree_ad_new(&huge); -#ifdef MALLOC_DSS - malloc_mutex_init(&dss_mtx); - dss_base = sbrk(0); - dss_prev = dss_base; - dss_max = dss_base; - extent_tree_szad_new(&dss_chunks_szad); - extent_tree_ad_new(&dss_chunks_ad); -#endif -#ifdef MALLOC_STATS - huge_nmalloc = 0; - huge_ndalloc = 0; - huge_allocated = 0; -#endif - - /* Initialize base allocation data structures. */ -#ifdef MALLOC_STATS - base_mapped = 0; -#endif -#ifdef MALLOC_DSS - /* - * Allocate a base chunk here, since it doesn't actually have to be - * chunk-aligned. Doing this before allocating any other chunks allows - * the use of space that would otherwise be wasted. - */ - if (opt_dss) - base_pages_alloc(0); -#endif - base_nodes = NULL; - malloc_mutex_init(&base_mtx); - - if (ncpus > 1) { - /* - * For SMP systems, create more than one arena per CPU by - * default. - */ -#ifdef MALLOC_TCACHE - if (tcache_nslots) { - /* - * Only large object allocation/deallocation is - * guaranteed to acquire an arena mutex, so we can get - * away with fewer arenas than without thread caching. - */ - opt_narenas_lshift += 1; - } else { -#endif - /* - * All allocations must acquire an arena mutex, so use - * plenty of arenas. - */ - opt_narenas_lshift += 2; -#ifdef MALLOC_TCACHE - } -#endif - } - - /* Determine how many arenas to use. */ - narenas = ncpus; - if (opt_narenas_lshift > 0) { - if ((narenas << opt_narenas_lshift) > narenas) - narenas <<= opt_narenas_lshift; - /* - * Make sure not to exceed the limits of what base_alloc() can - * handle. - */ - if (narenas * sizeof(arena_t *) > chunksize) - narenas = chunksize / sizeof(arena_t *); - } else if (opt_narenas_lshift < 0) { - if ((narenas >> -opt_narenas_lshift) < narenas) - narenas >>= -opt_narenas_lshift; - /* Make sure there is at least one arena. 
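The arena-count heuristic above, together with the one-arena floor that follows, boils down to a small shift-and-clamp calculation. The helper below is an illustrative restatement only (the function name and sample values are invented for this sketch, and the base_alloc() capacity clamp shown in the hunk is omitted):

static unsigned
narenas_for(unsigned ncpus, int lshift)
{
	unsigned narenas = ncpus;

	if (lshift > 0) {
		if ((narenas << lshift) > narenas)
			narenas <<= lshift;
	} else if (lshift < 0) {
		if ((narenas >> -lshift) < narenas)
			narenas >>= -lshift;
		if (narenas == 0)
			narenas = 1;	/* Keep at least one arena. */
	}
	return (narenas);
}

/* For example: narenas_for(4, 2) == 16 and narenas_for(1, -3) == 1. */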
*/ - if (narenas == 0) - narenas = 1; - } - -#ifdef NO_TLS - if (narenas > 1) { - static const unsigned primes[] = {1, 3, 5, 7, 11, 13, 17, 19, - 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, - 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, - 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, - 223, 227, 229, 233, 239, 241, 251, 257, 263}; - unsigned nprimes, parenas; - - /* - * Pick a prime number of hash arenas that is more than narenas - * so that direct hashing of pthread_self() pointers tends to - * spread allocations evenly among the arenas. - */ - assert((narenas & 1) == 0); /* narenas must be even. */ - nprimes = (sizeof(primes) >> LG_SIZEOF_INT); - parenas = primes[nprimes - 1]; /* In case not enough primes. */ - for (i = 1; i < nprimes; i++) { - if (primes[i] > narenas) { - parenas = primes[i]; - break; - } - } - narenas = parenas; - } -#endif - -#ifndef NO_TLS - next_arena = 0; -#endif - - /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); - if (arenas == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas); - - /* - * Initialize one arena here. The rest are lazily created in - * choose_arena_hard(). - */ - arenas_extend(0); - if (arenas[0] == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } -#ifndef NO_TLS - /* - * Assign the initial arena to the initial thread, in order to avoid - * spurious creation of an extra arena if the application switches to - * threaded mode. - */ - arenas_map = arenas[0]; -#endif - malloc_spin_init(&arenas_lock); - - malloc_initialized = true; - malloc_mutex_unlock(&init_lock); - return (false); -} - -/* - * End general internal functions. - */ -/******************************************************************************/ -/* - * Begin malloc(3)-compatible functions. - */ - -void * -malloc(size_t size) -{ - void *ret; - - if (malloc_init()) { - ret = NULL; - goto OOM; - } - - if (size == 0) { - if (opt_sysv == false) - size = 1; - else { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in malloc(): " - "invalid size 0\n", "", ""); - abort(); - } - ret = NULL; - goto RETURN; - } - } - - ret = imalloc(size); - -OOM: - if (ret == NULL) { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in malloc(): out of memory\n", "", - ""); - abort(); - } - errno = ENOMEM; - } - -RETURN: - UTRACE(0, size, ret); - return (ret); -} - -int -posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret; - void *result; - - if (malloc_init()) - result = NULL; - else { - if (size == 0) { - if (opt_sysv == false) - size = 1; - else { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in " - "posix_memalign(): invalid " - "size 0\n", "", ""); - abort(); - } - result = NULL; - *memptr = NULL; - ret = 0; - goto RETURN; - } - } - - /* Make sure that alignment is a large enough power of 2. 
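The alignment test below relies on a standard bit trick: for a nonzero value a, (a - 1) & a clears the lowest set bit, so the expression is zero exactly when a is a power of two. A minimal illustration (values chosen arbitrarily, not taken from the patch):

#include <assert.h>

static void
alignment_check_demo(void)
{

	assert(((16 - 1) & 16) == 0);	/* Power of two: passes the check. */
	assert(((24 - 1) & 24) != 0);	/* Not a power of two: EINVAL. */
	/* 4 is a power of two but smaller than sizeof(void *) on LP64, so */
	/* the second half of the test below still rejects it. */
	assert(((4 - 1) & 4) == 0);
}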
*/ - if (((alignment - 1) & alignment) != 0 - || alignment < sizeof(void *)) { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in posix_memalign(): " - "invalid alignment\n", "", ""); - abort(); - } - result = NULL; - ret = EINVAL; - goto RETURN; - } - - result = ipalloc(alignment, size); - } - - if (result == NULL) { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in posix_memalign(): out of memory\n", - "", ""); - abort(); - } - ret = ENOMEM; - goto RETURN; - } - - *memptr = result; - ret = 0; - -RETURN: - UTRACE(0, size, result); - return (ret); -} - -void * -aligned_alloc(size_t alignment, size_t size) -{ - void *memptr; - int ret; - - ret = posix_memalign(&memptr, alignment, size); - if (ret != 0) { - errno = ret; - return (NULL); - } - return (memptr); -} - -void * -calloc(size_t num, size_t size) -{ - void *ret; - size_t num_size; - - if (malloc_init()) { - num_size = 0; - ret = NULL; - goto RETURN; - } - - num_size = num * size; - if (num_size == 0) { - if ((opt_sysv == false) && ((num == 0) || (size == 0))) - num_size = 1; - else { - ret = NULL; - goto RETURN; - } - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) - && (num_size / size != num)) { - /* size_t overflow. */ - ret = NULL; - goto RETURN; - } - - ret = icalloc(num_size); - -RETURN: - if (ret == NULL) { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in calloc(): out of memory\n", "", - ""); - abort(); - } - errno = ENOMEM; - } - - UTRACE(0, num_size, ret); - return (ret); -} - -void * -realloc(void *ptr, size_t size) -{ - void *ret; - - if (size == 0) { - if (opt_sysv == false) - size = 1; - else { - if (ptr != NULL) - idalloc(ptr); - ret = NULL; - goto RETURN; - } - } - - if (ptr != NULL) { - assert(malloc_initialized); - - ret = iralloc(ptr, size); - - if (ret == NULL) { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in realloc(): out of " - "memory\n", "", ""); - abort(); - } - errno = ENOMEM; - } - } else { - if (malloc_init()) - ret = NULL; - else - ret = imalloc(size); - - if (ret == NULL) { - if (opt_xmalloc) { - _malloc_message(_getprogname(), - ": (malloc) Error in realloc(): out of " - "memory\n", "", ""); - abort(); - } - errno = ENOMEM; - } - } - -RETURN: - UTRACE(ptr, size, ret); - return (ret); -} - -void -free(void *ptr) -{ - - UTRACE(ptr, 0, 0); - if (ptr != NULL) { - assert(malloc_initialized); - - idalloc(ptr); - } -} - -/* - * End malloc(3)-compatible functions. - */ -/******************************************************************************/ -/* - * Begin non-standard functions. - */ - -size_t -malloc_usable_size(const void *ptr) -{ - - assert(ptr != NULL); - - return (isalloc(ptr)); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * Begin library-private functions. - */ - -/* - * We provide an unpublished interface in order to receive notifications from - * the pthreads library whenever a thread exits. This allows us to clean up - * thread caches. 
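Farther up in this hunk, calloc() avoids an unconditional division when checking for size_t overflow: if neither operand touches the upper half of the bits in a size_t, the product cannot overflow. Restated as a stand-alone helper (the name is invented for this sketch):

#include <stddef.h>
#include <stdint.h>

static int
mul_would_overflow(size_t num, size_t size)
{
	/* Mask selecting the upper half of the bits in a size_t. */
	const size_t high_half = SIZE_MAX << (sizeof(size_t) << 2);

	if (((num | size) & high_half) == 0)
		return (0);	/* Both operands are small; no overflow. */
	return (size != 0 && (num * size) / size != num);
}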
- */ -void -_malloc_thread_cleanup(void) -{ - -#ifdef MALLOC_TCACHE - tcache_t *tcache = tcache_tls; - - if (tcache != NULL) { - assert(tcache != (void *)(uintptr_t)1); - tcache_destroy(tcache); - tcache_tls = (void *)(uintptr_t)1; - } -#endif -} - -/* - * The following functions are used by threading libraries for protection of - * malloc during fork(). These functions are only called if the program is - * running in threaded mode, so there is no need to check whether the program - * is threaded here. - */ - -void -_malloc_prefork(void) -{ - unsigned i; - - /* Acquire all mutexes in a safe order. */ - malloc_spin_lock(&arenas_lock); - for (i = 0; i < narenas; i++) { - if (arenas[i] != NULL) - malloc_spin_lock(&arenas[i]->lock); - } - - malloc_mutex_lock(&base_mtx); - - malloc_mutex_lock(&huge_mtx); - -#ifdef MALLOC_DSS - malloc_mutex_lock(&dss_mtx); -#endif -} - -void -_malloc_postfork(void) -{ - unsigned i; - - /* Release all mutexes, now that fork() has completed. */ - -#ifdef MALLOC_DSS - malloc_mutex_unlock(&dss_mtx); -#endif - - malloc_mutex_unlock(&huge_mtx); - - malloc_mutex_unlock(&base_mtx); - - for (i = 0; i < narenas; i++) { - if (arenas[i] != NULL) - malloc_spin_unlock(&arenas[i]->lock); - } - malloc_spin_unlock(&arenas_lock); -} - -/* - * End library-private functions. - */ -/******************************************************************************/ Index: lib/libc/gen/tls.c =================================================================== --- lib/libc/gen/tls.c (revision 234236) +++ lib/libc/gen/tls.c (working copy) @@ -39,6 +39,11 @@ #include "libc_private.h" +/* Provided by jemalloc to avoid bootstrapping issues. */ +void *a0malloc(size_t size); +void *a0calloc(size_t num, size_t size); +void a0free(void *ptr); + __weak_reference(__libc_allocate_tls, _rtld_allocate_tls); __weak_reference(__libc_free_tls, _rtld_free_tls); @@ -120,8 +125,8 @@ tls = (Elf_Addr **)((Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE); dtv = tls[0]; - free(dtv); - free(tcb); + a0free(dtv); + a0free(tcb); } /* @@ -137,18 +142,18 @@ if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE) return (oldtcb); - tcb = calloc(1, tls_static_space + tcbsize - TLS_TCB_SIZE); + tcb = a0calloc(1, tls_static_space + tcbsize - TLS_TCB_SIZE); tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE); if (oldtcb != NULL) { memcpy(tls, oldtcb, tls_static_space); - free(oldtcb); + a0free(oldtcb); /* Adjust the DTV. 
*/ dtv = tls[0]; dtv[2] = (Elf_Addr)tls + TLS_TCB_SIZE; } else { - dtv = malloc(3 * sizeof(Elf_Addr)); + dtv = a0malloc(3 * sizeof(Elf_Addr)); tls[0] = dtv; dtv[0] = 1; dtv[1] = 1; @@ -189,8 +194,8 @@ dtv = ((Elf_Addr**)tcb)[1]; tlsend = (Elf_Addr) tcb; tlsstart = tlsend - size; - free((void*) tlsstart); - free(dtv); + a0free((void*) tlsstart); + a0free(dtv); } /* @@ -208,8 +213,8 @@ if (tcbsize < 2 * sizeof(Elf_Addr)) tcbsize = 2 * sizeof(Elf_Addr); - tls = calloc(1, size + tcbsize); - dtv = malloc(3 * sizeof(Elf_Addr)); + tls = a0calloc(1, size + tcbsize); + dtv = a0malloc(3 * sizeof(Elf_Addr)); segbase = (Elf_Addr)(tls + size); ((Elf_Addr*)segbase)[0] = segbase; Index: lib/libc/Makefile =================================================================== --- lib/libc/Makefile (revision 234236) +++ lib/libc/Makefile (working copy) @@ -79,6 +79,7 @@ .include "${.CURDIR}/resolv/Makefile.inc" .include "${.CURDIR}/stdio/Makefile.inc" .include "${.CURDIR}/stdlib/Makefile.inc" +.include "${.CURDIR}/stdlib/jemalloc/Makefile.inc" .include "${.CURDIR}/stdtime/Makefile.inc" .include "${.CURDIR}/string/Makefile.inc" .include "${.CURDIR}/sys/Makefile.inc" Index: include/malloc_np.h =================================================================== --- include/malloc_np.h (revision 234236) +++ include/malloc_np.h (working copy) @@ -33,9 +33,36 @@ #define _MALLOC_NP_H_ #include #include +#include __BEGIN_DECLS size_t malloc_usable_size(const void *ptr); + +void malloc_stats_print(void (*write_cb)(void *, const char *), + void *cbopaque, const char *opts); +int mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen); +int mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp); +int mallctlbymib(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); + +#define ALLOCM_LG_ALIGN(la) (la) +#define ALLOCM_ALIGN(a) (ffsl(a)-1) +#define ALLOCM_ZERO ((int)0x40) +#define ALLOCM_NO_MOVE ((int)0x80) + +#define ALLOCM_SUCCESS 0 +#define ALLOCM_ERR_OOM 1 +#define ALLOCM_ERR_NOT_MOVED 2 + +int allocm(void **ptr, size_t *rsize, size_t size, int flags) + __attribute__(nonnull(1)); +int rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, + int flags) __attribute__(nonnull(1)); +int sallocm(const void *ptr, size_t *rsize, int flags) + __attribute__(nonnull(1)); +int dallocm(void *ptr, int flags) __attribute__(nonnull(1)); +int nallocm(size_t *rsize, size_t size, int flags); __END_DECLS #endif /* _MALLOC_NP_H_ */ Index: include/stdlib.h =================================================================== --- include/stdlib.h (revision 234236) +++ include/stdlib.h (working copy) @@ -155,7 +155,7 @@ * If we're in a mode greater than C99, expose C11 functions. 
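The non-standard interfaces added to malloc_np.h above can be exercised as in the sketch below. It assumes a libc built with statistics enabled, and the helper names are invented here; "stats.allocated" is a standard jemalloc mallctl name, and the experimental allocm() call requests zeroed, 64-byte-aligned memory and reports the usable size.

#include <malloc_np.h>
#include <stddef.h>
#include <strings.h>	/* ffsl(), used by ALLOCM_ALIGN(). */

/* Bytes currently allocated by the application, or 0 on error. */
static size_t
app_allocated(void)
{
	size_t allocated, len = sizeof(allocated);

	if (mallctl("stats.allocated", &allocated, &len, NULL, 0) != 0)
		return (0);
	return (allocated);
}

/* Experimental API: 100 zeroed bytes, 64-byte aligned; usable size out. */
static void *
aligned_zeroed(size_t *usable)
{
	void *p;

	if (allocm(&p, usable, 100, ALLOCM_ALIGN(64) | ALLOCM_ZERO) !=
	    ALLOCM_SUCCESS)
		return (NULL);
	return (p);
}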
*/ #if __ISO_C_VISIBLE >= 2011 || __cplusplus >= 201103L -void * aligned_alloc(size_t, size_t); +void * aligned_alloc(size_t, size_t) __malloc_like; int at_quick_exit(void (*)(void)); _Noreturn void quick_exit(int); @@ -228,9 +228,8 @@ #endif /* __XSI_VISIBLE */ #if __BSD_VISIBLE -extern const char *_malloc_options; -extern void (*_malloc_message)(const char *, const char *, const char *, - const char *); +extern const char *malloc_conf; +extern void (*malloc_message)(void *, const char *); /* * The alloca() function can't be implemented in C, and on some Index: contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h =================================================================== --- contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h (revision 0) +++ contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h (working copy) @@ -0,0 +1,76 @@ +/* + * Override settings that were generated in jemalloc_defs.h as necessary. + */ + +#undef JEMALLOC_OVERRIDE_VALLOC + +#ifndef MALLOC_PRODUCTION +#define JEMALLOC_DEBUG +#endif + +/* + * The following are architecture-dependent, so conditionally define them for + * each supported architecture. + */ +#undef CPU_SPINWAIT +#undef JEMALLOC_TLS_MODEL +#undef STATIC_PAGE_SHIFT +#undef LG_SIZEOF_PTR +#undef LG_SIZEOF_INT +#undef LG_SIZEOF_LONG +#undef LG_SIZEOF_INTMAX_T + +#ifdef __i386__ +# define LG_SIZEOF_PTR 2 +# define CPU_SPINWAIT __asm__ volatile("pause") +# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#endif +#ifdef __ia64__ +# define LG_SIZEOF_PTR 3 +#endif +#ifdef __sparc64__ +# define LG_SIZEOF_PTR 3 +# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#endif +#ifdef __amd64__ +# define LG_SIZEOF_PTR 3 +# define CPU_SPINWAIT __asm__ volatile("pause") +# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) +#endif +#ifdef __arm__ +# define LG_SIZEOF_PTR 2 +#endif +#ifdef __mips__ +# define LG_SIZEOF_PTR 2 +#endif +#ifdef __powerpc64__ +# define LG_SIZEOF_PTR 3 +#elif defined(__powerpc__) +# define LG_SIZEOF_PTR 2 +#endif + +#ifndef JEMALLOC_TLS_MODEL +# define JEMALLOC_TLS_MODEL /* Default. */ +#endif +#ifdef __clang__ +# undef JEMALLOC_TLS_MODEL +# define JEMALLOC_TLS_MODEL /* clang does not support tls_model yet. */ +#endif + +#define STATIC_PAGE_SHIFT PAGE_SHIFT +#define LG_SIZEOF_INT 2 +#define LG_SIZEOF_LONG LG_SIZEOF_PTR +#define LG_SIZEOF_INTMAX_T 3 + +/* Disable lazy-lock machinery, mangle isthreaded, and adjust its type. */ +#undef JEMALLOC_LAZY_LOCK +extern int __isthreaded; +#define isthreaded ((bool)__isthreaded) + +/* Mangle. 
*/ +#define open _open +#define read _read +#define write _write +#define close _close +#define pthread_mutex_lock _pthread_mutex_lock +#define pthread_mutex_unlock _pthread_mutex_unlock Property changes on: contrib/jemalloc/include/jemalloc/jemalloc_FreeBSD.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/jemalloc.h =================================================================== --- contrib/jemalloc/include/jemalloc/jemalloc.h (revision 0) +++ contrib/jemalloc/include/jemalloc/jemalloc.h (working copy) @@ -0,0 +1,141 @@ +#ifndef JEMALLOC_H_ +#define JEMALLOC_H_ +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define JEMALLOC_VERSION "1.0.0-254-g7ca0fdfb85b2a9fc7a112e158892c098e004385b" +#define JEMALLOC_VERSION_MAJOR 1 +#define JEMALLOC_VERSION_MINOR 0 +#define JEMALLOC_VERSION_BUGFIX 0 +#define JEMALLOC_VERSION_NREV 254 +#define JEMALLOC_VERSION_GID "7ca0fdfb85b2a9fc7a112e158892c098e004385b" + +#include "jemalloc_defs.h" +#include "jemalloc_FreeBSD.h" + +#ifdef JEMALLOC_EXPERIMENTAL +#define ALLOCM_LG_ALIGN(la) (la) +#if LG_SIZEOF_PTR == 2 +#define ALLOCM_ALIGN(a) (ffs(a)-1) +#else +#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +#endif +#define ALLOCM_ZERO ((int)0x40) +#define ALLOCM_NO_MOVE ((int)0x80) + +#define ALLOCM_SUCCESS 0 +#define ALLOCM_ERR_OOM 1 +#define ALLOCM_ERR_NOT_MOVED 2 +#endif + +/* + * The je_ prefix on the following public symbol declarations is an artifact of + * namespace management, and should be omitted in application code unless + * JEMALLOC_NO_DEMANGLE is defined (see below). + */ +extern const char *je_malloc_conf; +extern void (*je_malloc_message)(void *, const char *); + +void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); +void *je_calloc(size_t num, size_t size) JEMALLOC_ATTR(malloc); +int je_posix_memalign(void **memptr, size_t alignment, size_t size) + JEMALLOC_ATTR(nonnull(1)); +void *je_aligned_alloc(size_t alignment, size_t size) JEMALLOC_ATTR(malloc); +void *je_realloc(void *ptr, size_t size); +void je_free(void *ptr); + +size_t je_malloc_usable_size(const void *ptr); +void je_malloc_stats_print(void (*write_cb)(void *, const char *), + void *je_cbopaque, const char *opts); +int je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen); +int je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp); +int je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); + +#ifdef JEMALLOC_EXPERIMENTAL +int je_allocm(void **ptr, size_t *rsize, size_t size, int flags) + JEMALLOC_ATTR(nonnull(1)); +int je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, + int flags) JEMALLOC_ATTR(nonnull(1)); +int je_sallocm(const void *ptr, size_t *rsize, int flags) + JEMALLOC_ATTR(nonnull(1)); +int je_dallocm(void *ptr, int flags) JEMALLOC_ATTR(nonnull(1)); +int je_nallocm(size_t *rsize, size_t size, int flags); +#endif + +/* + * By default application code must explicitly refer to mangled symbol names, + * so that it is possible to use jemalloc in conjunction with another allocator + * in the same application. Define JEMALLOC_MANGLE in order to cause automatic + * name mangling that matches the API prefixing that happened as a result of + * --with-mangling and/or --with-jemalloc-prefix configuration settings. 
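The malloc_conf and malloc_message knobs declared above (and exported through stdlib.h earlier in this patch in place of _malloc_options and _malloc_message) can be used roughly as follows. The option string and helper names are illustrative only; the set of recognized options depends on how jemalloc was configured.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Wired-in options, read once when the allocator bootstraps. */
const char *malloc_conf = "abort:true";

/* Redirect allocator diagnostics (the default writes to stderr). */
static void
my_malloc_message(void *cbopaque, const char *s)
{

	(void)cbopaque;
	(void)write(STDERR_FILENO, s, strlen(s));
}

static void
install_message_hook(void)
{

	malloc_message = my_malloc_message;
}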
+ */ +#ifdef JEMALLOC_MANGLE +#ifndef JEMALLOC_NO_DEMANGLE +#define JEMALLOC_NO_DEMANGLE +#endif +#define malloc_conf je_malloc_conf +#define malloc_message je_malloc_message +#define malloc je_malloc +#define calloc je_calloc +#define posix_memalign je_posix_memalign +#define aligned_alloc je_aligned_alloc +#define realloc je_realloc +#define free je_free +#define malloc_usable_size je_malloc_usable_size +#define malloc_stats_print je_malloc_stats_print +#define mallctl je_mallctl +#define mallctlnametomib je_mallctlnametomib +#define mallctlbymib je_mallctlbymib +#define memalign je_memalign +#define valloc je_valloc +#ifdef JEMALLOC_EXPERIMENTAL +#define allocm je_allocm +#define rallocm je_rallocm +#define sallocm je_sallocm +#define dallocm je_dallocm +#define nallocm je_nallocm +#endif +#endif + +/* + * The je_* macros can be used as stable alternative names for the public + * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant + * for use in jemalloc itself, but it can be used by application code to + * provide isolation from the name mangling specified via --with-mangling + * and/or --with-jemalloc-prefix. + */ +#ifndef JEMALLOC_NO_DEMANGLE +#undef je_malloc_conf +#undef je_malloc_message +#undef je_malloc +#undef je_calloc +#undef je_posix_memalign +#undef je_aligned_alloc +#undef je_realloc +#undef je_free +#undef je_malloc_usable_size +#undef je_malloc_stats_print +#undef je_mallctl +#undef je_mallctlnametomib +#undef je_mallctlbymib +#undef je_memalign +#undef je_valloc +#ifdef JEMALLOC_EXPERIMENTAL +#undef je_allocm +#undef je_rallocm +#undef je_sallocm +#undef je_dallocm +#undef je_nallocm +#endif +#endif + +#ifdef __cplusplus +}; +#endif +#endif /* JEMALLOC_H_ */ Property changes on: contrib/jemalloc/include/jemalloc/jemalloc.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/tsd.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/tsd.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/tsd.h (working copy) @@ -0,0 +1,309 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum number of malloc_tsd users with cleanup functions. */ +#define MALLOC_TSD_CLEANUPS_MAX 8 + +typedef struct malloc_tsd_cleanup_s malloc_tsd_cleanup_t; +struct malloc_tsd_cleanup_s { + bool (*f)(void *); + void *arg; +}; + +/* + * TLS/TSD-agnostic macro-based implementation of thread-specific data. There + * are four macros that support (at least) three use cases: file-private, + * library-private, and library-private inlined. 
Following is an example + * library-private tsd variable: + * + * In example.h: + * typedef struct { + * int x; + * int y; + * } example_t; + * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) + * malloc_tsd_protos(, example, example_t *) + * malloc_tsd_externs(example, example_t *) + * In example.c: + * malloc_tsd_data(, example, example_t *, EX_INITIALIZER) + * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER, + * example_tsd_cleanup) + * + * The result is a set of generated functions, e.g.: + * + * bool example_tsd_boot(void) {...} + * example_t **example_tsd_get() {...} + * void example_tsd_set(example_t **val) {...} + * + * Note that all of the functions deal in terms of (a_type *) rather than + * (a_type) so that it is possible to support non-pointer types (unlike + * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is + * cast to (void *). This means that the cleanup function needs to cast *and* + * dereference the function argument, e.g.: + * + * void + * example_tsd_cleanup(void *arg) + * { + * example_t *example = *(example_t **)arg; + * + * [...] + * if ([want the cleanup function to be called again]) { + * example_tsd_set(&example); + * } + * } + * + * If example_tsd_set() is called within example_tsd_cleanup(), it will be + * called again. This is similar to how pthreads TSD destruction works, except + * that pthreads only calls the cleanup function again if the value was set to + * non-NULL. + */ + +/* malloc_tsd_protos(). */ +#define malloc_tsd_protos(a_attr, a_name, a_type) \ +a_attr bool \ +a_name##_tsd_boot(void); \ +a_attr a_type * \ +a_name##_tsd_get(void); \ +a_attr void \ +a_name##_tsd_set(a_type *val); + +/* malloc_tsd_externs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##_tls; \ +extern __thread bool a_name##_initialized; \ +extern bool a_name##_booted; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_externs(a_name, a_type) \ +extern __thread a_type a_name##_tls; \ +extern pthread_key_t a_name##_tsd; \ +extern bool a_name##_booted; +#else +#define malloc_tsd_externs(a_name, a_type) \ +extern pthread_key_t a_name##_tsd; \ +extern bool a_name##_booted; +#endif + +/* malloc_tsd_data(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##_tls = a_initializer; \ +a_attr __thread bool JEMALLOC_TLS_MODEL \ + a_name##_initialized = false; \ +a_attr bool a_name##_booted = false; +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr __thread a_type JEMALLOC_TLS_MODEL \ + a_name##_tls = a_initializer; \ +a_attr pthread_key_t a_name##_tsd; \ +a_attr bool a_name##_booted = false; +#else +#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ +a_attr pthread_key_t a_name##_tsd; \ +a_attr bool a_name##_booted = false; +#endif + +/* malloc_tsd_funcs(). */ +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. 
*/ \ +a_attr bool \ +a_name##_tsd_cleanup_wrapper(void *arg) \ +{ \ + bool (*cleanup)(void *) = arg; \ + \ + if (a_name##_initialized) { \ + a_name##_initialized = false; \ + cleanup(&a_name##_tls); \ + } \ + return (a_name##_initialized); \ +} \ +a_attr bool \ +a_name##_tsd_boot(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + malloc_tsd_cleanup_register( \ + &a_name##_tsd_cleanup_wrapper, a_cleanup); \ + } \ + a_name##_booted = true; \ + return (false); \ +} \ +/* Get/set. */ \ +a_attr a_type * \ +a_name##_tsd_get(void) \ +{ \ + \ + assert(a_name##_booted); \ + return (&a_name##_tls); \ +} \ +a_attr void \ +a_name##_tsd_set(a_type *val) \ +{ \ + \ + assert(a_name##_booted); \ + a_name##_tls = (*val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + a_name##_initialized = true; \ +} +#elif (defined(JEMALLOC_TLS)) +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Initialization/cleanup. */ \ +a_attr bool \ +a_name##_tsd_boot(void) \ +{ \ + \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \ + return (true); \ + } \ + a_name##_booted = true; \ + return (false); \ +} \ +/* Get/set. */ \ +a_attr a_type * \ +a_name##_tsd_get(void) \ +{ \ + \ + assert(a_name##_booted); \ + return (&a_name##_tls); \ +} \ +a_attr void \ +a_name##_tsd_set(a_type *val) \ +{ \ + \ + assert(a_name##_booted); \ + a_name##_tls = (*val); \ + if (a_cleanup != malloc_tsd_no_cleanup) { \ + if (pthread_setspecific(a_name##_tsd, \ + (void *)(&a_name##_tls))) { \ + malloc_write(": Error" \ + " setting TSD for "#a_name"\n"); \ + if (opt_abort) \ + abort(); \ + } \ + } \ +} +#else +#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ + a_cleanup) \ +/* Data structure. */ \ +typedef struct { \ + bool isstatic; \ + bool initialized; \ + a_type val; \ +} a_name##_tsd_wrapper_t; \ +/* Initialization/cleanup. */ \ +a_attr void \ +a_name##_tsd_cleanup_wrapper(void *arg) \ +{ \ + a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\ + \ + if (a_cleanup != malloc_tsd_no_cleanup && \ + wrapper->initialized) { \ + wrapper->initialized = false; \ + a_cleanup(&wrapper->val); \ + if (wrapper->initialized) { \ + /* Trigger another cleanup round. */ \ + if (pthread_setspecific(a_name##_tsd, \ + (void *)wrapper)) { \ + malloc_write(": Error" \ + " setting TSD for "#a_name"\n"); \ + if (opt_abort) \ + abort(); \ + } \ + return; \ + } \ + } \ + if (wrapper->isstatic == false) \ + malloc_tsd_dalloc(wrapper); \ +} \ +a_attr bool \ +a_name##_tsd_boot(void) \ +{ \ + \ + if (pthread_key_create(&a_name##_tsd, \ + a_name##_tsd_cleanup_wrapper) != 0) \ + return (true); \ + a_name##_booted = true; \ + return (false); \ +} \ +/* Get/set. 
*/ \ +a_attr a_name##_tsd_wrapper_t * \ +a_name##_tsd_get_wrapper(void) \ +{ \ + a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ + pthread_getspecific(a_name##_tsd); \ + \ + if (wrapper == NULL) { \ + wrapper = (a_name##_tsd_wrapper_t *) \ + malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ + if (wrapper == NULL) { \ + static a_name##_tsd_wrapper_t \ + a_name##_tsd_static_data = \ + {true, false, a_initializer}; \ + malloc_write(": Error allocating" \ + " TSD for "#a_name"\n"); \ + if (opt_abort) \ + abort(); \ + wrapper = &a_name##_tsd_static_data; \ + } else { \ + static a_type tsd_static_data = a_initializer; \ + wrapper->isstatic = false; \ + wrapper->val = tsd_static_data; \ + } \ + if (pthread_setspecific(a_name##_tsd, \ + (void *)wrapper)) { \ + malloc_write(": Error setting" \ + " TSD for "#a_name"\n"); \ + if (opt_abort) \ + abort(); \ + } \ + } \ + return (wrapper); \ +} \ +a_attr a_type * \ +a_name##_tsd_get(void) \ +{ \ + a_name##_tsd_wrapper_t *wrapper; \ + \ + assert(a_name##_booted); \ + wrapper = a_name##_tsd_get_wrapper(); \ + return (&wrapper->val); \ +} \ +a_attr void \ +a_name##_tsd_set(a_type *val) \ +{ \ + a_name##_tsd_wrapper_t *wrapper; \ + \ + assert(a_name##_booted); \ + wrapper = a_name##_tsd_get_wrapper(); \ + wrapper->val = *(val); \ + if (a_cleanup != malloc_tsd_no_cleanup) \ + wrapper->initialized = true; \ +} +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_no_cleanup(void *); +void malloc_tsd_cleanup_register(bool (*f)(void *), void *arg); +void malloc_tsd_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/tsd.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/stats.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/stats.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/stats.h (working copy) @@ -0,0 +1,173 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct tcache_bin_stats_s tcache_bin_stats_t; +typedef struct malloc_bin_stats_s malloc_bin_stats_t; +typedef struct malloc_large_stats_s malloc_large_stats_t; +typedef struct arena_stats_s arena_stats_t; +typedef struct chunk_stats_s chunk_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct tcache_bin_stats_s { + /* + * Number of allocation requests that corresponded to the size of this + * bin. + */ + uint64_t nrequests; +}; + +struct malloc_bin_stats_s { + /* + * Current number of bytes allocated, including objects currently + * cached by tcache. 
+ */ + size_t allocated; + + /* + * Total number of allocation/deallocation requests served directly by + * the bin. Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to the size of this + * bin. This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* Number of tcache fills from this bin. */ + uint64_t nfills; + + /* Number of tcache flushes to this bin. */ + uint64_t nflushes; + + /* Total number of runs created for this bin's size class. */ + uint64_t nruns; + + /* + * Total number of runs reused by extracting them from the runs tree for + * this bin's size class. + */ + uint64_t reruns; + + /* Current number of runs in this bin. */ + size_t curruns; +}; + +struct malloc_large_stats_s { + /* + * Total number of allocation/deallocation requests served directly by + * the arena. Note that tcache may allocate an object, then recycle it + * many times, resulting many increments to nrequests, but only one + * each to nmalloc and ndalloc. + */ + uint64_t nmalloc; + uint64_t ndalloc; + + /* + * Number of allocation requests that correspond to this size class. + * This includes requests served by tcache, though tcache only + * periodically merges into this counter. + */ + uint64_t nrequests; + + /* Current number of runs of this size class. */ + size_t curruns; +}; + +struct arena_stats_s { + /* Number of bytes currently mapped. */ + size_t mapped; + + /* + * Total number of purge sweeps, total number of madvise calls made, + * and total pages purged in order to keep dirty unused memory under + * control. + */ + uint64_t npurge; + uint64_t nmadvise; + uint64_t purged; + + /* Per-size-category statistics. */ + size_t allocated_large; + uint64_t nmalloc_large; + uint64_t ndalloc_large; + uint64_t nrequests_large; + + /* + * One element for each possible size class, including sizes that + * overlap with bin size classes. This is necessary because ipalloc() + * sometimes has to use such large objects in order to assure proper + * alignment. + */ + malloc_large_stats_t *lstats; +}; + +struct chunk_stats_s { + /* Number of chunks that were allocated. */ + uint64_t nchunks; + + /* High-water mark for number of chunks allocated. */ + size_t highchunks; + + /* + * Current number of chunks allocated. This value isn't maintained for + * any other purpose, so keep track of it in order to be able to set + * highchunks. 
+ */ + size_t curchunks; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_stats_print; + +extern size_t stats_cactive; + +void stats_print(void (*write)(void *, const char *), void *cbopaque, + const char *opts); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +size_t stats_cactive_get(void); +void stats_cactive_add(size_t size); +void stats_cactive_sub(size_t size); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) +JEMALLOC_INLINE size_t +stats_cactive_get(void) +{ + + return (atomic_read_z(&stats_cactive)); +} + +JEMALLOC_INLINE void +stats_cactive_add(size_t size) +{ + + atomic_add_z(&stats_cactive, size); +} + +JEMALLOC_INLINE void +stats_cactive_sub(size_t size) +{ + + atomic_sub_z(&stats_cactive, size); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/stats.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/mb.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/mb.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/mb.h (working copy) @@ -0,0 +1,115 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void mb_write(void); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) +#ifdef __i386__ +/* + * According to the Intel Architecture Software Developer's Manual, current + * processors execute instructions in order from the perspective of other + * processors in a multiprocessor system, but 1) Intel reserves the right to + * change that, and 2) the compiler's optimizer could re-order instructions if + * there weren't some form of barrier. Therefore, even if running on an + * architecture that does not need memory barriers (everything through at least + * i686), an "optimizer barrier" is necessary. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + +# if 0 + /* This is a true memory barrier. */ + asm volatile ("pusha;" + "xor %%eax,%%eax;" + "cpuid;" + "popa;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +#else + /* + * This is hopefully enough to keep the compiler from reordering + * instructions around this one. + */ + asm volatile ("nop;" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +#endif +} +#elif (defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("sfence" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. 
*/ + ); +} +#elif defined(__powerpc__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("eieio" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__sparc64__) +JEMALLOC_INLINE void +mb_write(void) +{ + + asm volatile ("membar #StoreStore" + : /* Outputs. */ + : /* Inputs. */ + : "memory" /* Clobbers. */ + ); +} +#elif defined(__tile__) +JEMALLOC_INLINE void +mb_write(void) +{ + + __sync_synchronize(); +} +#else +/* + * This is much slower than a simple memory barrier, but the semantics of mutex + * unlock make this work. + */ +JEMALLOC_INLINE void +mb_write(void) +{ + malloc_mutex_t mtx; + + malloc_mutex_init(&mtx); + malloc_mutex_lock(&mtx); + malloc_mutex_unlock(&mtx); +} +#endif +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/mb.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/mutex.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/mutex.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/mutex.h (working copy) @@ -0,0 +1,88 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct malloc_mutex_s malloc_mutex_t; + +#ifdef JEMALLOC_OSSPIN +#define MALLOC_MUTEX_INITIALIZER {0} +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) +#define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} +#else +# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ + defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP +# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} +# else +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} +# endif +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct malloc_mutex_s { +#ifdef JEMALLOC_OSSPIN + OSSpinLock lock; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; +#else + pthread_mutex_t lock; +#endif +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#ifdef JEMALLOC_LAZY_LOCK +extern bool isthreaded; +#endif + +bool malloc_mutex_init(malloc_mutex_t *mutex); +void malloc_mutex_prefork(malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(malloc_mutex_t *mutex); +bool mutex_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +void malloc_mutex_lock(malloc_mutex_t *mutex); +void malloc_mutex_unlock(malloc_mutex_t *mutex); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) +JEMALLOC_INLINE void +malloc_mutex_lock(malloc_mutex_t *mutex) +{ + + if (isthreaded) { +#ifdef JEMALLOC_OSSPIN + OSSpinLockLock(&mutex->lock); +#else + pthread_mutex_lock(&mutex->lock); +#endif + } +} + +JEMALLOC_INLINE void +malloc_mutex_unlock(malloc_mutex_t *mutex) +{ + + 
if (isthreaded) { +#ifdef JEMALLOC_OSSPIN + OSSpinLockUnlock(&mutex->lock); +#else + pthread_mutex_unlock(&mutex->lock); +#endif + } +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/mutex.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/prng.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/prng.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/prng.h (working copy) @@ -0,0 +1,60 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Simple linear congruential pseudo-random number generator: + * + * prng(y) = (a*x + c) % m + * + * where the following constants ensure maximal period: + * + * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. + * c == Odd number (relatively prime to 2^n). + * m == 2^32 + * + * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. + * + * This choice of m has the disadvantage that the quality of the bits is + * proportional to bit position. For example. the lowest bit has a cycle of 2, + * the next has a cycle of 4, etc. For this reason, we prefer to use the upper + * bits. + * + * Macro parameters: + * uint32_t r : Result. + * unsigned lg_range : (0..32], number of least significant bits to return. + * uint32_t state : Seed value. + * const uint32_t a, c : See above discussion. + */ +#define prng32(r, lg_range, state, a, c) do { \ + assert(lg_range > 0); \ + assert(lg_range <= 32); \ + \ + r = (state * (a)) + (c); \ + state = r; \ + r >>= (32 - lg_range); \ +} while (false) + +/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. 
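A usage sketch for the prng32() macro above (not part of the patch): the multiplier/increment pair is the textbook rand() pair, chosen here only because it satisfies the constraints listed above, and the function name is invented. Taking the upper four bits yields a bucket index in [0, 16).

#include <assert.h>
#include <stdint.h>

static uint32_t prng_state = 42;	/* Arbitrary seed. */

static unsigned
random_bucket(void)
{
	uint32_t r;

	prng32(r, 4, prng_state, 1103515245, 12345);
	return ((unsigned)r);		/* Upper 4 bits of the state: 0..15. */
}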
*/ +#define prng64(r, lg_range, state, a, c) do { \ + assert(lg_range > 0); \ + assert(lg_range <= 64); \ + \ + r = (state * (a)) + (c); \ + state = r; \ + r >>= (64 - lg_range); \ +} while (false) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/prng.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/extent.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/extent.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/extent.h (working copy) @@ -0,0 +1,43 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct extent_node_s extent_node_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +/* Tree of extents. */ +struct extent_node_s { + /* Linkage for the size/address-ordered tree. */ + rb_node(extent_node_t) link_szad; + + /* Linkage for the address-ordered tree. */ + rb_node(extent_node_t) link_ad; + + /* Profile counters, used for huge objects. */ + prof_ctx_t *prof_ctx; + + /* Pointer to the extent that this tree node is responsible for. */ + void *addr; + + /* Total region size. 
*/ + size_t size; +}; +typedef rb_tree(extent_node_t) extent_tree_t; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) + +rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + Property changes on: contrib/jemalloc/include/jemalloc/internal/extent.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/base.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/base.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/base.h (working copy) @@ -0,0 +1,26 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *base_alloc(size_t size); +void *base_calloc(size_t number, size_t size); +extent_node_t *base_node_alloc(void); +void base_node_dealloc(extent_node_t *node); +bool base_boot(void); +void base_prefork(void); +void base_postfork_parent(void); +void base_postfork_child(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/base.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/ql.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/ql.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/ql.h (working copy) @@ -0,0 +1,83 @@ +/* + * List definitions. + */ +#define ql_head(a_type) \ +struct { \ + a_type *qlh_first; \ +} + +#define ql_head_initializer(a_head) {NULL} + +#define ql_elm(a_type) qr(a_type) + +/* List functions. */ +#define ql_new(a_head) do { \ + (a_head)->qlh_first = NULL; \ +} while (0) + +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) + +#define ql_first(a_head) ((a_head)->qlh_first) + +#define ql_last(a_head, a_field) \ + ((ql_first(a_head) != NULL) \ + ? qr_prev(ql_first(a_head), a_field) : NULL) + +#define ql_next(a_head, a_elm, a_field) \ + ((ql_last(a_head, a_field) != (a_elm)) \ + ? qr_next((a_elm), a_field) : NULL) + +#define ql_prev(a_head, a_elm, a_field) \ + ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ + : NULL) + +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ + qr_before_insert((a_qlelm), (a_elm), a_field); \ + if (ql_first(a_head) == (a_qlelm)) { \ + ql_first(a_head) = (a_elm); \ + } \ +} while (0) + +#define ql_after_insert(a_qlelm, a_elm, a_field) \ + qr_after_insert((a_qlelm), (a_elm), a_field) + +#define ql_head_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = (a_elm); \ +} while (0) + +#define ql_tail_insert(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) != NULL) { \ + qr_before_insert(ql_first(a_head), (a_elm), a_field); \ + } \ + ql_first(a_head) = qr_next((a_elm), a_field); \ +} while (0) + +#define ql_remove(a_head, a_elm, a_field) do { \ + if (ql_first(a_head) == (a_elm)) { \ + ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ + } \ + if (ql_first(a_head) != (a_elm)) { \ + qr_remove((a_elm), a_field); \ + } else { \ + ql_first(a_head) = NULL; \ + } \ +} while (0) + +#define ql_head_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_first(a_head); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_tail_remove(a_head, a_type, a_field) do { \ + a_type *t = ql_last(a_head, a_field); \ + ql_remove((a_head), t, a_field); \ +} while (0) + +#define ql_foreach(a_var, a_head, a_field) \ + qr_foreach((a_var), ql_first(a_head), a_field) + +#define ql_reverse_foreach(a_var, a_head, a_field) \ + qr_reverse_foreach((a_var), ql_first(a_head), a_field) Property changes on: contrib/jemalloc/include/jemalloc/internal/ql.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/util.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/util.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/util.h (working copy) @@ -0,0 +1,146 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Size of stack-allocated buffer passed to buferror(). */ +#define BUFERROR_BUF 64 + +/* + * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be + * large enough for all possible uses within jemalloc. + */ +#define MALLOC_PRINTF_BUFSIZE 4096 + +/* + * Wrap a cpp argument that contains commas such that it isn't broken up into + * multiple arguments. + */ +#define JEMALLOC_CONCAT(...) __VA_ARGS__ + +/* + * Silence compiler warnings due to uninitialized values. This is used + * wherever the compiler fails to recognize that the variable is never used + * uninitialized. + */ +#ifdef JEMALLOC_CC_SILENCE +# define JEMALLOC_CC_SILENCE_INIT(v) = v +#else +# define JEMALLOC_CC_SILENCE_INIT(v) +#endif + +/* + * Define a custom assert() in order to reduce the chances of deadlock during + * assertion failure. + */ +#ifndef assert +#define assert(e) do { \ + if (config_debug && !(e)) { \ + malloc_printf( \ + ": %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) +#endif + +/* Use to assert a particular configuration, e.g., cassert(config_debug). 
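Earlier in this group of headers, ql.h layers list operations over the qr_* ring macros. The sketch below (type and function names invented, and assuming the ql.h/qr.h definitions are in scope) shows the intended pattern: embed a link field in the element type, then insert and traverse through it.

typedef struct widget_s widget_t;
struct widget_s {
	int		id;
	ql_elm(widget_t) link;		/* Intrusive list linkage. */
};

static int
widget_list_demo(void)
{
	ql_head(widget_t) widgets;
	widget_t a, b, *w;
	int sum = 0;

	ql_new(&widgets);
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	a.id = 1;
	b.id = 2;
	ql_tail_insert(&widgets, &a, link);
	ql_tail_insert(&widgets, &b, link);
	ql_foreach(w, &widgets, link)
		sum += w->id;		/* Visits a, then b. */
	return (sum);			/* 3 */
}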
*/ +#define cassert(c) do { \ + if ((c) == false) \ + assert(false); \ +} while (0) + +#ifndef not_reached +#define not_reached() do { \ + if (config_debug) { \ + malloc_printf( \ + ": %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef not_implemented +#define not_implemented() do { \ + if (config_debug) { \ + malloc_printf(": %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) +#endif + +#define assert_not_implemented(e) do { \ + if (config_debug && !(e)) \ + not_implemented(); \ +} while (0) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern void (*je_malloc_message)(void *wcbopaque, const char *s); + +int buferror(int errnum, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base); + +/* + * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating + * point math. + */ +int malloc_vsnprintf(char *str, size_t size, const char *format, + va_list ap); +int malloc_snprintf(char *str, size_t size, const char *format, ...) + JEMALLOC_ATTR(format(printf, 3, 4)); +void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap); +void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, + const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); +void malloc_printf(const char *format, ...) + JEMALLOC_ATTR(format(printf, 1, 2)); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +size_t pow2_ceil(size_t x); +void malloc_write(const char *s); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) +/* Compute the smallest power of 2 that is >= x. */ +JEMALLOC_INLINE size_t +pow2_ceil(size_t x) +{ + + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; +#if (LG_SIZEOF_PTR == 3) + x |= x >> 32; +#endif + x++; + return (x); +} + +/* + * Wrapper around malloc_message() that avoids the need for + * je_malloc_message(...) throughout the code. + */ +JEMALLOC_INLINE void +malloc_write(const char *s) +{ + + je_malloc_message(NULL, s); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/util.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/rtree.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/rtree.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/rtree.h (working copy) @@ -0,0 +1,161 @@ +/* + * This radix tree implementation is tailored to the singular purpose of + * tracking which chunks are currently owned by jemalloc. This functionality + * is mandatory for OS X, where jemalloc must be able to respond to object + * ownership queries. 
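The pow2_ceil() inline above rounds a size up to the next power of two by smearing the leading one bit into every lower position. A standalone restatement of the trick, with a worked value and assuming a 64-bit size_t, is:

#include <assert.h>
#include <stddef.h>

static size_t
pow2_ceil_demo(size_t x)
{
	x--;			/* 0x1400 (5120) becomes 0x13ff.          */
	x |= x >> 1;		/* Each OR copies the high bit downward,  */
	x |= x >> 2;		/* so after the last step every bit below */
	x |= x >> 4;		/* the leading one is set: 0x1fff.        */
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;		/* Only meaningful for a 64-bit size_t. */
	return (x + 1);		/* 0x2000 (8192). */
}

static void
pow2_ceil_check(void)
{
	assert(pow2_ceil_demo(5120) == 8192);
	assert(pow2_ceil_demo(4096) == 4096);	/* Powers of two map to themselves. */
}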
+ * + ******************************************************************************* + */ +#ifdef JEMALLOC_H_TYPES + +typedef struct rtree_s rtree_t; + +/* + * Size of each radix tree node (must be a power of 2). This impacts tree + * depth. + */ +#if (LG_SIZEOF_PTR == 2) +# define RTREE_NODESIZE (1U << 14) +#else +# define RTREE_NODESIZE CACHELINE +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct rtree_s { + malloc_mutex_t mutex; + void **root; + unsigned height; + unsigned level2bits[1]; /* Dynamically sized. */ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +rtree_t *rtree_new(unsigned bits); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +#ifndef JEMALLOC_DEBUG +void *rtree_get_locked(rtree_t *rtree, uintptr_t key); +#endif +void *rtree_get(rtree_t *rtree, uintptr_t key); +bool rtree_set(rtree_t *rtree, uintptr_t key, void *val); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) +#define RTREE_GET_GENERATE(f) \ +/* The least significant bits of the key are ignored. */ \ +JEMALLOC_INLINE void * \ +f(rtree_t *rtree, uintptr_t key) \ +{ \ + void *ret; \ + uintptr_t subkey; \ + unsigned i, lshift, height, bits; \ + void **node, **child; \ + \ + RTREE_LOCK(&rtree->mutex); \ + for (i = lshift = 0, height = rtree->height, node = rtree->root;\ + i < height - 1; \ + i++, lshift += bits, node = child) { \ + bits = rtree->level2bits[i]; \ + subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ + 3)) - bits); \ + child = (void**)node[subkey]; \ + if (child == NULL) { \ + RTREE_UNLOCK(&rtree->mutex); \ + return (NULL); \ + } \ + } \ + \ + /* \ + * node is a leaf, so it contains values rather than node \ + * pointers. \ + */ \ + bits = rtree->level2bits[i]; \ + subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ + bits); \ + ret = node[subkey]; \ + RTREE_UNLOCK(&rtree->mutex); \ + \ + RTREE_GET_VALIDATE \ + return (ret); \ +} + +#ifdef JEMALLOC_DEBUG +# define RTREE_LOCK(l) malloc_mutex_lock(l) +# define RTREE_UNLOCK(l) malloc_mutex_unlock(l) +# define RTREE_GET_VALIDATE +RTREE_GET_GENERATE(rtree_get_locked) +# undef RTREE_LOCK +# undef RTREE_UNLOCK +# undef RTREE_GET_VALIDATE +#endif + +#define RTREE_LOCK(l) +#define RTREE_UNLOCK(l) +#ifdef JEMALLOC_DEBUG + /* + * Suppose that it were possible for a jemalloc-allocated chunk to be + * munmap()ped, followed by a different allocator in another thread re-using + * overlapping virtual memory, all without invalidating the cached rtree + * value. The result would be a false positive (the rtree would claim that + * jemalloc owns memory that it had actually discarded). This scenario + * seems impossible, but the following assertion is a prudent sanity check. 
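The lookup loops generated by RTREE_GET_GENERATE (and mirrored in rtree_set()) peel off the next level2bits[i] most-significant bits of the key at each level via (key << lshift) >> (ptr_bits - bits). A standalone illustration follows; the two-level 20+20 split of a 64-bit key is hypothetical, not a configuration taken from rtree_new().

#include <assert.h>
#include <stdint.h>

static unsigned
rtree_subkey_demo(uint64_t key, unsigned lshift, unsigned bits)
{
	/* Extract the next 'bits' most-significant bits after 'lshift'. */
	return ((unsigned)((key << lshift) >> (64 - bits)));
}

static void
rtree_subkey_check(void)
{
	uint64_t key = UINT64_C(0xdeadbeef00000000);

	/* Level 0 consumes the top 20 bits of the key... */
	assert(rtree_subkey_demo(key, 0, 20) == (unsigned)(key >> 44));
	/* ...and level 1 the next 20. */
	assert(rtree_subkey_demo(key, 20, 20) ==
	    (unsigned)((key >> 24) & 0xfffff));
}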
+ */ +# define RTREE_GET_VALIDATE \ + assert(rtree_get_locked(rtree, key) == ret); +#else +# define RTREE_GET_VALIDATE +#endif +RTREE_GET_GENERATE(rtree_get) +#undef RTREE_LOCK +#undef RTREE_UNLOCK +#undef RTREE_GET_VALIDATE + +JEMALLOC_INLINE bool +rtree_set(rtree_t *rtree, uintptr_t key, void *val) +{ + uintptr_t subkey; + unsigned i, lshift, height, bits; + void **node, **child; + + malloc_mutex_lock(&rtree->mutex); + for (i = lshift = 0, height = rtree->height, node = rtree->root; + i < height - 1; + i++, lshift += bits, node = child) { + bits = rtree->level2bits[i]; + subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - + bits); + child = (void**)node[subkey]; + if (child == NULL) { + child = (void**)base_alloc(sizeof(void *) << + rtree->level2bits[i+1]); + if (child == NULL) { + malloc_mutex_unlock(&rtree->mutex); + return (true); + } + memset(child, 0, sizeof(void *) << + rtree->level2bits[i+1]); + node[subkey] = child; + } + } + + /* node is a leaf, so it contains values rather than node pointers. */ + bits = rtree->level2bits[i]; + subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); + node[subkey] = val; + malloc_mutex_unlock(&rtree->mutex); + + return (false); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/rtree.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/chunk_dss.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/chunk_dss.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/chunk_dss.h (working copy) @@ -0,0 +1,24 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero); +bool chunk_in_dss(void *chunk); +bool chunk_dss_boot(void); +void chunk_dss_prefork(void); +void chunk_dss_postfork_parent(void); +void chunk_dss_postfork_child(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/chunk_dss.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/qr.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/qr.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/qr.h (working copy) @@ -0,0 +1,67 @@ +/* Ring definitions. */ +#define qr(a_type) \ +struct { \ + a_type *qre_next; \ + a_type *qre_prev; \ +} + +/* Ring functions. 
*/ +#define qr_new(a_qr, a_field) do { \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) + +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) + +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qrelm); \ + (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ + (a_qrelm)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_after_insert(a_qrelm, a_qr, a_field) \ + do \ + { \ + (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ + (a_qr)->a_field.qre_prev = (a_qrelm); \ + (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ + (a_qrelm)->a_field.qre_next = (a_qr); \ + } while (0) + +#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ + void *t; \ + (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ + (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ + t = (a_qr_a)->a_field.qre_prev; \ + (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ + (a_qr_b)->a_field.qre_prev = t; \ +} while (0) + +/* qr_meld() and qr_split() are functionally equivalent, so there's no need to + * have two copies of the code. */ +#define qr_split(a_qr_a, a_qr_b, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_field) + +#define qr_remove(a_qr, a_field) do { \ + (a_qr)->a_field.qre_prev->a_field.qre_next \ + = (a_qr)->a_field.qre_next; \ + (a_qr)->a_field.qre_next->a_field.qre_prev \ + = (a_qr)->a_field.qre_prev; \ + (a_qr)->a_field.qre_next = (a_qr); \ + (a_qr)->a_field.qre_prev = (a_qr); \ +} while (0) + +#define qr_foreach(var, a_qr, a_field) \ + for ((var) = (a_qr); \ + (var) != NULL; \ + (var) = (((var)->a_field.qre_next != (a_qr)) \ + ? (var)->a_field.qre_next : NULL)) + +#define qr_reverse_foreach(var, a_qr, a_field) \ + for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ + (var) != NULL; \ + (var) = (((var) != (a_qr)) \ + ? 
(var)->a_field.qre_prev : NULL)) Property changes on: contrib/jemalloc/include/jemalloc/internal/qr.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h (working copy) @@ -0,0 +1,22 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void *chunk_alloc_mmap(size_t size, size_t alignment); +bool chunk_dealloc_mmap(void *chunk, size_t size); + +bool chunk_mmap_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/ctl.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/ctl.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/ctl.h (working copy) @@ -0,0 +1,109 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ctl_node_s ctl_node_t; +typedef struct ctl_arena_stats_s ctl_arena_stats_t; +typedef struct ctl_stats_s ctl_stats_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct ctl_node_s { + bool named; + union { + struct { + const char *name; + /* If (nchildren == 0), this is a terminal node. */ + unsigned nchildren; + const ctl_node_t *children; + } named; + struct { + const ctl_node_t *(*index)(const size_t *, size_t, + size_t); + } indexed; + } u; + int (*ctl)(const size_t *, size_t, void *, size_t *, void *, + size_t); +}; + +struct ctl_arena_stats_s { + bool initialized; + unsigned nthreads; + size_t pactive; + size_t pdirty; + arena_stats_t astats; + + /* Aggregate stats for small size classes, based on bin stats. */ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + + malloc_bin_stats_t bstats[NBINS]; + malloc_large_stats_t *lstats; /* nlclasses elements. */ +}; + +struct ctl_stats_s { + size_t allocated; + size_t active; + size_t mapped; + struct { + size_t current; /* stats_chunks.curchunks */ + uint64_t total; /* stats_chunks.nchunks */ + size_t high; /* stats_chunks.highchunks */ + } chunks; + struct { + size_t allocated; /* huge_allocated */ + uint64_t nmalloc; /* huge_nmalloc */ + uint64_t ndalloc; /* huge_ndalloc */ + } huge; + ctl_arena_stats_t *arenas; /* (narenas + 1) elements. 
*/ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen); +int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); + +int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen); +bool ctl_boot(void); + +#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ + != 0) { \ + malloc_printf( \ + ": Failure in xmallctl(\"%s\", ...)\n", \ + name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlnametomib(name, mibp, miblenp) do { \ + if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ + malloc_printf(": Failure in " \ + "xmallctlnametomib(\"%s\", ...)\n", name); \ + abort(); \ + } \ +} while (0) + +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ + if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ + newlen) != 0) { \ + malloc_write( \ + ": Failure in xmallctlbymib()\n"); \ + abort(); \ + } \ +} while (0) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + Property changes on: contrib/jemalloc/include/jemalloc/internal/ctl.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/hash.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/hash.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/hash.h (working copy) @@ -0,0 +1,70 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +uint64_t hash(const void *key, size_t len, uint64_t seed); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) +/* + * The following hash function is based on MurmurHash64A(), placed into the + * public domain by Austin Appleby. See http://murmurhash.googlepages.com/ for + * details. 
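The xmallctl*() wrappers above are abort-on-failure conveniences over the mallctl-by-name interface. As a hedged sketch of what a caller of the underlying interface looks like (assuming the public mallctl() entry point with its sysctl(3)-style signature is declared in malloc_np.h):

#include <stdio.h>
#include <stddef.h>
#include <malloc_np.h>		/* mallctl(); assumed public entry point. */

static void
print_allocated(void)
{
	size_t allocated, sz = sizeof(allocated);

	/* Read a size_t statistic by dotted name, sysctl(3)-style. */
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu\n", allocated);
}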
+ */ +JEMALLOC_INLINE uint64_t +hash(const void *key, size_t len, uint64_t seed) +{ + const uint64_t m = UINT64_C(0xc6a4a7935bd1e995); + const int r = 47; + uint64_t h = seed ^ (len * m); + const uint64_t *data = (const uint64_t *)key; + const uint64_t *end = data + (len/8); + const unsigned char *data2; + + assert(((uintptr_t)key & 0x7) == 0); + + while(data != end) { + uint64_t k = *data++; + + k *= m; + k ^= k >> r; + k *= m; + + h ^= k; + h *= m; + } + + data2 = (const unsigned char *)data; + switch(len & 7) { + case 7: h ^= ((uint64_t)(data2[6])) << 48; + case 6: h ^= ((uint64_t)(data2[5])) << 40; + case 5: h ^= ((uint64_t)(data2[4])) << 32; + case 4: h ^= ((uint64_t)(data2[3])) << 24; + case 3: h ^= ((uint64_t)(data2[2])) << 16; + case 2: h ^= ((uint64_t)(data2[1])) << 8; + case 1: h ^= ((uint64_t)(data2[0])); + h *= m; + } + + h ^= h >> r; + h *= m; + h ^= h >> r; + + return (h); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/hash.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/arena.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/arena.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/arena.h (working copy) @@ -0,0 +1,685 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized + * as small as possible such that this setting is still honored, without + * violating other constraints. The goal is to make runs as small as possible + * without exceeding a per run external fragmentation threshold. + * + * We use binary fixed point math for overhead computations, where the binary + * point is implicitly RUN_BFP bits to the left. + * + * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be + * honored for some/all object sizes, since when heap profiling is enabled + * there is one pointer of header overhead per object (plus a constant). This + * constraint is relaxed (ignored) for runs that are so small that the + * per-region overhead is greater than: + * + * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) + */ +#define RUN_BFP 12 +/* \/ Implicit binary fixed point. */ +#define RUN_MAX_OVRHD 0x0000003dU +#define RUN_MAX_OVRHD_RELAX 0x00001800U + +/* Maximum number of regions in one run. */ +#define LG_RUN_MAXREGS 11 +#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) + +/* + * Minimum redzone size. Redzones may be larger than this if necessary to + * preserve region alignment. + */ +#define REDZONE_MINSIZE 16 + +/* + * The minimum ratio of active:dirty pages per arena is computed as: + * + * (nactive >> opt_lg_dirty_mult) >= ndirty + * + * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32 + * times as many active pages as dirty pages. 
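The active:dirty invariant described above, (nactive >> opt_lg_dirty_mult) >= ndirty, means that with the default multiplier of 5, 1024 active pages tolerate at most 32 dirty ones before purging kicks in. A standalone restatement of that arithmetic (the helper name is illustrative; the treatment of negative values as "purging disabled" matches the documented option semantics):

#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>

static bool
needs_purge(size_t nactive, size_t ndirty, ssize_t lg_dirty_mult)
{
	if (lg_dirty_mult < 0)		/* Negative value disables purging. */
		return (false);
	/* e.g. nactive = 1024, lg_dirty_mult = 5: purge once ndirty > 32. */
	return (ndirty > (nactive >> lg_dirty_mult));
}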
+ */ +#define LG_DIRTY_MULT_DEFAULT 5 + +typedef struct arena_chunk_map_s arena_chunk_map_t; +typedef struct arena_chunk_s arena_chunk_t; +typedef struct arena_run_s arena_run_t; +typedef struct arena_bin_info_s arena_bin_info_t; +typedef struct arena_bin_s arena_bin_t; +typedef struct arena_s arena_t; + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +/* Each element of the chunk map corresponds to one page within the chunk. */ +struct arena_chunk_map_s { +#ifndef JEMALLOC_PROF + /* + * Overlay prof_ctx in order to allow it to be referenced by dead code. + * Such antics aren't warranted for per arena data structures, but + * chunk map overhead accounts for a percentage of memory, rather than + * being just a fixed cost. + */ + union { +#endif + union { + /* + * Linkage for run trees. There are two disjoint uses: + * + * 1) arena_t's runs_avail_{clean,dirty} trees. + * 2) arena_run_t conceptually uses this linkage for in-use + * non-full runs, rather than directly embedding linkage. + */ + rb_node(arena_chunk_map_t) rb_link; + /* + * List of runs currently in purgatory. arena_chunk_purge() + * temporarily allocates runs that contain dirty pages while + * purging, so that other threads cannot use the runs while the + * purging thread is operating without the arena lock held. + */ + ql_elm(arena_chunk_map_t) ql_link; + } u; + + /* Profile counters, used for large object runs. */ + prof_ctx_t *prof_ctx; +#ifndef JEMALLOC_PROF + }; /* union { ... }; */ +#endif + + /* + * Run address (or size) and various flags are stored together. The bit + * layout looks like (assuming 32-bit system): + * + * ???????? ???????? ????---- ----dula + * + * ? : Unallocated: Run address for first/last pages, unset for internal + * pages. + * Small: Run page offset. + * Large: Run size for first page, unset for trailing pages. + * - : Unused. + * d : dirty? + * u : unzeroed? + * l : large? + * a : allocated? + * + * Following are example bit patterns for the three types of runs. + * + * p : run page offset + * s : run size + * c : (binind+1) for size class (used only if prof_promote is true) + * x : don't care + * - : 0 + * + : 1 + * [DULA] : bit set + * [dula] : bit unset + * + * Unallocated (clean): + * ssssssss ssssssss ssss---- ----du-a + * xxxxxxxx xxxxxxxx xxxx---- -----Uxx + * ssssssss ssssssss ssss---- ----dU-a + * + * Unallocated (dirty): + * ssssssss ssssssss ssss---- ----D--a + * xxxxxxxx xxxxxxxx xxxx---- ----xxxx + * ssssssss ssssssss ssss---- ----D--a + * + * Small: + * pppppppp pppppppp pppp---- ----d--A + * pppppppp pppppppp pppp---- -------A + * pppppppp pppppppp pppp---- ----d--A + * + * Large: + * ssssssss ssssssss ssss---- ----D-LA + * xxxxxxxx xxxxxxxx xxxx---- ----xxxx + * -------- -------- -------- ----D-LA + * + * Large (sampled, size <= PAGE): + * ssssssss ssssssss sssscccc ccccD-LA + * + * Large (not sampled, size == PAGE): + * ssssssss ssssssss ssss---- ----D-LA + */ + size_t bits; +#define CHUNK_MAP_CLASS_SHIFT 4 +#define CHUNK_MAP_CLASS_MASK ((size_t)0xff0U) +#define CHUNK_MAP_FLAGS_MASK ((size_t)0xfU) +#define CHUNK_MAP_DIRTY ((size_t)0x8U) +#define CHUNK_MAP_UNZEROED ((size_t)0x4U) +#define CHUNK_MAP_LARGE ((size_t)0x2U) +#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) +#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED +}; +typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; +typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; + +/* Arena chunk header. 
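The bit-layout comment above packs the run address/size and the dirty/unzeroed/large/allocated flags into one size_t per page. A hedged decoding sketch follows; the DEMO_* masks mirror the CHUNK_MAP_* definitions in this header, while DEMO_PAGE_MASK stands in for the usual (page_size - 1) mask defined elsewhere and assumes 4 KiB pages.

#include <stdbool.h>
#include <stddef.h>

#define DEMO_CHUNK_MAP_DIRTY		((size_t)0x8U)
#define DEMO_CHUNK_MAP_LARGE		((size_t)0x2U)
#define DEMO_CHUNK_MAP_ALLOCATED	((size_t)0x1U)
#define DEMO_PAGE_MASK			((size_t)0xfffU)	/* 4 KiB pages. */

static bool
mapbits_is_large_allocated(size_t bits, size_t *run_size)
{
	if ((bits & (DEMO_CHUNK_MAP_ALLOCATED | DEMO_CHUNK_MAP_LARGE)) !=
	    (DEMO_CHUNK_MAP_ALLOCATED | DEMO_CHUNK_MAP_LARGE))
		return (false);
	/* For the first page of a large run, the upper bits hold its size. */
	*run_size = bits & ~DEMO_PAGE_MASK;
	return (true);
}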
*/ +struct arena_chunk_s { + /* Arena that owns the chunk. */ + arena_t *arena; + + /* Linkage for the arena's chunks_dirty list. */ + ql_elm(arena_chunk_t) link_dirty; + + /* + * True if the chunk is currently in the chunks_dirty list, due to + * having at some point contained one or more dirty pages. Removal + * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible. + */ + bool dirtied; + + /* Number of dirty pages. */ + size_t ndirty; + + /* + * Map of pages within chunk that keeps track of free/large/small. The + * first map_bias entries are omitted, since the chunk header does not + * need to be tracked in the map. This omission saves a header page + * for common chunk sizes (e.g. 4 MiB). + */ + arena_chunk_map_t map[1]; /* Dynamically sized. */ +}; +typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; + +struct arena_run_s { + /* Bin this run is associated with. */ + arena_bin_t *bin; + + /* Index of next region that has never been allocated, or nregs. */ + uint32_t nextind; + + /* Number of free regions in run. */ + unsigned nfree; +}; + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each run has the following layout: + * + * /--------------------\ + * | arena_run_t header | + * | ... | + * bitmap_offset | bitmap | + * | ... | + * ctx0_offset | ctx map | + * | ... | + * |--------------------| + * | redzone | + * reg0_offset | region 0 | + * | redzone | + * |--------------------| \ + * | redzone | | + * | region 1 | > reg_interval + * | redzone | / + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | redzone | + * | region nregs-1 | + * | redzone | + * |--------------------| + * | alignment pad? | + * \--------------------/ + * + * reg_interval has at least the same minimum alignment as reg_size; this + * preserves the alignment constraint that sa2u() depends on. Alignment pad is + * either 0 or redzone_size; it is present only if needed to align reg0_offset. + */ +struct arena_bin_info_s { + /* Size of regions in a run for this bin's size class. */ + size_t reg_size; + + /* Redzone size. */ + size_t redzone_size; + + /* Interval between regions (reg_size + (redzone_size << 1)). */ + size_t reg_interval; + + /* Total size of a run for this bin's size class. */ + size_t run_size; + + /* Total number of regions in a run for this bin's size class. */ + uint32_t nregs; + + /* + * Offset of first bitmap_t element in a run header for this bin's size + * class. + */ + uint32_t bitmap_offset; + + /* + * Metadata used to manipulate bitmaps for runs associated with this + * bin. + */ + bitmap_info_t bitmap_info; + + /* + * Offset of first (prof_ctx_t *) in a run header for this bin's size + * class, or 0 if (config_prof == false || opt_prof == false). + */ + uint32_t ctx0_offset; + + /* Offset of first region in a run for this bin's size class. */ + uint32_t reg0_offset; +}; + +struct arena_bin_s { + /* + * All operations on runcur, runs, and stats require that lock be + * locked. Run allocation/deallocation are protected by the arena lock, + * which may be acquired while holding one or more bin locks, but not + * vise versa. + */ + malloc_mutex_t lock; + + /* + * Current run being used to service allocations of this bin's size + * class. + */ + arena_run_t *runcur; + + /* + * Tree of non-full runs. 
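Per the run layout diagram above, region regind of a run lives at reg0_offset + regind * reg_interval from the run header, where reg_interval = reg_size + 2 * redzone_size. A small sketch of that address computation; the struct and example field values are hypothetical, not read from arena_bin_info[].

#include <stdint.h>
#include <stddef.h>

struct demo_bin_info {
	size_t		reg_size;	/* e.g. a 48-byte size class. */
	size_t		redzone_size;	/* e.g. 16 when redzones are enabled. */
	size_t		reg_interval;	/* 48 + 2*16 = 80. */
	uint32_t	reg0_offset;
};

static void *
demo_region_addr(void *run, const struct demo_bin_info *info, unsigned regind)
{
	/* Regions are spaced reg_interval bytes apart, starting at reg0_offset. */
	return ((void *)((uintptr_t)run + info->reg0_offset +
	    (uintptr_t)regind * info->reg_interval));
}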
This tree is used when looking for an + * existing run when runcur is no longer usable. We choose the + * non-full run that is lowest in memory; this policy tends to keep + * objects packed well, and it can also help reduce the number of + * almost-empty chunks. + */ + arena_run_tree_t runs; + + /* Bin statistics. */ + malloc_bin_stats_t stats; +}; + +struct arena_s { + /* This arena's index within the arenas array. */ + unsigned ind; + + /* + * Number of threads currently assigned to this arena. This field is + * protected by arenas_lock. + */ + unsigned nthreads; + + /* + * There are three classes of arena operations from a locking + * perspective: + * 1) Thread asssignment (modifies nthreads) is protected by + * arenas_lock. + * 2) Bin-related operations are protected by bin locks. + * 3) Chunk- and run-related operations are protected by this mutex. + */ + malloc_mutex_t lock; + + arena_stats_t stats; + /* + * List of tcaches for extant threads associated with this arena. + * Stats from these are merged incrementally, and at exit. + */ + ql_head(tcache_t) tcache_ql; + + uint64_t prof_accumbytes; + + /* List of dirty-page-containing chunks this arena manages. */ + ql_head(arena_chunk_t) chunks_dirty; + + /* + * In order to avoid rapid chunk allocation/deallocation when an arena + * oscillates right on the cusp of needing a new chunk, cache the most + * recently freed chunk. The spare is left in the arena's chunk trees + * until it is deleted. + * + * There is one spare chunk per arena, rather than one spare total, in + * order to avoid interactions between multiple threads that could make + * a single spare inadequate. + */ + arena_chunk_t *spare; + + /* Number of pages in active runs. */ + size_t nactive; + + /* + * Current count of pages within unused runs that are potentially + * dirty, and for which madvise(... MADV_DONTNEED) has not been called. + * By tracking this, we can institute a limit on how much dirty unused + * memory is mapped for each arena. + */ + size_t ndirty; + + /* + * Approximate number of pages being purged. It is possible for + * multiple threads to purge dirty pages concurrently, and they use + * npurgatory to indicate the total number of pages all threads are + * attempting to purge. + */ + size_t npurgatory; + + /* + * Size/address-ordered trees of this arena's available runs. The trees + * are used for first-best-fit run allocation. The dirty tree contains + * runs with dirty pages (i.e. very likely to have been touched and + * therefore have associated physical pages), whereas the clean tree + * contains runs with pages that either have no associated physical + * pages, or have pages that the kernel may recycle at any time due to + * previous madvise(2) calls. The dirty tree is used in preference to + * the clean tree for allocations, because using dirty pages reduces + * the amount of dirty purging necessary to keep the active:dirty page + * ratio below the purge threshold. + */ + arena_avail_tree_t runs_avail_clean; + arena_avail_tree_t runs_avail_dirty; + + /* bins is used to store trees of free regions. */ + arena_bin_t bins[NBINS]; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern ssize_t opt_lg_dirty_mult; +/* + * small_size2bin is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via the SMALL_SIZE2BIN macro. 
+ */ +extern uint8_t const small_size2bin[]; +#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN]) + +extern arena_bin_info_t arena_bin_info[NBINS]; + +/* Number of large size classes. */ +#define nlclasses (chunk_npages - map_bias) + +void arena_purge_all(arena_t *arena); +void arena_prof_accum(arena_t *arena, uint64_t accumbytes); +void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, + size_t binind, uint64_t prof_accumbytes); +void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, + bool zero); +void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info); +void *arena_malloc_small(arena_t *arena, size_t size, bool zero); +void *arena_malloc_large(arena_t *arena, size_t size, bool zero); +void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero); +size_t arena_salloc(const void *ptr, bool demote); +void arena_prof_promoted(const void *ptr, size_t size); +void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, + arena_chunk_map_t *mapelm); +void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr); +void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, + arena_stats_t *astats, malloc_bin_stats_t *bstats, + malloc_large_stats_t *lstats); +void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero); +void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero, bool try_tcache); +bool arena_new(arena_t *arena, unsigned ind); +void arena_boot(void); +void arena_prefork(arena_t *arena); +void arena_postfork_parent(arena_t *arena); +void arena_postfork_child(arena_t *arena); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); +unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, + const void *ptr); +prof_ctx_t *arena_prof_ctx_get(const void *ptr); +void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); +void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); +void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, + bool try_tcache); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) +JEMALLOC_INLINE size_t +arena_bin_index(arena_t *arena, arena_bin_t *bin) +{ + size_t binind = bin - arena->bins; + assert(binind < NBINS); + return (binind); +} + +JEMALLOC_INLINE unsigned +arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) +{ + unsigned shift, diff, regind; + size_t interval; + + /* + * Freeing a pointer lower than region zero can cause assertion + * failure. + */ + assert((uintptr_t)ptr >= (uintptr_t)run + + (uintptr_t)bin_info->reg0_offset); + + /* + * Avoid doing division with a variable divisor if possible. Using + * actual division here can reduce allocator throughput by over 20%! + */ + diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - + bin_info->reg0_offset); + + /* Rescale (factor powers of 2 out of the numerator and denominator). */ + interval = bin_info->reg_interval; + shift = ffs(interval) - 1; + diff >>= shift; + interval >>= shift; + + if (interval == 1) { + /* The divisor was a power of 2. */ + regind = diff; + } else { + /* + * To divide by a number D that is not a power of two we + * multiply by (2^21 / D) and then right shift by 21 positions. 
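The SMALL_SIZE2BIN() macro above indexes the compressed table with (s - 1) >> LG_TINY_MIN, so each table entry covers one tiny-sized granule of request sizes. A hedged sketch, assuming LG_TINY_MIN == 3 (8-byte granules); the table covers only the first few classes of the LG_QUANTUM == 4 layout and is illustrative, not the generated small_size2bin[].

#include <stdint.h>
#include <stddef.h>

#define DEMO_LG_TINY_MIN 3

static const uint8_t demo_size2bin[] = {
	0,		/* (0,   8] -> bin 0 (8 bytes)  */
	1,		/* (8,  16] -> bin 1 (16 bytes) */
	2, 2,		/* (16, 32] -> bin 2 (32 bytes) */
	3, 3,		/* (32, 48] -> bin 3 (48 bytes) */
};

static unsigned
demo_small_size2bin(size_t s)
{
	return (demo_size2bin[(s - 1) >> DEMO_LG_TINY_MIN]);
}
/* demo_small_size2bin(24) == 2; demo_small_size2bin(33) == 3. */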
+ * + * X / D + * + * becomes + * + * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT + * + * We can omit the first three elements, because we never + * divide by 0, and 1 and 2 are both powers of two, which are + * handled above. + */ +#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) +#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) + static const unsigned interval_invs[] = { + SIZE_INV(3), + SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), + SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), + SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), + SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), + SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), + SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), + SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) + }; + + if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + + 2)) { + regind = (diff * interval_invs[interval - 3]) >> + SIZE_INV_SHIFT; + } else + regind = diff / interval; +#undef SIZE_INV +#undef SIZE_INV_SHIFT + } + assert(diff == regind * interval); + assert(regind < bin_info->nregs); + + return (regind); +} + +JEMALLOC_INLINE prof_ctx_t * +arena_prof_ctx_get(const void *ptr) +{ + prof_ctx_t *ret; + arena_chunk_t *chunk; + size_t pageind, mapbits; + + cassert(config_prof); + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapbits = chunk->map[pageind-map_bias].bits; + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if ((mapbits & CHUNK_MAP_LARGE) == 0) { + if (prof_promote) + ret = (prof_ctx_t *)(uintptr_t)1U; + else { + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << + LG_PAGE)); + size_t binind = arena_bin_index(chunk->arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + unsigned regind; + + regind = arena_run_regind(run, bin_info, ptr); + ret = *(prof_ctx_t **)((uintptr_t)run + + bin_info->ctx0_offset + (regind * + sizeof(prof_ctx_t *))); + } + } else + ret = chunk->map[pageind-map_bias].prof_ctx; + + return (ret); +} + +JEMALLOC_INLINE void +arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) +{ + arena_chunk_t *chunk; + size_t pageind, mapbits; + + cassert(config_prof); + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapbits = chunk->map[pageind-map_bias].bits; + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if ((mapbits & CHUNK_MAP_LARGE) == 0) { + if (prof_promote == false) { + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << + LG_PAGE)); + arena_bin_t *bin = run->bin; + size_t binind; + arena_bin_info_t *bin_info; + unsigned regind; + + binind = arena_bin_index(chunk->arena, bin); + bin_info = &arena_bin_info[binind]; + regind = arena_run_regind(run, bin_info, ptr); + + *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset + + (regind * sizeof(prof_ctx_t *)))) = ctx; + } else + assert((uintptr_t)ctx == (uintptr_t)1U); + } else + chunk->map[pageind-map_bias].prof_ctx = ctx; +} + +JEMALLOC_INLINE void * +arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) +{ + tcache_t *tcache; + + assert(size != 0); + assert(size <= arena_maxclass); + + if (size <= SMALL_MAXCLASS) { + if (try_tcache && (tcache = tcache_get(true)) != NULL) + return (tcache_alloc_small(tcache, size, 
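The reciprocal-multiplication trick used by arena_run_regind() above replaces a variable division with a multiply and shift: with LG_RUN_MAXREGS == 11 and a 32-bit unsigned, SIZE_INV_SHIFT is 21 and dividing by a small constant D becomes a multiply by ((1 << 21) / D) + 1 followed by a right shift. A standalone check of that identity for D == 3, over the region indices that can occur within a run:

#include <assert.h>

#define DEMO_SIZE_INV_SHIFT	21
#define DEMO_SIZE_INV(s)	(((1U << DEMO_SIZE_INV_SHIFT) / (s)) + 1)

static void
regind_div_demo(void)
{
	unsigned diff, interval = 3;

	/* Exact for every multiple of 3 with quotient below RUN_MAXREGS. */
	for (diff = 0; diff < 3 * 2048; diff += interval)
		assert(((diff * DEMO_SIZE_INV(3)) >> DEMO_SIZE_INV_SHIFT) ==
		    diff / interval);
}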
zero)); + else { + return (arena_malloc_small(choose_arena(arena), size, + zero)); + } + } else { + /* + * Initialize tcache after checking size in order to avoid + * infinite recursion during tcache initialization. + */ + if (try_tcache && size <= tcache_maxclass && (tcache = + tcache_get(true)) != NULL) + return (tcache_alloc_large(tcache, size, zero)); + else { + return (arena_malloc_large(choose_arena(arena), size, + zero)); + } + } +} + +JEMALLOC_INLINE void +arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache) +{ + size_t pageind; + arena_chunk_map_t *mapelm; + tcache_t *tcache; + + assert(arena != NULL); + assert(chunk->arena == arena); + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapelm = &chunk->map[pageind-map_bias]; + assert((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0); + if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) { + /* Small allocation. */ + if (try_tcache && (tcache = tcache_get(false)) != NULL) + tcache_dalloc_small(tcache, ptr); + else { + arena_run_t *run; + arena_bin_t *bin; + + run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) << + LG_PAGE)); + bin = run->bin; + if (config_debug) { + size_t binind = arena_bin_index(arena, bin); + UNUSED arena_bin_info_t *bin_info = + &arena_bin_info[binind]; + assert(((uintptr_t)ptr - ((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset)) % + bin_info->reg_interval == 0); + } + malloc_mutex_lock(&bin->lock); + arena_dalloc_bin(arena, chunk, ptr, mapelm); + malloc_mutex_unlock(&bin->lock); + } + } else { + size_t size = mapelm->bits & ~PAGE_MASK; + + assert(((uintptr_t)ptr & PAGE_MASK) == 0); + + if (try_tcache && size <= tcache_maxclass && (tcache = + tcache_get(false)) != NULL) { + tcache_dalloc_large(tcache, ptr, size); + } else { + malloc_mutex_lock(&arena->lock); + arena_dalloc_large(arena, chunk, ptr); + malloc_mutex_unlock(&arena->lock); + } + } +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/arena.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/private_namespace.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/private_namespace.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/private_namespace.h (working copy) @@ -0,0 +1,235 @@ +#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) +#define arena_bin_index JEMALLOC_N(arena_bin_index) +#define arena_boot JEMALLOC_N(arena_boot) +#define arena_dalloc JEMALLOC_N(arena_dalloc) +#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) +#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) +#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) +#define arena_malloc JEMALLOC_N(arena_malloc) +#define arena_malloc_large JEMALLOC_N(arena_malloc_large) +#define arena_malloc_small JEMALLOC_N(arena_malloc_small) +#define arena_new JEMALLOC_N(arena_new) +#define arena_palloc JEMALLOC_N(arena_palloc) +#define arena_postfork_child JEMALLOC_N(arena_postfork_child) +#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) +#define arena_prefork JEMALLOC_N(arena_prefork) +#define arena_prof_accum 
JEMALLOC_N(arena_prof_accum) +#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) +#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) +#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) +#define arena_purge_all JEMALLOC_N(arena_purge_all) +#define arena_ralloc JEMALLOC_N(arena_ralloc) +#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) +#define arena_run_regind JEMALLOC_N(arena_run_regind) +#define arena_salloc JEMALLOC_N(arena_salloc) +#define arena_stats_merge JEMALLOC_N(arena_stats_merge) +#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) +#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index) +#define arenas_extend JEMALLOC_N(arenas_extend) +#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index) +#define arenas_tls JEMALLOC_N(arenas_tls) +#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) +#define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) +#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) +#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) +#define base_alloc JEMALLOC_N(base_alloc) +#define base_boot JEMALLOC_N(base_boot) +#define base_node_alloc JEMALLOC_N(base_node_alloc) +#define base_node_dealloc JEMALLOC_N(base_node_dealloc) +#define base_postfork_child JEMALLOC_N(base_postfork_child) +#define base_postfork_parent JEMALLOC_N(base_postfork_parent) +#define base_prefork JEMALLOC_N(base_prefork) +#define bitmap_full JEMALLOC_N(bitmap_full) +#define bitmap_get JEMALLOC_N(bitmap_get) +#define bitmap_info_init JEMALLOC_N(bitmap_info_init) +#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups) +#define bitmap_init JEMALLOC_N(bitmap_init) +#define bitmap_set JEMALLOC_N(bitmap_set) +#define bitmap_sfu JEMALLOC_N(bitmap_sfu) +#define bitmap_size JEMALLOC_N(bitmap_size) +#define bitmap_unset JEMALLOC_N(bitmap_unset) +#define bt_init JEMALLOC_N(bt_init) +#define buferror JEMALLOC_N(buferror) +#define choose_arena JEMALLOC_N(choose_arena) +#define choose_arena_hard JEMALLOC_N(choose_arena_hard) +#define chunk_alloc JEMALLOC_N(chunk_alloc) +#define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) +#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) +#define chunk_boot JEMALLOC_N(chunk_boot) +#define chunk_dealloc JEMALLOC_N(chunk_dealloc) +#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap) +#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) +#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) +#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent) +#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) +#define chunk_in_dss JEMALLOC_N(chunk_in_dss) +#define chunk_mmap_boot JEMALLOC_N(chunk_mmap_boot) +#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search) +#define ckh_count JEMALLOC_N(ckh_count) +#define ckh_delete JEMALLOC_N(ckh_delete) +#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert) +#define ckh_insert JEMALLOC_N(ckh_insert) +#define ckh_isearch JEMALLOC_N(ckh_isearch) +#define ckh_iter JEMALLOC_N(ckh_iter) +#define ckh_new JEMALLOC_N(ckh_new) +#define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) +#define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) +#define ckh_rebuild JEMALLOC_N(ckh_rebuild) +#define ckh_remove JEMALLOC_N(ckh_remove) +#define ckh_search JEMALLOC_N(ckh_search) +#define ckh_string_hash JEMALLOC_N(ckh_string_hash) +#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) +#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert) +#define ckh_try_insert JEMALLOC_N(ckh_try_insert) +#define ctl_boot JEMALLOC_N(ctl_boot) 
+#define ctl_bymib JEMALLOC_N(ctl_bymib) +#define ctl_byname JEMALLOC_N(ctl_byname) +#define ctl_nametomib JEMALLOC_N(ctl_nametomib) +#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) +#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) +#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) +#define extent_tree_ad_iter_recurse JEMALLOC_N(extent_tree_ad_iter_recurse) +#define extent_tree_ad_iter_start JEMALLOC_N(extent_tree_ad_iter_start) +#define extent_tree_ad_last JEMALLOC_N(extent_tree_ad_last) +#define extent_tree_ad_new JEMALLOC_N(extent_tree_ad_new) +#define extent_tree_ad_next JEMALLOC_N(extent_tree_ad_next) +#define extent_tree_ad_nsearch JEMALLOC_N(extent_tree_ad_nsearch) +#define extent_tree_ad_prev JEMALLOC_N(extent_tree_ad_prev) +#define extent_tree_ad_psearch JEMALLOC_N(extent_tree_ad_psearch) +#define extent_tree_ad_remove JEMALLOC_N(extent_tree_ad_remove) +#define extent_tree_ad_reverse_iter JEMALLOC_N(extent_tree_ad_reverse_iter) +#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) +#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) +#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) +#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first) +#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert) +#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter) +#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse) +#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start) +#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last) +#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new) +#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next) +#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch) +#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev) +#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch) +#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove) +#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter) +#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) +#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) +#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) +#define hash JEMALLOC_N(hash) +#define huge_boot JEMALLOC_N(huge_boot) +#define huge_dalloc JEMALLOC_N(huge_dalloc) +#define huge_malloc JEMALLOC_N(huge_malloc) +#define huge_palloc JEMALLOC_N(huge_palloc) +#define huge_postfork_child JEMALLOC_N(huge_postfork_child) +#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent) +#define huge_prefork JEMALLOC_N(huge_prefork) +#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get) +#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set) +#define huge_ralloc JEMALLOC_N(huge_ralloc) +#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) +#define huge_salloc JEMALLOC_N(huge_salloc) +#define iallocm JEMALLOC_N(iallocm) +#define icalloc JEMALLOC_N(icalloc) +#define idalloc JEMALLOC_N(idalloc) +#define imalloc JEMALLOC_N(imalloc) +#define ipalloc JEMALLOC_N(ipalloc) +#define iqalloc JEMALLOC_N(iqalloc) +#define iralloc JEMALLOC_N(iralloc) +#define isalloc JEMALLOC_N(isalloc) +#define ivsalloc JEMALLOC_N(ivsalloc) +#define jemalloc_darwin_init JEMALLOC_N(jemalloc_darwin_init) +#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) +#define jemalloc_postfork_parent 
JEMALLOC_N(jemalloc_postfork_parent) +#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) +#define malloc_cprintf JEMALLOC_N(malloc_cprintf) +#define malloc_mutex_destroy JEMALLOC_N(malloc_mutex_destroy) +#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) +#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) +#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) +#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent) +#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork) +#define malloc_mutex_trylock JEMALLOC_N(malloc_mutex_trylock) +#define malloc_mutex_unlock JEMALLOC_N(malloc_mutex_unlock) +#define malloc_printf JEMALLOC_N(malloc_printf) +#define malloc_snprintf JEMALLOC_N(malloc_snprintf) +#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf) +#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) +#define malloc_write JEMALLOC_N(malloc_write) +#define mb_write JEMALLOC_N(mb_write) +#define opt_abort JEMALLOC_N(opt_abort) +#define opt_junk JEMALLOC_N(opt_junk) +#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) +#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) +#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) +#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample) +#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max) +#define opt_narenas JEMALLOC_N(opt_narenas) +#define opt_prof JEMALLOC_N(opt_prof) +#define opt_prof_accum JEMALLOC_N(opt_prof_accum) +#define opt_prof_active JEMALLOC_N(opt_prof_active) +#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) +#define opt_prof_leak JEMALLOC_N(opt_prof_leak) +#define opt_stats_print JEMALLOC_N(opt_stats_print) +#define opt_tcache JEMALLOC_N(opt_tcache) +#define opt_utrace JEMALLOC_N(opt_utrace) +#define opt_xmalloc JEMALLOC_N(opt_xmalloc) +#define opt_zero JEMALLOC_N(opt_zero) +#define p2rz JEMALLOC_N(p2rz) +#define pow2_ceil JEMALLOC_N(pow2_ceil) +#define prof_backtrace JEMALLOC_N(prof_backtrace) +#define prof_boot0 JEMALLOC_N(prof_boot0) +#define prof_boot1 JEMALLOC_N(prof_boot1) +#define prof_boot2 JEMALLOC_N(prof_boot2) +#define prof_ctx_get JEMALLOC_N(prof_ctx_get) +#define prof_ctx_set JEMALLOC_N(prof_ctx_set) +#define prof_free JEMALLOC_N(prof_free) +#define prof_gdump JEMALLOC_N(prof_gdump) +#define prof_idump JEMALLOC_N(prof_idump) +#define prof_lookup JEMALLOC_N(prof_lookup) +#define prof_malloc JEMALLOC_N(prof_malloc) +#define prof_mdump JEMALLOC_N(prof_mdump) +#define prof_realloc JEMALLOC_N(prof_realloc) +#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) +#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) +#define prof_tdata_init JEMALLOC_N(prof_tdata_init) +#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) +#define pthread_create JEMALLOC_N(pthread_create) +#define quarantine JEMALLOC_N(quarantine) +#define quarantine_boot JEMALLOC_N(quarantine_boot) +#define register_zone JEMALLOC_N(register_zone) +#define rtree_get JEMALLOC_N(rtree_get) +#define rtree_get_locked JEMALLOC_N(rtree_get_locked) +#define rtree_new JEMALLOC_N(rtree_new) +#define rtree_set JEMALLOC_N(rtree_set) +#define s2u JEMALLOC_N(s2u) +#define sa2u JEMALLOC_N(sa2u) +#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index) +#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index) +#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index) +#define stats_cactive JEMALLOC_N(stats_cactive) +#define stats_cactive_add JEMALLOC_N(stats_cactive_add) +#define stats_cactive_get 
JEMALLOC_N(stats_cactive_get) +#define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) +#define stats_print JEMALLOC_N(stats_print) +#define szone2ozone JEMALLOC_N(szone2ozone) +#define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) +#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) +#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) +#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) +#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) +#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) +#define tcache_boot JEMALLOC_N(tcache_boot) +#define tcache_create JEMALLOC_N(tcache_create) +#define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) +#define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) +#define tcache_destroy JEMALLOC_N(tcache_destroy) +#define tcache_event JEMALLOC_N(tcache_event) +#define tcache_get JEMALLOC_N(tcache_get) +#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) +#define tcache_tls JEMALLOC_N(tcache_tls) +#define thread_allocated_get JEMALLOC_N(thread_allocated_get) +#define thread_allocated_get_hard JEMALLOC_N(thread_allocated_get_hard) +#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) +#define u2rz JEMALLOC_N(u2rz) Property changes on: contrib/jemalloc/include/jemalloc/internal/private_namespace.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/size_classes.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/size_classes.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/size_classes.h (working copy) @@ -0,0 +1,721 @@ +/* This file was automatically generated by size_classes.sh. 
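The private_namespace.h list above routes every internal symbol through JEMALLOC_N() so that it lands in a reserved namespace and cannot collide with application identifiers. A hedged sketch of the idiom; the "__jemalloc_" prefix here is an assumption for illustration, since the real prefix is configured at build time.

#include <stddef.h>

#define DEMO_JEMALLOC_N(n)	__jemalloc_##n
#define arena_malloc_demo	DEMO_JEMALLOC_N(arena_malloc_demo)

/* After preprocessing, this actually defines __jemalloc_arena_malloc_demo(). */
void *
arena_malloc_demo(void)
{
	return (NULL);
}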
*/ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 8, 24) \ + SIZE_CLASS(3, 8, 32) \ + SIZE_CLASS(4, 8, 40) \ + SIZE_CLASS(5, 8, 48) \ + SIZE_CLASS(6, 8, 56) \ + SIZE_CLASS(7, 8, 64) \ + SIZE_CLASS(8, 16, 80) \ + SIZE_CLASS(9, 16, 96) \ + SIZE_CLASS(10, 16, 112) \ + SIZE_CLASS(11, 16, 128) \ + SIZE_CLASS(12, 32, 160) \ + SIZE_CLASS(13, 32, 192) \ + SIZE_CLASS(14, 32, 224) \ + SIZE_CLASS(15, 32, 256) \ + SIZE_CLASS(16, 64, 320) \ + SIZE_CLASS(17, 64, 384) \ + SIZE_CLASS(18, 64, 448) \ + SIZE_CLASS(19, 64, 512) \ + SIZE_CLASS(20, 128, 640) \ + SIZE_CLASS(21, 128, 768) \ + SIZE_CLASS(22, 128, 896) \ + SIZE_CLASS(23, 128, 1024) \ + SIZE_CLASS(24, 256, 1280) \ + SIZE_CLASS(25, 256, 1536) \ + SIZE_CLASS(26, 256, 1792) \ + SIZE_CLASS(27, 256, 2048) \ + SIZE_CLASS(28, 512, 2560) \ + SIZE_CLASS(29, 512, 3072) \ + SIZE_CLASS(30, 512, 3584) \ + +#define NBINS 31 +#define SMALL_MAXCLASS 3584 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 8, 24) \ + SIZE_CLASS(3, 8, 32) \ + SIZE_CLASS(4, 8, 40) \ + SIZE_CLASS(5, 8, 48) \ + SIZE_CLASS(6, 8, 56) \ + SIZE_CLASS(7, 8, 64) \ + SIZE_CLASS(8, 16, 80) \ + SIZE_CLASS(9, 16, 96) \ + SIZE_CLASS(10, 16, 112) \ + SIZE_CLASS(11, 16, 128) \ + SIZE_CLASS(12, 32, 160) \ + SIZE_CLASS(13, 32, 192) \ + SIZE_CLASS(14, 32, 224) \ + SIZE_CLASS(15, 32, 256) \ + SIZE_CLASS(16, 64, 320) \ + SIZE_CLASS(17, 64, 384) \ + SIZE_CLASS(18, 64, 448) \ + SIZE_CLASS(19, 64, 512) \ + SIZE_CLASS(20, 128, 640) \ + SIZE_CLASS(21, 128, 768) \ + SIZE_CLASS(22, 128, 896) \ + SIZE_CLASS(23, 128, 1024) \ + SIZE_CLASS(24, 256, 1280) \ + SIZE_CLASS(25, 256, 1536) \ + SIZE_CLASS(26, 256, 1792) \ + SIZE_CLASS(27, 256, 2048) \ + SIZE_CLASS(28, 512, 2560) \ + SIZE_CLASS(29, 512, 3072) \ + SIZE_CLASS(30, 512, 3584) \ + SIZE_CLASS(31, 512, 4096) \ + SIZE_CLASS(32, 1024, 5120) \ + SIZE_CLASS(33, 1024, 6144) \ + SIZE_CLASS(34, 1024, 7168) \ + +#define NBINS 35 +#define SMALL_MAXCLASS 7168 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 14) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 8, 24) \ + SIZE_CLASS(3, 8, 32) \ + SIZE_CLASS(4, 8, 40) \ + SIZE_CLASS(5, 8, 48) \ + SIZE_CLASS(6, 8, 56) \ + SIZE_CLASS(7, 8, 64) \ + SIZE_CLASS(8, 16, 80) \ + SIZE_CLASS(9, 16, 96) \ + SIZE_CLASS(10, 16, 112) \ + SIZE_CLASS(11, 16, 128) \ + SIZE_CLASS(12, 32, 160) \ + SIZE_CLASS(13, 32, 192) \ + SIZE_CLASS(14, 32, 224) \ + SIZE_CLASS(15, 32, 256) \ + SIZE_CLASS(16, 64, 320) \ + SIZE_CLASS(17, 64, 384) \ + SIZE_CLASS(18, 64, 448) \ + SIZE_CLASS(19, 64, 512) \ + SIZE_CLASS(20, 128, 640) \ + SIZE_CLASS(21, 128, 768) \ + SIZE_CLASS(22, 128, 896) \ + SIZE_CLASS(23, 128, 1024) \ + SIZE_CLASS(24, 256, 1280) \ + SIZE_CLASS(25, 256, 1536) \ + SIZE_CLASS(26, 256, 1792) \ + SIZE_CLASS(27, 256, 2048) \ + SIZE_CLASS(28, 512, 2560) \ + SIZE_CLASS(29, 512, 3072) \ + SIZE_CLASS(30, 512, 3584) \ + SIZE_CLASS(31, 512, 4096) \ + SIZE_CLASS(32, 1024, 5120) \ + SIZE_CLASS(33, 1024, 6144) \ + SIZE_CLASS(34, 1024, 7168) \ + SIZE_CLASS(35, 1024, 8192) \ + SIZE_CLASS(36, 2048, 10240) 
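The SIZE_CLASSES tables above follow the X-macro pattern: a consumer defines SIZE_CLASS(bin, delta, sz) to emit whatever it needs, expands SIZE_CLASSES, and then undefines it. A small self-contained sketch of that consumption; the three-entry DEMO_SIZE_CLASSES list is a stand-in for the generated tables.

#include <stdio.h>
#include <stddef.h>

#define DEMO_SIZE_CLASSES	\
	SIZE_CLASS(0, 8, 8)	\
	SIZE_CLASS(1, 8, 16)	\
	SIZE_CLASS(2, 8, 24)

static const size_t demo_class_sizes[] = {
#define SIZE_CLASS(bin, delta, sz)	sz,
	DEMO_SIZE_CLASSES
#undef SIZE_CLASS
};

static void
demo_print_classes(void)
{
	size_t i;

	for (i = 0; i < sizeof(demo_class_sizes) / sizeof(size_t); i++)
		printf("bin %zu: %zu bytes\n", i, demo_class_sizes[i]);
}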
\ + SIZE_CLASS(37, 2048, 12288) \ + SIZE_CLASS(38, 2048, 14336) \ + +#define NBINS 39 +#define SMALL_MAXCLASS 14336 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 15) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 8, 24) \ + SIZE_CLASS(3, 8, 32) \ + SIZE_CLASS(4, 8, 40) \ + SIZE_CLASS(5, 8, 48) \ + SIZE_CLASS(6, 8, 56) \ + SIZE_CLASS(7, 8, 64) \ + SIZE_CLASS(8, 16, 80) \ + SIZE_CLASS(9, 16, 96) \ + SIZE_CLASS(10, 16, 112) \ + SIZE_CLASS(11, 16, 128) \ + SIZE_CLASS(12, 32, 160) \ + SIZE_CLASS(13, 32, 192) \ + SIZE_CLASS(14, 32, 224) \ + SIZE_CLASS(15, 32, 256) \ + SIZE_CLASS(16, 64, 320) \ + SIZE_CLASS(17, 64, 384) \ + SIZE_CLASS(18, 64, 448) \ + SIZE_CLASS(19, 64, 512) \ + SIZE_CLASS(20, 128, 640) \ + SIZE_CLASS(21, 128, 768) \ + SIZE_CLASS(22, 128, 896) \ + SIZE_CLASS(23, 128, 1024) \ + SIZE_CLASS(24, 256, 1280) \ + SIZE_CLASS(25, 256, 1536) \ + SIZE_CLASS(26, 256, 1792) \ + SIZE_CLASS(27, 256, 2048) \ + SIZE_CLASS(28, 512, 2560) \ + SIZE_CLASS(29, 512, 3072) \ + SIZE_CLASS(30, 512, 3584) \ + SIZE_CLASS(31, 512, 4096) \ + SIZE_CLASS(32, 1024, 5120) \ + SIZE_CLASS(33, 1024, 6144) \ + SIZE_CLASS(34, 1024, 7168) \ + SIZE_CLASS(35, 1024, 8192) \ + SIZE_CLASS(36, 2048, 10240) \ + SIZE_CLASS(37, 2048, 12288) \ + SIZE_CLASS(38, 2048, 14336) \ + SIZE_CLASS(39, 2048, 16384) \ + SIZE_CLASS(40, 4096, 20480) \ + SIZE_CLASS(41, 4096, 24576) \ + SIZE_CLASS(42, 4096, 28672) \ + +#define NBINS 43 +#define SMALL_MAXCLASS 28672 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 8, 24) \ + SIZE_CLASS(3, 8, 32) \ + SIZE_CLASS(4, 8, 40) \ + SIZE_CLASS(5, 8, 48) \ + SIZE_CLASS(6, 8, 56) \ + SIZE_CLASS(7, 8, 64) \ + SIZE_CLASS(8, 16, 80) \ + SIZE_CLASS(9, 16, 96) \ + SIZE_CLASS(10, 16, 112) \ + SIZE_CLASS(11, 16, 128) \ + SIZE_CLASS(12, 32, 160) \ + SIZE_CLASS(13, 32, 192) \ + SIZE_CLASS(14, 32, 224) \ + SIZE_CLASS(15, 32, 256) \ + SIZE_CLASS(16, 64, 320) \ + SIZE_CLASS(17, 64, 384) \ + SIZE_CLASS(18, 64, 448) \ + SIZE_CLASS(19, 64, 512) \ + SIZE_CLASS(20, 128, 640) \ + SIZE_CLASS(21, 128, 768) \ + SIZE_CLASS(22, 128, 896) \ + SIZE_CLASS(23, 128, 1024) \ + SIZE_CLASS(24, 256, 1280) \ + SIZE_CLASS(25, 256, 1536) \ + SIZE_CLASS(26, 256, 1792) \ + SIZE_CLASS(27, 256, 2048) \ + SIZE_CLASS(28, 512, 2560) \ + SIZE_CLASS(29, 512, 3072) \ + SIZE_CLASS(30, 512, 3584) \ + SIZE_CLASS(31, 512, 4096) \ + SIZE_CLASS(32, 1024, 5120) \ + SIZE_CLASS(33, 1024, 6144) \ + SIZE_CLASS(34, 1024, 7168) \ + SIZE_CLASS(35, 1024, 8192) \ + SIZE_CLASS(36, 2048, 10240) \ + SIZE_CLASS(37, 2048, 12288) \ + SIZE_CLASS(38, 2048, 14336) \ + SIZE_CLASS(39, 2048, 16384) \ + SIZE_CLASS(40, 4096, 20480) \ + SIZE_CLASS(41, 4096, 24576) \ + SIZE_CLASS(42, 4096, 28672) \ + SIZE_CLASS(43, 4096, 32768) \ + SIZE_CLASS(44, 8192, 40960) \ + SIZE_CLASS(45, 8192, 49152) \ + SIZE_CLASS(46, 8192, 57344) \ + +#define NBINS 47 +#define SMALL_MAXCLASS 57344 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 16, 32) \ + SIZE_CLASS(3, 16, 48) \ + SIZE_CLASS(4, 16, 64) \ + SIZE_CLASS(5, 16, 80) \ + SIZE_CLASS(6, 16, 96) \ + SIZE_CLASS(7, 16, 112) \ + SIZE_CLASS(8, 16, 128) \ + SIZE_CLASS(9, 32, 160) \ + 
SIZE_CLASS(10, 32, 192) \ + SIZE_CLASS(11, 32, 224) \ + SIZE_CLASS(12, 32, 256) \ + SIZE_CLASS(13, 64, 320) \ + SIZE_CLASS(14, 64, 384) \ + SIZE_CLASS(15, 64, 448) \ + SIZE_CLASS(16, 64, 512) \ + SIZE_CLASS(17, 128, 640) \ + SIZE_CLASS(18, 128, 768) \ + SIZE_CLASS(19, 128, 896) \ + SIZE_CLASS(20, 128, 1024) \ + SIZE_CLASS(21, 256, 1280) \ + SIZE_CLASS(22, 256, 1536) \ + SIZE_CLASS(23, 256, 1792) \ + SIZE_CLASS(24, 256, 2048) \ + SIZE_CLASS(25, 512, 2560) \ + SIZE_CLASS(26, 512, 3072) \ + SIZE_CLASS(27, 512, 3584) \ + +#define NBINS 28 +#define SMALL_MAXCLASS 3584 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 16, 32) \ + SIZE_CLASS(3, 16, 48) \ + SIZE_CLASS(4, 16, 64) \ + SIZE_CLASS(5, 16, 80) \ + SIZE_CLASS(6, 16, 96) \ + SIZE_CLASS(7, 16, 112) \ + SIZE_CLASS(8, 16, 128) \ + SIZE_CLASS(9, 32, 160) \ + SIZE_CLASS(10, 32, 192) \ + SIZE_CLASS(11, 32, 224) \ + SIZE_CLASS(12, 32, 256) \ + SIZE_CLASS(13, 64, 320) \ + SIZE_CLASS(14, 64, 384) \ + SIZE_CLASS(15, 64, 448) \ + SIZE_CLASS(16, 64, 512) \ + SIZE_CLASS(17, 128, 640) \ + SIZE_CLASS(18, 128, 768) \ + SIZE_CLASS(19, 128, 896) \ + SIZE_CLASS(20, 128, 1024) \ + SIZE_CLASS(21, 256, 1280) \ + SIZE_CLASS(22, 256, 1536) \ + SIZE_CLASS(23, 256, 1792) \ + SIZE_CLASS(24, 256, 2048) \ + SIZE_CLASS(25, 512, 2560) \ + SIZE_CLASS(26, 512, 3072) \ + SIZE_CLASS(27, 512, 3584) \ + SIZE_CLASS(28, 512, 4096) \ + SIZE_CLASS(29, 1024, 5120) \ + SIZE_CLASS(30, 1024, 6144) \ + SIZE_CLASS(31, 1024, 7168) \ + +#define NBINS 32 +#define SMALL_MAXCLASS 7168 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 14) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 16, 32) \ + SIZE_CLASS(3, 16, 48) \ + SIZE_CLASS(4, 16, 64) \ + SIZE_CLASS(5, 16, 80) \ + SIZE_CLASS(6, 16, 96) \ + SIZE_CLASS(7, 16, 112) \ + SIZE_CLASS(8, 16, 128) \ + SIZE_CLASS(9, 32, 160) \ + SIZE_CLASS(10, 32, 192) \ + SIZE_CLASS(11, 32, 224) \ + SIZE_CLASS(12, 32, 256) \ + SIZE_CLASS(13, 64, 320) \ + SIZE_CLASS(14, 64, 384) \ + SIZE_CLASS(15, 64, 448) \ + SIZE_CLASS(16, 64, 512) \ + SIZE_CLASS(17, 128, 640) \ + SIZE_CLASS(18, 128, 768) \ + SIZE_CLASS(19, 128, 896) \ + SIZE_CLASS(20, 128, 1024) \ + SIZE_CLASS(21, 256, 1280) \ + SIZE_CLASS(22, 256, 1536) \ + SIZE_CLASS(23, 256, 1792) \ + SIZE_CLASS(24, 256, 2048) \ + SIZE_CLASS(25, 512, 2560) \ + SIZE_CLASS(26, 512, 3072) \ + SIZE_CLASS(27, 512, 3584) \ + SIZE_CLASS(28, 512, 4096) \ + SIZE_CLASS(29, 1024, 5120) \ + SIZE_CLASS(30, 1024, 6144) \ + SIZE_CLASS(31, 1024, 7168) \ + SIZE_CLASS(32, 1024, 8192) \ + SIZE_CLASS(33, 2048, 10240) \ + SIZE_CLASS(34, 2048, 12288) \ + SIZE_CLASS(35, 2048, 14336) \ + +#define NBINS 36 +#define SMALL_MAXCLASS 14336 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 15) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 16, 32) \ + SIZE_CLASS(3, 16, 48) \ + SIZE_CLASS(4, 16, 64) \ + SIZE_CLASS(5, 16, 80) \ + SIZE_CLASS(6, 16, 96) \ + SIZE_CLASS(7, 16, 112) \ + SIZE_CLASS(8, 16, 128) \ + SIZE_CLASS(9, 32, 160) \ + SIZE_CLASS(10, 32, 192) \ + SIZE_CLASS(11, 32, 224) \ + SIZE_CLASS(12, 32, 256) \ + SIZE_CLASS(13, 64, 320) \ + SIZE_CLASS(14, 64, 384) \ + SIZE_CLASS(15, 64, 448) \ + SIZE_CLASS(16, 64, 512) \ + SIZE_CLASS(17, 
128, 640) \ + SIZE_CLASS(18, 128, 768) \ + SIZE_CLASS(19, 128, 896) \ + SIZE_CLASS(20, 128, 1024) \ + SIZE_CLASS(21, 256, 1280) \ + SIZE_CLASS(22, 256, 1536) \ + SIZE_CLASS(23, 256, 1792) \ + SIZE_CLASS(24, 256, 2048) \ + SIZE_CLASS(25, 512, 2560) \ + SIZE_CLASS(26, 512, 3072) \ + SIZE_CLASS(27, 512, 3584) \ + SIZE_CLASS(28, 512, 4096) \ + SIZE_CLASS(29, 1024, 5120) \ + SIZE_CLASS(30, 1024, 6144) \ + SIZE_CLASS(31, 1024, 7168) \ + SIZE_CLASS(32, 1024, 8192) \ + SIZE_CLASS(33, 2048, 10240) \ + SIZE_CLASS(34, 2048, 12288) \ + SIZE_CLASS(35, 2048, 14336) \ + SIZE_CLASS(36, 2048, 16384) \ + SIZE_CLASS(37, 4096, 20480) \ + SIZE_CLASS(38, 4096, 24576) \ + SIZE_CLASS(39, 4096, 28672) \ + +#define NBINS 40 +#define SMALL_MAXCLASS 28672 +#endif + +#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 8, 8) \ + SIZE_CLASS(1, 8, 16) \ + SIZE_CLASS(2, 16, 32) \ + SIZE_CLASS(3, 16, 48) \ + SIZE_CLASS(4, 16, 64) \ + SIZE_CLASS(5, 16, 80) \ + SIZE_CLASS(6, 16, 96) \ + SIZE_CLASS(7, 16, 112) \ + SIZE_CLASS(8, 16, 128) \ + SIZE_CLASS(9, 32, 160) \ + SIZE_CLASS(10, 32, 192) \ + SIZE_CLASS(11, 32, 224) \ + SIZE_CLASS(12, 32, 256) \ + SIZE_CLASS(13, 64, 320) \ + SIZE_CLASS(14, 64, 384) \ + SIZE_CLASS(15, 64, 448) \ + SIZE_CLASS(16, 64, 512) \ + SIZE_CLASS(17, 128, 640) \ + SIZE_CLASS(18, 128, 768) \ + SIZE_CLASS(19, 128, 896) \ + SIZE_CLASS(20, 128, 1024) \ + SIZE_CLASS(21, 256, 1280) \ + SIZE_CLASS(22, 256, 1536) \ + SIZE_CLASS(23, 256, 1792) \ + SIZE_CLASS(24, 256, 2048) \ + SIZE_CLASS(25, 512, 2560) \ + SIZE_CLASS(26, 512, 3072) \ + SIZE_CLASS(27, 512, 3584) \ + SIZE_CLASS(28, 512, 4096) \ + SIZE_CLASS(29, 1024, 5120) \ + SIZE_CLASS(30, 1024, 6144) \ + SIZE_CLASS(31, 1024, 7168) \ + SIZE_CLASS(32, 1024, 8192) \ + SIZE_CLASS(33, 2048, 10240) \ + SIZE_CLASS(34, 2048, 12288) \ + SIZE_CLASS(35, 2048, 14336) \ + SIZE_CLASS(36, 2048, 16384) \ + SIZE_CLASS(37, 4096, 20480) \ + SIZE_CLASS(38, 4096, 24576) \ + SIZE_CLASS(39, 4096, 28672) \ + SIZE_CLASS(40, 4096, 32768) \ + SIZE_CLASS(41, 8192, 40960) \ + SIZE_CLASS(42, 8192, 49152) \ + SIZE_CLASS(43, 8192, 57344) \ + +#define NBINS 44 +#define SMALL_MAXCLASS 57344 +#endif + +#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 16, 16) \ + SIZE_CLASS(1, 16, 32) \ + SIZE_CLASS(2, 16, 48) \ + SIZE_CLASS(3, 16, 64) \ + SIZE_CLASS(4, 16, 80) \ + SIZE_CLASS(5, 16, 96) \ + SIZE_CLASS(6, 16, 112) \ + SIZE_CLASS(7, 16, 128) \ + SIZE_CLASS(8, 32, 160) \ + SIZE_CLASS(9, 32, 192) \ + SIZE_CLASS(10, 32, 224) \ + SIZE_CLASS(11, 32, 256) \ + SIZE_CLASS(12, 64, 320) \ + SIZE_CLASS(13, 64, 384) \ + SIZE_CLASS(14, 64, 448) \ + SIZE_CLASS(15, 64, 512) \ + SIZE_CLASS(16, 128, 640) \ + SIZE_CLASS(17, 128, 768) \ + SIZE_CLASS(18, 128, 896) \ + SIZE_CLASS(19, 128, 1024) \ + SIZE_CLASS(20, 256, 1280) \ + SIZE_CLASS(21, 256, 1536) \ + SIZE_CLASS(22, 256, 1792) \ + SIZE_CLASS(23, 256, 2048) \ + SIZE_CLASS(24, 512, 2560) \ + SIZE_CLASS(25, 512, 3072) \ + SIZE_CLASS(26, 512, 3584) \ + +#define NBINS 27 +#define SMALL_MAXCLASS 3584 +#endif + +#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 16, 16) \ + SIZE_CLASS(1, 16, 32) \ + SIZE_CLASS(2, 16, 48) \ + SIZE_CLASS(3, 16, 64) \ + SIZE_CLASS(4, 16, 80) \ + SIZE_CLASS(5, 16, 96) \ + SIZE_CLASS(6, 16, 112) \ + 
SIZE_CLASS(7, 16, 128) \ + SIZE_CLASS(8, 32, 160) \ + SIZE_CLASS(9, 32, 192) \ + SIZE_CLASS(10, 32, 224) \ + SIZE_CLASS(11, 32, 256) \ + SIZE_CLASS(12, 64, 320) \ + SIZE_CLASS(13, 64, 384) \ + SIZE_CLASS(14, 64, 448) \ + SIZE_CLASS(15, 64, 512) \ + SIZE_CLASS(16, 128, 640) \ + SIZE_CLASS(17, 128, 768) \ + SIZE_CLASS(18, 128, 896) \ + SIZE_CLASS(19, 128, 1024) \ + SIZE_CLASS(20, 256, 1280) \ + SIZE_CLASS(21, 256, 1536) \ + SIZE_CLASS(22, 256, 1792) \ + SIZE_CLASS(23, 256, 2048) \ + SIZE_CLASS(24, 512, 2560) \ + SIZE_CLASS(25, 512, 3072) \ + SIZE_CLASS(26, 512, 3584) \ + SIZE_CLASS(27, 512, 4096) \ + SIZE_CLASS(28, 1024, 5120) \ + SIZE_CLASS(29, 1024, 6144) \ + SIZE_CLASS(30, 1024, 7168) \ + +#define NBINS 31 +#define SMALL_MAXCLASS 7168 +#endif + +#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 14) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 16, 16) \ + SIZE_CLASS(1, 16, 32) \ + SIZE_CLASS(2, 16, 48) \ + SIZE_CLASS(3, 16, 64) \ + SIZE_CLASS(4, 16, 80) \ + SIZE_CLASS(5, 16, 96) \ + SIZE_CLASS(6, 16, 112) \ + SIZE_CLASS(7, 16, 128) \ + SIZE_CLASS(8, 32, 160) \ + SIZE_CLASS(9, 32, 192) \ + SIZE_CLASS(10, 32, 224) \ + SIZE_CLASS(11, 32, 256) \ + SIZE_CLASS(12, 64, 320) \ + SIZE_CLASS(13, 64, 384) \ + SIZE_CLASS(14, 64, 448) \ + SIZE_CLASS(15, 64, 512) \ + SIZE_CLASS(16, 128, 640) \ + SIZE_CLASS(17, 128, 768) \ + SIZE_CLASS(18, 128, 896) \ + SIZE_CLASS(19, 128, 1024) \ + SIZE_CLASS(20, 256, 1280) \ + SIZE_CLASS(21, 256, 1536) \ + SIZE_CLASS(22, 256, 1792) \ + SIZE_CLASS(23, 256, 2048) \ + SIZE_CLASS(24, 512, 2560) \ + SIZE_CLASS(25, 512, 3072) \ + SIZE_CLASS(26, 512, 3584) \ + SIZE_CLASS(27, 512, 4096) \ + SIZE_CLASS(28, 1024, 5120) \ + SIZE_CLASS(29, 1024, 6144) \ + SIZE_CLASS(30, 1024, 7168) \ + SIZE_CLASS(31, 1024, 8192) \ + SIZE_CLASS(32, 2048, 10240) \ + SIZE_CLASS(33, 2048, 12288) \ + SIZE_CLASS(34, 2048, 14336) \ + +#define NBINS 35 +#define SMALL_MAXCLASS 14336 +#endif + +#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 15) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 16, 16) \ + SIZE_CLASS(1, 16, 32) \ + SIZE_CLASS(2, 16, 48) \ + SIZE_CLASS(3, 16, 64) \ + SIZE_CLASS(4, 16, 80) \ + SIZE_CLASS(5, 16, 96) \ + SIZE_CLASS(6, 16, 112) \ + SIZE_CLASS(7, 16, 128) \ + SIZE_CLASS(8, 32, 160) \ + SIZE_CLASS(9, 32, 192) \ + SIZE_CLASS(10, 32, 224) \ + SIZE_CLASS(11, 32, 256) \ + SIZE_CLASS(12, 64, 320) \ + SIZE_CLASS(13, 64, 384) \ + SIZE_CLASS(14, 64, 448) \ + SIZE_CLASS(15, 64, 512) \ + SIZE_CLASS(16, 128, 640) \ + SIZE_CLASS(17, 128, 768) \ + SIZE_CLASS(18, 128, 896) \ + SIZE_CLASS(19, 128, 1024) \ + SIZE_CLASS(20, 256, 1280) \ + SIZE_CLASS(21, 256, 1536) \ + SIZE_CLASS(22, 256, 1792) \ + SIZE_CLASS(23, 256, 2048) \ + SIZE_CLASS(24, 512, 2560) \ + SIZE_CLASS(25, 512, 3072) \ + SIZE_CLASS(26, 512, 3584) \ + SIZE_CLASS(27, 512, 4096) \ + SIZE_CLASS(28, 1024, 5120) \ + SIZE_CLASS(29, 1024, 6144) \ + SIZE_CLASS(30, 1024, 7168) \ + SIZE_CLASS(31, 1024, 8192) \ + SIZE_CLASS(32, 2048, 10240) \ + SIZE_CLASS(33, 2048, 12288) \ + SIZE_CLASS(34, 2048, 14336) \ + SIZE_CLASS(35, 2048, 16384) \ + SIZE_CLASS(36, 4096, 20480) \ + SIZE_CLASS(37, 4096, 24576) \ + SIZE_CLASS(38, 4096, 28672) \ + +#define NBINS 39 +#define SMALL_MAXCLASS 28672 +#endif + +#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16) +#define SIZE_CLASSES_DEFINED +/* SIZE_CLASS(bin, delta, sz) */ +#define SIZE_CLASSES \ + SIZE_CLASS(0, 16, 16) \ + SIZE_CLASS(1, 16, 32) \ + SIZE_CLASS(2, 16, 48) \ + 
SIZE_CLASS(3, 16, 64) \ + SIZE_CLASS(4, 16, 80) \ + SIZE_CLASS(5, 16, 96) \ + SIZE_CLASS(6, 16, 112) \ + SIZE_CLASS(7, 16, 128) \ + SIZE_CLASS(8, 32, 160) \ + SIZE_CLASS(9, 32, 192) \ + SIZE_CLASS(10, 32, 224) \ + SIZE_CLASS(11, 32, 256) \ + SIZE_CLASS(12, 64, 320) \ + SIZE_CLASS(13, 64, 384) \ + SIZE_CLASS(14, 64, 448) \ + SIZE_CLASS(15, 64, 512) \ + SIZE_CLASS(16, 128, 640) \ + SIZE_CLASS(17, 128, 768) \ + SIZE_CLASS(18, 128, 896) \ + SIZE_CLASS(19, 128, 1024) \ + SIZE_CLASS(20, 256, 1280) \ + SIZE_CLASS(21, 256, 1536) \ + SIZE_CLASS(22, 256, 1792) \ + SIZE_CLASS(23, 256, 2048) \ + SIZE_CLASS(24, 512, 2560) \ + SIZE_CLASS(25, 512, 3072) \ + SIZE_CLASS(26, 512, 3584) \ + SIZE_CLASS(27, 512, 4096) \ + SIZE_CLASS(28, 1024, 5120) \ + SIZE_CLASS(29, 1024, 6144) \ + SIZE_CLASS(30, 1024, 7168) \ + SIZE_CLASS(31, 1024, 8192) \ + SIZE_CLASS(32, 2048, 10240) \ + SIZE_CLASS(33, 2048, 12288) \ + SIZE_CLASS(34, 2048, 14336) \ + SIZE_CLASS(35, 2048, 16384) \ + SIZE_CLASS(36, 4096, 20480) \ + SIZE_CLASS(37, 4096, 24576) \ + SIZE_CLASS(38, 4096, 28672) \ + SIZE_CLASS(39, 4096, 32768) \ + SIZE_CLASS(40, 8192, 40960) \ + SIZE_CLASS(41, 8192, 49152) \ + SIZE_CLASS(42, 8192, 57344) \ + +#define NBINS 43 +#define SMALL_MAXCLASS 57344 +#endif + +#ifndef SIZE_CLASSES_DEFINED +# error "No size class definitions match configuration" +#endif +#undef SIZE_CLASSES_DEFINED +/* + * The small_size2bin lookup table uses uint8_t to encode each bin index, so we + * cannot support more than 256 small size classes. Further constrain NBINS to + * 255 to support prof_promote, since all small size classes, plus a "not + * small" size class must be stored in 8 bits of arena_chunk_map_t's bits + * field. + */ +#if (NBINS > 255) +# error "Too many small size classes" +#endif + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/size_classes.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/tcache.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/tcache.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/tcache.h (working copy) @@ -0,0 +1,494 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct tcache_bin_info_s tcache_bin_info_t; +typedef struct tcache_bin_s tcache_bin_t; +typedef struct tcache_s tcache_t; + +/* + * tcache pointers close to NULL are used to encode state information that is + * used for two purposes: preventing thread caching on a per thread basis and + * cleaning up during thread shutdown. 
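+ *
+ * (Illustrative sketch, not part of the imported header: because the
+ * sentinel values below are small integers cast to pointers, callers such
+ * as tcache_flush() and tcache_get() can separate a live cache from
+ * NULL/state markers with a single unsigned comparison, e.g.
+ *
+ *	tcache_t *tcache = *tcache_tsd_get();
+ *	if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX)
+ *		return;
+ *
+ * and only dereference the value when that comparison is false.)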
+ */ +#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) +#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) +#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) +#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY + +/* + * Absolute maximum number of cache slots for each small bin in the thread + * cache. This is an additional constraint beyond that imposed as: twice the + * number of regions per run for this size class. + * + * This constant must be an even number. + */ +#define TCACHE_NSLOTS_SMALL_MAX 200 + +/* Number of cache slots for large size classes. */ +#define TCACHE_NSLOTS_LARGE 20 + +/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ +#define LG_TCACHE_MAXCLASS_DEFAULT 15 + +/* + * TCACHE_GC_SWEEP is the approximate number of allocation events between + * full GC sweeps. Integer rounding may cause the actual number to be + * slightly higher, since GC is performed incrementally. + */ +#define TCACHE_GC_SWEEP 8192 + +/* Number of tcache allocation/deallocation events between incremental GCs. */ +#define TCACHE_GC_INCR \ + ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +typedef enum { + tcache_enabled_false = 0, /* Enable cast to/from bool. */ + tcache_enabled_true = 1, + tcache_enabled_default = 2 +} tcache_enabled_t; + +/* + * Read-only information associated with each element of tcache_t's tbins array + * is stored separately, mainly to reduce memory usage. + */ +struct tcache_bin_info_s { + unsigned ncached_max; /* Upper limit on ncached. */ +}; + +struct tcache_bin_s { + tcache_bin_stats_t tstats; + int low_water; /* Min # cached since last GC. */ + unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ + unsigned ncached; /* # of cached objects. */ + void **avail; /* Stack of available objects. */ +}; + +struct tcache_s { + ql_elm(tcache_t) link; /* Used for aggregating stats. */ + uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */ + arena_t *arena; /* This thread's arena. */ + unsigned ev_cnt; /* Event count since incremental GC. */ + unsigned next_gc_bin; /* Next bin to GC. */ + tcache_bin_t tbins[1]; /* Dynamically sized. */ + /* + * The pointer stacks associated with tbins follow as a contiguous + * array. During tcache initialization, the avail pointer in each + * element of tbins is initialized to point to the proper offset within + * this array. + */ +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_tcache; +extern ssize_t opt_lg_tcache_max; + +extern tcache_bin_info_t *tcache_bin_info; + +/* + * Number of tcache bins. There are NBINS small-object bins, plus 0 or more + * large-object bins. + */ +extern size_t nhbins; + +/* Maximum cached size class. 
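+ *
+ * Worked example (assumptions, not taken from the header): with the
+ * default opt_lg_tcache_max == LG_TCACHE_MAXCLASS_DEFAULT (15), this is
+ * 32768 bytes; assuming LG_PAGE == 12 that is eight pages, so the
+ * "NBINS + (size >> LG_PAGE) - 1" mapping used by tcache_alloc_large()
+ * would give nhbins == NBINS + 8, and larger requests would bypass the
+ * thread cache entirely.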
*/ +extern size_t tcache_maxclass; + +void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, + tcache_t *tcache); +void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, + tcache_t *tcache); +void tcache_arena_associate(tcache_t *tcache, arena_t *arena); +void tcache_arena_dissociate(tcache_t *tcache); +tcache_t *tcache_create(arena_t *arena); +void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, + size_t binind); +void tcache_destroy(tcache_t *tcache); +void tcache_thread_cleanup(void *arg); +void tcache_stats_merge(tcache_t *tcache, arena_t *arena); +bool tcache_boot0(void); +bool tcache_boot1(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *) +malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t) + +void tcache_event(tcache_t *tcache); +void tcache_flush(void); +bool tcache_enabled_get(void); +tcache_t *tcache_get(bool create); +void tcache_enabled_set(bool enabled); +void *tcache_alloc_easy(tcache_bin_t *tbin); +void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero); +void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero); +void tcache_dalloc_small(tcache_t *tcache, void *ptr); +void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) +/* Map of thread-specific caches. */ +malloc_tsd_externs(tcache, tcache_t *) +malloc_tsd_funcs(JEMALLOC_INLINE, tcache, tcache_t *, NULL, + tcache_thread_cleanup) +/* Per thread flag that allows thread caches to be disabled. */ +malloc_tsd_externs(tcache_enabled, tcache_enabled_t) +malloc_tsd_funcs(JEMALLOC_INLINE, tcache_enabled, tcache_enabled_t, + tcache_enabled_default, malloc_tsd_no_cleanup) + +JEMALLOC_INLINE void +tcache_flush(void) +{ + tcache_t *tcache; + + cassert(config_tcache); + + tcache = *tcache_tsd_get(); + if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) + return; + tcache_destroy(tcache); + tcache = NULL; + tcache_tsd_set(&tcache); +} + +JEMALLOC_INLINE bool +tcache_enabled_get(void) +{ + tcache_enabled_t tcache_enabled; + + cassert(config_tcache); + + tcache_enabled = *tcache_enabled_tsd_get(); + if (tcache_enabled == tcache_enabled_default) { + tcache_enabled = (tcache_enabled_t)opt_tcache; + tcache_enabled_tsd_set(&tcache_enabled); + } + + return ((bool)tcache_enabled); +} + +JEMALLOC_INLINE void +tcache_enabled_set(bool enabled) +{ + tcache_enabled_t tcache_enabled; + tcache_t *tcache; + + cassert(config_tcache); + + tcache_enabled = (tcache_enabled_t)enabled; + tcache_enabled_tsd_set(&tcache_enabled); + tcache = *tcache_tsd_get(); + if (enabled) { + if (tcache == TCACHE_STATE_DISABLED) { + tcache = NULL; + tcache_tsd_set(&tcache); + } + } else /* disabled */ { + if (tcache > TCACHE_STATE_MAX) { + tcache_destroy(tcache); + tcache = NULL; + } + if (tcache == NULL) { + tcache = TCACHE_STATE_DISABLED; + tcache_tsd_set(&tcache); + } + } +} + +JEMALLOC_INLINE tcache_t * +tcache_get(bool create) +{ + tcache_t *tcache; + + if (config_tcache == false) + return (NULL); + if (config_lazy_lock && isthreaded == false) + return (NULL); + + tcache = *tcache_tsd_get(); + if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) { + if (tcache == TCACHE_STATE_DISABLED) + return (NULL); + if (tcache == NULL) { + if (create == false) { + /* + * Creating a tcache 
here would cause + * allocation as a side effect of free(). + * Ordinarily that would be okay since + * tcache_create() failure is a soft failure + * that doesn't propagate. However, if TLS + * data are freed via free() as in glibc, + * subtle corruption could result from setting + * a TLS variable after its backing memory is + * freed. + */ + return (NULL); + } + if (tcache_enabled_get() == false) { + tcache_enabled_set(false); /* Memoize. */ + return (NULL); + } + return (tcache_create(choose_arena(NULL))); + } + if (tcache == TCACHE_STATE_PURGATORY) { + /* + * Make a note that an allocator function was called + * after tcache_thread_cleanup() was called. + */ + tcache = TCACHE_STATE_REINCARNATED; + tcache_tsd_set(&tcache); + return (NULL); + } + if (tcache == TCACHE_STATE_REINCARNATED) + return (NULL); + not_reached(); + } + + return (tcache); +} + +JEMALLOC_INLINE void +tcache_event(tcache_t *tcache) +{ + + if (TCACHE_GC_INCR == 0) + return; + + tcache->ev_cnt++; + assert(tcache->ev_cnt <= TCACHE_GC_INCR); + if (tcache->ev_cnt == TCACHE_GC_INCR) { + size_t binind = tcache->next_gc_bin; + tcache_bin_t *tbin = &tcache->tbins[binind]; + tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + + if (tbin->low_water > 0) { + /* + * Flush (ceiling) 3/4 of the objects below the low + * water mark. + */ + if (binind < NBINS) { + tcache_bin_flush_small(tbin, binind, + tbin->ncached - tbin->low_water + + (tbin->low_water >> 2), tcache); + } else { + tcache_bin_flush_large(tbin, binind, + tbin->ncached - tbin->low_water + + (tbin->low_water >> 2), tcache); + } + /* + * Reduce fill count by 2X. Limit lg_fill_div such that + * the fill count is always at least 1. + */ + if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) + >= 1) + tbin->lg_fill_div++; + } else if (tbin->low_water < 0) { + /* + * Increase fill count by 2X. Make sure lg_fill_div + * stays greater than 0. 
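+ *
+ * (Illustrative numbers, not from the header: with ncached_max == 200
+ * (TCACHE_NSLOTS_SMALL_MAX) and lg_fill_div == 2, a refill places
+ * 200 >> 2 == 50 objects in the bin; decrementing lg_fill_div to 1
+ * doubles that to 100.)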
+ */ + if (tbin->lg_fill_div > 1) + tbin->lg_fill_div--; + } + tbin->low_water = tbin->ncached; + + tcache->next_gc_bin++; + if (tcache->next_gc_bin == nhbins) + tcache->next_gc_bin = 0; + tcache->ev_cnt = 0; + } +} + +JEMALLOC_INLINE void * +tcache_alloc_easy(tcache_bin_t *tbin) +{ + void *ret; + + if (tbin->ncached == 0) { + tbin->low_water = -1; + return (NULL); + } + tbin->ncached--; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; + ret = tbin->avail[tbin->ncached]; + return (ret); +} + +JEMALLOC_INLINE void * +tcache_alloc_small(tcache_t *tcache, size_t size, bool zero) +{ + void *ret; + size_t binind; + tcache_bin_t *tbin; + + binind = SMALL_SIZE2BIN(size); + assert(binind < NBINS); + tbin = &tcache->tbins[binind]; + ret = tcache_alloc_easy(tbin); + if (ret == NULL) { + ret = tcache_alloc_small_hard(tcache, tbin, binind); + if (ret == NULL) + return (NULL); + } + assert(arena_salloc(ret, false) == arena_bin_info[binind].reg_size); + + if (zero == false) { + if (config_fill) { + if (opt_junk) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (opt_zero) + memset(ret, 0, size); + } + } else { + if (config_fill && opt_junk) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + memset(ret, 0, size); + } + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += arena_bin_info[binind].reg_size; + tcache_event(tcache); + return (ret); +} + +JEMALLOC_INLINE void * +tcache_alloc_large(tcache_t *tcache, size_t size, bool zero) +{ + void *ret; + size_t binind; + tcache_bin_t *tbin; + + size = PAGE_CEILING(size); + assert(size <= tcache_maxclass); + binind = NBINS + (size >> LG_PAGE) - 1; + assert(binind < nhbins); + tbin = &tcache->tbins[binind]; + ret = tcache_alloc_easy(tbin); + if (ret == NULL) { + /* + * Only allocate one large object at a time, because it's quite + * expensive to create one and not use it. 
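+ * A miss therefore falls straight through to a single
+ * arena_malloc_large() call, in contrast with the small-object path,
+ * where tcache_alloc_small_hard() refills the bin in bulk.
+ * (Illustrative arithmetic, assuming LG_PAGE == 12: an 8192-byte request
+ * computes binind == NBINS + (8192 >> 12) - 1, i.e. the second large
+ * bin.)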
+ */ + ret = arena_malloc_large(tcache->arena, size, zero); + if (ret == NULL) + return (NULL); + } else { + if (config_prof) { + arena_chunk_t *chunk = + (arena_chunk_t *)CHUNK_ADDR2BASE(ret); + size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> + LG_PAGE); + chunk->map[pageind-map_bias].bits &= + ~CHUNK_MAP_CLASS_MASK; + } + if (zero == false) { + if (config_fill) { + if (opt_junk) + memset(ret, 0xa5, size); + else if (opt_zero) + memset(ret, 0, size); + } + } else { + VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + memset(ret, 0, size); + } + + if (config_stats) + tbin->tstats.nrequests++; + if (config_prof) + tcache->prof_accumbytes += size; + } + + tcache_event(tcache); + return (ret); +} + +JEMALLOC_INLINE void +tcache_dalloc_small(tcache_t *tcache, void *ptr) +{ + arena_t *arena; + arena_chunk_t *chunk; + arena_run_t *run; + arena_bin_t *bin; + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + size_t pageind, binind; + arena_chunk_map_t *mapelm; + + assert(arena_salloc(ptr, false) <= SMALL_MAXCLASS); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = chunk->arena; + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapelm = &chunk->map[pageind-map_bias]; + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + (mapelm->bits >> LG_PAGE)) << LG_PAGE)); + bin = run->bin; + binind = ((uintptr_t)bin - (uintptr_t)&arena->bins) / + sizeof(arena_bin_t); + assert(binind < NBINS); + + if (config_fill && opt_junk) + arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (tbin->ncached == tbin_info->ncached_max) { + tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >> + 1), tcache); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->avail[tbin->ncached] = ptr; + tbin->ncached++; + + tcache_event(tcache); +} + +JEMALLOC_INLINE void +tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size) +{ + size_t binind; + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert((size & PAGE_MASK) == 0); + assert(arena_salloc(ptr, false) > SMALL_MAXCLASS); + assert(arena_salloc(ptr, false) <= tcache_maxclass); + + binind = NBINS + (size >> LG_PAGE) - 1; + + if (config_fill && opt_junk) + memset(ptr, 0x5a, size); + + tbin = &tcache->tbins[binind]; + tbin_info = &tcache_bin_info[binind]; + if (tbin->ncached == tbin_info->ncached_max) { + tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >> + 1), tcache); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->avail[tbin->ncached] = ptr; + tbin->ncached++; + + tcache_event(tcache); +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/tcache.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/huge.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/huge.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/huge.h (working copy) @@ -0,0 +1,40 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + 
+#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +/* Huge allocation statistics. */ +extern uint64_t huge_nmalloc; +extern uint64_t huge_ndalloc; +extern size_t huge_allocated; + +/* Protects chunk-related data structures. */ +extern malloc_mutex_t huge_mtx; + +void *huge_malloc(size_t size, bool zero); +void *huge_palloc(size_t size, size_t alignment, bool zero); +void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, + size_t extra); +void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero); +void huge_dalloc(void *ptr, bool unmap); +size_t huge_salloc(const void *ptr); +prof_ctx_t *huge_prof_ctx_get(const void *ptr); +void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); +bool huge_boot(void); +void huge_prefork(void); +void huge_postfork_parent(void); +void huge_postfork_child(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/huge.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/rb.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/rb.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/rb.h (working copy) @@ -0,0 +1,973 @@ +/*- + ******************************************************************************* + * + * cpp macro implementation of left-leaning 2-3 red-black trees. Parent + * pointers are not used, and color bits are stored in the least significant + * bit of right-child pointers (if RB_COMPACT is defined), thus making node + * linkage as compact as is possible for red-black trees. + * + * Usage: + * + * #include + * #include + * #define NDEBUG // (Optional, see assert(3).) + * #include + * #define RB_COMPACT // (Optional, embed color bits in right-child pointers.) + * #include + * ... + * + ******************************************************************************* + */ + +#ifndef RB_H_ +#define RB_H_ + +#if 0 +__FBSDID("$FreeBSD$"); +#endif + +#ifdef RB_COMPACT +/* Node structure. */ +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right_red; \ +} +#else +#define rb_node(a_type) \ +struct { \ + a_type *rbn_left; \ + a_type *rbn_right; \ + bool rbn_red; \ +} +#endif + +/* Root structure. */ +#define rb_tree(a_type) \ +struct { \ + a_type *rbt_root; \ + a_type rbt_nil; \ +} + +/* Left accessors. */ +#define rbtn_left_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_left) +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ + (a_node)->a_field.rbn_left = a_left; \ +} while (0) + +#ifdef RB_COMPACT +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ + & ((ssize_t)-2))) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ + | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ +} while (0) + +/* Color accessors. 
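+ *
+ * (Sketch, not part of the imported header; "ex_node_t", "n", and "link"
+ * are invented names. With RB_COMPACT the color lives in bit 0 of
+ * rbn_right_red, so the accessors below amount to
+ *
+ *	right = (ex_node_t *)((uintptr_t)n->link.rbn_right_red &
+ *	    ~(uintptr_t)1);
+ *	red = (bool)((uintptr_t)n->link.rbn_right_red & (uintptr_t)1);
+ *
+ * which relies on node structures being at least 2-byte aligned.)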
*/ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ + & ((size_t)1))) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ + | ((ssize_t)a_red)); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ + (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ + (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ +} while (0) +#else +/* Right accessors. */ +#define rbtn_right_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_right) +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ + (a_node)->a_field.rbn_right = a_right; \ +} while (0) + +/* Color accessors. */ +#define rbtn_red_get(a_type, a_field, a_node) \ + ((a_node)->a_field.rbn_red) +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ + (a_node)->a_field.rbn_red = (a_red); \ +} while (0) +#define rbtn_red_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = true; \ +} while (0) +#define rbtn_black_set(a_type, a_field, a_node) do { \ + (a_node)->a_field.rbn_red = false; \ +} while (0) +#endif + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ + rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) + +/* Tree initializer. */ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ + rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ + rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ +} while (0) + +/* Internal utility macros. */ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != &(a_rbt)->rbt_nil) { \ + for (; \ + rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ + (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ + (r_node) = (a_root); \ + if ((r_node) != &(a_rbt)->rbt_nil) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != \ + &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ + (r_node))) { \ + } \ + } \ +} while (0) + +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ + rbtn_right_set(a_type, a_field, (a_node), \ + rbtn_left_get(a_type, a_field, (r_node))); \ + rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ + (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ + rbtn_left_set(a_type, a_field, (a_node), \ + rbtn_right_get(a_type, a_field, (r_node))); \ + rbtn_right_set(a_type, a_field, (r_node), (a_node)); \ +} while (0) + +/* + * The rb_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to rb_gen(). 
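+ *
+ * (Illustrative usage, not part of the imported header, reusing the ex_
+ * names documented for rb_gen() below: a header exporting a non-static
+ * tree API might carry
+ *
+ *	rb_proto(, ex_, ex_t, ex_node_t)
+ *
+ * while exactly one .c file invokes
+ * rb_gen(, ex_, ex_t, ex_node_t, ex_link, ex_cmp) to emit the matching
+ * definitions.)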
+ */ + +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree); \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, a_type *key); \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node); \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg); \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); + +/* + * The rb_gen() macro generates a type-specific red-black tree implementation, + * based on the above cpp macros. + * + * Arguments: + * + * a_attr : Function attribute for generated functions (ex: static). + * a_prefix : Prefix for generated functions (ex: ex_). + * a_rb_type : Type for red-black tree data structure (ex: ex_t). + * a_type : Type for red-black tree node data structure (ex: ex_node_t). + * a_field : Name of red-black tree node linkage (ex: ex_link). + * a_cmp : Node comparison function name, with the following prototype: + * int (a_cmp *)(a_type *a_node, a_type *a_other); + * ^^^^^^ + * or a_key + * Interpretation of comparision function return values: + * -1 : a_node < a_other + * 0 : a_node == a_other + * 1 : a_node > a_other + * In all cases, the a_node or a_key macro argument is the first + * argument to the comparison function, which makes it possible + * to write comparison functions that treat the first argument + * specially. + * + * Assuming the following setup: + * + * typedef struct ex_node_s ex_node_t; + * struct ex_node_s { + * rb_node(ex_node_t) ex_link; + * }; + * typedef rb_tree(ex_node_t) ex_t; + * rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp) + * + * The following API is generated: + * + * static void + * ex_new(ex_t *tree); + * Description: Initialize a red-black tree structure. + * Args: + * tree: Pointer to an uninitialized red-black tree object. + * + * static ex_node_t * + * ex_first(ex_t *tree); + * static ex_node_t * + * ex_last(ex_t *tree); + * Description: Get the first/last node in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: First/last node in tree, or NULL if tree is empty. + * + * static ex_node_t * + * ex_next(ex_t *tree, ex_node_t *node); + * static ex_node_t * + * ex_prev(ex_t *tree, ex_node_t *node); + * Description: Get node's successor/predecessor. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: A node in tree. + * Ret: node's successor/predecessor in tree, or NULL if node is + * last/first. + * + * static ex_node_t * + * ex_search(ex_t *tree, ex_node_t *key); + * Description: Search for node that matches key. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or NULL if no match. + * + * static ex_node_t * + * ex_nsearch(ex_t *tree, ex_node_t *key); + * static ex_node_t * + * ex_psearch(ex_t *tree, ex_node_t *key); + * Description: Search for node that matches key. 
If no match is found, + * return what would be key's successor/predecessor, were + * key in tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * key : Search key. + * Ret: Node in tree that matches key, or if no match, hypothetical node's + * successor/predecessor (NULL if no successor/predecessor). + * + * static void + * ex_insert(ex_t *tree, ex_node_t *node); + * Description: Insert node into tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node to be inserted into tree. + * + * static void + * ex_remove(ex_t *tree, ex_node_t *node); + * Description: Remove node from tree. + * Args: + * tree: Pointer to an initialized red-black tree object. + * node: Node in tree to be removed. + * + * static ex_node_t * + * ex_iter(ex_t *tree, ex_node_t *start, ex_node_t *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * static ex_node_t * + * ex_reverse_iter(ex_t *tree, ex_node_t *start, ex_node *(*cb)(ex_t *, + * ex_node_t *, void *), void *arg); + * Description: Iterate forward/backward over tree, starting at node. If + * tree is modified, iteration must be immediately + * terminated by the callback function that causes the + * modification. + * Args: + * tree : Pointer to an initialized red-black tree object. + * start: Node at which to start iteration, or NULL to start at + * first/last node. + * cb : Callback function, which is called for each node during + * iteration. Under normal circumstances the callback function + * should return NULL, which causes iteration to continue. If a + * callback function returns non-NULL, iteration is immediately + * terminated and the non-NULL return value is returned by the + * iterator. This is useful for re-starting iteration after + * modifying tree. + * arg : Opaque pointer passed to cb(). + * Ret: NULL if iteration completed, or the non-NULL callback return value + * that caused termination of the iteration. 
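+ *
+ * Hedged end-to-end sketch (not part of the imported header; the "key"
+ * field, ex_cmp(), and the node variables are invented for illustration):
+ *
+ *	typedef struct ex_node_s ex_node_t;
+ *	struct ex_node_s {
+ *		rb_node(ex_node_t)	ex_link;
+ *		int			key;
+ *	};
+ *	typedef rb_tree(ex_node_t) ex_t;
+ *
+ *	static int
+ *	ex_cmp(ex_node_t *a, ex_node_t *b)
+ *	{
+ *		return ((a->key > b->key) - (a->key < b->key));
+ *	}
+ *	rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
+ *
+ *	ex_t t;
+ *	ex_node_t n = { .key = 42 };
+ *	ex_new(&t);
+ *	ex_insert(&t, &n);
+ *	if (ex_search(&t, &(ex_node_t){ .key = 42 }) != NULL)
+ *		ex_remove(&t, &n);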
+ */ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_rbt_type *rbtree) { \ + rb_new(a_type, a_field, rbtree); \ +} \ +a_attr a_type * \ +a_prefix##first(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##last(a_rbt_type *rbtree) { \ + a_type *ret; \ + rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != &rbtree->rbt_nil); \ + ret = &rbtree->rbt_nil; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != &rbtree->rbt_nil); \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ + a_type *ret; \ + if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ + a_field, node), ret); \ + } else { \ + a_type *tnode = rbtree->rbt_root; \ + assert(tnode != &rbtree->rbt_nil); \ + ret = &rbtree->rbt_nil; \ + while (true) { \ + int cmp = (a_cmp)(node, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + break; \ + } \ + assert(tnode != &rbtree->rbt_nil); \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ + a_type *ret; \ + int cmp; \ + ret = rbtree->rbt_root; \ + while (ret != &rbtree->rbt_nil \ + && (cmp = (a_cmp)(key, ret)) != 0) { \ + if (cmp < 0) { \ + ret = rbtn_left_get(a_type, a_field, ret); \ + } else { \ + ret = rbtn_right_get(a_type, a_field, ret); \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = &rbtree->rbt_nil; \ + while (tnode != &rbtree->rbt_nil) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + ret = tnode; \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ + a_type *ret; \ + a_type *tnode = rbtree->rbt_root; \ + ret = &rbtree->rbt_nil; \ + while (tnode != &rbtree->rbt_nil) { \ + int cmp = (a_cmp)(key, tnode); \ + if (cmp < 0) { \ + tnode = rbtn_left_get(a_type, a_field, tnode); \ + } else if (cmp > 0) { \ + ret = tnode; \ + tnode = rbtn_right_get(a_type, a_field, tnode); \ + } else { \ + ret = tnode; \ + break; \ + } \ + } \ + if (ret == &rbtree->rbt_nil) { 
\ + ret = (NULL); \ + } \ + return (ret); \ +} \ +a_attr void \ +a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } path[sizeof(void *) << 4], *pathp; \ + rbt_node_new(a_type, a_field, rbtree, node); \ + /* Wind. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + assert(cmp != 0); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + } \ + } \ + pathp->node = node; \ + /* Unwind. */ \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + a_type *cnode = pathp->node; \ + if (pathp->cmp < 0) { \ + a_type *left = pathp[1].node; \ + rbtn_left_set(a_type, a_field, cnode, left); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (rbtn_red_get(a_type, a_field, leftleft)) { \ + /* Fix up 4-node. */ \ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, cnode, tnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } else { \ + a_type *right = pathp[1].node; \ + rbtn_right_set(a_type, a_field, cnode, right); \ + if (rbtn_red_get(a_type, a_field, right)) { \ + a_type *left = rbtn_left_get(a_type, a_field, cnode); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + /* Split 4-node. */ \ + rbtn_black_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, right); \ + rbtn_red_set(a_type, a_field, cnode); \ + } else { \ + /* Lean left. */ \ + a_type *tnode; \ + bool tred = rbtn_red_get(a_type, a_field, cnode); \ + rbtn_rotate_left(a_type, a_field, cnode, tnode); \ + rbtn_color_set(a_type, a_field, tnode, tred); \ + rbtn_red_set(a_type, a_field, cnode); \ + cnode = tnode; \ + } \ + } else { \ + return; \ + } \ + } \ + pathp->node = cnode; \ + } \ + /* Set root, and make it black. */ \ + rbtree->rbt_root = path->node; \ + rbtn_black_set(a_type, a_field, rbtree->rbt_root); \ +} \ +a_attr void \ +a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ + struct { \ + a_type *node; \ + int cmp; \ + } *pathp, *nodep, path[sizeof(void *) << 4]; \ + /* Wind. */ \ + nodep = NULL; /* Silence compiler warning. */ \ + path->node = rbtree->rbt_root; \ + for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + int cmp = pathp->cmp = a_cmp(node, pathp->node); \ + if (cmp < 0) { \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } else { \ + pathp[1].node = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + if (cmp == 0) { \ + /* Find node's successor, in preparation for swap. */ \ + pathp->cmp = 1; \ + nodep = pathp; \ + for (pathp++; pathp->node != &rbtree->rbt_nil; \ + pathp++) { \ + pathp->cmp = -1; \ + pathp[1].node = rbtn_left_get(a_type, a_field, \ + pathp->node); \ + } \ + break; \ + } \ + } \ + } \ + assert(nodep->node == node); \ + pathp--; \ + if (pathp->node != node) { \ + /* Swap node with its successor. */ \ + bool tred = rbtn_red_get(a_type, a_field, pathp->node); \ + rbtn_color_set(a_type, a_field, pathp->node, \ + rbtn_red_get(a_type, a_field, node)); \ + rbtn_left_set(a_type, a_field, pathp->node, \ + rbtn_left_get(a_type, a_field, node)); \ + /* If node's successor is its right child, the following code */\ + /* will do the wrong thing for the right child pointer. 
*/\ + /* However, it doesn't matter, because the pointer will be */\ + /* properly set when the successor is pruned. */\ + rbtn_right_set(a_type, a_field, pathp->node, \ + rbtn_right_get(a_type, a_field, node)); \ + rbtn_color_set(a_type, a_field, node, tred); \ + /* The pruned leaf node's child pointers are never accessed */\ + /* again, so don't bother setting them to nil. */\ + nodep->node = pathp->node; \ + pathp->node = node; \ + if (nodep == path) { \ + rbtree->rbt_root = nodep->node; \ + } else { \ + if (nodep[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } else { \ + rbtn_right_set(a_type, a_field, nodep[-1].node, \ + nodep->node); \ + } \ + } \ + } else { \ + a_type *left = rbtn_left_get(a_type, a_field, node); \ + if (left != &rbtree->rbt_nil) { \ + /* node has no successor, but it has a left child. */\ + /* Splice node out, without losing the left child. */\ + assert(rbtn_red_get(a_type, a_field, node) == false); \ + assert(rbtn_red_get(a_type, a_field, left)); \ + rbtn_black_set(a_type, a_field, left); \ + if (pathp == path) { \ + rbtree->rbt_root = left; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + left); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + left); \ + } \ + } \ + return; \ + } else if (pathp == path) { \ + /* The tree only contained one node. */ \ + rbtree->rbt_root = &rbtree->rbt_nil; \ + return; \ + } \ + } \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + /* Prune red node, which requires no fixup. */ \ + assert(pathp[-1].cmp < 0); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + &rbtree->rbt_nil); \ + return; \ + } \ + /* The node to be pruned is black, so unwind until balance is */\ + /* restored. */\ + pathp->node = &rbtree->rbt_nil; \ + for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ + assert(pathp->cmp != 0); \ + if (pathp->cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + assert(rbtn_red_get(a_type, a_field, pathp[1].node) \ + == false); \ + if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + a_type *tnode; \ + if (rbtn_red_get(a_type, a_field, rightleft)) { \ + /* In the following diagrams, ||, //, and \\ */\ + /* indicate the path to the removed node. */\ + /* */\ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + /* */\ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + /* */\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root. 
*/\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + a_type *right = rbtn_right_get(a_type, a_field, \ + pathp->node); \ + a_type *rightleft = rbtn_left_get(a_type, a_field, \ + right); \ + if (rbtn_red_get(a_type, a_field, rightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, rightleft); \ + rbtn_rotate_right(a_type, a_field, right, tnode); \ + rbtn_right_set(a_type, a_field, pathp->node, tnode);\ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* // \ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + a_type *tnode; \ + rbtn_red_set(a_type, a_field, pathp->node); \ + rbtn_rotate_left(a_type, a_field, pathp->node, \ + tnode); \ + pathp->node = tnode; \ + } \ + } \ + } else { \ + a_type *left; \ + rbtn_right_set(a_type, a_field, pathp->node, \ + pathp[1].node); \ + left = rbtn_left_get(a_type, a_field, pathp->node); \ + if (rbtn_red_get(a_type, a_field, left)) { \ + a_type *tnode; \ + a_type *leftright = rbtn_right_get(a_type, a_field, \ + left); \ + a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ + leftright); \ + if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (r) */\ + a_type *unode; \ + rbtn_black_set(a_type, a_field, leftrightleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + unode); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_right_set(a_type, a_field, unode, tnode); \ + rbtn_rotate_left(a_type, a_field, unode, tnode); \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (r) (b) */\ + /* \ */\ + /* (b) */\ + /* / */\ + /* (b) */\ + assert(leftright != &rbtree->rbt_nil); \ + rbtn_red_set(a_type, a_field, leftright); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + rbtn_black_set(a_type, a_field, tnode); \ + } \ + /* Balance restored, but rotation modified subtree */\ + /* root, which may actually be the tree root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + } \ + return; \ + } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (rbtn_red_get(a_type, a_field, leftleft)) { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, pathp->node); \ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root. 
*/\ + assert((uintptr_t)pathp > (uintptr_t)path); \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, pathp[-1].node, \ + tnode); \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(r) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + rbtn_black_set(a_type, a_field, pathp->node); \ + /* Balance restored. */ \ + return; \ + } \ + } else { \ + a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ + if (rbtn_red_get(a_type, a_field, leftleft)) { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (r) */\ + a_type *tnode; \ + rbtn_black_set(a_type, a_field, leftleft); \ + rbtn_rotate_right(a_type, a_field, pathp->node, \ + tnode); \ + /* Balance restored, but rotation modified */\ + /* subtree root, which may actually be the tree */\ + /* root. */\ + if (pathp == path) { \ + /* Set root. */ \ + rbtree->rbt_root = tnode; \ + } else { \ + if (pathp[-1].cmp < 0) { \ + rbtn_left_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } else { \ + rbtn_right_set(a_type, a_field, \ + pathp[-1].node, tnode); \ + } \ + } \ + return; \ + } else { \ + /* || */\ + /* pathp(b) */\ + /* / \\ */\ + /* (b) (b) */\ + /* / */\ + /* (b) */\ + rbtn_red_set(a_type, a_field, left); \ + } \ + } \ + } \ + } \ + /* Set root. */ \ + rbtree->rbt_root = path->node; \ + assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ +} \ +a_attr a_type * \ +a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == &rbtree->rbt_nil) { \ + return (&rbtree->rbt_nil); \ + } else { \ + a_type *ret; \ + if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ + a_field, node), cb, arg)) != &rbtree->rbt_nil \ + || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp < 0) { \ + a_type *ret; \ + if ((ret = a_prefix##iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != \ + &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } else if (cmp > 0) { \ + return (a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ + a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##iter_start(rbtree, start, rbtree->rbt_root, \ + cb, arg); \ + } else { \ + ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + if (node == &rbtree->rbt_nil) { \ + return (&rbtree->rbt_nil); \ + } else { \ 
+ a_type *ret; \ + if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != \ + &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ + a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \ + void *arg) { \ + int cmp = a_cmp(start, node); \ + if (cmp > 0) { \ + a_type *ret; \ + if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != \ + &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else if (cmp < 0) { \ + return (a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } else { \ + a_type *ret; \ + if ((ret = cb(rbtree, node, arg)) != NULL) { \ + return (ret); \ + } \ + return (a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg)); \ + } \ +} \ +a_attr a_type * \ +a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ + a_type *ret; \ + if (start != NULL) { \ + ret = a_prefix##reverse_iter_start(rbtree, start, \ + rbtree->rbt_root, cb, arg); \ + } else { \ + ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ + cb, arg); \ + } \ + if (ret == &rbtree->rbt_nil) { \ + ret = NULL; \ + } \ + return (ret); \ +} + +#endif /* RB_H_ */ Property changes on: contrib/jemalloc/include/jemalloc/internal/rb.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/ckh.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/ckh.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/ckh.h (working copy) @@ -0,0 +1,90 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct ckh_s ckh_t; +typedef struct ckhc_s ckhc_t; + +/* Typedefs to allow easy function pointer passing. */ +typedef void ckh_hash_t (const void *, unsigned, size_t *, size_t *); +typedef bool ckh_keycomp_t (const void *, const void *); + +/* Maintain counters used to get an idea of performance. */ +/* #define CKH_COUNT */ +/* Print counter values in ckh_delete() (requires CKH_COUNT). */ +/* #define CKH_VERBOSE */ + +/* + * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit + * one bucket per L1 cache line. + */ +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +/* Hash table cell. */ +struct ckhc_s { + const void *key; + const void *data; +}; + +struct ckh_s { +#ifdef CKH_COUNT + /* Counters used to get an idea of performance. */ + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; +#endif + + /* Used for pseudo-random number generation. 
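For reference, the iterators generated by rb.h above visit nodes in order and stop as soon as the callback returns non-NULL, and that value (or NULL once the traversal completes) is what a_prefix##iter() hands back. The standalone sketch below illustrates only that callback contract; a sorted array stands in for the in-order walk, the rbtree argument is dropped, and all names are illustrative rather than taken from this patch.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct { int key; } node_t;

    /* Visit nodes in order; stop and return as soon as cb returns non-NULL. */
    static node_t *
    iter(node_t *nodes, size_t n, node_t *(*cb)(node_t *, void *), void *arg)
    {
        size_t i;

        for (i = 0; i < n; i++) {
            node_t *ret = cb(&nodes[i], arg);
            if (ret != NULL)
                return (ret);
        }
        return (NULL);
    }

    /* Callback: return the first node whose key is >= the threshold in *arg. */
    static node_t *
    first_ge_cb(node_t *node, void *arg)
    {
        return (node->key >= *(int *)arg ? node : NULL);
    }

    int
    main(void)
    {
        node_t nodes[] = {{10}, {20}, {30}, {40}};
        int threshold = 25;
        node_t *hit = iter(nodes, 4, first_ge_cb, &threshold);

        printf("%d\n", hit != NULL ? hit->key : -1);  /* Prints 30. */
        return (0);
    }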
*/ +#define CKH_A 1103515241 +#define CKH_C 12347 + uint32_t prng_state; + + /* Total number of items. */ + size_t count; + + /* + * Minimum and current number of hash table buckets. There are + * 2^LG_CKH_BUCKET_CELLS cells per bucket. + */ + unsigned lg_minbuckets; + unsigned lg_curbuckets; + + /* Hash and comparison functions. */ + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; + + /* Hash table with 2^lg_curbuckets buckets. */ + ckhc_t *tab; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp); +void ckh_delete(ckh_t *ckh); +size_t ckh_count(ckh_t *ckh); +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); +bool ckh_insert(ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, + void **data); +bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); +void ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, + size_t *hash2); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1, + size_t *hash2); +bool ckh_pointer_keycomp(const void *k1, const void *k2); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/ckh.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/prof.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/prof.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/prof.h (working copy) @@ -0,0 +1,535 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_thr_cnt_s prof_thr_cnt_t; +typedef struct prof_ctx_s prof_ctx_t; +typedef struct prof_tdata_s prof_tdata_t; + +/* Option defaults. */ +#define PROF_PREFIX_DEFAULT "jeprof" +#define LG_PROF_SAMPLE_DEFAULT 0 +#define LG_PROF_INTERVAL_DEFAULT -1 + +/* + * Hard limit on stack backtrace depth. The version of prof_backtrace() that + * is based on __builtin_return_address() necessarily has a hard-coded number + * of backtrace frame handlers, and should be kept in sync with this setting. + */ +#define PROF_BT_MAX 128 + +/* Maximum number of backtraces to store in each per thread LRU cache. */ +#define PROF_TCMAX 1024 + +/* Initial hash table size. */ +#define PROF_CKH_MINITEMS 64 + +/* Size of memory buffer to use when writing dump files. */ +#define PROF_DUMP_BUFSIZE 65536 + +/* Size of stack-allocated buffer used by prof_printf(). */ +#define PROF_PRINTF_BUFSIZE 128 + +/* + * Number of mutexes shared among all ctx's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. 
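As a quick sanity check of the bucket sizing in ckh.h above: on an LP64 system with 64-byte L1 lines, LG_CKH_BUCKET_CELLS works out to 2, i.e. four 16-byte cells per bucket, which is exactly one cache line. The sketch below uses local copies of the constants (the my_* names are illustrative, not from the patch) and assumes LP64.

    #include <assert.h>
    #include <stddef.h>

    /* Local stand-ins for the ckh.h bucket-sizing arithmetic (LP64 assumed). */
    #define MY_LG_CACHELINE    6   /* 64-byte L1 lines (LG_CACHELINE). */
    #define MY_LG_SIZEOF_PTR   3   /* 8-byte pointers. */
    #define MY_LG_BUCKET_CELLS (MY_LG_CACHELINE - MY_LG_SIZEOF_PTR - 1)

    struct cell { const void *key, *data; };   /* Mirrors ckhc_s. */

    int
    main(void)
    {
        /* 2^2 = 4 cells of 16 bytes each: one bucket fills one cache line. */
        assert(MY_LG_BUCKET_CELLS == 2);
        assert((1U << MY_LG_BUCKET_CELLS) * sizeof(struct cell) ==
            (size_t)(1U << MY_LG_CACHELINE));
        return (0);
    }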
+ */ +#define PROF_NCTX_LOCKS 1024 + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct prof_bt_s { + /* Backtrace, stored as len program counters. */ + void **vec; + unsigned len; +}; + +#ifdef JEMALLOC_PROF_LIBGCC +/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ +typedef struct { + prof_bt_t *bt; + unsigned nignore; + unsigned max; +} prof_unwind_data_t; +#endif + +struct prof_cnt_s { + /* + * Profiling counters. An allocation/deallocation pair can operate on + * different prof_thr_cnt_t objects that are linked into the same + * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go + * negative. In principle it is possible for the *bytes counters to + * overflow/underflow, but a general solution would require something + * like 128-bit counters; this implementation doesn't bother to solve + * that problem. + */ + int64_t curobjs; + int64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; +}; + +struct prof_thr_cnt_s { + /* Linkage into prof_ctx_t's cnts_ql. */ + ql_elm(prof_thr_cnt_t) cnts_link; + + /* Linkage into thread's LRU. */ + ql_elm(prof_thr_cnt_t) lru_link; + + /* + * Associated context. If a thread frees an object that it did not + * allocate, it is possible that the context is not cached in the + * thread's hash table, in which case it must be able to look up the + * context, insert a new prof_thr_cnt_t into the thread's hash table, + * and link it into the prof_ctx_t's cnts_ql. + */ + prof_ctx_t *ctx; + + /* + * Threads use memory barriers to update the counters. Since there is + * only ever one writer, the only challenge is for the reader to get a + * consistent read of the counters. + * + * The writer uses this series of operations: + * + * 1) Increment epoch to an odd number. + * 2) Update counters. + * 3) Increment epoch to an even number. + * + * The reader must assure 1) that the epoch is even while it reads the + * counters, and 2) that the epoch doesn't change between the time it + * starts and finishes reading the counters. + */ + unsigned epoch; + + /* Profiling counters. */ + prof_cnt_t cnts; +}; + +struct prof_ctx_s { + /* Associated backtrace. */ + prof_bt_t *bt; + + /* Protects cnt_merged and cnts_ql. */ + malloc_mutex_t *lock; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* When threads exit, they merge their stats into cnt_merged. */ + prof_cnt_t cnt_merged; + + /* + * List of profile counters, one for each thread that has allocated in + * this context. + */ + ql_head(prof_thr_cnt_t) cnts_ql; +}; + +struct prof_tdata_s { + /* + * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a + * cache of backtraces, with associated thread-specific prof_thr_cnt_t + * objects. Other threads may read the prof_thr_cnt_t contents, but no + * others will ever write them. + * + * Upon thread exit, the thread must merge all the prof_thr_cnt_t + * counter data into the associated prof_ctx_t objects, and unlink/free + * the prof_thr_cnt_t objects. + */ + ckh_t bt2cnt; + + /* LRU for contents of bt2cnt. */ + ql_head(prof_thr_cnt_t) lru_ql; + + /* Backtrace vector, used for calls to prof_backtrace(). */ + void **vec; + + /* Sampling state. 
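The prof_thr_cnt_s comment above describes a single-writer epoch scheme: the epoch is odd while the counters are being updated and even once they are stable, and a reader retries until it sees the same even epoch before and after copying. The sketch below is one way that reader can be written; volatile stands in for the mb_write()/mb_read() barriers the real code relies on, so this is only an illustration of the protocol, not code from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for prof_cnt_t plus the epoch field described above. */
    typedef struct {
        volatile unsigned epoch;     /* Even while the counters are stable. */
        volatile int64_t curobjs;
        volatile int64_t curbytes;
    } cnt_t;

    /* Writer side: odd epoch while updating, even again when done. */
    static void
    cnt_update(cnt_t *c, int64_t objs, int64_t bytes)
    {
        c->epoch++;          /* Now odd. */
        c->curobjs += objs;
        c->curbytes += bytes;
        c->epoch++;          /* Even again. */
    }

    /* Reader side: retry until a snapshot falls entirely within one even epoch. */
    static void
    cnt_snapshot(const cnt_t *c, int64_t *objs, int64_t *bytes)
    {
        unsigned e0, e1;

        do {
            e0 = c->epoch;
            *objs = c->curobjs;
            *bytes = c->curbytes;
            e1 = c->epoch;
        } while ((e0 & 1) != 0 || e0 != e1);
    }

    int
    main(void)
    {
        cnt_t c = {0, 0, 0};
        int64_t o, b;

        cnt_update(&c, 1, 4096);
        cnt_snapshot(&c, &o, &b);
        printf("%lld %lld\n", (long long)o, (long long)b);  /* 1 4096 */
        return (0);
    }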
*/ + uint64_t prng_state; + uint64_t threshold; + uint64_t accum; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern bool opt_prof; +/* + * Even if opt_prof is true, sampling can be temporarily disabled by setting + * opt_prof_active to false. No locking is used when updating opt_prof_active, + * so there are no guarantees regarding how long it will take for all threads + * to notice state changes. + */ +extern bool opt_prof_active; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern char opt_prof_prefix[PATH_MAX + 1]; + +/* + * Profile dump interval, measured in bytes allocated. Each arena triggers a + * profile dump when it reaches this threshold. The effect is that the + * interval between profile dumps averages prof_interval, though the actual + * interval between dumps will tend to be sporadic, and the interval will be a + * maximum of approximately (prof_interval * narenas). + */ +extern uint64_t prof_interval; + +/* + * If true, promote small sampled objects to large objects, since small run + * headers do not have embedded profile context pointers. + */ +extern bool prof_promote; + +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(prof_bt_t *bt, unsigned nignore); +prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); +void prof_idump(void); +bool prof_mdump(const char *filename); +void prof_gdump(void); +prof_tdata_t *prof_tdata_init(void); +void prof_tdata_cleanup(void *arg); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#define PROF_ALLOC_PREP(nignore, size, ret) do { \ + prof_tdata_t *prof_tdata; \ + prof_bt_t bt; \ + \ + assert(size == s2u(size)); \ + \ + prof_tdata = *prof_tdata_tsd_get(); \ + if (prof_tdata == NULL) { \ + prof_tdata = prof_tdata_init(); \ + if (prof_tdata == NULL) { \ + ret = NULL; \ + break; \ + } \ + } \ + \ + if (opt_prof_active == false) { \ + /* Sampling is currently inactive, so avoid sampling. */\ + ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ + } else if (opt_lg_prof_sample == 0) { \ + /* Don't bother with sampling logic, since sampling */\ + /* interval is 1. */\ + bt_init(&bt, prof_tdata->vec); \ + prof_backtrace(&bt, nignore); \ + ret = prof_lookup(&bt); \ + } else { \ + if (prof_tdata->threshold == 0) { \ + /* Initialize. Seed the prng differently for */\ + /* each thread. */\ + prof_tdata->prng_state = \ + (uint64_t)(uintptr_t)&size; \ + prof_sample_threshold_update(prof_tdata); \ + } \ + \ + /* Determine whether to capture a backtrace based on */\ + /* whether size is enough for prof_accum to reach */\ + /* prof_tdata->threshold. However, delay updating */\ + /* these variables until prof_{m,re}alloc(), because */\ + /* we don't know for sure that the allocation will */\ + /* succeed. */\ + /* */\ + /* Use subtraction rather than addition to avoid */\ + /* potential integer overflow. 
*/\ + if (size >= prof_tdata->threshold - \ + prof_tdata->accum) { \ + bt_init(&bt, prof_tdata->vec); \ + prof_backtrace(&bt, nignore); \ + ret = prof_lookup(&bt); \ + } else \ + ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ + } \ +} while (0) + +#ifndef JEMALLOC_ENABLE_INLINE +malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) + +void prof_sample_threshold_update(prof_tdata_t *prof_tdata); +prof_ctx_t *prof_ctx_get(const void *ptr); +void prof_ctx_set(const void *ptr, prof_ctx_t *ctx); +bool prof_sample_accum_update(size_t size); +void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt); +void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, + size_t old_size, prof_ctx_t *old_ctx); +void prof_free(const void *ptr, size_t size); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) +/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */ +malloc_tsd_externs(prof_tdata, prof_tdata_t *) +malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, + prof_tdata_cleanup) + +JEMALLOC_INLINE void +prof_sample_threshold_update(prof_tdata_t *prof_tdata) +{ + uint64_t r; + double u; + + cassert(config_prof); + + /* + * Compute sample threshold as a geometrically distributed random + * variable with mean (2^opt_lg_prof_sample). + * + * __ __ + * | log(u) | 1 + * prof_tdata->threshold = | -------- |, where p = ------------------- + * | log(1-p) | opt_lg_prof_sample + * 2 + * + * For more information on the math, see: + * + * Non-Uniform Random Variate Generation + * Luc Devroye + * Springer-Verlag, New York, 1986 + * pp 500 + * (http://cg.scs.carleton.ca/~luc/rnbookindex.html) + */ + prng64(r, 53, prof_tdata->prng_state, + UINT64_C(6364136223846793005), UINT64_C(1442695040888963407)); + u = (double)r * (1.0/9007199254740992.0L); + prof_tdata->threshold = (uint64_t)(log(u) / + log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample)))) + + (uint64_t)1U; +} + +JEMALLOC_INLINE prof_ctx_t * +prof_ctx_get(const void *ptr) +{ + prof_ctx_t *ret; + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk != ptr) { + /* Region. */ + ret = arena_prof_ctx_get(ptr); + } else + ret = huge_prof_ctx_get(ptr); + + return (ret); +} + +JEMALLOC_INLINE void +prof_ctx_set(const void *ptr, prof_ctx_t *ctx) +{ + arena_chunk_t *chunk; + + cassert(config_prof); + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk != ptr) { + /* Region. */ + arena_prof_ctx_set(ptr, ctx); + } else + huge_prof_ctx_set(ptr, ctx); +} + +JEMALLOC_INLINE bool +prof_sample_accum_update(size_t size) +{ + prof_tdata_t *prof_tdata; + + cassert(config_prof); + /* Sampling logic is unnecessary if the interval is 1. */ + assert(opt_lg_prof_sample != 0); + + prof_tdata = *prof_tdata_tsd_get(); + assert(prof_tdata != NULL); + + /* Take care to avoid integer overflow. */ + if (size >= prof_tdata->threshold - prof_tdata->accum) { + prof_tdata->accum -= (prof_tdata->threshold - size); + /* Compute new sample threshold. 
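The closed form used by prof_sample_threshold_update() above can be exercised on its own to see that the mean threshold comes out near 2^opt_lg_prof_sample. The sketch below mirrors the formula with rand() standing in for the prng64() state machine; the function name and the choice of lg_sample are illustrative, and the program needs -lm to link.

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Draw a geometric(p) threshold with p = 1/2^lg_sample, as in the header. */
    static uint64_t
    sample_threshold(unsigned lg_sample)
    {
        double u = (rand() + 1.0) / ((double)RAND_MAX + 2.0);  /* u in (0,1). */
        double p = 1.0 / (double)((uint64_t)1U << lg_sample);

        return ((uint64_t)(log(u) / log(1.0 - p)) + 1);
    }

    int
    main(void)
    {
        double sum = 0.0;
        int i;

        for (i = 0; i < 100000; i++)
            sum += (double)sample_threshold(19);   /* lg_sample=19 => mean ~512 KiB. */
        printf("mean threshold ~= %.0f bytes\n", sum / 100000);
        return (0);
    }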
*/ + prof_sample_threshold_update(prof_tdata); + while (prof_tdata->accum >= prof_tdata->threshold) { + prof_tdata->accum -= prof_tdata->threshold; + prof_sample_threshold_update(prof_tdata); + } + return (false); + } else { + prof_tdata->accum += size; + return (true); + } +} + +JEMALLOC_INLINE void +prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt) +{ + + cassert(config_prof); + assert(ptr != NULL); + assert(size == isalloc(ptr, true)); + + if (opt_lg_prof_sample != 0) { + if (prof_sample_accum_update(size)) { + /* + * Don't sample. For malloc()-like allocation, it is + * always possible to tell in advance how large an + * object's usable size will be, so there should never + * be a difference between the size passed to + * PROF_ALLOC_PREP() and prof_malloc(). + */ + assert((uintptr_t)cnt == (uintptr_t)1U); + } + } + + if ((uintptr_t)cnt > (uintptr_t)1U) { + prof_ctx_set(ptr, cnt->ctx); + + cnt->epoch++; + /*********/ + mb_write(); + /*********/ + cnt->cnts.curobjs++; + cnt->cnts.curbytes += size; + if (opt_prof_accum) { + cnt->cnts.accumobjs++; + cnt->cnts.accumbytes += size; + } + /*********/ + mb_write(); + /*********/ + cnt->epoch++; + /*********/ + mb_write(); + /*********/ + } else + prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); +} + +JEMALLOC_INLINE void +prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt, + size_t old_size, prof_ctx_t *old_ctx) +{ + prof_thr_cnt_t *told_cnt; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); + + if (ptr != NULL) { + assert(size == isalloc(ptr, true)); + if (opt_lg_prof_sample != 0) { + if (prof_sample_accum_update(size)) { + /* + * Don't sample. The size passed to + * PROF_ALLOC_PREP() was larger than what + * actually got allocated, so a backtrace was + * captured for this allocation, even though + * its actual size was insufficient to cross + * the sample threshold. + */ + cnt = (prof_thr_cnt_t *)(uintptr_t)1U; + } + } + } + + if ((uintptr_t)old_ctx > (uintptr_t)1U) { + told_cnt = prof_lookup(old_ctx->bt); + if (told_cnt == NULL) { + /* + * It's too late to propagate OOM for this realloc(), + * so operate directly on old_cnt->ctx->cnt_merged. + */ + malloc_mutex_lock(old_ctx->lock); + old_ctx->cnt_merged.curobjs--; + old_ctx->cnt_merged.curbytes -= old_size; + malloc_mutex_unlock(old_ctx->lock); + told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; + } + } else + told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; + + if ((uintptr_t)told_cnt > (uintptr_t)1U) + told_cnt->epoch++; + if ((uintptr_t)cnt > (uintptr_t)1U) { + prof_ctx_set(ptr, cnt->ctx); + cnt->epoch++; + } else + prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U); + /*********/ + mb_write(); + /*********/ + if ((uintptr_t)told_cnt > (uintptr_t)1U) { + told_cnt->cnts.curobjs--; + told_cnt->cnts.curbytes -= old_size; + } + if ((uintptr_t)cnt > (uintptr_t)1U) { + cnt->cnts.curobjs++; + cnt->cnts.curbytes += size; + if (opt_prof_accum) { + cnt->cnts.accumobjs++; + cnt->cnts.accumbytes += size; + } + } + /*********/ + mb_write(); + /*********/ + if ((uintptr_t)told_cnt > (uintptr_t)1U) + told_cnt->epoch++; + if ((uintptr_t)cnt > (uintptr_t)1U) + cnt->epoch++; + /*********/ + mb_write(); /* Not strictly necessary. 
*/ +} + +JEMALLOC_INLINE void +prof_free(const void *ptr, size_t size) +{ + prof_ctx_t *ctx = prof_ctx_get(ptr); + + cassert(config_prof); + + if ((uintptr_t)ctx > (uintptr_t)1) { + assert(size == isalloc(ptr, true)); + prof_thr_cnt_t *tcnt = prof_lookup(ctx->bt); + + if (tcnt != NULL) { + tcnt->epoch++; + /*********/ + mb_write(); + /*********/ + tcnt->cnts.curobjs--; + tcnt->cnts.curbytes -= size; + /*********/ + mb_write(); + /*********/ + tcnt->epoch++; + /*********/ + mb_write(); + /*********/ + } else { + /* + * OOM during free() cannot be propagated, so operate + * directly on cnt->ctx->cnt_merged. + */ + malloc_mutex_lock(ctx->lock); + ctx->cnt_merged.curobjs--; + ctx->cnt_merged.curbytes -= size; + malloc_mutex_unlock(ctx->lock); + } + } +} +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/prof.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/quarantine.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/quarantine.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/quarantine.h (working copy) @@ -0,0 +1,24 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Default per thread quarantine size if valgrind is enabled. */ +#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void quarantine(void *ptr); +bool quarantine_boot(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + Property changes on: contrib/jemalloc/include/jemalloc/internal/quarantine.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/chunk.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/chunk.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/chunk.h (working copy) @@ -0,0 +1,58 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* + * Size and alignment of memory chunks that are allocated by the OS's virtual + * memory system. + */ +#define LG_CHUNK_DEFAULT 22 + +/* Return the chunk address for allocation address a. */ +#define CHUNK_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~chunksize_mask)) + +/* Return the chunk offset of address a. */ +#define CHUNK_ADDR2OFFSET(a) \ + ((size_t)((uintptr_t)(a) & chunksize_mask)) + +/* Return the smallest chunk multiple that is >= s. 
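Because chunks are naturally aligned, the CHUNK_ADDR2BASE()/CHUNK_ADDR2OFFSET()/CHUNK_CEILING() macros above are pure mask arithmetic. The standalone check below uses local copies with the default 4 MiB chunk (LG_CHUNK_DEFAULT is 22); the MY_* names are illustrative, and the last address-based assert is the same "does the pointer map to its own chunk base" test that the allocator uses to tell huge allocations from arena regions.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Local stand-ins for chunksize/chunksize_mask and the chunk.h macros. */
    #define MY_CHUNKSIZE      ((size_t)1 << 22)                /* 4 MiB. */
    #define MY_CHUNKSIZE_MASK (MY_CHUNKSIZE - 1)
    #define MY_ADDR2BASE(a)   ((void *)((uintptr_t)(a) & ~(uintptr_t)MY_CHUNKSIZE_MASK))
    #define MY_ADDR2OFFSET(a) ((size_t)((uintptr_t)(a) & MY_CHUNKSIZE_MASK))
    #define MY_CEILING(s)     (((s) + MY_CHUNKSIZE_MASK) & ~MY_CHUNKSIZE_MASK)

    int
    main(void)
    {
        uintptr_t a = 0x12345678;
        void *base = MY_ADDR2BASE(a);

        assert((uintptr_t)base == 0x12000000);           /* Rounded down to the chunk. */
        assert(MY_ADDR2OFFSET(a) == (size_t)0x345678);   /* Offset within the chunk. */
        assert(MY_ADDR2BASE(base) == base);              /* A chunk base maps to itself. */
        assert(MY_CEILING((size_t)1) == MY_CHUNKSIZE);   /* Rounded up. */
        assert(MY_CEILING(MY_CHUNKSIZE) == MY_CHUNKSIZE);/* Already a multiple. */
        return (0);
    }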
*/ +#define CHUNK_CEILING(s) \ + (((s) + chunksize_mask) & ~chunksize_mask) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +extern size_t opt_lg_chunk; + +/* Protects stats_chunks; currently not used for any other purpose. */ +extern malloc_mutex_t chunks_mtx; +/* Chunk statistics. */ +extern chunk_stats_t stats_chunks; + +extern rtree_t *chunks_rtree; + +extern size_t chunksize; +extern size_t chunksize_mask; /* (chunksize - 1). */ +extern size_t chunk_npages; +extern size_t map_bias; /* Number of arena chunk header pages. */ +extern size_t arena_maxclass; /* Max size class for arenas. */ + +void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero); +void chunk_dealloc(void *chunk, size_t size, bool unmap); +bool chunk_boot0(void); +bool chunk_boot1(void); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ + +#include "jemalloc/internal/chunk_dss.h" +#include "jemalloc/internal/chunk_mmap.h" Property changes on: contrib/jemalloc/include/jemalloc/internal/chunk.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/atomic.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/atomic.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/atomic.h (working copy) @@ -0,0 +1,240 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +#define atomic_read_uint64(p) atomic_add_uint64(p, 0) +#define atomic_read_uint32(p) atomic_add_uint32(p, 0) +#define atomic_read_z(p) atomic_add_z(p, 0) +#define atomic_read_u(p) atomic_add_u(p, 0) + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); +uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); +uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); +uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); +size_t atomic_add_z(size_t *p, size_t x); +size_t atomic_sub_z(size_t *p, size_t x); +unsigned atomic_add_u(unsigned *p, unsigned x); +unsigned atomic_sub_u(unsigned *p, unsigned x); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) +/******************************************************************************/ +/* 64-bit operations. 
*/ +#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} +#elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); +} +#elif (defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + asm volatile ( + "lock; xaddq %0, %1;" + : "+r" (x), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (x); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + x = (uint64_t)(-(int64_t)x); + asm volatile ( + "lock; xaddq %0, %1;" + : "+r" (x), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (x); +} +#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) +JEMALLOC_INLINE uint64_t +atomic_add_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint64_t +atomic_sub_uint64(uint64_t *p, uint64_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} +#else +# if (LG_SIZEOF_PTR == 3) +# error "Missing implementation for 64-bit atomic operations" +# endif +#endif + +/******************************************************************************/ +/* 32-bit operations. */ +#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} +#elif (defined(JEMALLOC_OSATOMIC)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); +} +#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (x), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (x); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + x = (uint32_t)(-(int32_t)x); + asm volatile ( + "lock; xaddl %0, %1;" + : "+r" (x), "=m" (*p) /* Outputs. */ + : "m" (*p) /* Inputs. */ + ); + + return (x); +} +#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) +JEMALLOC_INLINE uint32_t +atomic_add_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_add_and_fetch(p, x)); +} + +JEMALLOC_INLINE uint32_t +atomic_sub_uint32(uint32_t *p, uint32_t x) +{ + + return (__sync_sub_and_fetch(p, x)); +} +#else +# error "Missing implementation for 32-bit atomic operations" +#endif + +/******************************************************************************/ +/* size_t operations. 
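The atomic_read_*() wrappers above are literally an atomic add of zero. A minimal sketch of that idiom using the GCC/Clang __sync builtins (the first implementation branch above); the counter is hypothetical and the sketch assumes a toolchain that provides these builtins.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t counter;

    int
    main(void)
    {
        /* __sync_add_and_fetch() returns the post-increment value. */
        uint32_t after = __sync_add_and_fetch(&counter, 5);
        /* "Read" is implemented as an atomic add of zero. */
        uint32_t now = __sync_add_and_fetch(&counter, 0);

        printf("%u %u\n", after, now);  /* Prints: 5 5 */
        return (0);
    }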
*/ +JEMALLOC_INLINE size_t +atomic_add_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE size_t +atomic_sub_z(size_t *p, size_t x) +{ + +#if (LG_SIZEOF_PTR == 3) + return ((size_t)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_PTR == 2) + return ((size_t)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} + +/******************************************************************************/ +/* unsigned operations. */ +JEMALLOC_INLINE unsigned +atomic_add_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); +#endif +} + +JEMALLOC_INLINE unsigned +atomic_sub_u(unsigned *p, unsigned x) +{ + +#if (LG_SIZEOF_INT == 3) + return ((unsigned)atomic_add_uint64((uint64_t *)p, + (uint64_t)-((int64_t)x))); +#elif (LG_SIZEOF_INT == 2) + return ((unsigned)atomic_add_uint32((uint32_t *)p, + (uint32_t)-((int32_t)x))); +#endif +} +/******************************************************************************/ +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/atomic.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/bitmap.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/bitmap.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/bitmap.h (working copy) @@ -0,0 +1,184 @@ +/******************************************************************************/ +#ifdef JEMALLOC_H_TYPES + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS + +typedef struct bitmap_level_s bitmap_level_t; +typedef struct bitmap_info_s bitmap_info_t; +typedef unsigned long bitmap_t; +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Number of bits per group. */ +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* Maximum number of levels possible. */ +#define BITMAP_MAX_LEVELS \ + (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + +#endif /* JEMALLOC_H_TYPES */ +/******************************************************************************/ +#ifdef JEMALLOC_H_STRUCTS + +struct bitmap_level_s { + /* Offset of this level's groups within the array of groups. */ + size_t group_offset; +}; + +struct bitmap_info_s { + /* Logical number of bits in bitmap (stored at bottom level). */ + size_t nbits; + + /* Number of levels necessary for nbits. */ + unsigned nlevels; + + /* + * Only the first (nlevels+1) elements are used, and levels are ordered + * bottom to top (e.g. the bottom level is stored in levels[0]). 
+ */ + bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; +}; + +#endif /* JEMALLOC_H_STRUCTS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_EXTERNS + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +size_t bitmap_info_ngroups(const bitmap_info_t *binfo); +size_t bitmap_size(size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); + +#endif /* JEMALLOC_H_EXTERNS */ +/******************************************************************************/ +#ifdef JEMALLOC_H_INLINES + +#ifndef JEMALLOC_ENABLE_INLINE +bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); +bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); +void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) +JEMALLOC_INLINE bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; + bitmap_t rg = bitmap[rgoff]; + /* The bitmap is full iff the root group is 0. */ + return (rg == 0); +} + +JEMALLOC_INLINE bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t g; + + assert(bit < binfo->nbits); + goff = bit >> LG_BITMAP_GROUP_NBITS; + g = bitmap[goff]; + return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); +} + +JEMALLOC_INLINE void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit) == false); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit)); + /* Propagate group state transitions up the tree. */ + if (g == 0) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (g != 0) + break; + } + } +} + +/* sfu: set first unset. */ +JEMALLOC_INLINE size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t bit; + bitmap_t g; + unsigned i; + + assert(bitmap_full(bitmap, binfo) == false); + + i = binfo->nlevels - 1; + g = bitmap[binfo->levels[i].group_offset]; + bit = ffsl(g) - 1; + while (i > 0) { + i--; + g = bitmap[binfo->levels[i].group_offset + bit]; + bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1); + } + + bitmap_set(bitmap, binfo, bit); + return (bit); +} + +JEMALLOC_INLINE void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) +{ + size_t goff; + bitmap_t *gp; + bitmap_t g; + bool propagate; + + assert(bit < binfo->nbits); + assert(bitmap_get(bitmap, binfo, bit)); + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[goff]; + g = *gp; + propagate = (g == 0); + assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + assert(bitmap_get(bitmap, binfo, bit) == false); + /* Propagate group state transitions up the tree. 
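A point worth keeping in mind when reading bitmap_sfu() above: the groups are stored inverted, so a 1 bit in storage means the logical bit is still unset (see bitmap_get()), and "set first unset" reduces to ffsl() on a group. The sketch below works on a single group and ignores the multi-level summary tree; it assumes ffsl() as declared in <strings.h> on FreeBSD.

    #include <assert.h>
    #include <strings.h>   /* ffsl(), as used by bitmap_sfu(). */

    int
    main(void)
    {
        /* One bitmap group; a 1 bit means the logical bit is still unset. */
        unsigned long g = ~0UL;
        int bit;

        bit = ffsl(g) - 1;            /* First free slot is bit 0. */
        assert(bit == 0);
        g ^= 1UL << bit;              /* bitmap_set(): clear the storage bit. */

        bit = ffsl(g) - 1;            /* Next call finds bit 1. */
        assert(bit == 1);

        g ^= 1UL << 0;                /* bitmap_unset(): bit 0 is free again. */
        assert(ffsl(g) - 1 == 0);
        return (0);
    }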
*/ + if (propagate) { + unsigned i; + for (i = 1; i < binfo->nlevels; i++) { + bit = goff; + goff = bit >> LG_BITMAP_GROUP_NBITS; + gp = &bitmap[binfo->levels[i].group_offset + goff]; + g = *gp; + propagate = (g == 0); + assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) + == 0); + g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + *gp = g; + if (propagate == false) + break; + } + } +} + +#endif + +#endif /* JEMALLOC_H_INLINES */ +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/bitmap.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h =================================================================== --- contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h (revision 0) +++ contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h (working copy) @@ -0,0 +1,876 @@ +#include "libc_private.h" +#include "namespace.h" + +#include +#include +#include +#if !defined(SYS_write) && defined(__NR_write) +#define SYS_write __NR_write +#endif +#include +#include +#include + +#include +#include +#ifndef SIZE_T_MAX +# define SIZE_T_MAX SIZE_MAX +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef offsetof +# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +#include "un-namespace.h" +#include "libc_private.h" + +#define JEMALLOC_NO_DEMANGLE +#include "../jemalloc.h" + +#ifdef JEMALLOC_UTRACE +#include +#endif + +#ifdef JEMALLOC_VALGRIND +#include +#include +#endif + +#include "jemalloc/internal/private_namespace.h" + +#ifdef JEMALLOC_CC_SILENCE +#define UNUSED JEMALLOC_ATTR(unused) +#else +#define UNUSED +#endif + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool config_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool config_munmap = +#ifdef JEMALLOC_MUNMAP + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_tcache = +#ifdef JEMALLOC_TCACHE + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_valgrind = +#ifdef JEMALLOC_VALGRIND + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_ivsalloc = +#ifdef JEMALLOC_IVSALLOC + true +#else + false +#endif + ; + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include +#endif + +#ifdef JEMALLOC_ZONE +#include +#include 
+#include +#include +#endif + +#define RB_COMPACT +#include "jemalloc/internal/rb.h" +#include "jemalloc/internal/qr.h" +#include "jemalloc/internal/ql.h" + +/* + * jemalloc can conceptually be broken into components (arena, tcache, etc.), + * but there are circular dependencies that cannot be broken without + * substantial performance degradation. In order to reduce the effect on + * visual code flow, read the header files in multiple passes, with one of the + * following cpp variables defined during each pass: + * + * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data + * types. + * JEMALLOC_H_STRUCTS : Data structures. + * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. + * JEMALLOC_H_INLINES : Inline functions. + */ +/******************************************************************************/ +#define JEMALLOC_H_TYPES + +#define ALLOCM_LG_ALIGN_MASK ((int)0x3f) + +#define ZU(z) ((size_t)z) + +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif + +#ifdef JEMALLOC_DEBUG + /* Disable inlining to make debugging easier. */ +# define JEMALLOC_INLINE +# define inline +#else +# define JEMALLOC_ENABLE_INLINE +# define JEMALLOC_INLINE static inline +#endif + +/* Smallest size class to support. */ +#define LG_TINY_MIN 3 +#define TINY_MIN (1U << LG_TINY_MIN) + +/* + * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#ifndef LG_QUANTUM +# ifdef __i386__ +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# ifdef __sparc64__ +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# ifdef __s390x__ +# define LG_QUANTUM 4 +# endif +# ifdef __SH4__ +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + */ +#define LG_CACHELINE 6 +#define CACHELINE ((size_t)(1U << LG_CACHELINE)) +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define LG_PAGE STATIC_PAGE_SHIFT +#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT)) +#define PAGE_MASK ((size_t)(PAGE - 1)) + +/* Return the smallest pagesize multiple that is >= s. 
*/ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Return the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & (-(alignment)))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & (-(alignment))) + +#ifdef JEMALLOC_VALGRIND +/* + * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions + * so that when Valgrind reports errors, there are no extra stack frames + * in the backtraces. + * + * The size that is reported to valgrind must be consistent through a chain of + * malloc..realloc..realloc calls. Request size isn't recorded anywhere in + * jemalloc, so it is critical that all callers of these macros provide usize + * rather than request size. As a result, buffer overflow detection is + * technically weakened for the standard API, though it is generally accepted + * practice to consider any extra bytes reported by malloc_usable_size() as + * usable space. + */ +#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ + if (config_valgrind && opt_valgrind && cond) \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ +} while (0) +#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ + old_rzsize, zero) do { \ + if (config_valgrind && opt_valgrind) { \ + size_t rzsize = p2rz(ptr); \ + \ + if (ptr == old_ptr) { \ + VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ + usize, rzsize); \ + if (zero && old_usize < usize) { \ + VALGRIND_MAKE_MEM_DEFINED( \ + (void *)((uintptr_t)ptr + \ + old_usize), usize - old_usize); \ + } \ + } else { \ + if (old_ptr != NULL) { \ + VALGRIND_FREELIKE_BLOCK(old_ptr, \ + old_rzsize); \ + } \ + if (ptr != NULL) { \ + size_t copy_size = (old_usize < usize) \ + ? 
old_usize : usize; \ + size_t tail_size = usize - copy_size; \ + VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ + rzsize, false); \ + if (copy_size > 0) { \ + VALGRIND_MAKE_MEM_DEFINED(ptr, \ + copy_size); \ + } \ + if (zero && tail_size > 0) { \ + VALGRIND_MAKE_MEM_DEFINED( \ + (void *)((uintptr_t)ptr + \ + copy_size), tail_size); \ + } \ + } \ + } \ + } \ +} while (0) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ + if (config_valgrind && opt_valgrind) \ + VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \ +} while (0) +#else +#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) +#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) +#define VALGRIND_FREELIKE_BLOCK(addr, rzB) +#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) +#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) +#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) +#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ + old_rzsize, zero) +#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) +#endif + +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" + +#undef JEMALLOC_H_TYPES +/******************************************************************************/ +#define JEMALLOC_H_STRUCTS + +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" + +typedef struct { + uint64_t allocated; + uint64_t deallocated; +} thread_allocated_t; +/* + * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro + * argument. + */ +#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0}) + +#undef JEMALLOC_H_STRUCTS +/******************************************************************************/ +#define JEMALLOC_H_EXTERNS + +extern bool opt_abort; +extern bool opt_junk; +extern size_t opt_quarantine; +extern bool opt_redzone; +extern bool opt_utrace; +extern bool opt_valgrind; +extern bool opt_xmalloc; +extern bool opt_zero; +extern size_t opt_narenas; + +/* Number of CPUs. */ +extern unsigned ncpus; + +extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. 
*/ +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + */ +extern arena_t **arenas; +extern unsigned narenas; + +arena_t *arenas_extend(unsigned ind); +void arenas_cleanup(void *arg); +arena_t *choose_arena_hard(void); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); + +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" +#include "jemalloc/internal/prof.h" + +#undef JEMALLOC_H_EXTERNS +/******************************************************************************/ +#define JEMALLOC_H_INLINES + +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/mb.h" +#include "jemalloc/internal/extent.h" +#include "jemalloc/internal/base.h" +#include "jemalloc/internal/chunk.h" +#include "jemalloc/internal/huge.h" + +#ifndef JEMALLOC_ENABLE_INLINE +malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *) + +size_t s2u(size_t size); +size_t sa2u(size_t size, size_t alignment); +arena_t *choose_arena(arena_t *arena); +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +/* + * Map of pthread_self() --> arenas[???], used for selecting an arena to use + * for allocations. + */ +malloc_tsd_externs(arenas, arena_t *) +malloc_tsd_funcs(JEMALLOC_INLINE, arenas, arena_t *, NULL, arenas_cleanup) + +/* + * Compute usable size that would result from allocating an object with the + * specified size. + */ +JEMALLOC_INLINE size_t +s2u(size_t size) +{ + + if (size <= SMALL_MAXCLASS) + return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size); + if (size <= arena_maxclass) + return (PAGE_CEILING(size)); + return (CHUNK_CEILING(size)); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. + */ +JEMALLOC_INLINE size_t +sa2u(size_t size, size_t alignment) +{ + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each small + * size class, every object is aligned at the smallest power of two + * that is non-zero in the base two representation of the size. 
For + * example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = ALIGNMENT_CEILING(size, alignment); + /* + * (usize < size) protects against the combination of maximal + * alignment and size greater than maximal alignment. + */ + if (usize < size) { + /* size_t overflow. */ + return (0); + } + + if (usize <= arena_maxclass && alignment <= PAGE) { + if (usize <= SMALL_MAXCLASS) + return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size); + return (PAGE_CEILING(usize)); + } else { + size_t run_size; + + /* + * We can't achieve subpage alignment, so round up alignment + * permanently; it makes later calculations simpler. + */ + alignment = PAGE_CEILING(alignment); + usize = PAGE_CEILING(size); + /* + * (usize < size) protects against very large sizes within + * PAGE of SIZE_T_MAX. + * + * (usize + alignment < usize) protects against the + * combination of maximal alignment and usize large enough + * to cause overflow. This is similar to the first overflow + * check above, but it needs to be repeated due to the new + * usize value, which may now be *equal* to maximal + * alignment, whereas before we only detected overflow if the + * original size was *greater* than maximal alignment. + */ + if (usize < size || usize + alignment < usize) { + /* size_t overflow. */ + return (0); + } + + /* + * Calculate the size of the over-size run that arena_palloc() + * would need to allocate in order to guarantee the alignment. + * If the run wouldn't fit within a chunk, round up to a huge + * allocation size. + */ + run_size = usize + alignment - PAGE; + if (run_size <= arena_maxclass) + return (PAGE_CEILING(usize)); + return (CHUNK_CEILING(usize)); + } +} + +/* Choose an arena based on a per-thread value. 
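The alignment table in the sa2u() comment above follows from the fact that each small class's natural alignment is the lowest set bit of its size, which is what lets sa2u() return the small class directly when the requested alignment is no stricter than that. A standalone check of that observation (the helper name is illustrative; size & -size isolates the lowest set bit):

    #include <assert.h>
    #include <stddef.h>

    /* Smallest power of two that is non-zero in size's binary representation. */
    static size_t
    min_alignment(size_t size)
    {
        return (size & -size);   /* Isolate the lowest set bit. */
    }

    int
    main(void)
    {
        assert(min_alignment(96) == 32);    /*  1100000 */
        assert(min_alignment(160) == 32);   /* 10100000 */
        assert(min_alignment(192) == 64);   /* 11000000 */
        return (0);
    }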
*/ +JEMALLOC_INLINE arena_t * +choose_arena(arena_t *arena) +{ + arena_t *ret; + + if (arena != NULL) + return (arena); + + if ((ret = *arenas_tsd_get()) == NULL) { + ret = choose_arena_hard(); + assert(ret != NULL); + } + + return (ret); +} +#endif + +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/tcache.h" +#include "jemalloc/internal/arena.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/quarantine.h" + +#ifndef JEMALLOC_ENABLE_INLINE +void *imalloc(size_t size); +void *icalloc(size_t size); +void *ipalloc(size_t usize, size_t alignment, bool zero); +size_t isalloc(const void *ptr, bool demote); +size_t ivsalloc(const void *ptr, bool demote); +size_t u2rz(size_t usize); +size_t p2rz(const void *ptr); +void idalloc(void *ptr); +void iqalloc(void *ptr); +void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, + bool zero, bool no_move); +malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) +#endif + +#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) +JEMALLOC_INLINE void * +imalloc(size_t size) +{ + + assert(size != 0); + + if (size <= arena_maxclass) + return (arena_malloc(NULL, size, false, true)); + else + return (huge_malloc(size, false)); +} + +JEMALLOC_INLINE void * +icalloc(size_t size) +{ + + if (size <= arena_maxclass) + return (arena_malloc(NULL, size, true, true)); + else + return (huge_malloc(size, true)); +} + +JEMALLOC_INLINE void * +ipalloc(size_t usize, size_t alignment, bool zero) +{ + void *ret; + + assert(usize != 0); + assert(usize == sa2u(usize, alignment)); + + if (usize <= arena_maxclass && alignment <= PAGE) + ret = arena_malloc(NULL, usize, zero, true); + else { + if (usize <= arena_maxclass) { + ret = arena_palloc(choose_arena(NULL), usize, alignment, + zero); + } else if (alignment <= chunksize) + ret = huge_malloc(usize, zero); + else + ret = huge_palloc(usize, alignment, zero); + } + + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + return (ret); +} + +/* + * Typical usage: + * void *ptr = [...] + * size_t sz = isalloc(ptr, config_prof); + */ +JEMALLOC_INLINE size_t +isalloc(const void *ptr, bool demote) +{ + size_t ret; + arena_chunk_t *chunk; + + assert(ptr != NULL); + /* Demotion only makes sense if config_prof is true. */ + assert(config_prof || demote == false); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk != ptr) { + /* Region. */ + ret = arena_salloc(ptr, demote); + } else + ret = huge_salloc(ptr); + + return (ret); +} + +JEMALLOC_INLINE size_t +ivsalloc(const void *ptr, bool demote) +{ + + /* Return 0 if ptr is not within a chunk managed by jemalloc. 
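choose_arena() above prefers an explicitly supplied arena; otherwise it returns the arena cached in the calling thread's TSD slot, filling that slot via choose_arena_hard() on first use. A minimal sketch of the same caching pattern, using a GCC-style __thread variable in place of the malloc_tsd machinery; demo_arena_t and the demo_ helpers are placeholders.

	#include <stddef.h>

	/* Hypothetical stand-ins for arena_t and the slow-path chooser. */
	typedef struct demo_arena { int id; } demo_arena_t;

	static demo_arena_t demo_arena0 = { 0 };

	static demo_arena_t *
	demo_choose_arena_hard(void)
	{
		/* The real code balances over arenas[]; here we always pick arena 0. */
		return (&demo_arena0);
	}

	/* Thread-local cache playing the role of the arenas TSD slot. */
	static __thread demo_arena_t *demo_arena_cache = NULL;

	static demo_arena_t *
	demo_choose_arena(demo_arena_t *arena)
	{
		if (arena != NULL)
			return (arena);
		if (demo_arena_cache == NULL)
			demo_arena_cache = demo_choose_arena_hard();
		return (demo_arena_cache);
	}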
*/ + if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL) + return (0); + + return (isalloc(ptr, demote)); +} + +JEMALLOC_INLINE size_t +u2rz(size_t usize) +{ + size_t ret; + + if (usize <= SMALL_MAXCLASS) { + size_t binind = SMALL_SIZE2BIN(usize); + ret = arena_bin_info[binind].redzone_size; + } else + ret = 0; + + return (ret); +} + +JEMALLOC_INLINE size_t +p2rz(const void *ptr) +{ + size_t usize = isalloc(ptr, false); + + return (u2rz(usize)); +} + +JEMALLOC_INLINE void +idalloc(void *ptr) +{ + arena_chunk_t *chunk; + + assert(ptr != NULL); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk != ptr) + arena_dalloc(chunk->arena, chunk, ptr, true); + else + huge_dalloc(ptr, true); +} + +JEMALLOC_INLINE void +iqalloc(void *ptr) +{ + + if (config_fill && opt_quarantine) + quarantine(ptr); + else + idalloc(ptr); +} + +JEMALLOC_INLINE void * +iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, + bool no_move) +{ + void *ret; + size_t oldsize; + + assert(ptr != NULL); + assert(size != 0); + + oldsize = isalloc(ptr, config_prof); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + size_t usize, copysize; + + /* + * Existing object alignment is inadequate; allocate new space + * and copy. + */ + if (no_move) + return (NULL); + usize = sa2u(size + extra, alignment); + if (usize == 0) + return (NULL); + ret = ipalloc(usize, alignment, zero); + if (ret == NULL) { + if (extra == 0) + return (NULL); + /* Try again, without extra this time. */ + usize = sa2u(size, alignment); + if (usize == 0) + return (NULL); + ret = ipalloc(usize, alignment, zero); + if (ret == NULL) + return (NULL); + } + /* + * Copy at most size bytes (not size+extra), since the caller + * has no expectation that the extra bytes will be reliably + * preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(ret, ptr, copysize); + iqalloc(ptr); + return (ret); + } + + if (no_move) { + if (size <= arena_maxclass) { + return (arena_ralloc_no_move(ptr, oldsize, size, + extra, zero)); + } else { + return (huge_ralloc_no_move(ptr, oldsize, size, + extra)); + } + } else { + if (size + extra <= arena_maxclass) { + return (arena_ralloc(ptr, oldsize, size, extra, + alignment, zero, true)); + } else { + return (huge_ralloc(ptr, oldsize, size, extra, + alignment, zero)); + } + } +} + +malloc_tsd_externs(thread_allocated, thread_allocated_t) +malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t, + THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) +#endif + +#include "jemalloc/internal/prof.h" + +#undef JEMALLOC_H_INLINES +/******************************************************************************/ Property changes on: contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/include/jemalloc/jemalloc_defs.h =================================================================== --- contrib/jemalloc/include/jemalloc/jemalloc_defs.h (revision 0) +++ contrib/jemalloc/include/jemalloc/jemalloc_defs.h (working copy) @@ -0,0 +1,239 @@ +/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. 
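The iralloc() inline just above splits reallocation into two cases: if the existing object already satisfies the requested alignment it may be resized in place (or moved by the arena); otherwise the only option is allocate-new-and-copy, which is why no_move requests fail on that branch and why at most size bytes, never the extra, are copied. A libc-only sketch of that decision; demo_realloc_aligned() and its old_size parameter are hypothetical (the real code gets the old size from isalloc()), and alignment is assumed to be a power of two acceptable to posix_memalign().

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	static void *
	demo_realloc_aligned(void *ptr, size_t old_size, size_t size, size_t alignment)
	{
		void *new_ptr;
		size_t copy_size;

		if (((uintptr_t)ptr & (alignment - 1)) == 0) {
			/* Alignment already adequate; a plain realloc() suffices. */
			return (realloc(ptr, size));
		}
		/* Inadequate alignment: allocate new space and copy. */
		if (posix_memalign(&new_ptr, alignment, size) != 0)
			return (NULL);
		/* Copy at most the smaller of the old and new sizes. */
		copy_size = (size < old_size) ? size : old_size;
		memcpy(new_ptr, ptr, copy_size);
		free(ptr);
		return (new_ptr);
	}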
This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +/* #undef JEMALLOC_PREFIX */ +/* #undef JEMALLOC_CPREFIX */ + +/* + * Name mangling for public symbols is controlled by --with-mangling and + * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by + * these macro definitions. + */ +#define je_malloc_conf malloc_conf +#define je_malloc_message malloc_message +#define je_malloc malloc +#define je_calloc calloc +#define je_posix_memalign posix_memalign +#define je_aligned_alloc aligned_alloc +#define je_realloc realloc +#define je_free free +#define je_malloc_usable_size malloc_usable_size +#define je_malloc_stats_print malloc_stats_print +#define je_mallctl mallctl +#define je_mallctlnametomib mallctlnametomib +#define je_mallctlbymib mallctlbymib +/* #undef je_memalign */ +#define je_valloc valloc +#define je_allocm allocm +#define je_rallocm rallocm +#define je_sallocm sallocm +#define je_dallocm dallocm +#define je_nallocm nallocm + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE "" +#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#define CPU_SPINWAIT __asm__ volatile("pause") + +/* + * Defined if OSAtomic*() functions are available, as provided by Darwin, and + * documented in the atomic(3) manual page. + */ +/* #undef JEMALLOC_OSATOMIC */ + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines) + */ +#define JE_FORCE_SYNC_COMPARE_AND_SWAP_4 + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines) + */ +#define JE_FORCE_SYNC_COMPARE_AND_SWAP_8 + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +/* #undef JEMALLOC_OSSPIN */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +#define JEMALLOC_MALLOC_THREAD_CLEANUP + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +/* #undef JEMALLOC_THREADED_INIT */ + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. 
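CPU_SPINWAIT above expands to the x86 PAUSE hint, which belongs inside busy-wait loops so a hyper-threaded sibling is not starved while a thread spins. A small illustration of where such a hint sits, using C11 atomics for a toy spin lock; jemalloc's own locks are pthread mutexes or OSSpinLock, not this.

	#include <stdatomic.h>

	/* Matches the jemalloc_defs.h definition above (x86 PAUSE hint). */
	#define CPU_SPINWAIT __asm__ volatile("pause")

	static atomic_flag demo_lock = ATOMIC_FLAG_INIT;

	static void
	demo_spin_lock(void)
	{
		while (atomic_flag_test_and_set_explicit(&demo_lock,
		    memory_order_acquire)) {
			/* Tell a hyper-threaded sibling we are only spinning. */
			CPU_SPINWAIT;
		}
	}

	static void
	demo_spin_unlock(void)
	{
		atomic_flag_clear_explicit(&demo_lock, memory_order_release);
	}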
+ */ +#define JEMALLOC_MUTEX_INIT_CB 1 + +/* Defined if __attribute__((...)) syntax is supported. */ +#define JEMALLOC_HAVE_ATTR +#ifdef JEMALLOC_HAVE_ATTR +# define JEMALLOC_CATTR(s, a) __attribute__((s)) +# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,) +#else +# define JEMALLOC_CATTR(s, a) a +# define JEMALLOC_ATTR(s) JEMALLOC_CATTR(s,) +#endif + +/* Defined if sbrk() is supported. */ +#define JEMALLOC_HAVE_SBRK + +/* Non-empty if the tls_model attribute is supported. */ +#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) + +/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ +#define JEMALLOC_CC_SILENCE + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +/* #undef JEMALLOC_DEBUG */ + +/* JEMALLOC_STATS enables statistics calculation. */ +#define JEMALLOC_STATS + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. + * This makes it possible to allocate/deallocate objects without any locking + * when the cache is in the steady state. + */ +#define JEMALLOC_TCACHE + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * segment (DSS). + */ +#define JEMALLOC_DSS + +/* Support memory filling (junk/zero/quarantine/redzone). */ +#define JEMALLOC_FILL + +/* Support the experimental API. */ +#define JEMALLOC_EXPERIMENTAL + +/* Support utrace(2)-based tracing. */ +#define JEMALLOC_UTRACE + +/* Support Valgrind. */ +/* #undef JEMALLOC_VALGRIND */ + +/* Support optional abort() on OOM. */ +#define JEMALLOC_XMALLOC + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +#define JEMALLOC_LAZY_LOCK + +/* One page is 2^STATIC_PAGE_SHIFT bytes. */ +#define STATIC_PAGE_SHIFT 12 + +/* + * If defined, use munmap() to unmap freed chunks, rather than storing them for + * later reuse. This is automatically disabled if configuration determines + * that common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#define JEMALLOC_MUNMAP + +/* TLS is used to map arenas and magazine caches to threads. */ +#define JEMALLOC_TLS + +/* + * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside + * within jemalloc-owned chunks before dereferencing them. + */ +/* #undef JEMALLOC_IVSALLOC */ + +/* + * Define overrides for non-standard allocator-related functions if they + * are present on the system. + */ +/* #undef JEMALLOC_OVERRIDE_MEMALIGN */ +#define JEMALLOC_OVERRIDE_VALLOC + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ +/* #undef JEMALLOC_ZONE_VERSION */ + +/* If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). */ +/* #undef JEMALLOC_MREMAP_FIXED */ + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched. 
+ * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being + * unused, such that they will be discarded rather + * than swapped out. + */ +/* #undef JEMALLOC_PURGE_MADVISE_DONTNEED */ +#define JEMALLOC_PURGE_MADVISE_FREE +#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED +# define JEMALLOC_MADV_PURGE MADV_DONTNEED +#elif defined(JEMALLOC_PURGE_MADVISE_FREE) +# define JEMALLOC_MADV_PURGE MADV_FREE +#else +# error "No method defined for purging unused dirty pages." +#endif + +/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ +#define LG_SIZEOF_PTR 3 + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 Property changes on: contrib/jemalloc/include/jemalloc/jemalloc_defs.h ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/VERSION =================================================================== --- contrib/jemalloc/VERSION (revision 0) +++ contrib/jemalloc/VERSION (working copy) @@ -0,0 +1 @@ +1.0.0-254-g7ca0fdfb85b2a9fc7a112e158892c098e004385b Index: contrib/jemalloc/src/stats.c =================================================================== --- contrib/jemalloc/src/stats.c (revision 0) +++ contrib/jemalloc/src/stats.c (working copy) @@ -0,0 +1,550 @@ +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +#define CTL_GET(n, v, t) do { \ + size_t sz = sizeof(t); \ + xmallctl(n, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_I_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = i; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_J_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = j; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +#define CTL_IJ_GET(n, v, t) do { \ + size_t mib[6]; \ + size_t miblen = sizeof(mib) / sizeof(size_t); \ + size_t sz = sizeof(t); \ + xmallctlnametomib(n, mib, &miblen); \ + mib[2] = i; \ + mib[4] = j; \ + xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ +} while (0) + +/******************************************************************************/ +/* Data. */ + +bool opt_stats_print = false; + +size_t stats_cactive = 0; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
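The CTL_GET() macro at the top of stats.c wraps the read-only form of mallctl(): pass a result buffer and its length, and NULL/0 for the new-value pair. Unwrapped, and with the "epoch" bump that stats_print() performs so the counters are fresh, the call pattern looks roughly like this; error handling is omitted, and the malloc_np.h include is an assumption about where FreeBSD exposes mallctl() after this import (upstream it is <jemalloc/jemalloc.h>).

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <malloc_np.h>	/* assumed header for mallctl() on FreeBSD */

	static void
	demo_print_allocated(void)
	{
		uint64_t epoch = 1;
		size_t allocated, sz;

		/* Statistics are refreshed by writing the "epoch" counter. */
		sz = sizeof(epoch);
		mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

		/* Read-only query: result buffer + length, no new value. */
		sz = sizeof(allocated);
		mallctl("stats.allocated", &allocated, &sz, NULL, 0);
		printf("allocated: %zu\n", allocated);
	}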
*/ + +static void stats_arena_bins_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i); +static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i); +static void stats_arena_print(void (*write_cb)(void *, const char *), + void *cbopaque, unsigned i, bool bins, bool large); + +/******************************************************************************/ + +static void +stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i) +{ + size_t page; + bool config_tcache; + unsigned nbins, j, gap_start; + + CTL_GET("arenas.page", &page, size_t); + + CTL_GET("config.tcache", &config_tcache, bool); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "bins: bin size regs pgs allocated nmalloc" + " ndalloc nrequests nfills nflushes" + " newruns reruns curruns\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "bins: bin size regs pgs allocated nmalloc" + " ndalloc newruns reruns curruns\n"); + } + CTL_GET("arenas.nbins", &nbins, unsigned); + for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { + uint64_t nruns; + + CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); + if (nruns == 0) { + if (gap_start == UINT_MAX) + gap_start = j; + } else { + size_t reg_size, run_size, allocated; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t reruns; + size_t curruns; + + if (gap_start != UINT_MAX) { + if (j > gap_start + 1) { + /* Gap of more than one size class. */ + malloc_cprintf(write_cb, cbopaque, + "[%u..%u]\n", gap_start, + j - 1); + } else { + /* Gap of one size class. */ + malloc_cprintf(write_cb, cbopaque, + "[%u]\n", gap_start); + } + gap_start = UINT_MAX; + } + CTL_J_GET("arenas.bin.0.size", ®_size, size_t); + CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); + CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); + CTL_IJ_GET("stats.arenas.0.bins.0.allocated", + &allocated, size_t); + CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", + &nmalloc, uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", + &ndalloc, uint64_t); + if (config_tcache) { + CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", + &nrequests, uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.nfills", + &nfills, uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", + &nflushes, uint64_t); + } + CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns, + uint64_t); + CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns, + size_t); + if (config_tcache) { + malloc_cprintf(write_cb, cbopaque, + "%13u %5zu %4u %3zu %12zu %12"PRIu64 + " %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12zu\n", + j, reg_size, nregs, run_size / page, + allocated, nmalloc, ndalloc, nrequests, + nfills, nflushes, nruns, reruns, curruns); + } else { + malloc_cprintf(write_cb, cbopaque, + "%13u %5zu %4u %3zu %12zu %12"PRIu64 + " %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12zu\n", + j, reg_size, nregs, run_size / page, + allocated, nmalloc, ndalloc, nruns, reruns, + curruns); + } + } + } + if (gap_start != UINT_MAX) { + if (j > gap_start + 1) { + /* Gap of more than one size class. */ + malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", + gap_start, j - 1); + } else { + /* Gap of one size class. 
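The CTL_I_GET()/CTL_J_GET()/CTL_IJ_GET() variants used throughout stats_arena_bins_print() rely on the two-step MIB interface: mallctlnametomib() translates the dotted name once, then the arena and bin positions in the MIB are overwritten before each mallctlbymib() lookup. The same pattern unwrapped, with error handling omitted and the header assumption as in the previous sketch:

	#include <stddef.h>
	#include <stdint.h>
	#include <malloc_np.h>	/* assumed header for the mallctl*() family */

	/* Fetch stats.arenas.<i>.bins.<j>.nruns by patching the MIB indices. */
	static uint64_t
	demo_bin_nruns(unsigned i, unsigned j)
	{
		size_t mib[6];
		size_t miblen = sizeof(mib) / sizeof(size_t);
		uint64_t nruns;
		size_t sz = sizeof(nruns);

		mallctlnametomib("stats.arenas.0.bins.0.nruns", mib, &miblen);
		mib[2] = i;	/* arena index */
		mib[4] = j;	/* bin index */
		mallctlbymib(mib, miblen, &nruns, &sz, NULL, 0);
		return (nruns);
	}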
*/ + malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); + } + } +} + +static void +stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i) +{ + size_t page, nlruns, j; + ssize_t gap_start; + + CTL_GET("arenas.page", &page, size_t); + + malloc_cprintf(write_cb, cbopaque, + "large: size pages nmalloc ndalloc nrequests" + " curruns\n"); + CTL_GET("arenas.nlruns", &nlruns, size_t); + for (j = 0, gap_start = -1; j < nlruns; j++) { + uint64_t nmalloc, ndalloc, nrequests; + size_t run_size, curruns; + + CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc, + uint64_t); + CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc, + uint64_t); + CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, + uint64_t); + if (nrequests == 0) { + if (gap_start == -1) + gap_start = j; + } else { + CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); + CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, + size_t); + if (gap_start != -1) { + malloc_cprintf(write_cb, cbopaque, "[%zu]\n", + j - gap_start); + gap_start = -1; + } + malloc_cprintf(write_cb, cbopaque, + "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 + " %12zu\n", + run_size, run_size / page, nmalloc, ndalloc, + nrequests, curruns); + } + } + if (gap_start != -1) + malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); +} + +static void +stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, + unsigned i, bool bins, bool large) +{ + unsigned nthreads; + size_t page, pactive, pdirty, mapped; + uint64_t npurge, nmadvise, purged; + size_t small_allocated; + uint64_t small_nmalloc, small_ndalloc, small_nrequests; + size_t large_allocated; + uint64_t large_nmalloc, large_ndalloc, large_nrequests; + + CTL_GET("arenas.page", &page, size_t); + + CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned); + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t); + CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t); + CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t); + CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t); + CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," + " %"PRIu64" madvise%s, %"PRIu64" purged\n", + pactive, pdirty, npurge, npurge == 1 ? "" : "s", + nmadvise, nmadvise == 1 ? 
"" : "s", purged); + + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc ndalloc nrequests\n"); + CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); + CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); + CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); + CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", + small_allocated, small_nmalloc, small_ndalloc, small_nrequests); + CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); + CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); + CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); + CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", + large_allocated, large_nmalloc, large_ndalloc, large_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", + small_allocated + large_allocated, + small_nmalloc + large_nmalloc, + small_ndalloc + large_ndalloc, + small_nrequests + large_nrequests); + malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); + CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); + malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); + + if (bins) + stats_arena_bins_print(write_cb, cbopaque, i); + if (large) + stats_arena_lruns_print(write_cb, cbopaque, i); +} + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + int err; + uint64_t epoch; + size_t u64sz; + bool general = true; + bool merged = true; + bool unmerged = true; + bool bins = true; + bool large = true; + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. + * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write(": Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write(": Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. + */ + write_cb = je_malloc_message; + cbopaque = NULL; + } + + if (opts != NULL) { + unsigned i; + + for (i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { + case 'g': + general = false; + break; + case 'm': + merged = false; + break; + case 'a': + unmerged = false; + break; + case 'b': + bins = false; + break; + case 'l': + large = false; + break; + default:; + } + } + } + + write_cb(cbopaque, "___ Begin jemalloc statistics ___\n"); + if (general) { + int err; + const char *cpv; + bool bv; + unsigned uv; + ssize_t ssv; + size_t sv, bsz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); + CTL_GET("config.debug", &bv, bool); + malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", + bv ? 
"enabled" : "disabled"); + +#define OPT_WRITE_BOOL(n) \ + if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \ + == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s\n", bv ? "true" : "false"); \ + } +#define OPT_WRITE_SIZE_T(n) \ + if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \ + == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zu\n", sv); \ + } +#define OPT_WRITE_SSIZE_T(n) \ + if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \ + == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd\n", ssv); \ + } +#define OPT_WRITE_CHAR_P(n) \ + if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \ + == 0) { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": \"%s\"\n", cpv); \ + } + + write_cb(cbopaque, "Run-time option settings:\n"); + OPT_WRITE_BOOL(abort) + OPT_WRITE_SIZE_T(lg_chunk) + OPT_WRITE_SIZE_T(narenas) + OPT_WRITE_SSIZE_T(lg_dirty_mult) + OPT_WRITE_BOOL(stats_print) + OPT_WRITE_BOOL(junk) + OPT_WRITE_SIZE_T(quarantine) + OPT_WRITE_BOOL(redzone) + OPT_WRITE_BOOL(zero) + OPT_WRITE_BOOL(utrace) + OPT_WRITE_BOOL(valgrind) + OPT_WRITE_BOOL(xmalloc) + OPT_WRITE_BOOL(tcache) + OPT_WRITE_SSIZE_T(lg_tcache_max) + OPT_WRITE_BOOL(prof) + OPT_WRITE_CHAR_P(prof_prefix) + OPT_WRITE_BOOL(prof_active) + OPT_WRITE_SSIZE_T(lg_prof_sample) + OPT_WRITE_BOOL(prof_accum) + OPT_WRITE_SSIZE_T(lg_prof_interval) + OPT_WRITE_BOOL(prof_gdump) + OPT_WRITE_BOOL(prof_leak) + +#undef OPT_WRITE_BOOL +#undef OPT_WRITE_SIZE_T +#undef OPT_WRITE_SSIZE_T +#undef OPT_WRITE_CHAR_P + + malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); + + CTL_GET("arenas.narenas", &uv, unsigned); + malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv); + + malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", + sizeof(void *)); + + CTL_GET("arenas.quantum", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); + + CTL_GET("arenas.page", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + + CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); + if (ssv >= 0) { + malloc_cprintf(write_cb, cbopaque, + "Min active:dirty page ratio per arena: %u:1\n", + (1U << ssv)); + } else { + write_cb(cbopaque, + "Min active:dirty page ratio per arena: N/A\n"); + } + if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0)) + == 0) { + malloc_cprintf(write_cb, cbopaque, + "Maximum thread-cached size class: %zu\n", sv); + } + if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && + bv) { + CTL_GET("opt.lg_prof_sample", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "Average profile sample interval: %"PRIu64 + " (2^%zu)\n", (((uint64_t)1U) << sv), sv); + + CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); + if (ssv >= 0) { + malloc_cprintf(write_cb, cbopaque, + "Average profile dump interval: %"PRIu64 + " (2^%zd)\n", + (((uint64_t)1U) << ssv), ssv); + } else { + write_cb(cbopaque, + "Average profile dump interval: N/A\n"); + } + } + CTL_GET("opt.lg_chunk", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n", + (ZU(1) << sv), sv); + } + + if (config_stats) { + size_t *cactive; + size_t allocated, active, mapped; + size_t chunks_current, chunks_high; + uint64_t chunks_total; + size_t huge_allocated; + uint64_t huge_nmalloc, huge_ndalloc; + + CTL_GET("stats.cactive", &cactive, size_t *); + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + malloc_cprintf(write_cb, cbopaque, + "Allocated: %zu, active: %zu, mapped: %zu\n", 
+ allocated, active, mapped); + malloc_cprintf(write_cb, cbopaque, + "Current active ceiling: %zu\n", atomic_read_z(cactive)); + + /* Print chunk stats. */ + CTL_GET("stats.chunks.total", &chunks_total, uint64_t); + CTL_GET("stats.chunks.high", &chunks_high, size_t); + CTL_GET("stats.chunks.current", &chunks_current, size_t); + malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " + "highchunks curchunks\n"); + malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n", + chunks_total, chunks_high, chunks_current); + + /* Print huge stats. */ + CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t); + CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t); + CTL_GET("stats.huge.allocated", &huge_allocated, size_t); + malloc_cprintf(write_cb, cbopaque, + "huge: nmalloc ndalloc allocated\n"); + malloc_cprintf(write_cb, cbopaque, + " %12"PRIu64" %12"PRIu64" %12zu\n", + huge_nmalloc, huge_ndalloc, huge_allocated); + + if (merged) { + unsigned narenas; + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + bool initialized[narenas]; + size_t isz; + unsigned i, ninitialized; + + isz = sizeof(initialized); + xmallctl("arenas.initialized", initialized, + &isz, NULL, 0); + for (i = ninitialized = 0; i < narenas; i++) { + if (initialized[i]) + ninitialized++; + } + + if (ninitialized > 1 || unmerged == false) { + /* Print merged arena stats. */ + malloc_cprintf(write_cb, cbopaque, + "\nMerged arenas stats:\n"); + stats_arena_print(write_cb, cbopaque, + narenas, bins, large); + } + } + } + + if (unmerged) { + unsigned narenas; + + /* Print stats for each arena. */ + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + bool initialized[narenas]; + size_t isz; + unsigned i; + + isz = sizeof(initialized); + xmallctl("arenas.initialized", initialized, + &isz, NULL, 0); + + for (i = 0; i < narenas; i++) { + if (initialized[i]) { + malloc_cprintf(write_cb, + cbopaque, + "\narenas[%u]:\n", i); + stats_arena_print(write_cb, + cbopaque, i, bins, large); + } + } + } + } + } + write_cb(cbopaque, "--- End jemalloc statistics ---\n"); +} Property changes on: contrib/jemalloc/src/stats.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/mb.c =================================================================== --- contrib/jemalloc/src/mb.c (revision 0) +++ contrib/jemalloc/src/mb.c (working copy) @@ -0,0 +1,2 @@ +#define JEMALLOC_MB_C_ +#include "jemalloc/internal/jemalloc_internal.h" Property changes on: contrib/jemalloc/src/mb.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/mutex.c =================================================================== --- contrib/jemalloc/src/mutex.c (revision 0) +++ contrib/jemalloc/src/mutex.c (working copy) @@ -0,0 +1,153 @@ +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +#ifdef JEMALLOC_LAZY_LOCK +#include +#endif + +/******************************************************************************/ +/* Data. 
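mutex.c, which begins here, enables locking lazily: it exports its own pthread_create() that resolves the real function with dlsym(RTLD_NEXT, ...) on first use, flips isthreaded, and forwards the call, so single-threaded programs never pay for locking. A compressed sketch of that interposition pattern follows; the demo_ names are placeholders, and the real version (below) prints a diagnostic before aborting when the lookup fails.

	#include <dlfcn.h>
	#include <pthread.h>
	#include <stdlib.h>

	static int (*real_pthread_create)(pthread_t *, const pthread_attr_t *,
	    void *(*)(void *), void *);
	static _Bool demo_isthreaded;

	static void
	demo_resolve(void)
	{
		/* Look past our own definition to the libc/libthr symbol. */
		real_pthread_create = dlsym(RTLD_NEXT, "pthread_create");
		if (real_pthread_create == NULL)
			abort();
		demo_isthreaded = 1;	/* the process is now going multi-threaded */
	}

	int
	pthread_create(pthread_t *thread, const pthread_attr_t *attr,
	    void *(*start_routine)(void *), void *arg)
	{
		static pthread_once_t once = PTHREAD_ONCE_INIT;

		pthread_once(&once, demo_resolve);
		return (real_pthread_create(thread, attr, start_routine, arg));
	}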
*/ + +#ifdef JEMALLOC_LAZY_LOCK +bool isthreaded = false; +#endif +#ifdef JEMALLOC_MUTEX_INIT_CB +static bool postpone_init = true; +static malloc_mutex_t *postponed_mutexes = NULL; +#endif + +#ifdef JEMALLOC_LAZY_LOCK +static void pthread_create_once(void); +#endif + +/******************************************************************************/ +/* + * We intercept pthread_create() calls in order to toggle isthreaded if the + * process goes multi-threaded. + */ + +#ifdef JEMALLOC_LAZY_LOCK +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); + +static void +pthread_create_once(void) +{ + + pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); + if (pthread_create_fptr == NULL) { + malloc_write(": Error in dlsym(RTLD_NEXT, " + "\"pthread_create\")\n"); + abort(); + } + + isthreaded = true; +} + +JEMALLOC_ATTR(visibility("default")) +int +pthread_create(pthread_t *__restrict thread, + const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), + void *__restrict arg) +{ + static pthread_once_t once_control = PTHREAD_ONCE_INIT; + + pthread_once(&once_control, pthread_create_once); + + return (pthread_create_fptr(thread, attr, start_routine, arg)); +} +#endif + +/******************************************************************************/ + +#ifdef JEMALLOC_MUTEX_INIT_CB +int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); + +__weak_reference(_pthread_mutex_init_calloc_cb_stub, + _pthread_mutex_init_calloc_cb); + +int +_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)) +{ + + return (0); +} +#endif + +bool +malloc_mutex_init(malloc_mutex_t *mutex) +{ +#ifdef JEMALLOC_OSSPIN + mutex->lock = 0; +#elif (defined(JEMALLOC_MUTEX_INIT_CB)) + if (postpone_init) { + mutex->postponed_next = postponed_mutexes; + postponed_mutexes = mutex; + } else { + if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) != + 0) + return (true); + } +#else + pthread_mutexattr_t attr; + + if (pthread_mutexattr_init(&attr) != 0) + return (true); + pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); + if (pthread_mutex_init(&mutex->lock, &attr) != 0) { + pthread_mutexattr_destroy(&attr); + return (true); + } + pthread_mutexattr_destroy(&attr); + +#endif + return (false); +} + +void +malloc_mutex_prefork(malloc_mutex_t *mutex) +{ + + malloc_mutex_lock(mutex); +} + +void +malloc_mutex_postfork_parent(malloc_mutex_t *mutex) +{ + + malloc_mutex_unlock(mutex); +} + +void +malloc_mutex_postfork_child(malloc_mutex_t *mutex) +{ + +#ifdef JEMALLOC_MUTEX_INIT_CB + malloc_mutex_unlock(mutex); +#else + if (malloc_mutex_init(mutex)) { + malloc_printf(": Error re-initializing mutex in " + "child\n"); + if (opt_abort) + abort(); + } +#endif +} + +bool +mutex_boot(void) +{ + +#ifdef JEMALLOC_MUTEX_INIT_CB + postpone_init = false; + while (postponed_mutexes != NULL) { + if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, + base_calloc) != 0) + return (true); + postponed_mutexes = postponed_mutexes->postponed_next; + } +#endif + return (false); +} Property changes on: contrib/jemalloc/src/mutex.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/extent.c =================================================================== --- contrib/jemalloc/src/extent.c (revision 0) +++ 
contrib/jemalloc/src/extent.c (working copy) @@ -0,0 +1,39 @@ +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ + +static inline int +extent_szad_comp(extent_node_t *a, extent_node_t *b) +{ + int ret; + size_t a_size = a->size; + size_t b_size = b->size; + + ret = (a_size > b_size) - (a_size < b_size); + if (ret == 0) { + uintptr_t a_addr = (uintptr_t)a->addr; + uintptr_t b_addr = (uintptr_t)b->addr; + + ret = (a_addr > b_addr) - (a_addr < b_addr); + } + + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad, + extent_szad_comp) + +static inline int +extent_ad_comp(extent_node_t *a, extent_node_t *b) +{ + uintptr_t a_addr = (uintptr_t)a->addr; + uintptr_t b_addr = (uintptr_t)b->addr; + + return ((a_addr > b_addr) - (a_addr < b_addr)); +} + +/* Generate red-black tree functions. */ +rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, + extent_ad_comp) Property changes on: contrib/jemalloc/src/extent.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/base.c =================================================================== --- contrib/jemalloc/src/base.c (revision 0) +++ contrib/jemalloc/src/base.c (working copy) @@ -0,0 +1,138 @@ +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +static malloc_mutex_t base_mtx; + +/* + * Current pages that are being used for internal memory allocations. These + * pages are carved up in cacheline-size quanta, so that there is no chance of + * false cache line sharing. + */ +static void *base_pages; +static void *base_next_addr; +static void *base_past_addr; /* Addr immediately past base_pages. */ +static extent_node_t *base_nodes; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static bool base_pages_alloc(size_t minsize); + +/******************************************************************************/ + +static bool +base_pages_alloc(size_t minsize) +{ + size_t csize; + bool zero; + + assert(minsize != 0); + csize = CHUNK_CEILING(minsize); + zero = false; + base_pages = chunk_alloc(csize, chunksize, true, &zero); + if (base_pages == NULL) + return (true); + base_next_addr = base_pages; + base_past_addr = (void *)((uintptr_t)base_pages + csize); + + return (false); +} + +void * +base_alloc(size_t size) +{ + void *ret; + size_t csize; + + /* Round size up to nearest multiple of the cacheline size. */ + csize = CACHELINE_CEILING(size); + + malloc_mutex_lock(&base_mtx); + /* Make sure there's enough space for the allocation. */ + if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { + if (base_pages_alloc(csize)) { + malloc_mutex_unlock(&base_mtx); + return (NULL); + } + } + /* Allocate. 
*/ + ret = base_next_addr; + base_next_addr = (void *)((uintptr_t)base_next_addr + csize); + malloc_mutex_unlock(&base_mtx); + + return (ret); +} + +void * +base_calloc(size_t number, size_t size) +{ + void *ret = base_alloc(number * size); + + if (ret != NULL) + memset(ret, 0, number * size); + + return (ret); +} + +extent_node_t * +base_node_alloc(void) +{ + extent_node_t *ret; + + malloc_mutex_lock(&base_mtx); + if (base_nodes != NULL) { + ret = base_nodes; + base_nodes = *(extent_node_t **)ret; + malloc_mutex_unlock(&base_mtx); + } else { + malloc_mutex_unlock(&base_mtx); + ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); + } + + return (ret); +} + +void +base_node_dealloc(extent_node_t *node) +{ + + malloc_mutex_lock(&base_mtx); + *(extent_node_t **)node = base_nodes; + base_nodes = node; + malloc_mutex_unlock(&base_mtx); +} + +bool +base_boot(void) +{ + + base_nodes = NULL; + if (malloc_mutex_init(&base_mtx)) + return (true); + + return (false); +} + +void +base_prefork(void) +{ + + malloc_mutex_prefork(&base_mtx); +} + +void +base_postfork_parent(void) +{ + + malloc_mutex_postfork_parent(&base_mtx); +} + +void +base_postfork_child(void) +{ + + malloc_mutex_postfork_child(&base_mtx); +} Property changes on: contrib/jemalloc/src/base.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/util.c =================================================================== --- contrib/jemalloc/src/util.c (revision 0) +++ contrib/jemalloc/src/util.c (working copy) @@ -0,0 +1,635 @@ +#define assert(e) do { \ + if (config_debug && !(e)) { \ + malloc_write(": Failed assertion\n"); \ + abort(); \ + } \ +} while (0) + +#define not_reached() do { \ + if (config_debug) { \ + malloc_write(": Unreachable code reached\n"); \ + abort(); \ + } \ +} while (0) + +#define not_implemented() do { \ + if (config_debug) { \ + malloc_write(": Not implemented\n"); \ + abort(); \ + } \ +} while (0) + +#define JEMALLOC_UTIL_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void wrtmessage(void *cbopaque, const char *s); +#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) +static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, + size_t *slen_p); +#define D2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); +#define O2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); +#define X2S_BUFSIZE (2 + U2S_BUFSIZE) +static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, + size_t *slen_p); + +/******************************************************************************/ + +/* malloc_message() setup. */ +JEMALLOC_CATTR(visibility("hidden"), static) +void +wrtmessage(void *cbopaque, const char *s) +{ + +#ifdef SYS_write + /* + * Use syscall(2) rather than write(2) when possible in order to avoid + * the possibility of memory allocation within libc. This is necessary + * on FreeBSD; most operating systems do not have this problem though. 
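base_node_alloc()/base_node_dealloc() above recycle extent_node_t objects through an intrusive free list: a freed node's first pointer-sized slot is reused as the next link, so the list needs no extra storage. The same trick isolated below, with a hypothetical demo_node_t and malloc() standing in for the base_alloc() fallback.

	#include <stdlib.h>

	typedef struct demo_node { char payload[64]; } demo_node_t;

	static demo_node_t *demo_free_nodes;	/* head of the recycle list */

	static demo_node_t *
	demo_node_alloc(void)
	{
		demo_node_t *ret;

		if (demo_free_nodes != NULL) {
			/* Pop: the node's own storage holds the next pointer. */
			ret = demo_free_nodes;
			demo_free_nodes = *(demo_node_t **)ret;
			return (ret);
		}
		return ((demo_node_t *)malloc(sizeof(demo_node_t)));
	}

	static void
	demo_node_dealloc(demo_node_t *node)
	{
		/* Push: overwrite the node's first word with the list head. */
		*(demo_node_t **)node = demo_free_nodes;
		demo_free_nodes = node;
	}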
+ */ + UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); +#else + UNUSED int result = write(STDERR_FILENO, s, strlen(s)); +#endif +} + +void (*je_malloc_message)(void *, const char *s) + JEMALLOC_ATTR(visibility("default")) = wrtmessage; + +JEMALLOC_CATTR(visibility("hidden"), static) +void +wrtmessage_1_0(const char *s1, const char *s2, const char *s3, + const char *s4) +{ + + wrtmessage(NULL, s1); + wrtmessage(NULL, s2); + wrtmessage(NULL, s3); + wrtmessage(NULL, s4); +} + +void (*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3, + const char *s4) = wrtmessage_1_0; +__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0); + +/* + * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so + * provide a wrapper. + */ +int +buferror(int errnum, char *buf, size_t buflen) +{ +#ifdef _GNU_SOURCE + char *b = strerror_r(errno, buf, buflen); + if (b != buf) { + strncpy(buf, b, buflen); + buf[buflen-1] = '\0'; + } + return (0); +#else + return (strerror_r(errno, buf, buflen)); +#endif +} + +uintmax_t +malloc_strtoumax(const char *nptr, char **endptr, int base) +{ + uintmax_t ret, digit; + int b; + bool neg; + const char *p, *ns; + + if (base < 0 || base == 1 || base > 36) { + errno = EINVAL; + return (UINTMAX_MAX); + } + b = base; + + /* Swallow leading whitespace and get sign, if any. */ + neg = false; + p = nptr; + while (true) { + switch (*p) { + case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': + p++; + break; + case '-': + neg = true; + /* Fall through. */ + case '+': + p++; + /* Fall through. */ + default: + goto label_prefix; + } + } + + /* Get prefix, if any. */ + label_prefix: + /* + * Note where the first non-whitespace/sign character is so that it is + * possible to tell whether any digits are consumed (e.g., " 0" vs. + * " -x"). + */ + ns = p; + if (*p == '0') { + switch (p[1]) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': + if (b == 0) + b = 8; + if (b == 8) + p++; + break; + case 'x': + switch (p[2]) { + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': + case 'A': case 'B': case 'C': case 'D': case 'E': + case 'F': + case 'a': case 'b': case 'c': case 'd': case 'e': + case 'f': + if (b == 0) + b = 16; + if (b == 16) + p += 2; + break; + default: + break; + } + break; + default: + break; + } + } + if (b == 0) + b = 10; + + /* Convert. */ + ret = 0; + while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b) + || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b) + || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) { + uintmax_t pret = ret; + ret *= b; + ret += digit; + if (ret < pret) { + /* Overflow. */ + errno = ERANGE; + return (UINTMAX_MAX); + } + p++; + } + if (neg) + ret = -ret; + + if (endptr != NULL) { + if (p == ns) { + /* No characters were converted. */ + *endptr = (char *)nptr; + } else + *endptr = (char *)p; + } + + return (ret); +} + +static char * +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) +{ + unsigned i; + + i = U2S_BUFSIZE - 1; + s[i] = '\0'; + switch (base) { + case 10: + do { + i--; + s[i] = "0123456789"[x % (uint64_t)10]; + x /= (uint64_t)10; + } while (x > 0); + break; + case 16: { + const char *digits = (uppercase) + ? "0123456789ABCDEF" + : "0123456789abcdef"; + + do { + i--; + s[i] = digits[x & 0xf]; + x >>= 4; + } while (x > 0); + break; + } default: { + const char *digits = (uppercase) + ? 
"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + : "0123456789abcdefghijklmnopqrstuvwxyz"; + + assert(base >= 2 && base <= 36); + do { + i--; + s[i] = digits[x % (uint64_t)base]; + x /= (uint64_t)base; + } while (x > 0); + }} + + *slen_p = U2S_BUFSIZE - 1 - i; + return (&s[i]); +} + +static char * +d2s(intmax_t x, char sign, char *s, size_t *slen_p) +{ + bool neg; + + if ((neg = (x < 0))) + x = -x; + s = u2s(x, 10, false, s, slen_p); + if (neg) + sign = '-'; + switch (sign) { + case '-': + if (neg == false) + break; + /* Fall through. */ + case ' ': + case '+': + s--; + (*slen_p)++; + *s = sign; + break; + default: not_reached(); + } + return (s); +} + +static char * +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) +{ + + s = u2s(x, 8, false, s, slen_p); + if (alt_form && *s != '0') { + s--; + (*slen_p)++; + *s = '0'; + } + return (s); +} + +static char * +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) +{ + + s = u2s(x, 16, uppercase, s, slen_p); + if (alt_form) { + s -= 2; + (*slen_p) += 2; + memcpy(s, uppercase ? "0X" : "0x", 2); + } + return (s); +} + +int +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) +{ + int ret; + size_t i; + const char *f; + va_list tap; + +#define APPEND_C(c) do { \ + if (i < size) \ + str[i] = (c); \ + i++; \ +} while (0) +#define APPEND_S(s, slen) do { \ + if (i < size) { \ + size_t cpylen = (slen <= size - i) ? slen : size - i; \ + memcpy(&str[i], s, cpylen); \ + } \ + i += slen; \ +} while (0) +#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ + /* Left padding. */ \ + size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ + (size_t)width - slen : 0); \ + if (left_justify == false && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) \ + APPEND_C(' '); \ + } \ + /* Value. */ \ + APPEND_S(s, slen); \ + /* Right padding. */ \ + if (left_justify && pad_len != 0) { \ + size_t j; \ + for (j = 0; j < pad_len; j++) \ + APPEND_C(' '); \ + } \ +} while (0) +#define GET_ARG_NUMERIC(val, len) do { \ + switch (len) { \ + case '?': \ + val = va_arg(ap, int); \ + break; \ + case 'l': \ + val = va_arg(ap, long); \ + break; \ + case 'q': \ + val = va_arg(ap, long long); \ + break; \ + case 'j': \ + val = va_arg(ap, intmax_t); \ + break; \ + case 't': \ + val = va_arg(ap, ptrdiff_t); \ + break; \ + case 'z': \ + val = va_arg(ap, ssize_t); \ + break; \ + case 'p': /* Synthetic; used for %p. */ \ + val = va_arg(ap, uintptr_t); \ + break; \ + default: not_reached(); \ + } \ +} while (0) + + if (config_debug) + va_copy(tap, ap); + + i = 0; + f = format; + while (true) { + switch (*f) { + case '\0': goto label_out; + case '%': { + bool alt_form = false; + bool zero_pad = false; + bool left_justify = false; + bool plus_space = false; + bool plus_plus = false; + int prec = -1; + int width = -1; + char len = '?'; + + f++; + if (*f == '%') { + /* %% */ + APPEND_C(*f); + break; + } + /* Flags. */ + while (true) { + switch (*f) { + case '#': + assert(alt_form == false); + alt_form = true; + break; + case '0': + assert(zero_pad == false); + zero_pad = true; + break; + case '-': + assert(left_justify == false); + left_justify = true; + break; + case ' ': + assert(plus_space == false); + plus_space = true; + break; + case '+': + assert(plus_plus == false); + plus_plus = true; + break; + default: goto label_width; + } + f++; + } + /* Width. 
*/ + label_width: + switch (*f) { + case '*': + width = va_arg(ap, int); + f++; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uwidth; + errno = 0; + uwidth = malloc_strtoumax(f, (char **)&f, 10); + assert(uwidth != UINTMAX_MAX || errno != + ERANGE); + width = (int)uwidth; + if (*f == '.') { + f++; + goto label_precision; + } else + goto label_length; + break; + } case '.': + f++; + goto label_precision; + default: goto label_length; + } + /* Precision. */ + label_precision: + switch (*f) { + case '*': + prec = va_arg(ap, int); + f++; + break; + case '0': case '1': case '2': case '3': case '4': + case '5': case '6': case '7': case '8': case '9': { + uintmax_t uprec; + errno = 0; + uprec = malloc_strtoumax(f, (char **)&f, 10); + assert(uprec != UINTMAX_MAX || errno != ERANGE); + prec = (int)uprec; + break; + } + default: break; + } + /* Length. */ + label_length: + switch (*f) { + case 'l': + f++; + if (*f == 'l') { + len = 'q'; + f++; + } else + len = 'l'; + break; + case 'j': + len = 'j'; + f++; + break; + case 't': + len = 't'; + f++; + break; + case 'z': + len = 'z'; + f++; + break; + default: break; + } + /* Conversion specifier. */ + switch (*f) { + char *s; + size_t slen; + case 'd': case 'i': { + intmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[D2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = d2s(val, (plus_plus ? '+' : (plus_space ? + ' ' : '-')), buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'o': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[O2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = o2s(val, alt_form, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'u': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[U2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = u2s(val, 10, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'x': case 'X': { + uintmax_t val JEMALLOC_CC_SILENCE_INIT(0); + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, len); + s = x2s(val, alt_form, *f == 'X', buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } case 'c': { + unsigned char val; + char buf[2]; + + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + val = va_arg(ap, int); + buf[0] = val; + buf[1] = '\0'; + APPEND_PADDED_S(buf, 1, width, left_justify); + f++; + break; + } case 's': + assert(len == '?' || len == 'l'); + assert_not_implemented(len != 'l'); + s = va_arg(ap, char *); + slen = (prec == -1) ? strlen(s) : prec; + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + case 'p': { + uintmax_t val; + char buf[X2S_BUFSIZE]; + + GET_ARG_NUMERIC(val, 'p'); + s = x2s(val, true, false, buf, &slen); + APPEND_PADDED_S(s, slen, width, left_justify); + f++; + break; + } + default: not_implemented(); + } + break; + } default: { + APPEND_C(*f); + f++; + break; + }} + } + label_out: + if (i < size) + str[i] = '\0'; + else + str[size - 1] = '\0'; + ret = i; + +#undef APPEND_C +#undef APPEND_S +#undef APPEND_PADDED_S +#undef GET_ARG_NUMERIC + return (ret); +} + +JEMALLOC_ATTR(format(printf, 3, 4)) +int +malloc_snprintf(char *str, size_t size, const char *format, ...) 
+{ + int ret; + va_list ap; + + va_start(ap, format); + ret = malloc_vsnprintf(str, size, format, ap); + va_end(ap); + + return (ret); +} + +void +malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap) +{ + char buf[MALLOC_PRINTF_BUFSIZE]; + + if (write_cb == NULL) { + /* + * The caller did not provide an alternate write_cb callback + * function, so use the default one. malloc_write() is an + * inline function, so use malloc_message() directly here. + */ + write_cb = je_malloc_message; + cbopaque = NULL; + } + + malloc_vsnprintf(buf, sizeof(buf), format, ap); + write_cb(cbopaque, buf); +} + +/* + * Print to a callback function in such a way as to (hopefully) avoid memory + * allocation. + */ +JEMALLOC_ATTR(format(printf, 3, 4)) +void +malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(write_cb, cbopaque, format, ap); + va_end(ap); +} + +/* Print to stderr in such a way as to avoid memory allocation. */ +JEMALLOC_ATTR(format(printf, 1, 2)) +void +malloc_printf(const char *format, ...) +{ + va_list ap; + + va_start(ap, format); + malloc_vcprintf(NULL, NULL, format, ap); + va_end(ap); +} Property changes on: contrib/jemalloc/src/util.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/rtree.c =================================================================== --- contrib/jemalloc/src/rtree.c (revision 0) +++ contrib/jemalloc/src/rtree.c (working copy) @@ -0,0 +1,46 @@ +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +rtree_t * +rtree_new(unsigned bits) +{ + rtree_t *ret; + unsigned bits_per_level, height, i; + + bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; + height = bits / bits_per_level; + if (height * bits_per_level != bits) + height++; + assert(height * bits_per_level >= bits); + + ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) + + (sizeof(unsigned) * height)); + if (ret == NULL) + return (NULL); + memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * + height)); + + if (malloc_mutex_init(&ret->mutex)) { + /* Leak the rtree. */ + return (NULL); + } + ret->height = height; + if (bits_per_level * height > bits) + ret->level2bits[0] = bits % bits_per_level; + else + ret->level2bits[0] = bits_per_level; + for (i = 1; i < height; i++) + ret->level2bits[i] = bits_per_level; + + ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]); + if (ret->root == NULL) { + /* + * We leak the rtree here, since there's no generic base + * deallocation. 
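rtree_new() above sizes the radix tree from two inputs: how many pointer slots fit in a node (which fixes the key bits consumed per level) and how many key bits must be covered, with any remainder assigned to the root level. The arithmetic on its own, using an illustrative 4 KiB node of 8-byte pointers and a 20-bit key; the real key width is the number of significant chunk-address bits.

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	static void
	demo_rtree_levels(unsigned nodesize, unsigned ptrsize, unsigned bits)
	{
		unsigned slots = nodesize / ptrsize;
		unsigned bits_per_level = ffs((int)slots) - 1; /* slots assumed power of 2 */
		unsigned height = bits / bits_per_level;
		unsigned i;

		if (height * bits_per_level != bits)
			height++;

		printf("height %u, level bits:", height);
		for (i = 0; i < height; i++) {
			unsigned lb = bits_per_level;

			/* The root level absorbs the remainder, as in rtree_new(). */
			if (i == 0 && bits_per_level * height > bits)
				lb = bits % bits_per_level;
			printf(" %u", lb);
		}
		printf("\n");
	}

	int
	main(void)
	{
		/* 4 KiB nodes of 8-byte pointers, 20-bit key: height 3, bits 2+9+9. */
		demo_rtree_levels(4096, 8, 20);
		return (0);
	}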
+ */ + return (NULL); + } + memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); + + return (ret); +} Property changes on: contrib/jemalloc/src/rtree.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/chunk_dss.c =================================================================== --- contrib/jemalloc/src/chunk_dss.c (revision 0) +++ contrib/jemalloc/src/chunk_dss.c (working copy) @@ -0,0 +1,159 @@ +#define JEMALLOC_CHUNK_DSS_C_ +#include "jemalloc/internal/jemalloc_internal.h" +/******************************************************************************/ +/* Data. */ + +/* + * Protects sbrk() calls. This avoids malloc races among threads, though it + * does not protect against races with threads that call sbrk() directly. + */ +static malloc_mutex_t dss_mtx; + +/* Base address of the DSS. */ +static void *dss_base; +/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ +static void *dss_prev; +/* Current upper limit on DSS addresses. */ +static void *dss_max; + +/******************************************************************************/ + +#ifndef JEMALLOC_HAVE_SBRK +static void * +sbrk(intptr_t increment) +{ + + not_implemented(); + + return (NULL); +} +#endif + +void * +chunk_alloc_dss(size_t size, size_t alignment, bool *zero) +{ + void *ret; + + cassert(config_dss); + assert(size > 0 && (size & chunksize_mask) == 0); + assert(alignment > 0 && (alignment & chunksize_mask) == 0); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a huge allocation request as a negative increment. + */ + if ((intptr_t)size < 0) + return (NULL); + + malloc_mutex_lock(&dss_mtx); + if (dss_prev != (void *)-1) { + size_t gap_size, cpad_size; + void *cpad, *dss_next; + intptr_t incr; + + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. + */ + do { + /* Get the current end of the DSS. */ + dss_max = sbrk(0); + /* + * Calculate how much padding is necessary to + * chunk-align the end of the DSS. + */ + gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & + chunksize_mask; + /* + * Compute how much chunk-aligned pad space (if any) is + * necessary to satisfy alignment. This space can be + * recycled for later use. + */ + cpad = (void *)((uintptr_t)dss_max + gap_size); + ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, + alignment); + cpad_size = (uintptr_t)ret - (uintptr_t)cpad; + dss_next = (void *)((uintptr_t)ret + size); + if ((uintptr_t)ret < (uintptr_t)dss_max || + (uintptr_t)dss_next < (uintptr_t)dss_max) { + /* Wrap-around. */ + malloc_mutex_unlock(&dss_mtx); + return (NULL); + } + incr = gap_size + cpad_size + size; + dss_prev = sbrk(incr); + if (dss_prev == dss_max) { + /* Success. 
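chunk_alloc_dss() above grows the DSS by gap + pad + size: the gap chunk-aligns the current break, the pad (recyclable as a chunk later) covers any alignment beyond chunk size, and only then comes the requested region. The same arithmetic on made-up numbers, with an 8 MiB alignment chosen so the recyclable pad is visible; with the usual chunk-sized alignment it would be zero.

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_CHUNK	((uintptr_t)1 << 22)	/* 4 MiB, illustrative */

	/* Round an address up to a power-of-two boundary (ALIGNMENT_CEILING). */
	static uintptr_t
	demo_ceiling(uintptr_t a, uintptr_t align)
	{
		return ((a + align - 1) & ~(align - 1));
	}

	int
	main(void)
	{
		uintptr_t dss_max = 0x0080a123;		/* hypothetical current break */
		uintptr_t size = DEMO_CHUNK;		/* one chunk requested */
		uintptr_t alignment = DEMO_CHUNK * 2;	/* 8 MiB */

		uintptr_t gap = (DEMO_CHUNK - (dss_max & (DEMO_CHUNK - 1))) &
		    (DEMO_CHUNK - 1);
		uintptr_t cpad = dss_max + gap;		/* first chunk boundary */
		uintptr_t ret = demo_ceiling(dss_max, alignment);
		uintptr_t incr = gap + (ret - cpad) + size;

		/* gap 0x3f5edd, pad 0x400000, ret 0x1000000, incr 0xbf5edd */
		printf("gap %#jx, recyclable pad %#jx, ret %#jx, sbrk incr %#jx\n",
		    (uintmax_t)gap, (uintmax_t)(ret - cpad), (uintmax_t)ret,
		    (uintmax_t)incr);
		return (0);
	}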
*/ + dss_max = dss_next; + malloc_mutex_unlock(&dss_mtx); + if (cpad_size != 0) + chunk_dealloc(cpad, cpad_size, true); + *zero = true; + return (ret); + } + } while (dss_prev != (void *)-1); + } + malloc_mutex_unlock(&dss_mtx); + + return (NULL); +} + +bool +chunk_in_dss(void *chunk) +{ + bool ret; + + cassert(config_dss); + + malloc_mutex_lock(&dss_mtx); + if ((uintptr_t)chunk >= (uintptr_t)dss_base + && (uintptr_t)chunk < (uintptr_t)dss_max) + ret = true; + else + ret = false; + malloc_mutex_unlock(&dss_mtx); + + return (ret); +} + +bool +chunk_dss_boot(void) +{ + + cassert(config_dss); + + if (malloc_mutex_init(&dss_mtx)) + return (true); + dss_base = sbrk(0); + dss_prev = dss_base; + dss_max = dss_base; + + return (false); +} + +void +chunk_dss_prefork(void) +{ + + if (config_dss) + malloc_mutex_prefork(&dss_mtx); +} + +void +chunk_dss_postfork_parent(void) +{ + + if (config_dss) + malloc_mutex_postfork_parent(&dss_mtx); +} + +void +chunk_dss_postfork_child(void) +{ + + if (config_dss) + malloc_mutex_postfork_child(&dss_mtx); +} + +/******************************************************************************/ Property changes on: contrib/jemalloc/src/chunk_dss.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/chunk_mmap.c =================================================================== --- contrib/jemalloc/src/chunk_mmap.c (revision 0) +++ contrib/jemalloc/src/chunk_mmap.c (working copy) @@ -0,0 +1,207 @@ +#define JEMALLOC_CHUNK_MMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* + * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and + * potentially avoid some system calls. + */ +malloc_tsd_data(static, mmap_unaligned, bool, false) +malloc_tsd_funcs(JEMALLOC_INLINE, mmap_unaligned, bool, false, + malloc_tsd_no_cleanup) + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void *pages_map(void *addr, size_t size); +static void pages_unmap(void *addr, size_t size); +static void *chunk_alloc_mmap_slow(size_t size, size_t alignment, + bool unaligned); + +/******************************************************************************/ + +static void * +pages_map(void *addr, size_t size) +{ + void *ret; + + /* + * We don't use MAP_FIXED here, because it can cause the *replacement* + * of existing mappings, and we only want to create new mappings. + */ + ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, + -1, 0); + assert(ret != NULL); + + if (ret == MAP_FAILED) + ret = NULL; + else if (addr != NULL && ret != addr) { + /* + * We succeeded in mapping memory, but not in the right place. + */ + if (munmap(ret, size) == -1) { + char buf[BUFERROR_BUF]; + + buferror(errno, buf, sizeof(buf)); + malloc_printf(": Error in munmap(): %s\n", buf); + if (opt_abort) + abort(); + } +} + +static void * +chunk_alloc_mmap_slow(size_t size, size_t alignment, bool unaligned) +{ + void *ret, *pages; + size_t alloc_size, leadsize, trailsize; + + alloc_size = size + alignment - PAGE; + /* Beware size_t wrap-around. 
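+	 * (For example, a size close to SIZE_T_MAX plus a chunk-sized
+	 * alignment wraps alloc_size around to a small value; the check below
+	 * rejects such requests.)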
*/ + if (alloc_size < size) + return (NULL); + pages = pages_map(NULL, alloc_size); + if (pages == NULL) + return (NULL); + leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - + (uintptr_t)pages; + assert(alloc_size >= leadsize + size); + trailsize = alloc_size - leadsize - size; + ret = (void *)((uintptr_t)pages + leadsize); + if (leadsize != 0) { + /* Note that mmap() returned an unaligned mapping. */ + unaligned = true; + pages_unmap(pages, leadsize); + } + if (trailsize != 0) + pages_unmap((void *)((uintptr_t)ret + size), trailsize); + + /* + * If mmap() returned an aligned mapping, reset mmap_unaligned so that + * the next chunk_alloc_mmap() execution tries the fast allocation + * method. + */ + if (unaligned == false && mmap_unaligned_booted) { + bool mu = false; + mmap_unaligned_tsd_set(&mu); + } + + return (ret); +} + +void * +chunk_alloc_mmap(size_t size, size_t alignment) +{ + void *ret; + + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in at least one call to + * pages_unmap(). + * + * A more optimistic approach is to try mapping precisely the right + * amount, then try to append another mapping if alignment is off. In + * practice, this works out well as long as the application is not + * interleaving mappings via direct mmap() calls. If we do run into a + * situation where there is an interleaved mapping and we are unable to + * extend an unaligned mapping, our best option is to switch to the + * slow method until mmap() returns another aligned mapping. This will + * tend to leave a gap in the memory map that is too small to cause + * later problems for the optimistic method. + * + * Another possible confounding factor is address space layout + * randomization (ASLR), which causes mmap(2) to disregard the + * requested address. mmap_unaligned tracks whether the previous + * chunk_alloc_mmap() execution received any unaligned or relocated + * mappings, and if so, the current execution will immediately fall + * back to the slow method. However, we keep track of whether the fast + * method would have succeeded, and if so, we make a note to try the + * fast method next time. + */ + + if (mmap_unaligned_booted && *mmap_unaligned_tsd_get() == false) { + size_t offset; + + ret = pages_map(NULL, size); + if (ret == NULL) + return (NULL); + + offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); + if (offset != 0) { + bool mu = true; + mmap_unaligned_tsd_set(&mu); + /* Try to extend chunk boundary. */ + if (pages_map((void *)((uintptr_t)ret + size), + chunksize - offset) == NULL) { + /* + * Extension failed. Clean up, then revert to + * the reliable-but-expensive method. + */ + pages_unmap(ret, size); + ret = chunk_alloc_mmap_slow(size, alignment, + true); + } else { + /* Clean up unneeded leading space. */ + pages_unmap(ret, chunksize - offset); + ret = (void *)((uintptr_t)ret + (chunksize - + offset)); + } + } + } else + ret = chunk_alloc_mmap_slow(size, alignment, false); + + return (ret); +} + +bool +chunk_dealloc_mmap(void *chunk, size_t size) +{ + + if (config_munmap) + pages_unmap(chunk, size); + + return (config_munmap == false); +} + +bool +chunk_mmap_boot(void) +{ + + /* + * XXX For the non-TLS implementation of tsd, the first access from + * each thread causes memory allocation. 
The result is a bootstrapping + * problem for this particular use case, so for now just disable it by + * leaving it in an unbooted state. + */ +#ifdef JEMALLOC_TLS + if (mmap_unaligned_tsd_boot()) + return (true); +#endif + + return (false); +} Property changes on: contrib/jemalloc/src/chunk_mmap.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/ctl.c =================================================================== --- contrib/jemalloc/src/ctl.c (revision 0) +++ contrib/jemalloc/src/ctl.c (working copy) @@ -0,0 +1,1382 @@ +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +/* + * ctl_mtx protects the following: + * - ctl_stats.* + * - opt_prof_active + */ +static malloc_mutex_t ctl_mtx; +static bool ctl_initialized; +static uint64_t ctl_epoch; +static ctl_stats_t ctl_stats; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +#define CTL_PROTO(n) \ +static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \ + size_t i); + +static bool ctl_arena_init(ctl_arena_stats_t *astats); +static void ctl_arena_clear(ctl_arena_stats_t *astats); +static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, + arena_t *arena); +static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, + ctl_arena_stats_t *astats); +static void ctl_arena_refresh(arena_t *arena, unsigned i); +static void ctl_refresh(void); +static bool ctl_init(void); +static int ctl_lookup(const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp); + +CTL_PROTO(version) +CTL_PROTO(epoch) +CTL_PROTO(thread_tcache_enabled) +CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_arena) +CTL_PROTO(thread_allocated) +CTL_PROTO(thread_allocatedp) +CTL_PROTO(thread_deallocated) +CTL_PROTO(thread_deallocatedp) +CTL_PROTO(config_debug) +CTL_PROTO(config_dss) +CTL_PROTO(config_fill) +CTL_PROTO(config_lazy_lock) +CTL_PROTO(config_prof) +CTL_PROTO(config_prof_libgcc) +CTL_PROTO(config_prof_libunwind) +CTL_PROTO(config_stats) +CTL_PROTO(config_tcache) +CTL_PROTO(config_tls) +CTL_PROTO(config_utrace) +CTL_PROTO(config_valgrind) +CTL_PROTO(config_xmalloc) +CTL_PROTO(opt_abort) +CTL_PROTO(opt_lg_chunk) +CTL_PROTO(opt_narenas) +CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_junk) +CTL_PROTO(opt_zero) +CTL_PROTO(opt_quarantine) +CTL_PROTO(opt_redzone) +CTL_PROTO(opt_utrace) +CTL_PROTO(opt_valgrind) +CTL_PROTO(opt_xmalloc) +CTL_PROTO(opt_tcache) +CTL_PROTO(opt_lg_tcache_max) +CTL_PROTO(opt_prof) +CTL_PROTO(opt_prof_prefix) +CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_lg_prof_sample) +CTL_PROTO(opt_lg_prof_interval) +CTL_PROTO(opt_prof_gdump) +CTL_PROTO(opt_prof_leak) +CTL_PROTO(opt_prof_accum) +CTL_PROTO(arenas_bin_i_size) +CTL_PROTO(arenas_bin_i_nregs) +CTL_PROTO(arenas_bin_i_run_size) +INDEX_PROTO(arenas_bin_i) +CTL_PROTO(arenas_lrun_i_size) +INDEX_PROTO(arenas_lrun_i) +CTL_PROTO(arenas_narenas) +CTL_PROTO(arenas_initialized) +CTL_PROTO(arenas_quantum) +CTL_PROTO(arenas_page) +CTL_PROTO(arenas_tcache_max) +CTL_PROTO(arenas_nbins) +CTL_PROTO(arenas_nhbins) 
+CTL_PROTO(arenas_nlruns) +CTL_PROTO(arenas_purge) +CTL_PROTO(prof_active) +CTL_PROTO(prof_dump) +CTL_PROTO(prof_interval) +CTL_PROTO(stats_chunks_current) +CTL_PROTO(stats_chunks_total) +CTL_PROTO(stats_chunks_high) +CTL_PROTO(stats_huge_allocated) +CTL_PROTO(stats_huge_nmalloc) +CTL_PROTO(stats_huge_ndalloc) +CTL_PROTO(stats_arenas_i_small_allocated) +CTL_PROTO(stats_arenas_i_small_nmalloc) +CTL_PROTO(stats_arenas_i_small_ndalloc) +CTL_PROTO(stats_arenas_i_small_nrequests) +CTL_PROTO(stats_arenas_i_large_allocated) +CTL_PROTO(stats_arenas_i_large_nmalloc) +CTL_PROTO(stats_arenas_i_large_ndalloc) +CTL_PROTO(stats_arenas_i_large_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_allocated) +CTL_PROTO(stats_arenas_i_bins_j_nmalloc) +CTL_PROTO(stats_arenas_i_bins_j_ndalloc) +CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_nfills) +CTL_PROTO(stats_arenas_i_bins_j_nflushes) +CTL_PROTO(stats_arenas_i_bins_j_nruns) +CTL_PROTO(stats_arenas_i_bins_j_nreruns) +CTL_PROTO(stats_arenas_i_bins_j_curruns) +INDEX_PROTO(stats_arenas_i_bins_j) +CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) +CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) +CTL_PROTO(stats_arenas_i_lruns_j_nrequests) +CTL_PROTO(stats_arenas_i_lruns_j_curruns) +INDEX_PROTO(stats_arenas_i_lruns_j) +CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_pactive) +CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_mapped) +CTL_PROTO(stats_arenas_i_npurge) +CTL_PROTO(stats_arenas_i_nmadvise) +CTL_PROTO(stats_arenas_i_purged) +INDEX_PROTO(stats_arenas_i) +CTL_PROTO(stats_cactive) +CTL_PROTO(stats_allocated) +CTL_PROTO(stats_active) +CTL_PROTO(stats_mapped) + +/******************************************************************************/ +/* mallctl tree. */ + +/* Maximum tree depth. */ +#define CTL_MAX_DEPTH 6 + +#define NAME(n) true, {.named = {n +#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL +#define CTL(c) 0, NULL}}, c##_ctl + +/* + * Only handles internal indexed nodes, since there are currently no external + * ones. 
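+ * For illustration, {NAME("epoch"), CTL(epoch)} expands to the initializer
+ * {true, {.named = {"epoch", 0, NULL}}, epoch_ctl}, while
+ * {INDEX(stats_arenas_i)} expands to
+ * {false, {.indexed = {stats_arenas_i_index}}, NULL}.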
+ */ +#define INDEX(i) false, {.indexed = {i##_index}}, NULL + +static const ctl_node_t tcache_node[] = { + {NAME("enabled"), CTL(thread_tcache_enabled)}, + {NAME("flush"), CTL(thread_tcache_flush)} +}; + +static const ctl_node_t thread_node[] = { + {NAME("arena"), CTL(thread_arena)}, + {NAME("allocated"), CTL(thread_allocated)}, + {NAME("allocatedp"), CTL(thread_allocatedp)}, + {NAME("deallocated"), CTL(thread_deallocated)}, + {NAME("deallocatedp"), CTL(thread_deallocatedp)}, + {NAME("tcache"), CHILD(tcache)} +}; + +static const ctl_node_t config_node[] = { + {NAME("debug"), CTL(config_debug)}, + {NAME("dss"), CTL(config_dss)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("tcache"), CTL(config_tcache)}, + {NAME("tls"), CTL(config_tls)}, + {NAME("utrace"), CTL(config_utrace)}, + {NAME("valgrind"), CTL(config_valgrind)}, + {NAME("xmalloc"), CTL(config_xmalloc)} +}; + +static const ctl_node_t opt_node[] = { + {NAME("abort"), CTL(opt_abort)}, + {NAME("lg_chunk"), CTL(opt_lg_chunk)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)}, + {NAME("quarantine"), CTL(opt_quarantine)}, + {NAME("redzone"), CTL(opt_redzone)}, + {NAME("utrace"), CTL(opt_utrace)}, + {NAME("valgrind"), CTL(opt_valgrind)}, + {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)} +}; + +static const ctl_node_t arenas_bin_i_node[] = { + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("run_size"), CTL(arenas_bin_i_run_size)} +}; +static const ctl_node_t super_arenas_bin_i_node[] = { + {NAME(""), CHILD(arenas_bin_i)} +}; + +static const ctl_node_t arenas_bin_node[] = { + {INDEX(arenas_bin_i)} +}; + +static const ctl_node_t arenas_lrun_i_node[] = { + {NAME("size"), CTL(arenas_lrun_i_size)} +}; +static const ctl_node_t super_arenas_lrun_i_node[] = { + {NAME(""), CHILD(arenas_lrun_i)} +}; + +static const ctl_node_t arenas_lrun_node[] = { + {INDEX(arenas_lrun_i)} +}; + +static const ctl_node_t arenas_node[] = { + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("initialized"), CTL(arenas_initialized)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("page"), CTL(arenas_page)}, + {NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(arenas_bin)}, + {NAME("nlruns"), CTL(arenas_nlruns)}, + {NAME("lrun"), CHILD(arenas_lrun)}, + {NAME("purge"), CTL(arenas_purge)} +}; + +static const ctl_node_t prof_node[] = { + {NAME("active"), CTL(prof_active)}, + {NAME("dump"), CTL(prof_dump)}, + {NAME("interval"), CTL(prof_interval)} +}; + +static const ctl_node_t stats_chunks_node[] = { + {NAME("current"), CTL(stats_chunks_current)}, + {NAME("total"), CTL(stats_chunks_total)}, + {NAME("high"), 
CTL(stats_chunks_high)} +}; + +static const ctl_node_t stats_huge_node[] = { + {NAME("allocated"), CTL(stats_huge_allocated)}, + {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, + {NAME("ndalloc"), CTL(stats_huge_ndalloc)} +}; + +static const ctl_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} +}; + +static const ctl_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} +}; + +static const ctl_node_t stats_arenas_i_bins_j_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, + {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, + {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} +}; +static const ctl_node_t super_stats_arenas_i_bins_j_node[] = { + {NAME(""), CHILD(stats_arenas_i_bins_j)} +}; + +static const ctl_node_t stats_arenas_i_bins_node[] = { + {INDEX(stats_arenas_i_bins_j)} +}; + +static const ctl_node_t stats_arenas_i_lruns_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, + {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +}; +static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = { + {NAME(""), CHILD(stats_arenas_i_lruns_j)} +}; + +static const ctl_node_t stats_arenas_i_lruns_node[] = { + {INDEX(stats_arenas_i_lruns_j)} +}; + +static const ctl_node_t stats_arenas_i_node[] = { + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("npurge"), CTL(stats_arenas_i_npurge)}, + {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, + {NAME("purged"), CTL(stats_arenas_i_purged)}, + {NAME("small"), CHILD(stats_arenas_i_small)}, + {NAME("large"), CHILD(stats_arenas_i_large)}, + {NAME("bins"), CHILD(stats_arenas_i_bins)}, + {NAME("lruns"), CHILD(stats_arenas_i_lruns)} +}; +static const ctl_node_t super_stats_arenas_i_node[] = { + {NAME(""), CHILD(stats_arenas_i)} +}; + +static const ctl_node_t stats_arenas_node[] = { + {INDEX(stats_arenas_i)} +}; + +static const ctl_node_t stats_node[] = { + {NAME("cactive"), CTL(stats_cactive)}, + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("chunks"), CHILD(stats_chunks)}, + {NAME("huge"), CHILD(stats_huge)}, + {NAME("arenas"), CHILD(stats_arenas)} +}; + +static const ctl_node_t root_node[] = { + {NAME("version"), CTL(version)}, + {NAME("epoch"), CTL(epoch)}, + {NAME("thread"), CHILD(thread)}, + {NAME("config"), CHILD(config)}, + {NAME("opt"), CHILD(opt)}, + {NAME("arenas"), CHILD(arenas)}, + {NAME("prof"), CHILD(prof)}, + {NAME("stats"), CHILD(stats)} +}; +static const ctl_node_t 
super_root_node[] = { + {NAME(""), CHILD(root)} +}; + +#undef NAME +#undef CHILD +#undef CTL +#undef INDEX + +/******************************************************************************/ + +static bool +ctl_arena_init(ctl_arena_stats_t *astats) +{ + + if (astats->lstats == NULL) { + astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * + sizeof(malloc_large_stats_t)); + if (astats->lstats == NULL) + return (true); + } + + return (false); +} + +static void +ctl_arena_clear(ctl_arena_stats_t *astats) +{ + + astats->pactive = 0; + astats->pdirty = 0; + if (config_stats) { + memset(&astats->astats, 0, sizeof(arena_stats_t)); + astats->allocated_small = 0; + astats->nmalloc_small = 0; + astats->ndalloc_small = 0; + astats->nrequests_small = 0; + memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); + memset(astats->lstats, 0, nlclasses * + sizeof(malloc_large_stats_t)); + } +} + +static void +ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) +{ + unsigned i; + + arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty, + &cstats->astats, cstats->bstats, cstats->lstats); + + for (i = 0; i < NBINS; i++) { + cstats->allocated_small += cstats->bstats[i].allocated; + cstats->nmalloc_small += cstats->bstats[i].nmalloc; + cstats->ndalloc_small += cstats->bstats[i].ndalloc; + cstats->nrequests_small += cstats->bstats[i].nrequests; + } +} + +static void +ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) +{ + unsigned i; + + sstats->pactive += astats->pactive; + sstats->pdirty += astats->pdirty; + + sstats->astats.mapped += astats->astats.mapped; + sstats->astats.npurge += astats->astats.npurge; + sstats->astats.nmadvise += astats->astats.nmadvise; + sstats->astats.purged += astats->astats.purged; + + sstats->allocated_small += astats->allocated_small; + sstats->nmalloc_small += astats->nmalloc_small; + sstats->ndalloc_small += astats->ndalloc_small; + sstats->nrequests_small += astats->nrequests_small; + + sstats->astats.allocated_large += astats->astats.allocated_large; + sstats->astats.nmalloc_large += astats->astats.nmalloc_large; + sstats->astats.ndalloc_large += astats->astats.ndalloc_large; + sstats->astats.nrequests_large += astats->astats.nrequests_large; + + for (i = 0; i < nlclasses; i++) { + sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; + sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; + sstats->lstats[i].nrequests += astats->lstats[i].nrequests; + sstats->lstats[i].curruns += astats->lstats[i].curruns; + } + + for (i = 0; i < NBINS; i++) { + sstats->bstats[i].allocated += astats->bstats[i].allocated; + sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sstats->bstats[i].nrequests += astats->bstats[i].nrequests; + if (config_tcache) { + sstats->bstats[i].nfills += astats->bstats[i].nfills; + sstats->bstats[i].nflushes += + astats->bstats[i].nflushes; + } + sstats->bstats[i].nruns += astats->bstats[i].nruns; + sstats->bstats[i].reruns += astats->bstats[i].reruns; + sstats->bstats[i].curruns += astats->bstats[i].curruns; + } +} + +static void +ctl_arena_refresh(arena_t *arena, unsigned i) +{ + ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; + ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas]; + + ctl_arena_clear(astats); + + sstats->nthreads += astats->nthreads; + if (config_stats) { + ctl_arena_stats_amerge(astats, arena); + /* Merge into sum stats as well. 
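+		 * (ctl_stats.arenas[narenas] is the extra element allocated by
+		 * ctl_init() that aggregates statistics across all arenas.)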
*/ + ctl_arena_stats_smerge(sstats, astats); + } else { + astats->pactive += arena->nactive; + astats->pdirty += arena->ndirty; + /* Merge into sum stats as well. */ + sstats->pactive += arena->nactive; + sstats->pdirty += arena->ndirty; + } +} + +static void +ctl_refresh(void) +{ + unsigned i; + arena_t *tarenas[narenas]; + + if (config_stats) { + malloc_mutex_lock(&chunks_mtx); + ctl_stats.chunks.current = stats_chunks.curchunks; + ctl_stats.chunks.total = stats_chunks.nchunks; + ctl_stats.chunks.high = stats_chunks.highchunks; + malloc_mutex_unlock(&chunks_mtx); + + malloc_mutex_lock(&huge_mtx); + ctl_stats.huge.allocated = huge_allocated; + ctl_stats.huge.nmalloc = huge_nmalloc; + ctl_stats.huge.ndalloc = huge_ndalloc; + malloc_mutex_unlock(&huge_mtx); + } + + /* + * Clear sum stats, since they will be merged into by + * ctl_arena_refresh(). + */ + ctl_stats.arenas[narenas].nthreads = 0; + ctl_arena_clear(&ctl_stats.arenas[narenas]); + + malloc_mutex_lock(&arenas_lock); + memcpy(tarenas, arenas, sizeof(arena_t *) * narenas); + for (i = 0; i < narenas; i++) { + if (arenas[i] != NULL) + ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; + else + ctl_stats.arenas[i].nthreads = 0; + } + malloc_mutex_unlock(&arenas_lock); + for (i = 0; i < narenas; i++) { + bool initialized = (tarenas[i] != NULL); + + ctl_stats.arenas[i].initialized = initialized; + if (initialized) + ctl_arena_refresh(tarenas[i], i); + } + + if (config_stats) { + ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small + + ctl_stats.arenas[narenas].astats.allocated_large + + ctl_stats.huge.allocated; + ctl_stats.active = (ctl_stats.arenas[narenas].pactive << + LG_PAGE) + ctl_stats.huge.allocated; + ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); + } + + ctl_epoch++; +} + +static bool +ctl_init(void) +{ + bool ret; + + malloc_mutex_lock(&ctl_mtx); + if (ctl_initialized == false) { + /* + * Allocate space for one extra arena stats element, which + * contains summed stats across all arenas. + */ + ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( + (narenas + 1) * sizeof(ctl_arena_stats_t)); + if (ctl_stats.arenas == NULL) { + ret = true; + goto label_return; + } + memset(ctl_stats.arenas, 0, (narenas + 1) * + sizeof(ctl_arena_stats_t)); + + /* + * Initialize all stats structures, regardless of whether they + * ever get used. Lazy initialization would allow errors to + * cause inconsistent state to be viewable by the application. + */ + if (config_stats) { + unsigned i; + for (i = 0; i <= narenas; i++) { + if (ctl_arena_init(&ctl_stats.arenas[i])) { + ret = true; + goto label_return; + } + } + } + ctl_stats.arenas[narenas].initialized = true; + + ctl_epoch = 0; + ctl_refresh(); + ctl_initialized = true; + } + + ret = false; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, + size_t *depthp) +{ + int ret; + const char *elm, *tdot, *dot; + size_t elen, i, j; + const ctl_node_t *node; + + elm = name; + /* Equivalent to strchrnul(). */ + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + if (elen == 0) { + ret = ENOENT; + goto label_return; + } + node = super_root_node; + for (i = 0; i < *depthp; i++) { + assert(node->named); + assert(node->u.named.nchildren > 0); + if (node->u.named.children[0].named) { + const ctl_node_t *pnode = node; + + /* Children are named. 
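+			 * A name such as "stats.arenas.0.mapped" is resolved
+			 * one dot-delimited element at a time: named levels
+			 * are matched by string comparison here, and indexed
+			 * levels (the "0") are parsed with malloc_strtoumax()
+			 * in the else branch.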
*/ + for (j = 0; j < node->u.named.nchildren; j++) { + const ctl_node_t *child = + &node->u.named.children[j]; + if (strlen(child->u.named.name) == elen + && strncmp(elm, child->u.named.name, + elen) == 0) { + node = child; + if (nodesp != NULL) + nodesp[i] = node; + mibp[i] = j; + break; + } + } + if (node == pnode) { + ret = ENOENT; + goto label_return; + } + } else { + uintmax_t index; + const ctl_node_t *inode; + + /* Children are indexed. */ + index = malloc_strtoumax(elm, NULL, 10); + if (index == UINTMAX_MAX || index > SIZE_T_MAX) { + ret = ENOENT; + goto label_return; + } + + inode = &node->u.named.children[0]; + node = inode->u.indexed.index(mibp, *depthp, + (size_t)index); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + + if (nodesp != NULL) + nodesp[i] = node; + mibp[i] = (size_t)index; + } + + if (node->ctl != NULL) { + /* Terminal node. */ + if (*dot != '\0') { + /* + * The name contains more elements than are + * in this path through the tree. + */ + ret = ENOENT; + goto label_return; + } + /* Complete lookup successful. */ + *depthp = i + 1; + break; + } + + /* Update elm. */ + if (*dot == '\0') { + /* No more elements. */ + ret = ENOENT; + goto label_return; + } + elm = &dot[1]; + dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : + strchr(elm, '\0'); + elen = (size_t)((uintptr_t)dot - (uintptr_t)elm); + } + + ret = 0; +label_return: + return (ret); +} + +int +ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + int ret; + size_t depth; + ctl_node_t const *nodes[CTL_MAX_DEPTH]; + size_t mib[CTL_MAX_DEPTH]; + + if (ctl_initialized == false && ctl_init()) { + ret = EAGAIN; + goto label_return; + } + + depth = CTL_MAX_DEPTH; + ret = ctl_lookup(name, nodes, mib, &depth); + if (ret != 0) + goto label_return; + + if (nodes[depth-1]->ctl == NULL) { + /* The name refers to a partial path through the ctl tree. */ + ret = ENOENT; + goto label_return; + } + + ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen); +label_return: + return(ret); +} + +int +ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + int ret; + + if (ctl_initialized == false && ctl_init()) { + ret = EAGAIN; + goto label_return; + } + + ret = ctl_lookup(name, NULL, mibp, miblenp); +label_return: + return(ret); +} + +int +ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const ctl_node_t *node; + size_t i; + + if (ctl_initialized == false && ctl_init()) { + ret = EAGAIN; + goto label_return; + } + + /* Iterate down the tree. */ + node = super_root_node; + for (i = 0; i < miblen; i++) { + if (node->u.named.children[0].named) { + /* Children are named. */ + if (node->u.named.nchildren <= mib[i]) { + ret = ENOENT; + goto label_return; + } + node = &node->u.named.children[mib[i]]; + } else { + const ctl_node_t *inode; + + /* Indexed element. */ + inode = &node->u.named.children[0]; + node = inode->u.indexed.index(mib, miblen, mib[i]); + if (node == NULL) { + ret = ENOENT; + goto label_return; + } + } + } + + /* Call the ctl function. */ + if (node->ctl == NULL) { + /* Partial MIB. */ + ret = ENOENT; + goto label_return; + } + ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); + +label_return: + return(ret); +} + +bool +ctl_boot(void) +{ + + if (malloc_mutex_init(&ctl_mtx)) + return (true); + + ctl_initialized = false; + + return (false); +} + +/******************************************************************************/ +/* *_ctl() functions. 
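+ * The READONLY()/WRITEONLY()/READ()/WRITE() macros below implement the
+ * sysctl-style (oldp, oldlenp, newp, newlen) protocol shared by every ctl
+ * handler; the CTL_RO_*GEN() macros then stamp out read-only handlers that
+ * differ only in locking and in whether a config_* condition gates them.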
*/ + +#define READONLY() do { \ + if (newp != NULL || newlen != 0) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define WRITEONLY() do { \ + if (oldp != NULL || oldlenp != NULL) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define VOID() do { \ + READONLY(); \ + WRITEONLY(); \ +} while (0) + +#define READ(v, t) do { \ + if (oldp != NULL && oldlenp != NULL) { \ + if (*oldlenp != sizeof(t)) { \ + size_t copylen = (sizeof(t) <= *oldlenp) \ + ? sizeof(t) : *oldlenp; \ + memcpy(oldp, (void *)&v, copylen); \ + ret = EINVAL; \ + goto label_return; \ + } else \ + *(t *)oldp = v; \ + } \ +} while (0) + +#define WRITE(v, t) do { \ + if (newp != NULL) { \ + if (newlen != sizeof(t)) { \ + ret = EINVAL; \ + goto label_return; \ + } \ + v = *(t *)newp; \ + } \ +} while (0) + +/* + * There's a lot of code duplication in the following macros due to limitations + * in how nested cpp macros are expanded. + */ +#define CTL_RO_CLGEN(c, l, n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if ((c) == false) \ + return (ENOENT); \ + if (l) \ + malloc_mutex_lock(&ctl_mtx); \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + if (l) \ + malloc_mutex_unlock(&ctl_mtx); \ + return (ret); \ +} + +#define CTL_RO_CGEN(c, n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if ((c) == false) \ + return (ENOENT); \ + malloc_mutex_lock(&ctl_mtx); \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(&ctl_mtx); \ + return (ret); \ +} + +#define CTL_RO_GEN(n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + malloc_mutex_lock(&ctl_mtx); \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + malloc_mutex_unlock(&ctl_mtx); \ + return (ret); \ +} + +/* + * ctl_mtx is not acquired, under the assumption that no pertinent data will + * mutate during the call. 
+ */ +#define CTL_RO_NL_CGEN(c, n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + if ((c) == false) \ + return (ENOENT); \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_RO_NL_GEN(n, v, t) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + t oldval; \ + \ + READONLY(); \ + oldval = v; \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +#define CTL_RO_BOOL_CONFIG_GEN(n) \ +static int \ +n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ + void *newp, size_t newlen) \ +{ \ + int ret; \ + bool oldval; \ + \ + READONLY(); \ + oldval = n; \ + READ(oldval, bool); \ + \ + ret = 0; \ +label_return: \ + return (ret); \ +} + +CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) + +static int +epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + uint64_t newval; + + malloc_mutex_lock(&ctl_mtx); + newval = 0; + WRITE(newval, uint64_t); + if (newval != 0) + ctl_refresh(); + READ(ctl_epoch, uint64_t); + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (config_tcache == false) + return (ENOENT); + + oldval = tcache_enabled_get(); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + tcache_enabled_set(*(bool *)newp); + } + READ(oldval, bool); + +label_return: + ret = 0; + return (ret); +} + +static int +thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + + if (config_tcache == false) + return (ENOENT); + + VOID(); + + tcache_flush(); + + ret = 0; +label_return: + return (ret); +} + +static int +thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + unsigned newind, oldind; + + newind = oldind = choose_arena(NULL)->ind; + WRITE(newind, unsigned); + READ(oldind, unsigned); + if (newind != oldind) { + arena_t *arena; + + if (newind >= narenas) { + /* New arena index is out of range. */ + ret = EFAULT; + goto label_return; + } + + /* Initialize arena if necessary. */ + malloc_mutex_lock(&arenas_lock); + if ((arena = arenas[newind]) == NULL && (arena = + arenas_extend(newind)) == NULL) { + malloc_mutex_unlock(&arenas_lock); + ret = EAGAIN; + goto label_return; + } + assert(arena == arenas[newind]); + arenas[oldind]->nthreads--; + arenas[newind]->nthreads++; + malloc_mutex_unlock(&arenas_lock); + + /* Set new arena association. 
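+		 * If the thread has an active tcache, re-bind it to the new
+		 * arena first so that subsequent cache fills and flushes
+		 * operate on the correct arena's bins, then update the
+		 * thread's arena TSD slot.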
*/ + if (config_tcache) { + tcache_t *tcache; + if ((uintptr_t)(tcache = *tcache_tsd_get()) > + (uintptr_t)TCACHE_STATE_MAX) { + tcache_arena_dissociate(tcache); + tcache_arena_associate(tcache, arena); + } + } + arenas_tsd_set(&arena); + } + + ret = 0; +label_return: + return (ret); +} + +CTL_RO_NL_CGEN(config_stats, thread_allocated, + thread_allocated_tsd_get()->allocated, uint64_t) +CTL_RO_NL_CGEN(config_stats, thread_allocatedp, + &thread_allocated_tsd_get()->allocated, uint64_t *) +CTL_RO_NL_CGEN(config_stats, thread_deallocated, + thread_allocated_tsd_get()->deallocated, uint64_t) +CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, + &thread_allocated_tsd_get()->deallocated, uint64_t *) + +/******************************************************************************/ + +CTL_RO_BOOL_CONFIG_GEN(config_debug) +CTL_RO_BOOL_CONFIG_GEN(config_dss) +CTL_RO_BOOL_CONFIG_GEN(config_fill) +CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) +CTL_RO_BOOL_CONFIG_GEN(config_prof) +CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) +CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) +CTL_RO_BOOL_CONFIG_GEN(config_stats) +CTL_RO_BOOL_CONFIG_GEN(config_tcache) +CTL_RO_BOOL_CONFIG_GEN(config_tls) +CTL_RO_BOOL_CONFIG_GEN(config_utrace) +CTL_RO_BOOL_CONFIG_GEN(config_valgrind) +CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) + +/******************************************************************************/ + +CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) +CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) +CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) +CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) +CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) +CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) +CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) +CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) +CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) +CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) +CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. 
*/ +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) +CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) +CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) + +/******************************************************************************/ + +CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) +CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +const ctl_node_t * +arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > NBINS) + return (NULL); + return (super_arenas_bin_i_node); +} + +CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) +const ctl_node_t * +arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) +{ + + if (i > nlclasses) + return (NULL); + return (super_arenas_lrun_i_node); +} + +CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned) + +static int +arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) +{ + int ret; + unsigned nread, i; + + malloc_mutex_lock(&ctl_mtx); + READONLY(); + if (*oldlenp != narenas * sizeof(bool)) { + ret = EINVAL; + nread = (*oldlenp < narenas * sizeof(bool)) + ? (*oldlenp / sizeof(bool)) : narenas; + } else { + ret = 0; + nread = narenas; + } + + for (i = 0; i < nread; i++) + ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; + +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) +CTL_RO_NL_GEN(arenas_page, PAGE, size_t) +CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) +CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) + +static int +arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + unsigned arena; + + WRITEONLY(); + arena = UINT_MAX; + WRITE(arena, unsigned); + if (newp != NULL && arena >= narenas) { + ret = EFAULT; + goto label_return; + } else { + arena_t *tarenas[narenas]; + + malloc_mutex_lock(&arenas_lock); + memcpy(tarenas, arenas, sizeof(arena_t *) * narenas); + malloc_mutex_unlock(&arenas_lock); + + if (arena == UINT_MAX) { + unsigned i; + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) + arena_purge_all(tarenas[i]); + } + } else { + assert(arena < narenas); + if (tarenas[arena] != NULL) + arena_purge_all(tarenas[arena]); + } + } + + ret = 0; +label_return: + return (ret); +} + +/******************************************************************************/ + +static int +prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + bool oldval; + + if (config_prof == false) + return (ENOENT); + + malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */ + oldval = opt_prof_active; + if (newp != NULL) { + /* + * The memory barriers will tend to make opt_prof_active + * propagate faster on systems with weak memory ordering. 
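+		 * (Readers, e.g. the profiling sample path, do not take
+		 * ctl_mtx when checking opt_prof_active, so the barriers
+		 * merely hasten visibility of the new value rather than being
+		 * strictly required.)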
+ */ + mb_write(); + WRITE(opt_prof_active, bool); + mb_write(); + } + READ(oldval, bool); + + ret = 0; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +static int +prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + int ret; + const char *filename = NULL; + + if (config_prof == false) + return (ENOENT); + + WRITEONLY(); + WRITE(filename, const char *); + + if (prof_mdump(filename)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return (ret); +} + +CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) + +/******************************************************************************/ + +CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, + size_t) +CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) +CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t) +CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) +CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, + ctl_stats.arenas[mib[2]].allocated_small, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, + ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, + ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, + ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, + ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, + ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, + ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, + ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated, + ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, + ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) +CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, + ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + +const ctl_node_t * +stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) +{ + + if (j > NBINS) + return (NULL); + return (super_stats_arenas_i_bins_j_node); +} + +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, + 
ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, + ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, + ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t) + +const ctl_node_t * +stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) +{ + + if (j > nlclasses) + return (NULL); + return (super_stats_arenas_i_lruns_j_node); +} + +CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + ctl_stats.arenas[mib[2]].astats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, + ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, + ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_purged, + ctl_stats.arenas[mib[2]].astats.purged, uint64_t) + +const ctl_node_t * +stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) +{ + const ctl_node_t * ret; + + malloc_mutex_lock(&ctl_mtx); + if (ctl_stats.arenas[i].initialized == false) { + ret = NULL; + goto label_return; + } + + ret = super_stats_arenas_i_node; +label_return: + malloc_mutex_unlock(&ctl_mtx); + return (ret); +} + +CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) Property changes on: contrib/jemalloc/src/ctl.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/hash.c =================================================================== --- contrib/jemalloc/src/hash.c (revision 0) +++ contrib/jemalloc/src/hash.c (working copy) @@ -0,0 +1,2 @@ +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_internal.h" Property changes on: contrib/jemalloc/src/hash.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/arena.c =================================================================== --- contrib/jemalloc/src/arena.c (revision 0) +++ contrib/jemalloc/src/arena.c (working copy) @@ -0,0 +1,2248 @@ +#define JEMALLOC_ARENA_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. 
*/ + +ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; +arena_bin_info_t arena_bin_info[NBINS]; + +JEMALLOC_ATTR(aligned(CACHELINE)) +const uint8_t small_size2bin[] = { +#define S2B_8(i) i, +#define S2B_16(i) S2B_8(i) S2B_8(i) +#define S2B_32(i) S2B_16(i) S2B_16(i) +#define S2B_64(i) S2B_32(i) S2B_32(i) +#define S2B_128(i) S2B_64(i) S2B_64(i) +#define S2B_256(i) S2B_128(i) S2B_128(i) +#define S2B_512(i) S2B_256(i) S2B_256(i) +#define S2B_1024(i) S2B_512(i) S2B_512(i) +#define S2B_2048(i) S2B_1024(i) S2B_1024(i) +#define S2B_4096(i) S2B_2048(i) S2B_2048(i) +#define S2B_8192(i) S2B_4096(i) S2B_4096(i) +#define SIZE_CLASS(bin, delta, size) \ + S2B_##delta(bin) + SIZE_CLASSES +#undef S2B_8 +#undef S2B_16 +#undef S2B_32 +#undef S2B_64 +#undef S2B_128 +#undef S2B_256 +#undef S2B_512 +#undef S2B_1024 +#undef S2B_2048 +#undef S2B_4096 +#undef S2B_8192 +#undef SIZE_CLASS +}; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size, + bool large, bool zero); +static arena_chunk_t *arena_chunk_alloc(arena_t *arena); +static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk); +static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large, + bool zero); +static void arena_purge(arena_t *arena, bool all); +static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty); +static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize); +static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, size_t oldsize, size_t newsize, bool dirty); +static arena_run_t *arena_bin_runs_first(arena_bin_t *bin); +static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run); +static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run); +static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin); +static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin); +static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin); +static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin); +static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin); +static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, + arena_run_t *run, arena_bin_t *bin); +static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t size); +static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, + void *ptr, size_t oldsize, size_t size, size_t extra, bool zero); +static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero); +static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info, + size_t min_run_size); +static void bin_info_init(void); + +/******************************************************************************/ + +static inline int +arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) +{ + uintptr_t a_mapelm = (uintptr_t)a; + uintptr_t b_mapelm = (uintptr_t)b; + + assert(a != NULL); + assert(b != NULL); + + return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); +} + +/* Generate red-black tree functions. 
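+ * rb_gen() emits a family of static functions (arena_run_tree_insert(),
+ * arena_run_tree_remove(), arena_run_tree_nsearch(), etc.) specialized for
+ * arena_chunk_map_t nodes linked through u.rb_link and ordered by
+ * arena_run_comp(), i.e. by map element address.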
*/ +rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, + u.rb_link, arena_run_comp) + +static inline int +arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) +{ + int ret; + size_t a_size = a->bits & ~PAGE_MASK; + size_t b_size = b->bits & ~PAGE_MASK; + + assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits & + CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY)); + + ret = (a_size > b_size) - (a_size < b_size); + if (ret == 0) { + uintptr_t a_mapelm, b_mapelm; + + if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) + a_mapelm = (uintptr_t)a; + else { + /* + * Treat keys as though they are lower than anything + * else. + */ + a_mapelm = 0; + } + b_mapelm = (uintptr_t)b; + + ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); + } + + return (ret); +} + +/* Generate red-black tree functions. */ +rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, + u.rb_link, arena_avail_comp) + +static inline void * +arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) +{ + void *ret; + unsigned regind; + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + + (uintptr_t)bin_info->bitmap_offset); + + assert(run->nfree > 0); + assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); + + regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); + ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + + (uintptr_t)(bin_info->reg_interval * regind)); + run->nfree--; + if (regind == run->nextind) + run->nextind++; + assert(regind < run->nextind); + return (ret); +} + +static inline void +arena_run_reg_dalloc(arena_run_t *run, void *ptr) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + size_t binind = arena_bin_index(chunk->arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + unsigned regind = arena_run_regind(run, bin_info, ptr); + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + + (uintptr_t)bin_info->bitmap_offset); + + assert(run->nfree < bin_info->nregs); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - ((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset)) % + (uintptr_t)bin_info->reg_interval == 0); + assert((uintptr_t)ptr >= (uintptr_t)run + + (uintptr_t)bin_info->reg0_offset); + /* Freeing an unallocated pointer can cause assertion failure. */ + assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); + + bitmap_unset(bitmap, &bin_info->bitmap_info, regind); + run->nfree++; +} + +static inline void +arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) +{ + size_t i; + UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); + + for (i = 0; i < PAGE / sizeof(size_t); i++) + assert(p[i] == 0); +} + +static void +arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large, + bool zero) +{ + arena_chunk_t *chunk; + size_t run_ind, total_pages, need_pages, rem_pages, i; + size_t flag_dirty; + arena_avail_tree_t *runs_avail; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY; + runs_avail = (flag_dirty != 0) ? 
&arena->runs_avail_dirty : + &arena->runs_avail_clean; + total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >> + LG_PAGE; + assert((chunk->map[run_ind+total_pages-1-map_bias].bits & + CHUNK_MAP_DIRTY) == flag_dirty); + need_pages = (size >> LG_PAGE); + assert(need_pages > 0); + assert(need_pages <= total_pages); + rem_pages = total_pages - need_pages; + + arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]); + if (config_stats) { + /* + * Update stats_cactive if nactive is crossing a chunk + * multiple. + */ + size_t cactive_diff = CHUNK_CEILING((arena->nactive + + need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive << + LG_PAGE); + if (cactive_diff != 0) + stats_cactive_add(cactive_diff); + } + arena->nactive += need_pages; + + /* Keep track of trailing unused pages for later use. */ + if (rem_pages > 0) { + if (flag_dirty != 0) { + chunk->map[run_ind+need_pages-map_bias].bits = + (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY; + chunk->map[run_ind+total_pages-1-map_bias].bits = + (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY; + } else { + chunk->map[run_ind+need_pages-map_bias].bits = + (rem_pages << LG_PAGE) | + (chunk->map[run_ind+need_pages-map_bias].bits & + CHUNK_MAP_UNZEROED); + chunk->map[run_ind+total_pages-1-map_bias].bits = + (rem_pages << LG_PAGE) | + (chunk->map[run_ind+total_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED); + } + arena_avail_tree_insert(runs_avail, + &chunk->map[run_ind+need_pages-map_bias]); + } + + /* Update dirty page accounting. */ + if (flag_dirty != 0) { + chunk->ndirty -= need_pages; + arena->ndirty -= need_pages; + } + + /* + * Update the page map separately for large vs. small runs, since it is + * possible to avoid iteration for large mallocs. + */ + if (large) { + if (zero) { + if (flag_dirty == 0) { + /* + * The run is clean, so some pages may be + * zeroed (i.e. never before touched). + */ + for (i = 0; i < need_pages; i++) { + if ((chunk->map[run_ind+i-map_bias].bits + & CHUNK_MAP_UNZEROED) != 0) { + VALGRIND_MAKE_MEM_UNDEFINED( + (void *)((uintptr_t) + chunk + ((run_ind+i) << + LG_PAGE)), PAGE); + memset((void *)((uintptr_t) + chunk + ((run_ind+i) << + LG_PAGE)), 0, PAGE); + } else if (config_debug) { + VALGRIND_MAKE_MEM_DEFINED( + (void *)((uintptr_t) + chunk + ((run_ind+i) << + LG_PAGE)), PAGE); + arena_chunk_validate_zeroed( + chunk, run_ind+i); + } + } + } else { + /* + * The run is dirty, so all pages must be + * zeroed. + */ + VALGRIND_MAKE_MEM_UNDEFINED((void + *)((uintptr_t)chunk + (run_ind << + LG_PAGE)), (need_pages << LG_PAGE)); + memset((void *)((uintptr_t)chunk + (run_ind << + LG_PAGE)), 0, (need_pages << LG_PAGE)); + } + } + + /* + * Set the last element first, in case the run only contains one + * page (i.e. both statements set the same element). + */ + chunk->map[run_ind+need_pages-1-map_bias].bits = + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty; + chunk->map[run_ind-map_bias].bits = size | flag_dirty | + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + } else { + assert(zero == false); + /* + * Propagate the dirty and unzeroed flags to the allocated + * small run, so that arena_dalloc_bin_run() has the ability to + * conditionally trim clean pages. + */ + chunk->map[run_ind-map_bias].bits = + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) | + CHUNK_MAP_ALLOCATED | flag_dirty; + /* + * The first page will always be dirtied during small run + * initialization, so a validation failure here would not + * actually cause an observable failure. 
+ */ + if (config_debug && flag_dirty == 0 && + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) + == 0) + arena_chunk_validate_zeroed(chunk, run_ind); + for (i = 1; i < need_pages - 1; i++) { + chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE) + | (chunk->map[run_ind+i-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED; + if (config_debug && flag_dirty == 0 && + (chunk->map[run_ind+i-map_bias].bits & + CHUNK_MAP_UNZEROED) == 0) + arena_chunk_validate_zeroed(chunk, run_ind+i); + } + chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages + - 1) << LG_PAGE) | + (chunk->map[run_ind+need_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty; + if (config_debug && flag_dirty == 0 && + (chunk->map[run_ind+need_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) == 0) { + arena_chunk_validate_zeroed(chunk, + run_ind+need_pages-1); + } + } +} + +static arena_chunk_t * +arena_chunk_alloc(arena_t *arena) +{ + arena_chunk_t *chunk; + size_t i; + + if (arena->spare != NULL) { + arena_avail_tree_t *runs_avail; + + chunk = arena->spare; + arena->spare = NULL; + + /* Insert the run into the appropriate runs_avail_* tree. */ + if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) + runs_avail = &arena->runs_avail_clean; + else + runs_avail = &arena->runs_avail_dirty; + assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass); + assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK) + == arena_maxclass); + assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) == + (chunk->map[chunk_npages-1-map_bias].bits & + CHUNK_MAP_DIRTY)); + arena_avail_tree_insert(runs_avail, &chunk->map[0]); + } else { + bool zero; + size_t unzeroed; + + zero = false; + malloc_mutex_unlock(&arena->lock); + chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, + false, &zero); + malloc_mutex_lock(&arena->lock); + if (chunk == NULL) + return (NULL); + if (config_stats) + arena->stats.mapped += chunksize; + + chunk->arena = arena; + ql_elm_new(chunk, link_dirty); + chunk->dirtied = false; + + /* + * Claim that no pages are in use, since the header is merely + * overhead. + */ + chunk->ndirty = 0; + + /* + * Initialize the map to contain one maximal free untouched run. + * Mark the pages as zeroed iff chunk_alloc() returned a zeroed + * chunk. + */ + unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; + chunk->map[0].bits = arena_maxclass | unzeroed; + /* + * There is no need to initialize the internal page map entries + * unless the chunk is not zeroed. + */ + if (zero == false) { + for (i = map_bias+1; i < chunk_npages-1; i++) + chunk->map[i-map_bias].bits = unzeroed; + } else if (config_debug) { + for (i = map_bias+1; i < chunk_npages-1; i++) + assert(chunk->map[i-map_bias].bits == unzeroed); + } + chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass | + unzeroed; + + /* Insert the run into the runs_avail_clean tree. */ + arena_avail_tree_insert(&arena->runs_avail_clean, + &chunk->map[0]); + } + + return (chunk); +} + +static void +arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) +{ + arena_avail_tree_t *runs_avail; + + /* + * Remove run from the appropriate runs_avail_* tree, so that the arena + * does not use it. 
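+	 * The chunk then becomes the arena's spare; if a spare already
+	 * exists, the old spare is unmapped via chunk_dealloc().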
+ */ + if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0) + runs_avail = &arena->runs_avail_clean; + else + runs_avail = &arena->runs_avail_dirty; + arena_avail_tree_remove(runs_avail, &chunk->map[0]); + + if (arena->spare != NULL) { + arena_chunk_t *spare = arena->spare; + + arena->spare = chunk; + if (spare->dirtied) { + ql_remove(&chunk->arena->chunks_dirty, spare, + link_dirty); + arena->ndirty -= spare->ndirty; + } + malloc_mutex_unlock(&arena->lock); + chunk_dealloc((void *)spare, chunksize, true); + malloc_mutex_lock(&arena->lock); + if (config_stats) + arena->stats.mapped -= chunksize; + } else + arena->spare = chunk; +} + +static arena_run_t * +arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero) +{ + arena_chunk_t *chunk; + arena_run_t *run; + arena_chunk_map_t *mapelm, key; + + assert(size <= arena_maxclass); + assert((size & PAGE_MASK) == 0); + + /* Search the arena's chunks for the lowest best fit. */ + key.bits = size | CHUNK_MAP_KEY; + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + LG_PAGE)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + LG_PAGE)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + + /* + * No usable runs. Create a new chunk from which to allocate the run. + */ + chunk = arena_chunk_alloc(arena); + if (chunk != NULL) { + run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + + /* + * arena_chunk_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped arena->lock in + * arena_chunk_alloc(), so search one more time. + */ + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + LG_PAGE)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key); + if (mapelm != NULL) { + arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); + size_t pageind = (((uintptr_t)mapelm - + (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) + + map_bias; + + run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << + LG_PAGE)); + arena_run_split(arena, run, size, large, zero); + return (run); + } + + return (NULL); +} + +static inline void +arena_maybe_purge(arena_t *arena) +{ + + /* Enforce opt_lg_dirty_mult. 
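+	 * Purge only if the dirty pages not already slated for purging
+	 * exceed both chunk_npages and nactive >> opt_lg_dirty_mult; e.g.
+	 * with opt_lg_dirty_mult == 3, purging starts once they exceed one
+	 * eighth of the active pages.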
*/ + if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory && + (arena->ndirty - arena->npurgatory) > chunk_npages && + (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - + arena->npurgatory)) + arena_purge(arena, false); +} + +static inline void +arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk) +{ + ql_head(arena_chunk_map_t) mapelms; + arena_chunk_map_t *mapelm; + size_t pageind, flag_unzeroed; + size_t ndirty; + size_t nmadvise; + + ql_new(&mapelms); + + flag_unzeroed = +#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED + /* + * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous + * mappings, but not for file-backed mappings. + */ + 0 +#else + CHUNK_MAP_UNZEROED +#endif + ; + + /* + * If chunk is the spare, temporarily re-allocate it, 1) so that its + * run is reinserted into runs_avail_dirty, and 2) so that it cannot be + * completely discarded by another thread while arena->lock is dropped + * by this thread. Note that the arena_run_dalloc() call will + * implicitly deallocate the chunk, so no explicit action is required + * in this function to deallocate the chunk. + * + * Note that once a chunk contains dirty pages, it cannot again contain + * a single run unless 1) it is a dirty run, or 2) this function purges + * dirty pages and causes the transition to a single clean run. Thus + * (chunk == arena->spare) is possible, but it is not possible for + * this function to be called on the spare unless it contains a dirty + * run. + */ + if (chunk == arena->spare) { + assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0); + arena_chunk_alloc(arena); + } + + /* Temporarily allocate all free dirty runs within chunk. */ + for (pageind = map_bias; pageind < chunk_npages;) { + mapelm = &chunk->map[pageind-map_bias]; + if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) { + size_t npages; + + npages = mapelm->bits >> LG_PAGE; + assert(pageind + npages <= chunk_npages); + if (mapelm->bits & CHUNK_MAP_DIRTY) { + size_t i; + + arena_avail_tree_remove( + &arena->runs_avail_dirty, mapelm); + + mapelm->bits = (npages << LG_PAGE) | + flag_unzeroed | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED; + /* + * Update internal elements in the page map, so + * that CHUNK_MAP_UNZEROED is properly set. + */ + for (i = 1; i < npages - 1; i++) { + chunk->map[pageind+i-map_bias].bits = + flag_unzeroed; + } + if (npages > 1) { + chunk->map[ + pageind+npages-1-map_bias].bits = + flag_unzeroed | CHUNK_MAP_LARGE | + CHUNK_MAP_ALLOCATED; + } + + if (config_stats) { + /* + * Update stats_cactive if nactive is + * crossing a chunk multiple. + */ + size_t cactive_diff = + CHUNK_CEILING((arena->nactive + + npages) << LG_PAGE) - + CHUNK_CEILING(arena->nactive << + LG_PAGE); + if (cactive_diff != 0) + stats_cactive_add(cactive_diff); + } + arena->nactive += npages; + /* Append to list for later processing. */ + ql_elm_new(mapelm, u.ql_link); + ql_tail_insert(&mapelms, mapelm, u.ql_link); + } + + pageind += npages; + } else { + /* Skip allocated run. 
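+				 * Large runs record their length in the map
+				 * bits; small runs require a lookup of the
+				 * owning bin's run size.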
*/ + if (mapelm->bits & CHUNK_MAP_LARGE) + pageind += mapelm->bits >> LG_PAGE; + else { + arena_run_t *run = (arena_run_t *)((uintptr_t) + chunk + (uintptr_t)(pageind << LG_PAGE)); + + assert((mapelm->bits >> LG_PAGE) == 0); + size_t binind = arena_bin_index(arena, + run->bin); + arena_bin_info_t *bin_info = + &arena_bin_info[binind]; + pageind += bin_info->run_size >> LG_PAGE; + } + } + } + assert(pageind == chunk_npages); + + if (config_debug) + ndirty = chunk->ndirty; + if (config_stats) + arena->stats.purged += chunk->ndirty; + arena->ndirty -= chunk->ndirty; + chunk->ndirty = 0; + ql_remove(&arena->chunks_dirty, chunk, link_dirty); + chunk->dirtied = false; + + malloc_mutex_unlock(&arena->lock); + if (config_stats) + nmadvise = 0; + ql_foreach(mapelm, &mapelms, u.ql_link) { + size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t)) + map_bias; + size_t npages = mapelm->bits >> LG_PAGE; + + assert(pageind + npages <= chunk_npages); + assert(ndirty >= npages); + if (config_debug) + ndirty -= npages; + + madvise((void *)((uintptr_t)chunk + (pageind << LG_PAGE)), + (npages << LG_PAGE), JEMALLOC_MADV_PURGE); + if (config_stats) + nmadvise++; + } + assert(ndirty == 0); + malloc_mutex_lock(&arena->lock); + if (config_stats) + arena->stats.nmadvise += nmadvise; + + /* Deallocate runs. */ + for (mapelm = ql_first(&mapelms); mapelm != NULL; + mapelm = ql_first(&mapelms)) { + size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t)) + map_bias; + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)(pageind << LG_PAGE)); + + ql_remove(&mapelms, mapelm, u.ql_link); + arena_run_dalloc(arena, run, false); + } +} + +static void +arena_purge(arena_t *arena, bool all) +{ + arena_chunk_t *chunk; + size_t npurgatory; + if (config_debug) { + size_t ndirty = 0; + + ql_foreach(chunk, &arena->chunks_dirty, link_dirty) { + assert(chunk->dirtied); + ndirty += chunk->ndirty; + } + assert(ndirty == arena->ndirty); + } + assert(arena->ndirty > arena->npurgatory || all); + assert(arena->ndirty - arena->npurgatory > chunk_npages || all); + assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - + arena->npurgatory) || all); + + if (config_stats) + arena->stats.npurge++; + + /* + * Compute the minimum number of pages that this thread should try to + * purge, and add the result to arena->npurgatory. This will keep + * multiple threads from racing to reduce ndirty below the threshold. + */ + npurgatory = arena->ndirty - arena->npurgatory; + if (all == false) { + assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult); + npurgatory -= arena->nactive >> opt_lg_dirty_mult; + } + arena->npurgatory += npurgatory; + + while (npurgatory > 0) { + /* Get next chunk with dirty pages. */ + chunk = ql_first(&arena->chunks_dirty); + if (chunk == NULL) { + /* + * This thread was unable to purge as many pages as + * originally intended, due to races with other threads + * that either did some of the purging work, or re-used + * dirty pages. + */ + arena->npurgatory -= npurgatory; + return; + } + while (chunk->ndirty == 0) { + ql_remove(&arena->chunks_dirty, chunk, link_dirty); + chunk->dirtied = false; + chunk = ql_first(&arena->chunks_dirty); + if (chunk == NULL) { + /* Same logic as for above. 
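+					 * No chunks with dirty pages remain,
+					 * so return the unused purgatory
+					 * credit and stop.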
*/ + arena->npurgatory -= npurgatory; + return; + } + } + + if (chunk->ndirty > npurgatory) { + /* + * This thread will, at a minimum, purge all the dirty + * pages in chunk, so set npurgatory to reflect this + * thread's commitment to purge the pages. This tends + * to reduce the chances of the following scenario: + * + * 1) This thread sets arena->npurgatory such that + * (arena->ndirty - arena->npurgatory) is at the + * threshold. + * 2) This thread drops arena->lock. + * 3) Another thread causes one or more pages to be + * dirtied, and immediately determines that it must + * purge dirty pages. + * + * If this scenario *does* play out, that's okay, + * because all of the purging work being done really + * needs to happen. + */ + arena->npurgatory += chunk->ndirty - npurgatory; + npurgatory = chunk->ndirty; + } + + arena->npurgatory -= chunk->ndirty; + npurgatory -= chunk->ndirty; + arena_chunk_purge(arena, chunk); + } +} + +void +arena_purge_all(arena_t *arena) +{ + + malloc_mutex_lock(&arena->lock); + arena_purge(arena, true); + malloc_mutex_unlock(&arena->lock); +} + +static void +arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty) +{ + arena_chunk_t *chunk; + size_t size, run_ind, run_pages, flag_dirty; + arena_avail_tree_t *runs_avail; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + assert(run_ind >= map_bias); + assert(run_ind < chunk_npages); + if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) { + size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK; + assert(size == PAGE || + (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits & + ~PAGE_MASK) == 0); + assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits & + CHUNK_MAP_LARGE) != 0); + assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits & + CHUNK_MAP_ALLOCATED) != 0); + } else { + size_t binind = arena_bin_index(arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + size = bin_info->run_size; + } + run_pages = (size >> LG_PAGE); + if (config_stats) { + /* + * Update stats_cactive if nactive is crossing a chunk + * multiple. + */ + size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) - + CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE); + if (cactive_diff != 0) + stats_cactive_sub(cactive_diff); + } + arena->nactive -= run_pages; + + /* + * The run is dirty if the caller claims to have dirtied it, as well as + * if it was already dirty before being allocated. + */ + if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0) + dirty = true; + flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; + runs_avail = dirty ? &arena->runs_avail_dirty : + &arena->runs_avail_clean; + + /* Mark pages as unallocated in the chunk map. */ + if (dirty) { + chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY; + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + CHUNK_MAP_DIRTY; + + chunk->ndirty += run_pages; + arena->ndirty += run_pages; + } else { + chunk->map[run_ind-map_bias].bits = size | + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED); + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + (chunk->map[run_ind+run_pages-1-map_bias].bits & + CHUNK_MAP_UNZEROED); + } + + /* Try to coalesce forward. 
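+	 * A neighboring run can only be absorbed if it is unallocated and its
+	 * dirty flag matches this run's, since clean and dirty runs are kept
+	 * in separate runs_avail trees.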
*/ + if (run_ind + run_pages < chunk_npages && + (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED) + == 0 && (chunk->map[run_ind+run_pages-map_bias].bits & + CHUNK_MAP_DIRTY) == flag_dirty) { + size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits & + ~PAGE_MASK; + size_t nrun_pages = nrun_size >> LG_PAGE; + + /* + * Remove successor from runs_avail; the coalesced run is + * inserted later. + */ + assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits + & ~PAGE_MASK) == nrun_size); + assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits + & CHUNK_MAP_ALLOCATED) == 0); + assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits + & CHUNK_MAP_DIRTY) == flag_dirty); + arena_avail_tree_remove(runs_avail, + &chunk->map[run_ind+run_pages-map_bias]); + + size += nrun_size; + run_pages += nrun_pages; + + chunk->map[run_ind-map_bias].bits = size | + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + (chunk->map[run_ind+run_pages-1-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + } + + /* Try to coalesce backward. */ + if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits & + CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits & + CHUNK_MAP_DIRTY) == flag_dirty) { + size_t prun_size = chunk->map[run_ind-1-map_bias].bits & + ~PAGE_MASK; + size_t prun_pages = prun_size >> LG_PAGE; + + run_ind -= prun_pages; + + /* + * Remove predecessor from runs_avail; the coalesced run is + * inserted later. + */ + assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) + == prun_size); + assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED) + == 0); + assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) + == flag_dirty); + arena_avail_tree_remove(runs_avail, + &chunk->map[run_ind-map_bias]); + + size += prun_size; + run_pages += prun_pages; + + chunk->map[run_ind-map_bias].bits = size | + (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK); + chunk->map[run_ind+run_pages-1-map_bias].bits = size | + (chunk->map[run_ind+run_pages-1-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + } + + /* Insert into runs_avail, now that coalescing is complete. */ + assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) == + (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK)); + assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == + (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY)); + arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]); + + if (dirty) { + /* + * Insert into chunks_dirty before potentially calling + * arena_chunk_dealloc(), so that chunks_dirty and + * arena->ndirty are consistent. + */ + if (chunk->dirtied == false) { + ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty); + chunk->dirtied = true; + } + } + + /* + * Deallocate chunk if it is now completely unused. The bit + * manipulation checks whether the first run is unallocated and extends + * to the end of the chunk. + */ + if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) == + arena_maxclass) + arena_chunk_dealloc(arena, chunk); + + /* + * It is okay to do dirty page processing here even if the chunk was + * deallocated above, since in that case it is the spare. Waiting + * until after possible chunk deallocation to do dirty processing + * allows for an old spare to be fully deallocated, thus decreasing the + * chances of spuriously crossing the dirty page purging threshold. 
+ */ + if (dirty) + arena_maybe_purge(arena); +} + +static void +arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t oldsize, size_t newsize) +{ + size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; + size_t head_npages = (oldsize - newsize) >> LG_PAGE; + size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * leading run as separately allocated. Set the last element of each + * run first, in case of single-page runs. + */ + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0); + chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty | + (chunk->map[pageind+head_npages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind-map_bias].bits = (oldsize - newsize) + | flag_dirty | (chunk->map[pageind-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + if (config_debug) { + UNUSED size_t tail_npages = newsize >> LG_PAGE; + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & ~PAGE_MASK) == 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & CHUNK_MAP_DIRTY) == flag_dirty); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias] + .bits & CHUNK_MAP_ALLOCATED) != 0); + } + chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty | + (chunk->map[pageind+head_npages-map_bias].bits & + CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + arena_run_dalloc(arena, run, false); +} + +static void +arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + size_t oldsize, size_t newsize, bool dirty) +{ + size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; + size_t head_npages = newsize >> LG_PAGE; + size_t tail_npages = (oldsize - newsize) >> LG_PAGE; + size_t flag_dirty = chunk->map[pageind-map_bias].bits & + CHUNK_MAP_DIRTY; + + assert(oldsize > newsize); + + /* + * Update the chunk map so that arena_run_dalloc() can treat the + * trailing run as separately allocated. Set the last element of each + * run first, in case of single-page runs. 
+ */ + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0); + chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty | + (chunk->map[pageind+head_npages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind-map_bias].bits = newsize | flag_dirty | + (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) | + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + ~PAGE_MASK) == 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + CHUNK_MAP_LARGE) != 0); + assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + CHUNK_MAP_ALLOCATED) != 0); + chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits = + flag_dirty | + (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) | + flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits & + CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), + dirty); +} + +static arena_run_t * +arena_bin_runs_first(arena_bin_t *bin) +{ + arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); + if (mapelm != NULL) { + arena_chunk_t *chunk; + size_t pageind; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); + pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / + sizeof(arena_chunk_map_t))) + map_bias; + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) << + LG_PAGE)); + return (run); + } + + return (NULL); +} + +static void +arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) +{ + arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); + size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias]; + + assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); + + arena_run_tree_insert(&bin->runs, mapelm); +} + +static void +arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) +{ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias]; + + assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); + + arena_run_tree_remove(&bin->runs, mapelm); +} + +static arena_run_t * +arena_bin_nonfull_run_tryget(arena_bin_t *bin) +{ + arena_run_t *run = arena_bin_runs_first(bin); + if (run != NULL) { + arena_bin_runs_remove(bin, run); + if (config_stats) + bin->stats.reruns++; + } + return (run); +} + +static arena_run_t * +arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) +{ + arena_run_t *run; + size_t binind; + arena_bin_info_t *bin_info; + + /* Look for a usable run. */ + run = arena_bin_nonfull_run_tryget(bin); + if (run != NULL) + return (run); + /* No existing runs have any space available. */ + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + + /* Allocate a new run. */ + malloc_mutex_unlock(&bin->lock); + /******************************/ + malloc_mutex_lock(&arena->lock); + run = arena_run_alloc(arena, bin_info->run_size, false, false); + if (run != NULL) { + bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + + (uintptr_t)bin_info->bitmap_offset); + + /* Initialize run internals. 
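+		 * nextind records one past the highest region ever allocated
+		 * from the run, which later lets arena_dalloc_bin_run() trim
+		 * trailing pages that were never touched.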
*/ + run->bin = bin; + run->nextind = 0; + run->nfree = bin_info->nregs; + bitmap_init(bitmap, &bin_info->bitmap_info); + } + malloc_mutex_unlock(&arena->lock); + /********************************/ + malloc_mutex_lock(&bin->lock); + if (run != NULL) { + if (config_stats) { + bin->stats.nruns++; + bin->stats.curruns++; + } + return (run); + } + + /* + * arena_run_alloc() failed, but another thread may have made + * sufficient memory available while this one dropped bin->lock above, + * so search one more time. + */ + run = arena_bin_nonfull_run_tryget(bin); + if (run != NULL) + return (run); + + return (NULL); +} + +/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ +static void * +arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) +{ + void *ret; + size_t binind; + arena_bin_info_t *bin_info; + arena_run_t *run; + + binind = arena_bin_index(arena, bin); + bin_info = &arena_bin_info[binind]; + bin->runcur = NULL; + run = arena_bin_nonfull_run_get(arena, bin); + if (bin->runcur != NULL && bin->runcur->nfree > 0) { + /* + * Another thread updated runcur while this one ran without the + * bin lock in arena_bin_nonfull_run_get(). + */ + assert(bin->runcur->nfree > 0); + ret = arena_run_reg_alloc(bin->runcur, bin_info); + if (run != NULL) { + arena_chunk_t *chunk; + + /* + * arena_run_alloc() may have allocated run, or it may + * have pulled run from the bin's run tree. Therefore + * it is unsafe to make any assumptions about how run + * has previously been used, and arena_bin_lower_run() + * must be called, as if a region were just deallocated + * from the run. + */ + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + if (run->nfree == bin_info->nregs) + arena_dalloc_bin_run(arena, chunk, run, bin); + else + arena_bin_lower_run(arena, chunk, run, bin); + } + return (ret); + } + + if (run == NULL) + return (NULL); + + bin->runcur = run; + + assert(bin->runcur->nfree > 0); + + return (arena_run_reg_alloc(bin->runcur, bin_info)); +} + +void +arena_prof_accum(arena_t *arena, uint64_t accumbytes) +{ + + if (prof_interval != 0) { + arena->prof_accumbytes += accumbytes; + if (arena->prof_accumbytes >= prof_interval) { + prof_idump(); + arena->prof_accumbytes -= prof_interval; + } + } +} + +void +arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, + uint64_t prof_accumbytes) +{ + unsigned i, nfill; + arena_bin_t *bin; + arena_run_t *run; + void *ptr; + + assert(tbin->ncached == 0); + + if (config_prof) { + malloc_mutex_lock(&arena->lock); + arena_prof_accum(arena, prof_accumbytes); + malloc_mutex_unlock(&arena->lock); + } + bin = &arena->bins[binind]; + malloc_mutex_lock(&bin->lock); + for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> + tbin->lg_fill_div); i < nfill; i++) { + if ((run = bin->runcur) != NULL && run->nfree > 0) + ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); + else + ptr = arena_bin_malloc_hard(arena, bin); + if (ptr == NULL) + break; + if (config_fill && opt_junk) { + arena_alloc_junk_small(ptr, &arena_bin_info[binind], + true); + } + /* Insert such that low regions get used first. 
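+			 * The tcache pops from the high end of avail, so
+			 * storing regions in reverse fill order hands out the
+			 * lowest addresses first.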
*/ + tbin->avail[nfill - 1 - i] = ptr; + } + if (config_stats) { + bin->stats.allocated += i * arena_bin_info[binind].reg_size; + bin->stats.nmalloc += i; + bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.nfills++; + tbin->tstats.nrequests = 0; + } + malloc_mutex_unlock(&bin->lock); + tbin->ncached = i; +} + +void +arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) +{ + + if (zero) { + size_t redzone_size = bin_info->redzone_size; + memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, + redzone_size); + memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, + redzone_size); + } else { + memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, + bin_info->reg_interval); + } +} + +void +arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) +{ + size_t size = bin_info->reg_size; + size_t redzone_size = bin_info->redzone_size; + size_t i; + bool error = false; + + for (i = 1; i <= redzone_size; i++) { + unsigned byte; + if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) { + error = true; + malloc_printf(": Corrupt redzone " + "%zu byte%s before %p (size %zu), byte=%#x\n", i, + (i == 1) ? "" : "s", ptr, size, byte); + } + } + for (i = 0; i < redzone_size; i++) { + unsigned byte; + if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) { + error = true; + malloc_printf(": Corrupt redzone " + "%zu byte%s after end of %p (size %zu), byte=%#x\n", + i, (i == 1) ? "" : "s", ptr, size, byte); + } + } + if (opt_abort && error) + abort(); + + memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, + bin_info->reg_interval); +} + +void * +arena_malloc_small(arena_t *arena, size_t size, bool zero) +{ + void *ret; + arena_bin_t *bin; + arena_run_t *run; + size_t binind; + + binind = SMALL_SIZE2BIN(size); + assert(binind < NBINS); + bin = &arena->bins[binind]; + size = arena_bin_info[binind].reg_size; + + malloc_mutex_lock(&bin->lock); + if ((run = bin->runcur) != NULL && run->nfree > 0) + ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); + else + ret = arena_bin_malloc_hard(arena, bin); + + if (ret == NULL) { + malloc_mutex_unlock(&bin->lock); + return (NULL); + } + + if (config_stats) { + bin->stats.allocated += size; + bin->stats.nmalloc++; + bin->stats.nrequests++; + } + malloc_mutex_unlock(&bin->lock); + if (config_prof && isthreaded == false) { + malloc_mutex_lock(&arena->lock); + arena_prof_accum(arena, size); + malloc_mutex_unlock(&arena->lock); + } + + if (zero == false) { + if (config_fill) { + if (opt_junk) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (opt_zero) + memset(ret, 0, size); + } + } else { + if (config_fill && opt_junk) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + memset(ret, 0, size); + } + + return (ret); +} + +void * +arena_malloc_large(arena_t *arena, size_t size, bool zero) +{ + void *ret; + + /* Large allocation. 
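+	 * Round the request up to a page multiple and carve a run directly
+	 * out of the arena; large allocations bypass the bins entirely.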
*/ + size = PAGE_CEILING(size); + malloc_mutex_lock(&arena->lock); + ret = (void *)arena_run_alloc(arena, size, true, zero); + if (ret == NULL) { + malloc_mutex_unlock(&arena->lock); + return (NULL); + } + if (config_stats) { + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; + arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; + arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + } + if (config_prof) + arena_prof_accum(arena, size); + malloc_mutex_unlock(&arena->lock); + + if (zero == false) { + if (config_fill) { + if (opt_junk) + memset(ret, 0xa5, size); + else if (opt_zero) + memset(ret, 0, size); + } + } + + return (ret); +} + +/* Only handles large allocations that require more than page alignment. */ +void * +arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) +{ + void *ret; + size_t alloc_size, leadsize, trailsize; + arena_run_t *run; + arena_chunk_t *chunk; + + assert((size & PAGE_MASK) == 0); + + alignment = PAGE_CEILING(alignment); + alloc_size = size + alignment - PAGE; + + malloc_mutex_lock(&arena->lock); + run = arena_run_alloc(arena, alloc_size, true, zero); + if (run == NULL) { + malloc_mutex_unlock(&arena->lock); + return (NULL); + } + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + + leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - + (uintptr_t)run; + assert(alloc_size >= leadsize + size); + trailsize = alloc_size - leadsize - size; + ret = (void *)((uintptr_t)run + leadsize); + if (leadsize != 0) { + arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - + leadsize); + } + if (trailsize != 0) { + arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, + false); + } + + if (config_stats) { + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; + arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; + arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + } + malloc_mutex_unlock(&arena->lock); + + if (config_fill && zero == false) { + if (opt_junk) + memset(ret, 0xa5, size); + else if (opt_zero) + memset(ret, 0, size); + } + return (ret); +} + +/* Return the size of the allocation pointed to by ptr. 
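+ * Small region sizes come from the owning bin; large run sizes are stored in
+ * the chunk map and, when demote is true, are mapped back to the small size
+ * class recorded when the allocation was promoted for profiling.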
*/ +size_t +arena_salloc(const void *ptr, bool demote) +{ + size_t ret; + arena_chunk_t *chunk; + size_t pageind, mapbits; + + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + mapbits = chunk->map[pageind-map_bias].bits; + assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); + if ((mapbits & CHUNK_MAP_LARGE) == 0) { + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << LG_PAGE)); + size_t binind = arena_bin_index(chunk->arena, run->bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + assert(((uintptr_t)ptr - ((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval + == 0); + ret = bin_info->reg_size; + } else { + assert(((uintptr_t)ptr & PAGE_MASK) == 0); + ret = mapbits & ~PAGE_MASK; + if (demote && prof_promote && ret == PAGE && (mapbits & + CHUNK_MAP_CLASS_MASK) != 0) { + size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >> + CHUNK_MAP_CLASS_SHIFT) - 1; + assert(binind < NBINS); + ret = arena_bin_info[binind].reg_size; + } + assert(ret != 0); + } + + return (ret); +} + +void +arena_prof_promoted(const void *ptr, size_t size) +{ + arena_chunk_t *chunk; + size_t pageind, binind; + + assert(config_prof); + assert(ptr != NULL); + assert(CHUNK_ADDR2BASE(ptr) != ptr); + assert(isalloc(ptr, false) == PAGE); + assert(isalloc(ptr, true) == PAGE); + assert(size <= SMALL_MAXCLASS); + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + binind = SMALL_SIZE2BIN(size); + assert(binind < NBINS); + chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits & + ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT); + + assert(isalloc(ptr, false) == PAGE); + assert(isalloc(ptr, true) == size); +} + +static void +arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + /* Dissociate run from bin. */ + if (run == bin->runcur) + bin->runcur = NULL; + else { + size_t binind = arena_bin_index(chunk->arena, bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + if (bin_info->nregs != 1) { + /* + * This block's conditional is necessary because if the + * run only contains one region, then it never gets + * inserted into the non-full runs tree. + */ + arena_bin_runs_remove(bin, run); + } + } +} + +static void +arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + size_t binind; + arena_bin_info_t *bin_info; + size_t npages, run_ind, past; + + assert(run != bin->runcur); + assert(arena_run_tree_search(&bin->runs, &chunk->map[ + (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL); + + binind = arena_bin_index(chunk->arena, run->bin); + bin_info = &arena_bin_info[binind]; + + malloc_mutex_unlock(&bin->lock); + /******************************/ + npages = bin_info->run_size >> LG_PAGE; + run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); + past = (size_t)(PAGE_CEILING((uintptr_t)run + + (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * + bin_info->reg_interval - bin_info->redzone_size) - + (uintptr_t)chunk) >> LG_PAGE); + malloc_mutex_lock(&arena->lock); + + /* + * If the run was originally clean, and some pages were never touched, + * trim the clean pages before deallocating the dirty portion of the + * run. 
+ */ + if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past + - run_ind < npages) { + /* + * Trim clean pages. Convert to large run beforehand. Set the + * last map element first, in case this is a one-page run. + */ + chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE | + (chunk->map[run_ind+npages-1-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + chunk->map[run_ind-map_bias].bits = bin_info->run_size | + CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits & + CHUNK_MAP_FLAGS_MASK); + arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), + ((past - run_ind) << LG_PAGE), false); + /* npages = past - run_ind; */ + } + arena_run_dalloc(arena, run, true); + malloc_mutex_unlock(&arena->lock); + /****************************/ + malloc_mutex_lock(&bin->lock); + if (config_stats) + bin->stats.curruns--; +} + +static void +arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, + arena_bin_t *bin) +{ + + /* + * Make sure that if bin->runcur is non-NULL, it refers to the lowest + * non-full run. It is okay to NULL runcur out rather than proactively + * keeping it pointing at the lowest non-full run. + */ + if ((uintptr_t)run < (uintptr_t)bin->runcur) { + /* Switch runcur. */ + if (bin->runcur->nfree > 0) + arena_bin_runs_insert(bin, bin->runcur); + bin->runcur = run; + if (config_stats) + bin->stats.reruns++; + } else + arena_bin_runs_insert(bin, run); +} + +void +arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, + arena_chunk_map_t *mapelm) +{ + size_t pageind; + arena_run_t *run; + arena_bin_t *bin; + size_t size; + + pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - + (mapelm->bits >> LG_PAGE)) << LG_PAGE)); + bin = run->bin; + size_t binind = arena_bin_index(arena, bin); + arena_bin_info_t *bin_info = &arena_bin_info[binind]; + if (config_fill || config_stats) + size = bin_info->reg_size; + + if (config_fill && opt_junk) + arena_dalloc_junk_small(ptr, bin_info); + + arena_run_reg_dalloc(run, ptr); + if (run->nfree == bin_info->nregs) { + arena_dissociate_bin_run(chunk, run, bin); + arena_dalloc_bin_run(arena, chunk, run, bin); + } else if (run->nfree == 1 && run != bin->runcur) + arena_bin_lower_run(arena, chunk, run, bin); + + if (config_stats) { + bin->stats.allocated -= size; + bin->stats.ndalloc++; + } +} + +void +arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty, + arena_stats_t *astats, malloc_bin_stats_t *bstats, + malloc_large_stats_t *lstats) +{ + unsigned i; + + malloc_mutex_lock(&arena->lock); + *nactive += arena->nactive; + *ndirty += arena->ndirty; + + astats->mapped += arena->stats.mapped; + astats->npurge += arena->stats.npurge; + astats->nmadvise += arena->stats.nmadvise; + astats->purged += arena->stats.purged; + astats->allocated_large += arena->stats.allocated_large; + astats->nmalloc_large += arena->stats.nmalloc_large; + astats->ndalloc_large += arena->stats.ndalloc_large; + astats->nrequests_large += arena->stats.nrequests_large; + + for (i = 0; i < nlclasses; i++) { + lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; + lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; + lstats[i].nrequests += arena->stats.lstats[i].nrequests; + lstats[i].curruns += arena->stats.lstats[i].curruns; + } + malloc_mutex_unlock(&arena->lock); + + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + + malloc_mutex_lock(&bin->lock); + bstats[i].allocated += bin->stats.allocated; + bstats[i].nmalloc += 
bin->stats.nmalloc; + bstats[i].ndalloc += bin->stats.ndalloc; + bstats[i].nrequests += bin->stats.nrequests; + if (config_tcache) { + bstats[i].nfills += bin->stats.nfills; + bstats[i].nflushes += bin->stats.nflushes; + } + bstats[i].nruns += bin->stats.nruns; + bstats[i].reruns += bin->stats.reruns; + bstats[i].curruns += bin->stats.curruns; + malloc_mutex_unlock(&bin->lock); + } +} + +void +arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) +{ + + if (config_fill || config_stats) { + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK; + + if (config_fill && config_stats && opt_junk) + memset(ptr, 0x5a, size); + if (config_stats) { + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= size; + arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++; + arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--; + } + } + + arena_run_dalloc(arena, (arena_run_t *)ptr, true); +} + +static void +arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t oldsize, size_t size) +{ + + assert(size < oldsize); + + /* + * Shrink the run, and make trailing pages available for other + * allocations. + */ + malloc_mutex_lock(&arena->lock); + arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, + true); + if (config_stats) { + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= oldsize; + arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; + arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; + arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; + arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + } + malloc_mutex_unlock(&arena->lock); +} + +static bool +arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, + size_t oldsize, size_t size, size_t extra, bool zero) +{ + size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; + size_t npages = oldsize >> LG_PAGE; + size_t followsize; + + assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK)); + + /* Try to extend the run. */ + assert(size + extra > oldsize); + malloc_mutex_lock(&arena->lock); + if (pageind + npages < chunk_npages && + (chunk->map[pageind+npages-map_bias].bits + & CHUNK_MAP_ALLOCATED) == 0 && (followsize = + chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size - + oldsize) { + /* + * The next run is available and sufficiently large. Split the + * following run, then merge the first part with the existing + * allocation. + */ + size_t flag_dirty; + size_t splitsize = (oldsize + followsize <= size + extra) + ? followsize : size + extra - oldsize; + arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk + + ((pageind+npages) << LG_PAGE)), splitsize, true, zero); + + size = oldsize + splitsize; + npages = size >> LG_PAGE; + + /* + * Mark the extended run as dirty if either portion of the run + * was dirty before allocation. This is rather pedantic, + * because there's not actually any sequence of events that + * could cause the resulting run to be passed to + * arena_run_dalloc() with the dirty argument set to false + * (which is when dirty flag consistency would really matter). 
+ */ + flag_dirty = (chunk->map[pageind-map_bias].bits & + CHUNK_MAP_DIRTY) | + (chunk->map[pageind+npages-1-map_bias].bits & + CHUNK_MAP_DIRTY); + chunk->map[pageind-map_bias].bits = size | flag_dirty + | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + chunk->map[pageind+npages-1-map_bias].bits = flag_dirty | + CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED; + + if (config_stats) { + arena->stats.ndalloc_large++; + arena->stats.allocated_large -= oldsize; + arena->stats.lstats[(oldsize >> LG_PAGE) + - 1].ndalloc++; + arena->stats.lstats[(oldsize >> LG_PAGE) + - 1].curruns--; + + arena->stats.nmalloc_large++; + arena->stats.nrequests_large++; + arena->stats.allocated_large += size; + arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; + arena->stats.lstats[(size >> LG_PAGE) + - 1].nrequests++; + arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + } + malloc_mutex_unlock(&arena->lock); + return (false); + } + malloc_mutex_unlock(&arena->lock); + + return (true); +} + +/* + * Try to resize a large allocation, in order to avoid copying. This will + * always fail if growing an object, and the following run is already in use. + */ +static bool +arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, + bool zero) +{ + size_t psize; + + psize = PAGE_CEILING(size + extra); + if (psize == oldsize) { + /* Same size class. */ + if (config_fill && opt_junk && size < oldsize) { + memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize - + size); + } + return (false); + } else { + arena_chunk_t *chunk; + arena_t *arena; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + arena = chunk->arena; + + if (psize < oldsize) { + /* Fill before shrinking in order avoid a race. */ + if (config_fill && opt_junk) { + memset((void *)((uintptr_t)ptr + size), 0x5a, + oldsize - size); + } + arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, + psize); + return (false); + } else { + bool ret = arena_ralloc_large_grow(arena, chunk, ptr, + oldsize, PAGE_CEILING(size), + psize - PAGE_CEILING(size), zero); + if (config_fill && ret == false && zero == false && + opt_zero) { + memset((void *)((uintptr_t)ptr + oldsize), 0, + size - oldsize); + } + return (ret); + } + } +} + +void * +arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, + bool zero) +{ + + /* + * Avoid moving the allocation if the size class can be left the same. + */ + if (oldsize <= arena_maxclass) { + if (oldsize <= SMALL_MAXCLASS) { + assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size + == oldsize); + if ((size + extra <= SMALL_MAXCLASS && + SMALL_SIZE2BIN(size + extra) == + SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && + size + extra >= oldsize)) { + if (config_fill && opt_junk && size < oldsize) { + memset((void *)((uintptr_t)ptr + size), + 0x5a, oldsize - size); + } + return (ptr); + } + } else { + assert(size <= arena_maxclass); + if (size + extra > SMALL_MAXCLASS) { + if (arena_ralloc_large(ptr, oldsize, size, + extra, zero) == false) + return (ptr); + } + } + } + + /* Reallocation would require a move. */ + return (NULL); +} + +void * +arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero, bool try_tcache) +{ + void *ret; + size_t copysize; + + /* Try to avoid moving the allocation. */ + ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero); + if (ret != NULL) + return (ret); + + /* + * size and oldsize are different enough that we need to move the + * object. In that case, fall back to allocating new space and + * copying. 
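+	 * If even the padded request (size + extra) cannot be satisfied, the
+	 * allocation is retried without the extra bytes before giving up.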
+ */ + if (alignment != 0) { + size_t usize = sa2u(size + extra, alignment); + if (usize == 0) + return (NULL); + ret = ipalloc(usize, alignment, zero); + } else + ret = arena_malloc(NULL, size + extra, zero, try_tcache); + + if (ret == NULL) { + if (extra == 0) + return (NULL); + /* Try again, this time without extra. */ + if (alignment != 0) { + size_t usize = sa2u(size, alignment); + if (usize == 0) + return (NULL); + ret = ipalloc(usize, alignment, zero); + } else + ret = arena_malloc(NULL, size, zero, try_tcache); + + if (ret == NULL) + return (NULL); + } + + /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ + + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + memcpy(ret, ptr, copysize); + iqalloc(ptr); + return (ret); +} + +bool +arena_new(arena_t *arena, unsigned ind) +{ + unsigned i; + arena_bin_t *bin; + + arena->ind = ind; + arena->nthreads = 0; + + if (malloc_mutex_init(&arena->lock)) + return (true); + + if (config_stats) { + memset(&arena->stats, 0, sizeof(arena_stats_t)); + arena->stats.lstats = + (malloc_large_stats_t *)base_alloc(nlclasses * + sizeof(malloc_large_stats_t)); + if (arena->stats.lstats == NULL) + return (true); + memset(arena->stats.lstats, 0, nlclasses * + sizeof(malloc_large_stats_t)); + if (config_tcache) + ql_new(&arena->tcache_ql); + } + + if (config_prof) + arena->prof_accumbytes = 0; + + /* Initialize chunks. */ + ql_new(&arena->chunks_dirty); + arena->spare = NULL; + + arena->nactive = 0; + arena->ndirty = 0; + arena->npurgatory = 0; + + arena_avail_tree_new(&arena->runs_avail_clean); + arena_avail_tree_new(&arena->runs_avail_dirty); + + /* Initialize bins. */ + for (i = 0; i < NBINS; i++) { + bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock)) + return (true); + bin->runcur = NULL; + arena_run_tree_new(&bin->runs); + if (config_stats) + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + } + + return (false); +} + +/* + * Calculate bin_info->run_size such that it meets the following constraints: + * + * *) bin_info->run_size >= min_run_size + * *) bin_info->run_size <= arena_maxclass + * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). + * *) bin_info->nregs <= RUN_MAXREGS + * + * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also + * calculated here, since these settings are all interdependent. + */ +static size_t +bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) +{ + size_t pad_size; + size_t try_run_size, good_run_size; + uint32_t try_nregs, good_nregs; + uint32_t try_hdr_size, good_hdr_size; + uint32_t try_bitmap_offset, good_bitmap_offset; + uint32_t try_ctx0_offset, good_ctx0_offset; + uint32_t try_redzone0_offset, good_redzone0_offset; + + assert(min_run_size >= PAGE); + assert(min_run_size <= arena_maxclass); + + /* + * Determine redzone size based on minimum alignment and minimum + * redzone size. Add padding to the end of the run if it is needed to + * align the regions. The padding allows each redzone to be half the + * minimum alignment; without the padding, each redzone would have to + * be twice as large in order to maintain alignment. 
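+	 * For example, assuming REDZONE_MINSIZE < 64, a 192 byte size class
+	 * has a minimum alignment of 64 (its lowest set bit); each redzone is
+	 * then 32 bytes, 32 bytes of run-end padding are added, and regions
+	 * repeat every 192 + 2*32 = 256 bytes.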
+ */ + if (config_fill && opt_redzone) { + size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1); + if (align_min <= REDZONE_MINSIZE) { + bin_info->redzone_size = REDZONE_MINSIZE; + pad_size = 0; + } else { + bin_info->redzone_size = align_min >> 1; + pad_size = bin_info->redzone_size; + } + } else { + bin_info->redzone_size = 0; + pad_size = 0; + } + bin_info->reg_interval = bin_info->reg_size + + (bin_info->redzone_size << 1); + + /* + * Calculate known-valid settings before entering the run_size + * expansion loop, so that the first part of the loop always copies + * valid settings. + * + * The do..while loop iteratively reduces the number of regions until + * the run header and the regions no longer overlap. A closed formula + * would be quite messy, since there is an interdependency between the + * header's mask length and the number of regions. + */ + try_run_size = min_run_size; + try_nregs = ((try_run_size - sizeof(arena_run_t)) / + bin_info->reg_interval) + + 1; /* Counter-act try_nregs-- in loop. */ + if (try_nregs > RUN_MAXREGS) { + try_nregs = RUN_MAXREGS + + 1; /* Counter-act try_nregs-- in loop. */ + } + do { + try_nregs--; + try_hdr_size = sizeof(arena_run_t); + /* Pad to a long boundary. */ + try_hdr_size = LONG_CEILING(try_hdr_size); + try_bitmap_offset = try_hdr_size; + /* Add space for bitmap. */ + try_hdr_size += bitmap_size(try_nregs); + if (config_prof && opt_prof && prof_promote == false) { + /* Pad to a quantum boundary. */ + try_hdr_size = QUANTUM_CEILING(try_hdr_size); + try_ctx0_offset = try_hdr_size; + /* Add space for one (prof_ctx_t *) per region. */ + try_hdr_size += try_nregs * sizeof(prof_ctx_t *); + } else + try_ctx0_offset = 0; + try_redzone0_offset = try_run_size - (try_nregs * + bin_info->reg_interval) - pad_size; + } while (try_hdr_size > try_redzone0_offset); + + /* run_size expansion loop. */ + do { + /* + * Copy valid settings before trying more aggressive settings. + */ + good_run_size = try_run_size; + good_nregs = try_nregs; + good_hdr_size = try_hdr_size; + good_bitmap_offset = try_bitmap_offset; + good_ctx0_offset = try_ctx0_offset; + good_redzone0_offset = try_redzone0_offset; + + /* Try more aggressive settings. */ + try_run_size += PAGE; + try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) / + bin_info->reg_interval) + + 1; /* Counter-act try_nregs-- in loop. */ + if (try_nregs > RUN_MAXREGS) { + try_nregs = RUN_MAXREGS + + 1; /* Counter-act try_nregs-- in loop. */ + } + do { + try_nregs--; + try_hdr_size = sizeof(arena_run_t); + /* Pad to a long boundary. */ + try_hdr_size = LONG_CEILING(try_hdr_size); + try_bitmap_offset = try_hdr_size; + /* Add space for bitmap. */ + try_hdr_size += bitmap_size(try_nregs); + if (config_prof && opt_prof && prof_promote == false) { + /* Pad to a quantum boundary. */ + try_hdr_size = QUANTUM_CEILING(try_hdr_size); + try_ctx0_offset = try_hdr_size; + /* + * Add space for one (prof_ctx_t *) per region. + */ + try_hdr_size += try_nregs * + sizeof(prof_ctx_t *); + } + try_redzone0_offset = try_run_size - (try_nregs * + bin_info->reg_interval) - pad_size; + } while (try_hdr_size > try_redzone0_offset); + } while (try_run_size <= arena_maxclass + && try_run_size <= arena_maxclass + && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > + RUN_MAX_OVRHD_RELAX + && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size + && try_nregs < RUN_MAXREGS); + + assert(good_hdr_size <= good_redzone0_offset); + + /* Copy final settings. 
*/ + bin_info->run_size = good_run_size; + bin_info->nregs = good_nregs; + bin_info->bitmap_offset = good_bitmap_offset; + bin_info->ctx0_offset = good_ctx0_offset; + bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size; + + assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs + * bin_info->reg_interval) + pad_size == bin_info->run_size); + + return (good_run_size); +} + +static void +bin_info_init(void) +{ + arena_bin_info_t *bin_info; + size_t prev_run_size = PAGE; + +#define SIZE_CLASS(bin, delta, size) \ + bin_info = &arena_bin_info[bin]; \ + bin_info->reg_size = size; \ + prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\ + bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); + SIZE_CLASSES +#undef SIZE_CLASS +} + +void +arena_boot(void) +{ + size_t header_size; + unsigned i; + + /* + * Compute the header size such that it is large enough to contain the + * page map. The page map is biased to omit entries for the header + * itself, so some iteration is necessary to compute the map bias. + * + * 1) Compute safe header_size and map_bias values that include enough + * space for an unbiased page map. + * 2) Refine map_bias based on (1) to omit the header pages in the page + * map. The resulting map_bias may be one too small. + * 3) Refine map_bias based on (2). The result will be >= the result + * from (2), and will always be correct. + */ + map_bias = 0; + for (i = 0; i < 3; i++) { + header_size = offsetof(arena_chunk_t, map) + + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); + map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK) + != 0); + } + assert(map_bias > 0); + + arena_maxclass = chunksize - (map_bias << LG_PAGE); + + bin_info_init(); +} + +void +arena_prefork(arena_t *arena) +{ + unsigned i; + + malloc_mutex_prefork(&arena->lock); + for (i = 0; i < NBINS; i++) + malloc_mutex_prefork(&arena->bins[i].lock); +} + +void +arena_postfork_parent(arena_t *arena) +{ + unsigned i; + + for (i = 0; i < NBINS; i++) + malloc_mutex_postfork_parent(&arena->bins[i].lock); + malloc_mutex_postfork_parent(&arena->lock); +} + +void +arena_postfork_child(arena_t *arena) +{ + unsigned i; + + for (i = 0; i < NBINS; i++) + malloc_mutex_postfork_child(&arena->bins[i].lock); + malloc_mutex_postfork_child(&arena->lock); +} Property changes on: contrib/jemalloc/src/arena.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/jemalloc.c =================================================================== --- contrib/jemalloc/src/jemalloc.c (revision 0) +++ contrib/jemalloc/src/jemalloc.c (working copy) @@ -0,0 +1,1732 @@ +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +malloc_tsd_data(, arenas, arena_t *, NULL) +malloc_tsd_data(, thread_allocated, thread_allocated_t, + THREAD_ALLOCATED_INITIALIZER) + +const char *__malloc_options_1_0; +__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); + +/* Runtime configuration options. 
*/ +const char *je_malloc_conf JEMALLOC_ATTR(visibility("default")); +#ifdef JEMALLOC_DEBUG +bool opt_abort = true; +# ifdef JEMALLOC_FILL +bool opt_junk = true; +# else +bool opt_junk = false; +# endif +#else +bool opt_abort = false; +bool opt_junk = false; +#endif +size_t opt_quarantine = ZU(0); +bool opt_redzone = false; +bool opt_utrace = false; +bool opt_valgrind = false; +bool opt_xmalloc = false; +bool opt_zero = false; +size_t opt_narenas = 0; + +unsigned ncpus; + +malloc_mutex_t arenas_lock; +arena_t **arenas; +unsigned narenas; + +/* Set to true once the allocator has been initialized. */ +static bool malloc_initialized = false; + +#ifdef JEMALLOC_THREADED_INIT +/* Used to let the initializing thread recursively allocate. */ +# define NO_INITIALIZER ((unsigned long)0) +# define INITIALIZER pthread_self() +# define IS_INITIALIZER (malloc_initializer == pthread_self()) +static pthread_t malloc_initializer = NO_INITIALIZER; +#else +# define NO_INITIALIZER false +# define INITIALIZER true +# define IS_INITIALIZER malloc_initializer +static bool malloc_initializer = NO_INITIALIZER; +#endif + +/* Used to avoid initialization races. */ +static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; + +typedef struct { + void *p; /* Input pointer (as in realloc(p, s)). */ + size_t s; /* Request size. */ + void *r; /* Result pointer. */ +} malloc_utrace_t; + +#ifdef JEMALLOC_UTRACE +# define UTRACE(a, b, c) do { \ + if (opt_utrace) { \ + malloc_utrace_t ut; \ + ut.p = (a); \ + ut.s = (b); \ + ut.r = (c); \ + utrace(&ut, sizeof(ut)); \ + } \ +} while (0) +#else +# define UTRACE(a, b, c) +#endif + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void stats_print_atexit(void); +static unsigned malloc_ncpus(void); +static bool malloc_conf_next(char const **opts_p, char const **k_p, + size_t *klen_p, char const **v_p, size_t *vlen_p); +static void malloc_conf_error(const char *msg, const char *k, size_t klen, + const char *v, size_t vlen); +static void malloc_conf_init(void); +static bool malloc_init_hard(void); +static int imemalign(void **memptr, size_t alignment, size_t size, + size_t min_alignment); + +/******************************************************************************/ +/* + * Begin miscellaneous support functions. + */ + +/* Create a new arena and insert it into the arenas array at index ind. */ +arena_t * +arenas_extend(unsigned ind) +{ + arena_t *ret; + + ret = (arena_t *)base_alloc(sizeof(arena_t)); + if (ret != NULL && arena_new(ret, ind) == false) { + arenas[ind] = ret; + return (ret); + } + /* Only reached if there is an OOM error. */ + + /* + * OOM here is quite inconvenient to propagate, since dealing with it + * would require a check for failure in the fast path. Instead, punt + * by using arenas[0]. In practice, this is an extremely unlikely + * failure. + */ + malloc_write(": Error initializing arena\n"); + if (opt_abort) + abort(); + + return (arenas[0]); +} + +/* Slow path, called only by choose_arena(). */ +arena_t * +choose_arena_hard(void) +{ + arena_t *ret; + + if (narenas > 1) { + unsigned i, choose, first_null; + + choose = 0; + first_null = narenas; + malloc_mutex_lock(&arenas_lock); + assert(arenas[0] != NULL); + for (i = 1; i < narenas; i++) { + if (arenas[i] != NULL) { + /* + * Choose the first arena that has the lowest + * number of threads assigned to it. 
+ */ + if (arenas[i]->nthreads < + arenas[choose]->nthreads) + choose = i; + } else if (first_null == narenas) { + /* + * Record the index of the first uninitialized + * arena, in case all extant arenas are in use. + * + * NB: It is possible for there to be + * discontinuities in terms of initialized + * versus uninitialized arenas, due to the + * "thread.arena" mallctl. + */ + first_null = i; + } + } + + if (arenas[choose]->nthreads == 0 || first_null == narenas) { + /* + * Use an unloaded arena, or the least loaded arena if + * all arenas are already initialized. + */ + ret = arenas[choose]; + } else { + /* Initialize a new arena. */ + ret = arenas_extend(first_null); + } + ret->nthreads++; + malloc_mutex_unlock(&arenas_lock); + } else { + ret = arenas[0]; + malloc_mutex_lock(&arenas_lock); + ret->nthreads++; + malloc_mutex_unlock(&arenas_lock); + } + + arenas_tsd_set(&ret); + + return (ret); +} + +static void +stats_print_atexit(void) +{ + + if (config_tcache && config_stats) { + unsigned i; + + /* + * Merge stats from extant threads. This is racy, since + * individual threads do not lock when recording tcache stats + * events. As a consequence, the final stats may be slightly + * out of date by the time they are reported, if other threads + * continue to allocate. + */ + for (i = 0; i < narenas; i++) { + arena_t *arena = arenas[i]; + if (arena != NULL) { + tcache_t *tcache; + + /* + * tcache_stats_merge() locks bins, so if any + * code is introduced that acquires both arena + * and bin locks in the opposite order, + * deadlocks may result. + */ + malloc_mutex_lock(&arena->lock); + ql_foreach(tcache, &arena->tcache_ql, link) { + tcache_stats_merge(tcache, arena); + } + malloc_mutex_unlock(&arena->lock); + } + } + } + je_malloc_stats_print(NULL, NULL, NULL); +} + +/* + * End miscellaneous support functions. + */ +/******************************************************************************/ +/* + * Begin initialization functions. + */ + +static unsigned +malloc_ncpus(void) +{ + unsigned ret; + long result; + + result = sysconf(_SC_NPROCESSORS_ONLN); + if (result == -1) { + /* Error. 
*/ + ret = 1; + } + ret = (unsigned)result; + + return (ret); +} + +void +arenas_cleanup(void *arg) +{ + arena_t *arena = *(arena_t **)arg; + + malloc_mutex_lock(&arenas_lock); + arena->nthreads--; + malloc_mutex_unlock(&arenas_lock); +} + +static inline bool +malloc_init(void) +{ + + if (malloc_initialized == false) + return (malloc_init_hard()); + + return (false); +} + +static bool +malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, + char const **v_p, size_t *vlen_p) +{ + bool accept; + const char *opts = *opts_p; + + *k_p = opts; + + for (accept = false; accept == false;) { + switch (*opts) { + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': + case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': + case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': + case 'Y': case 'Z': + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': + case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': + case 's': case 't': case 'u': case 'v': case 'w': case 'x': + case 'y': case 'z': + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + case '_': + opts++; + break; + case ':': + opts++; + *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; + *v_p = opts; + accept = true; + break; + case '\0': + if (opts != *opts_p) { + malloc_write(": Conf string ends " + "with key\n"); + } + return (true); + default: + malloc_write(": Malformed conf string\n"); + return (true); + } + } + + for (accept = false; accept == false;) { + switch (*opts) { + case ',': + opts++; + /* + * Look ahead one character here, because the next time + * this function is called, it will assume that end of + * input has been cleanly reached if no input remains, + * but we have optimistically already consumed the + * comma if one exists. + */ + if (*opts == '\0') { + malloc_write(": Conf string ends " + "with comma\n"); + } + *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; + accept = true; + break; + case '\0': + *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; + accept = true; + break; + default: + opts++; + break; + } + } + + *opts_p = opts; + return (false); +} + +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) +{ + + malloc_printf(": %s: %.*s:%.*s\n", msg, (int)klen, k, + (int)vlen, v); +} + +static void +malloc_conf_init(void) +{ + unsigned i; + char buf[PATH_MAX + 1]; + const char *opts, *k, *v; + size_t klen, vlen; + + for (i = 0; i < 3; i++) { + /* Get runtime configuration. */ + switch (i) { + case 0: + if (je_malloc_conf != NULL) { + /* + * Use options that were compiled into the + * program. + */ + opts = je_malloc_conf; + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + case 1: { + int linklen; + const char *linkname = +#ifdef JEMALLOC_PREFIX + "/etc/"JEMALLOC_PREFIX"malloc.conf" +#else + "/etc/malloc.conf" +#endif + ; + + if ((linklen = readlink(linkname, buf, + sizeof(buf) - 1)) != -1) { + /* + * Use the contents of the "/etc/malloc.conf" + * symbolic link's name. + */ + buf[linklen] = '\0'; + opts = buf; + } else { + /* No configuration specified. 
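malloc_conf_next() above tokenizes each configuration source into a comma-separated list of key:value pairs, with keys restricted to [A-Za-z0-9_]. The string below illustrates the accepted shape; the specific values are hypothetical examples, not recommendations, and this is not part of the imported sources.

/* Keys correspond to options handled in malloc_conf_init() below. */
static const char *example_malloc_conf =
    "abort:true,lg_chunk:22,narenas:4,stats_print:false";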
*/ + buf[0] = '\0'; + opts = buf; + } + break; + } case 2: { + const char *envname = +#ifdef JEMALLOC_PREFIX + JEMALLOC_CPREFIX"MALLOC_CONF" +#else + "MALLOC_CONF" +#endif + ; + + if ((opts = getenv(envname)) != NULL) { + /* + * Do nothing; opts is already initialized to + * the value of the MALLOC_CONF environment + * variable. + */ + } else { + /* No configuration specified. */ + buf[0] = '\0'; + opts = buf; + } + break; + } default: + /* NOTREACHED */ + assert(false); + buf[0] = '\0'; + opts = buf; + } + + while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, + &vlen) == false) { +#define CONF_HANDLE_BOOL_HIT(o, n, hit) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + if (strncmp("true", v, vlen) == 0 && \ + vlen == sizeof("true")-1) \ + o = true; \ + else if (strncmp("false", v, vlen) == \ + 0 && vlen == sizeof("false")-1) \ + o = false; \ + else { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } \ + hit = true; \ + } else \ + hit = false; +#define CONF_HANDLE_BOOL(o, n) { \ + bool hit; \ + CONF_HANDLE_BOOL_HIT(o, n, hit); \ + if (hit) \ + continue; \ +} +#define CONF_HANDLE_SIZE_T(o, n, min, max) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + uintmax_t um; \ + char *end; \ + \ + errno = 0; \ + um = malloc_strtoumax(v, &end, 0); \ + if (errno != 0 || (uintptr_t)end - \ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (um < min || um > max) { \ + malloc_conf_error( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else \ + o = um; \ + continue; \ + } +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + long l; \ + char *end; \ + \ + errno = 0; \ + l = strtol(v, &end, 0); \ + if (errno != 0 || (uintptr_t)end - \ + (uintptr_t)v != vlen) { \ + malloc_conf_error( \ + "Invalid conf value", \ + k, klen, v, vlen); \ + } else if (l < (ssize_t)min || l > \ + (ssize_t)max) { \ + malloc_conf_error( \ + "Out-of-range conf value", \ + k, klen, v, vlen); \ + } else \ + o = l; \ + continue; \ + } +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (sizeof(#n)-1 == klen && strncmp(#n, k, \ + klen) == 0) { \ + size_t cpylen = (vlen <= \ + sizeof(o)-1) ? vlen : \ + sizeof(o)-1; \ + strncpy(o, v, cpylen); \ + o[cpylen] = '\0'; \ + continue; \ + } + + CONF_HANDLE_BOOL(opt_abort, abort) + /* + * Chunks always require at least one header page, plus + * one data page in the absence of redzones, or three + * pages in the presence of redzones. In order to + * simplify options processing, fix the limit based on + * config_fill. + */ + CONF_HANDLE_SIZE_T(opt_lg_chunk, lg_chunk, LG_PAGE + + (config_fill ? 
2 : 1), (sizeof(size_t) << 3) - 1) + CONF_HANDLE_SIZE_T(opt_narenas, narenas, 1, SIZE_T_MAX) + CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, lg_dirty_mult, + -1, (sizeof(size_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_stats_print, stats_print) + if (config_fill) { + CONF_HANDLE_BOOL(opt_junk, junk) + CONF_HANDLE_SIZE_T(opt_quarantine, quarantine, + 0, SIZE_T_MAX) + CONF_HANDLE_BOOL(opt_redzone, redzone) + CONF_HANDLE_BOOL(opt_zero, zero) + } + if (config_utrace) { + CONF_HANDLE_BOOL(opt_utrace, utrace) + } + if (config_valgrind) { + bool hit; + CONF_HANDLE_BOOL_HIT(opt_valgrind, + valgrind, hit) + if (config_fill && opt_valgrind && hit) { + opt_junk = false; + opt_zero = false; + if (opt_quarantine == 0) { + opt_quarantine = + JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; + } + opt_redzone = true; + } + if (hit) + continue; + } + if (config_xmalloc) { + CONF_HANDLE_BOOL(opt_xmalloc, xmalloc) + } + if (config_tcache) { + CONF_HANDLE_BOOL(opt_tcache, tcache) + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, + lg_tcache_max, -1, + (sizeof(size_t) << 3) - 1) + } + if (config_prof) { + CONF_HANDLE_BOOL(opt_prof, prof) + CONF_HANDLE_CHAR_P(opt_prof_prefix, prof_prefix, + "jeprof") + CONF_HANDLE_BOOL(opt_prof_active, prof_active) + CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, + lg_prof_sample, 0, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_accum, prof_accum) + CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, + lg_prof_interval, -1, + (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_gdump, prof_gdump) + CONF_HANDLE_BOOL(opt_prof_leak, prof_leak) + } + malloc_conf_error("Invalid conf pair", k, klen, v, + vlen); +#undef CONF_HANDLE_BOOL +#undef CONF_HANDLE_SIZE_T +#undef CONF_HANDLE_SSIZE_T +#undef CONF_HANDLE_CHAR_P + } + } +} + +static bool +malloc_init_hard(void) +{ + arena_t *init_arenas[1]; + + malloc_mutex_lock(&init_lock); + if (malloc_initialized || IS_INITIALIZER) { + /* + * Another thread initialized the allocator before this one + * acquired init_lock, or this thread is the initializing + * thread, and it is recursively allocating. + */ + malloc_mutex_unlock(&init_lock); + return (false); + } +#ifdef JEMALLOC_THREADED_INIT + if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { + /* Busy-wait until the initializing thread completes. */ + do { + malloc_mutex_unlock(&init_lock); + CPU_SPINWAIT; + malloc_mutex_lock(&init_lock); + } while (malloc_initialized == false); + malloc_mutex_unlock(&init_lock); + return (false); + } +#endif + malloc_initializer = INITIALIZER; + + malloc_tsd_boot(); + if (config_prof) + prof_boot0(); + + malloc_conf_init(); + +#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE)) + /* Register fork handlers. */ + if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, + jemalloc_postfork_child) != 0) { + malloc_write(": Error in pthread_atfork()\n"); + if (opt_abort) + abort(); + } +#endif + + if (opt_stats_print) { + /* Print statistics at exit. 
*/ + if (atexit(stats_print_atexit) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) + abort(); + } + } + + if (base_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (chunk_boot0()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (ctl_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (config_prof) + prof_boot1(); + + arena_boot(); + + if (config_tcache && tcache_boot0()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (huge_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (malloc_mutex_init(&arenas_lock)) + return (true); + + /* + * Create enough scaffolding to allow recursive allocation in + * malloc_ncpus(). + */ + narenas = 1; + arenas = init_arenas; + memset(arenas, 0, sizeof(arena_t *) * narenas); + + /* + * Initialize one arena here. The rest are lazily created in + * choose_arena_hard(). + */ + arenas_extend(0); + if (arenas[0] == NULL) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + /* Initialize allocation counters before any allocations can occur. */ + if (config_stats && thread_allocated_tsd_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (arenas_tsd_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (config_tcache && tcache_boot1()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (config_fill && quarantine_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (config_prof && prof_boot2()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + /* Get number of CPUs. */ + malloc_mutex_unlock(&init_lock); + ncpus = malloc_ncpus(); + malloc_mutex_lock(&init_lock); + + if (chunk_boot1()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (mutex_boot()) { + malloc_mutex_unlock(&init_lock); + return (true); + } + + if (opt_narenas == 0) { + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) + opt_narenas = ncpus << 2; + else + opt_narenas = 1; + } + narenas = opt_narenas; + /* + * Make sure that the arenas array can be allocated. In practice, this + * limit is enough to allow the allocator to function, but the ctl + * machinery will fail to allocate memory at far lower limits. + */ + if (narenas > chunksize / sizeof(arena_t *)) { + narenas = chunksize / sizeof(arena_t *); + malloc_printf(": Reducing narenas to limit (%d)\n", + narenas); + } + + /* Allocate and initialize arenas. */ + arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas); + if (arenas == NULL) { + malloc_mutex_unlock(&init_lock); + return (true); + } + /* + * Zero the array. In practice, this should always be pre-zeroed, + * since it was just mmap()ed, but let's be sure. + */ + memset(arenas, 0, sizeof(arena_t *) * narenas); + /* Copy the pointer to the one arena that was already initialized. */ + arenas[0] = init_arenas[0]; + + malloc_initialized = true; + malloc_mutex_unlock(&init_lock); + return (false); +} + +/* + * End initialization functions. + */ +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. 
+ */ + +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +je_malloc(size_t size) +{ + void *ret; + size_t usize; + prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); + + if (malloc_init()) { + ret = NULL; + goto label_oom; + } + + if (size == 0) + size = 1; + + if (config_prof && opt_prof) { + usize = s2u(size); + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) { + ret = NULL; + goto label_oom; + } + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= + SMALL_MAXCLASS) { + ret = imalloc(SMALL_MAXCLASS+1); + if (ret != NULL) + arena_prof_promoted(ret, usize); + } else + ret = imalloc(size); + } else { + if (config_stats || (config_valgrind && opt_valgrind)) + usize = s2u(size); + ret = imalloc(size); + } + +label_oom: + if (ret == NULL) { + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in malloc(): " + "out of memory\n"); + abort(); + } + errno = ENOMEM; + } + if (config_prof && opt_prof && ret != NULL) + prof_malloc(ret, usize, cnt); + if (config_stats && ret != NULL) { + assert(usize == isalloc(ret, config_prof)); + thread_allocated_tsd_get()->allocated += usize; + } + UTRACE(0, size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); + return (ret); +} + +JEMALLOC_ATTR(nonnull(1)) +#ifdef JEMALLOC_PROF +/* + * Avoid any uncertainty as to how many backtrace frames to ignore in + * PROF_ALLOC_PREP(). + */ +JEMALLOC_ATTR(noinline) +#endif +static int +imemalign(void **memptr, size_t alignment, size_t size, + size_t min_alignment) +{ + int ret; + size_t usize; + void *result; + prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); + + assert(min_alignment != 0); + + if (malloc_init()) + result = NULL; + else { + if (size == 0) + size = 1; + + /* Make sure that alignment is a large enough power of 2. 
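The alignment validation that follows in imemalign() rejects an alignment unless it is a power of two at least as large as min_alignment, using the classic ((alignment - 1) & alignment) test. The same predicate is shown below as a standalone sketch with a hypothetical name; it is not part of the imported sources.

#include <stdbool.h>
#include <stddef.h>

static bool
alignment_is_valid(size_t alignment, size_t min_alignment)
{
	/* A nonzero power of two has exactly one bit set. */
	return (alignment >= min_alignment &&
	    (alignment & (alignment - 1)) == 0);
}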
*/ + if (((alignment - 1) & alignment) != 0 + || (alignment < min_alignment)) { + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error allocating " + "aligned memory: invalid alignment\n"); + abort(); + } + result = NULL; + ret = EINVAL; + goto label_return; + } + + usize = sa2u(size, alignment); + if (usize == 0) { + result = NULL; + ret = ENOMEM; + goto label_return; + } + + if (config_prof && opt_prof) { + PROF_ALLOC_PREP(2, usize, cnt); + if (cnt == NULL) { + result = NULL; + ret = EINVAL; + } else { + if (prof_promote && (uintptr_t)cnt != + (uintptr_t)1U && usize <= SMALL_MAXCLASS) { + assert(sa2u(SMALL_MAXCLASS+1, + alignment) != 0); + result = ipalloc(sa2u(SMALL_MAXCLASS+1, + alignment), alignment, false); + if (result != NULL) { + arena_prof_promoted(result, + usize); + } + } else { + result = ipalloc(usize, alignment, + false); + } + } + } else + result = ipalloc(usize, alignment, false); + } + + if (result == NULL) { + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error allocating aligned " + "memory: out of memory\n"); + abort(); + } + ret = ENOMEM; + goto label_return; + } + + *memptr = result; + ret = 0; + +label_return: + if (config_stats && result != NULL) { + assert(usize == isalloc(result, config_prof)); + thread_allocated_tsd_get()->allocated += usize; + } + if (config_prof && opt_prof && result != NULL) + prof_malloc(result, usize, cnt); + UTRACE(0, size, result); + return (ret); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +je_posix_memalign(void **memptr, size_t alignment, size_t size) +{ + int ret = imemalign(memptr, alignment, size, sizeof(void *)); + JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, + config_prof), false); + return (ret); +} + +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +je_aligned_alloc(size_t alignment, size_t size) +{ + void *ret; + int err; + + if ((err = imemalign(&ret, alignment, size, 1)) != 0) { + ret = NULL; + errno = err; + } + JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), + false); + return (ret); +} + +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +je_calloc(size_t num, size_t size) +{ + void *ret; + size_t num_size; + size_t usize; + prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); + + if (malloc_init()) { + num_size = 0; + ret = NULL; + goto label_return; + } + + num_size = num * size; + if (num_size == 0) { + if (num == 0 || size == 0) + num_size = 1; + else { + ret = NULL; + goto label_return; + } + /* + * Try to avoid division here. We know that it isn't possible to + * overflow during multiplication if neither operand uses any of the + * most significant half of the bits in a size_t. + */ + } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) + && (num_size / size != num)) { + /* size_t overflow. 
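je_calloc() above avoids a division on the common path: if neither operand uses any bit in the upper half of a size_t, the product cannot wrap, and only otherwise is the division-based check performed. A standalone sketch of that test follows, with a hypothetical name; it is not part of the imported sources.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
mul_overflows(size_t num, size_t size)
{
	/* Bits in the upper half of a size_t (e.g. the top 32 of 64). */
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) << 2);

	if (((num | size) & high_bits) == 0) {
		/* Both operands fit in half the bits; the product cannot wrap. */
		return (false);
	}
	return (size != 0 && (num * size) / size != num);
}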
*/ + ret = NULL; + goto label_return; + } + + if (config_prof && opt_prof) { + usize = s2u(num_size); + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) { + ret = NULL; + goto label_return; + } + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize + <= SMALL_MAXCLASS) { + ret = icalloc(SMALL_MAXCLASS+1); + if (ret != NULL) + arena_prof_promoted(ret, usize); + } else + ret = icalloc(num_size); + } else { + if (config_stats || (config_valgrind && opt_valgrind)) + usize = s2u(num_size); + ret = icalloc(num_size); + } + +label_return: + if (ret == NULL) { + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in calloc(): out of " + "memory\n"); + abort(); + } + errno = ENOMEM; + } + + if (config_prof && opt_prof && ret != NULL) + prof_malloc(ret, usize, cnt); + if (config_stats && ret != NULL) { + assert(usize == isalloc(ret, config_prof)); + thread_allocated_tsd_get()->allocated += usize; + } + UTRACE(0, num_size, ret); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); + return (ret); +} + +JEMALLOC_ATTR(visibility("default")) +void * +je_realloc(void *ptr, size_t size) +{ + void *ret; + size_t usize; + size_t old_size = 0; + size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); + prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL); + + if (size == 0) { + if (ptr != NULL) { + /* realloc(ptr, 0) is equivalent to free(p). */ + if (config_prof) { + old_size = isalloc(ptr, true); + if (config_valgrind && opt_valgrind) + old_rzsize = p2rz(ptr); + } else if (config_stats) { + old_size = isalloc(ptr, false); + if (config_valgrind && opt_valgrind) + old_rzsize = u2rz(old_size); + } else if (config_valgrind && opt_valgrind) { + old_size = isalloc(ptr, false); + old_rzsize = u2rz(old_size); + } + if (config_prof && opt_prof) { + old_ctx = prof_ctx_get(ptr); + cnt = NULL; + } + iqalloc(ptr); + ret = NULL; + goto label_return; + } else + size = 1; + } + + if (ptr != NULL) { + assert(malloc_initialized || IS_INITIALIZER); + + if (config_prof) { + old_size = isalloc(ptr, true); + if (config_valgrind && opt_valgrind) + old_rzsize = p2rz(ptr); + } else if (config_stats) { + old_size = isalloc(ptr, false); + if (config_valgrind && opt_valgrind) + old_rzsize = u2rz(old_size); + } else if (config_valgrind && opt_valgrind) { + old_size = isalloc(ptr, false); + old_rzsize = u2rz(old_size); + } + if (config_prof && opt_prof) { + usize = s2u(size); + old_ctx = prof_ctx_get(ptr); + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) { + old_ctx = NULL; + ret = NULL; + goto label_oom; + } + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && + usize <= SMALL_MAXCLASS) { + ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0, + false, false); + if (ret != NULL) + arena_prof_promoted(ret, usize); + else + old_ctx = NULL; + } else { + ret = iralloc(ptr, size, 0, 0, false, false); + if (ret == NULL) + old_ctx = NULL; + } + } else { + if (config_stats || (config_valgrind && opt_valgrind)) + usize = s2u(size); + ret = iralloc(ptr, size, 0, 0, false, false); + } + +label_oom: + if (ret == NULL) { + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in realloc(): " + "out of memory\n"); + abort(); + } + errno = ENOMEM; + } + } else { + /* realloc(NULL, size) is equivalent to malloc(size). 
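The surrounding je_realloc() folds the two edge cases required of realloc(3) into its fast paths: a zero size with a non-NULL pointer degenerates to free(), and a NULL pointer degenerates to malloc(). The same behavior is written out below against the standard interface as a minimal sketch; it is not part of the imported sources.

#include <stdlib.h>

static void *
realloc_semantics(void *ptr, size_t size)
{
	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) acts as free(ptr). */
			free(ptr);
			return (NULL);
		}
		/* realloc(NULL, 0) behaves as a minimal allocation. */
		size = 1;
	}
	if (ptr == NULL) {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		return (malloc(size));
	}
	return (realloc(ptr, size));
}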
*/ + if (config_prof && opt_prof) + old_ctx = NULL; + if (malloc_init()) { + if (config_prof && opt_prof) + cnt = NULL; + ret = NULL; + } else { + if (config_prof && opt_prof) { + usize = s2u(size); + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) + ret = NULL; + else { + if (prof_promote && (uintptr_t)cnt != + (uintptr_t)1U && usize <= + SMALL_MAXCLASS) { + ret = imalloc(SMALL_MAXCLASS+1); + if (ret != NULL) { + arena_prof_promoted(ret, + usize); + } + } else + ret = imalloc(size); + } + } else { + if (config_stats || (config_valgrind && + opt_valgrind)) + usize = s2u(size); + ret = imalloc(size); + } + } + + if (ret == NULL) { + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in realloc(): " + "out of memory\n"); + abort(); + } + errno = ENOMEM; + } + } + +label_return: + if (config_prof && opt_prof) + prof_realloc(ret, usize, cnt, old_size, old_ctx); + if (config_stats && ret != NULL) { + thread_allocated_t *ta; + assert(usize == isalloc(ret, config_prof)); + ta = thread_allocated_tsd_get(); + ta->allocated += usize; + ta->deallocated += old_size; + } + UTRACE(ptr, size, ret); + JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false); + return (ret); +} + +JEMALLOC_ATTR(visibility("default")) +void +je_free(void *ptr) +{ + + UTRACE(ptr, 0, 0); + if (ptr != NULL) { + size_t usize; + size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + + assert(malloc_initialized || IS_INITIALIZER); + + if (config_prof && opt_prof) { + usize = isalloc(ptr, config_prof); + prof_free(ptr, usize); + } else if (config_stats || config_valgrind) + usize = isalloc(ptr, config_prof); + if (config_stats) + thread_allocated_tsd_get()->deallocated += usize; + if (config_valgrind && opt_valgrind) + rzsize = p2rz(ptr); + iqalloc(ptr); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); + } +} + +/* + * End malloc(3)-compatible functions. + */ +/******************************************************************************/ +/* + * Begin non-standard override functions. + */ + +#ifdef JEMALLOC_OVERRIDE_MEMALIGN +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +je_memalign(size_t alignment, size_t size) +{ + void *ret JEMALLOC_CC_SILENCE_INIT(NULL); + imemalign(&ret, alignment, size, 1); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); + return (ret); +} +#endif + +#ifdef JEMALLOC_OVERRIDE_VALLOC +JEMALLOC_ATTR(malloc) +JEMALLOC_ATTR(visibility("default")) +void * +je_valloc(size_t size) +{ + void *ret JEMALLOC_CC_SILENCE_INIT(NULL); + imemalign(&ret, PAGE, size, 1); + JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); + return (ret); +} +#endif + +/* + * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has + * #define je_malloc malloc + */ +#define malloc_is_malloc 1 +#define is_malloc_(a) malloc_is_ ## a +#define is_malloc(a) is_malloc_(a) + +#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) +/* + * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible + * to inconsistently reference libc's malloc(3)-compatible functions + * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). + * + * These definitions interpose hooks in glibc. The functions are actually + * passed an extra argument for the caller return address, which will be + * ignored. 
+ */ +JEMALLOC_ATTR(visibility("default")) +void (* const __free_hook)(void *ptr) = je_free; + +JEMALLOC_ATTR(visibility("default")) +void *(* const __malloc_hook)(size_t size) = je_malloc; + +JEMALLOC_ATTR(visibility("default")) +void *(* const __realloc_hook)(void *ptr, size_t size) = je_realloc; + +JEMALLOC_ATTR(visibility("default")) +void *(* const __memalign_hook)(size_t alignment, size_t size) = je_memalign; +#endif + +/* + * End non-standard override functions. + */ +/******************************************************************************/ +/* + * Begin non-standard functions. + */ + +JEMALLOC_ATTR(visibility("default")) +size_t +je_malloc_usable_size(const void *ptr) +{ + size_t ret; + + assert(malloc_initialized || IS_INITIALIZER); + + if (config_ivsalloc) + ret = ivsalloc(ptr, config_prof); + else + ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; + + return (ret); +} + +JEMALLOC_ATTR(visibility("default")) +void +je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) +{ + + stats_print(write_cb, cbopaque, opts); +} + +JEMALLOC_ATTR(visibility("default")) +int +je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, + size_t newlen) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_byname(name, oldp, oldlenp, newp, newlen)); +} + +JEMALLOC_ATTR(visibility("default")) +int +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_nametomib(name, mibp, miblenp)); +} + +JEMALLOC_ATTR(visibility("default")) +int +je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) +{ + + if (malloc_init()) + return (EAGAIN); + + return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); +} + +/* + * End non-standard functions. + */ +/******************************************************************************/ +/* + * Begin experimental functions. + */ +#ifdef JEMALLOC_EXPERIMENTAL + +JEMALLOC_INLINE void * +iallocm(size_t usize, size_t alignment, bool zero) +{ + + assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, + alignment))); + + if (alignment != 0) + return (ipalloc(usize, alignment, zero)); + else if (zero) + return (icalloc(usize)); + else + return (imalloc(usize)); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +je_allocm(void **ptr, size_t *rsize, size_t size, int flags) +{ + void *p; + size_t usize; + size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & ALLOCM_ZERO; + prof_thr_cnt_t *cnt; + + assert(ptr != NULL); + assert(size != 0); + + if (malloc_init()) + goto label_oom; + + usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); + if (usize == 0) + goto label_oom; + + if (config_prof && opt_prof) { + PROF_ALLOC_PREP(1, usize, cnt); + if (cnt == NULL) + goto label_oom; + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= + SMALL_MAXCLASS) { + size_t usize_promoted = (alignment == 0) ? 
+ s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, + alignment); + assert(usize_promoted != 0); + p = iallocm(usize_promoted, alignment, zero); + if (p == NULL) + goto label_oom; + arena_prof_promoted(p, usize); + } else { + p = iallocm(usize, alignment, zero); + if (p == NULL) + goto label_oom; + } + prof_malloc(p, usize, cnt); + } else { + p = iallocm(usize, alignment, zero); + if (p == NULL) + goto label_oom; + } + if (rsize != NULL) + *rsize = usize; + + *ptr = p; + if (config_stats) { + assert(usize == isalloc(p, config_prof)); + thread_allocated_tsd_get()->allocated += usize; + } + UTRACE(0, size, p); + JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); + return (ALLOCM_SUCCESS); +label_oom: + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in allocm(): " + "out of memory\n"); + abort(); + } + *ptr = NULL; + UTRACE(0, size, 0); + return (ALLOCM_ERR_OOM); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) +{ + void *p, *q; + size_t usize; + size_t old_size; + size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); + size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + bool zero = flags & ALLOCM_ZERO; + bool no_move = flags & ALLOCM_NO_MOVE; + prof_thr_cnt_t *cnt; + + assert(ptr != NULL); + assert(*ptr != NULL); + assert(size != 0); + assert(SIZE_T_MAX - size >= extra); + assert(malloc_initialized || IS_INITIALIZER); + + p = *ptr; + if (config_prof && opt_prof) { + /* + * usize isn't knowable before iralloc() returns when extra is + * non-zero. Therefore, compute its maximum possible value and + * use that in PROF_ALLOC_PREP() to decide whether to capture a + * backtrace. prof_realloc() will use the actual usize to + * decide whether to sample. + */ + size_t max_usize = (alignment == 0) ? s2u(size+extra) : + sa2u(size+extra, alignment); + prof_ctx_t *old_ctx = prof_ctx_get(p); + old_size = isalloc(p, true); + if (config_valgrind && opt_valgrind) + old_rzsize = p2rz(p); + PROF_ALLOC_PREP(1, max_usize, cnt); + if (cnt == NULL) + goto label_oom; + /* + * Use minimum usize to determine whether promotion may happen. + */ + if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U + && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) + <= SMALL_MAXCLASS) { + q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= + size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), + alignment, zero, no_move); + if (q == NULL) + goto label_err; + if (max_usize < PAGE) { + usize = max_usize; + arena_prof_promoted(q, usize); + } else + usize = isalloc(q, config_prof); + } else { + q = iralloc(p, size, extra, alignment, zero, no_move); + if (q == NULL) + goto label_err; + usize = isalloc(q, config_prof); + } + prof_realloc(q, usize, cnt, old_size, old_ctx); + if (rsize != NULL) + *rsize = usize; + } else { + if (config_stats) { + old_size = isalloc(p, false); + if (config_valgrind && opt_valgrind) + old_rzsize = u2rz(old_size); + } else if (config_valgrind && opt_valgrind) { + old_size = isalloc(p, false); + old_rzsize = u2rz(old_size); + } + q = iralloc(p, size, extra, alignment, zero, no_move); + if (q == NULL) + goto label_err; + if (config_stats) + usize = isalloc(q, config_prof); + if (rsize != NULL) { + if (config_stats == false) + usize = isalloc(q, config_prof); + *rsize = usize; + } + } + + *ptr = q; + if (config_stats) { + thread_allocated_t *ta; + ta = thread_allocated_tsd_get(); + ta->allocated += usize; + ta->deallocated += old_size; + } + UTRACE(p, size, q); + JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); + return (ALLOCM_SUCCESS); +label_err: + if (no_move) { + UTRACE(p, size, q); + return (ALLOCM_ERR_NOT_MOVED); + } +label_oom: + if (config_xmalloc && opt_xmalloc) { + malloc_write(": Error in rallocm(): " + "out of memory\n"); + abort(); + } + UTRACE(p, size, 0); + return (ALLOCM_ERR_OOM); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +je_sallocm(const void *ptr, size_t *rsize, int flags) +{ + size_t sz; + + assert(malloc_initialized || IS_INITIALIZER); + + if (config_ivsalloc) + sz = ivsalloc(ptr, config_prof); + else { + assert(ptr != NULL); + sz = isalloc(ptr, config_prof); + } + assert(rsize != NULL); + *rsize = sz; + + return (ALLOCM_SUCCESS); +} + +JEMALLOC_ATTR(nonnull(1)) +JEMALLOC_ATTR(visibility("default")) +int +je_dallocm(void *ptr, int flags) +{ + size_t usize; + size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + + assert(ptr != NULL); + assert(malloc_initialized || IS_INITIALIZER); + + UTRACE(ptr, 0, 0); + if (config_stats || config_valgrind) + usize = isalloc(ptr, config_prof); + if (config_prof && opt_prof) { + if (config_stats == false && config_valgrind == false) + usize = isalloc(ptr, config_prof); + prof_free(ptr, usize); + } + if (config_stats) + thread_allocated_tsd_get()->deallocated += usize; + if (config_valgrind && opt_valgrind) + rzsize = p2rz(ptr); + iqalloc(ptr); + JEMALLOC_VALGRIND_FREE(ptr, rzsize); + + return (ALLOCM_SUCCESS); +} + +JEMALLOC_ATTR(visibility("default")) +int +je_nallocm(size_t *rsize, size_t size, int flags) +{ + size_t usize; + size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) + & (SIZE_T_MAX-1)); + + assert(size != 0); + + if (malloc_init()) + return (ALLOCM_ERR_OOM); + + usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); + if (usize == 0) + return (ALLOCM_ERR_OOM); + + if (rsize != NULL) + *rsize = usize; + return (ALLOCM_SUCCESS); +} + +#endif +/* + * End experimental functions. + */ +/******************************************************************************/ +/* + * The following functions are used by threading libraries for protection of + * malloc during fork(). + */ + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_prefork(void) +#else +void +_malloc_prefork(void) +#endif +{ + unsigned i; + + /* Acquire all mutexes in a safe order. 
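The fork handlers defined here are registered with pthread_atfork() during initialization; the pattern is to acquire every allocator mutex before fork() and release it on both sides afterwards, so the child never inherits a lock held by a thread that no longer exists. Below is a minimal sketch of the same pattern with a single hypothetical lock; it is not part of the imported sources.

#include <pthread.h>

static pthread_mutex_t sketch_lock = PTHREAD_MUTEX_INITIALIZER;

static void sketch_prefork(void) { pthread_mutex_lock(&sketch_lock); }
static void sketch_postfork_parent(void) { pthread_mutex_unlock(&sketch_lock); }
static void sketch_postfork_child(void) { pthread_mutex_unlock(&sketch_lock); }

static void
sketch_register_fork_handlers(void)
{
	/* Holding the lock across fork() keeps the child's copy consistent. */
	pthread_atfork(sketch_prefork, sketch_postfork_parent,
	    sketch_postfork_child);
}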
*/ + malloc_mutex_prefork(&arenas_lock); + for (i = 0; i < narenas; i++) { + if (arenas[i] != NULL) + arena_prefork(arenas[i]); + } + base_prefork(); + huge_prefork(); + chunk_dss_prefork(); +} + +#ifndef JEMALLOC_MUTEX_INIT_CB +void +jemalloc_postfork_parent(void) +#else +void +_malloc_postfork(void) +#endif +{ + unsigned i; + + /* Release all mutexes, now that fork() has completed. */ + chunk_dss_postfork_parent(); + huge_postfork_parent(); + base_postfork_parent(); + for (i = 0; i < narenas; i++) { + if (arenas[i] != NULL) + arena_postfork_parent(arenas[i]); + } + malloc_mutex_postfork_parent(&arenas_lock); +} + +void +jemalloc_postfork_child(void) +{ + unsigned i; + + /* Release all mutexes, now that fork() has completed. */ + chunk_dss_postfork_child(); + huge_postfork_child(); + base_postfork_child(); + for (i = 0; i < narenas; i++) { + if (arenas[i] != NULL) + arena_postfork_child(arenas[i]); + } + malloc_mutex_postfork_child(&arenas_lock); +} + +/******************************************************************************/ +/* + * The following functions are used for TLS allocation/deallocation in static + * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() + * is that these avoid accessing TLS variables. + */ + +static void * +a0alloc(size_t size, bool zero) +{ + + if (malloc_init()) + return (NULL); + + if (size == 0) + size = 1; + + if (size <= arena_maxclass) + return (arena_malloc(arenas[0], size, zero, false)); + else + return (huge_malloc(size, zero)); +} + +void * +a0malloc(size_t size) +{ + + return (a0alloc(size, false)); +} + +void * +a0calloc(size_t num, size_t size) +{ + + return (a0alloc(num * size, true)); +} + +void +a0free(void *ptr) +{ + arena_chunk_t *chunk; + + if (ptr == NULL) + return; + + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk != ptr) + arena_dalloc(chunk->arena, chunk, ptr, false); + else + huge_dalloc(ptr, true); +} + +/******************************************************************************/ Property changes on: contrib/jemalloc/src/jemalloc.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/tcache.c =================================================================== --- contrib/jemalloc/src/tcache.c (revision 0) +++ contrib/jemalloc/src/tcache.c (working copy) @@ -0,0 +1,435 @@ +#define JEMALLOC_TCACHE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +malloc_tsd_data(, tcache, tcache_t *, NULL) +malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default) + +bool opt_tcache = true; +ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; + +tcache_bin_info_t *tcache_bin_info; +static unsigned stack_nelms; /* Total stack elms per tcache. */ + +size_t nhbins; +size_t tcache_maxclass; + +/******************************************************************************/ + +void * +tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) +{ + void *ret; + + arena_tcache_fill_small(tcache->arena, tbin, binind, + config_prof ? 
tcache->prof_accumbytes : 0); + if (config_prof) + tcache->prof_accumbytes = 0; + ret = tcache_alloc_easy(tbin); + + return (ret); +} + +void +tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, + tcache_t *tcache) +{ + void *ptr; + unsigned i, nflush, ndeferred; + bool merged_stats = false; + + assert(binind < NBINS); + assert(rem <= tbin->ncached); + + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena bin associated with the first object. */ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + tbin->avail[0]); + arena_t *arena = chunk->arena; + arena_bin_t *bin = &arena->bins[binind]; + + if (config_prof && arena == tcache->arena) { + malloc_mutex_lock(&arena->lock); + arena_prof_accum(arena, tcache->prof_accumbytes); + malloc_mutex_unlock(&arena->lock); + tcache->prof_accumbytes = 0; + } + + malloc_mutex_lock(&bin->lock); + if (config_stats && arena == tcache->arena) { + assert(merged_stats == false); + merged_stats = true; + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = tbin->avail[i]; + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk->arena == arena) { + size_t pageind = ((uintptr_t)ptr - + (uintptr_t)chunk) >> LG_PAGE; + arena_chunk_map_t *mapelm = + &chunk->map[pageind-map_bias]; + if (config_fill && opt_junk) { + arena_alloc_junk_small(ptr, + &arena_bin_info[binind], true); + } + arena_dalloc_bin(arena, chunk, ptr, mapelm); + } else { + /* + * This object was allocated via a different + * arena bin than the one that is currently + * locked. Stash the object, so that it can be + * handled in a future pass. + */ + tbin->avail[ndeferred] = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(&bin->lock); + } + if (config_stats && merged_stats == false) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_bin_t *bin = &tcache->arena->bins[binind]; + malloc_mutex_lock(&bin->lock); + bin->stats.nflushes++; + bin->stats.nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(&bin->lock); + } + + memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], + rem * sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +void +tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, + tcache_t *tcache) +{ + void *ptr; + unsigned i, nflush, ndeferred; + bool merged_stats = false; + + assert(binind < nhbins); + assert(rem <= tbin->ncached); + + for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + /* Lock the arena associated with the first object. 
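tcache_bin_flush_small() above (and tcache_bin_flush_large() below) drains a cache bin in passes: each pass locks the arena or bin that owns the first remaining object, returns every object with that owner, and compacts the rest to the front of the array for a later pass. The same control flow is sketched below; owner_of() and the lock/free helpers are hypothetical, and this is not part of the imported sources.

/* Hypothetical helpers standing in for arena/bin lookup and locking. */
void *owner_of(void *item);
void lock_owner(void *owner);
void unlock_owner(void *owner);
void free_locked(void *owner, void *item);

static void
flush_sketch(void **items, unsigned nitems)
{
	unsigned nflush, ndeferred, i;

	for (nflush = nitems; nflush > 0; nflush = ndeferred) {
		void *owner = owner_of(items[0]);

		lock_owner(owner);
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			if (owner_of(items[i]) == owner)
				free_locked(owner, items[i]);
			else {
				/* Stash for a later pass under its own lock. */
				items[ndeferred++] = items[i];
			}
		}
		unlock_owner(owner);
	}
}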
*/ + arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( + tbin->avail[0]); + arena_t *arena = chunk->arena; + + malloc_mutex_lock(&arena->lock); + if ((config_prof || config_stats) && arena == tcache->arena) { + if (config_prof) { + arena_prof_accum(arena, + tcache->prof_accumbytes); + tcache->prof_accumbytes = 0; + } + if (config_stats) { + merged_stats = true; + arena->stats.nrequests_large += + tbin->tstats.nrequests; + arena->stats.lstats[binind - NBINS].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } + } + ndeferred = 0; + for (i = 0; i < nflush; i++) { + ptr = tbin->avail[i]; + assert(ptr != NULL); + chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); + if (chunk->arena == arena) + arena_dalloc_large(arena, chunk, ptr); + else { + /* + * This object was allocated via a different + * arena than the one that is currently locked. + * Stash the object, so that it can be handled + * in a future pass. + */ + tbin->avail[ndeferred] = ptr; + ndeferred++; + } + } + malloc_mutex_unlock(&arena->lock); + } + if (config_stats && merged_stats == false) { + /* + * The flush loop didn't happen to flush to this thread's + * arena, so the stats didn't get merged. Manually do so now. + */ + arena_t *arena = tcache->arena; + malloc_mutex_lock(&arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[binind - NBINS].nrequests += + tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + malloc_mutex_unlock(&arena->lock); + } + + memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], + rem * sizeof(void *)); + tbin->ncached = rem; + if ((int)tbin->ncached < tbin->low_water) + tbin->low_water = tbin->ncached; +} + +void +tcache_arena_associate(tcache_t *tcache, arena_t *arena) +{ + + if (config_stats) { + /* Link into list of extant tcaches. */ + malloc_mutex_lock(&arena->lock); + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + malloc_mutex_unlock(&arena->lock); + } + tcache->arena = arena; +} + +void +tcache_arena_dissociate(tcache_t *tcache) +{ + + if (config_stats) { + /* Unlink from list of extant tcaches. */ + malloc_mutex_lock(&tcache->arena->lock); + ql_remove(&tcache->arena->tcache_ql, tcache, link); + malloc_mutex_unlock(&tcache->arena->lock); + tcache_stats_merge(tcache, tcache->arena); + } +} + +tcache_t * +tcache_create(arena_t *arena) +{ + tcache_t *tcache; + size_t size, stack_offset; + unsigned i; + + size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); + /* Naturally align the pointer stacks. */ + size = PTR_CEILING(size); + stack_offset = size; + size += stack_nelms * sizeof(void *); + /* + * Round up to the nearest multiple of the cacheline size, in order to + * avoid the possibility of false cacheline sharing. + * + * That this works relies on the same logic as in ipalloc(), but we + * cannot directly call ipalloc() here due to tcache bootstrapping + * issues. 
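The size computation that follows in tcache_create() rounds the tcache footprint up to a whole number of cache lines so adjacent allocations cannot share a line. The rounding idiom is shown below with a hypothetical 64-byte cache line; it is not part of the imported sources.

#include <stddef.h>

#define CACHELINE_SK		((size_t)64)
#define CACHELINE_MASK_SK	(CACHELINE_SK - 1)

static size_t
cacheline_ceiling(size_t size)
{
	/* (size + 63) & ~63 rounds up to the next multiple of 64. */
	return ((size + CACHELINE_MASK_SK) & ~CACHELINE_MASK_SK);
}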
+ */ + size = (size + CACHELINE_MASK) & (-CACHELINE); + + if (size <= SMALL_MAXCLASS) + tcache = (tcache_t *)arena_malloc_small(arena, size, true); + else if (size <= tcache_maxclass) + tcache = (tcache_t *)arena_malloc_large(arena, size, true); + else + tcache = (tcache_t *)icalloc(size); + + if (tcache == NULL) + return (NULL); + + tcache_arena_associate(tcache, arena); + + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + for (i = 0; i < nhbins; i++) { + tcache->tbins[i].lg_fill_div = 1; + tcache->tbins[i].avail = (void **)((uintptr_t)tcache + + (uintptr_t)stack_offset); + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + } + + tcache_tsd_set(&tcache); + + return (tcache); +} + +void +tcache_destroy(tcache_t *tcache) +{ + unsigned i; + size_t tcache_size; + + tcache_arena_dissociate(tcache); + + for (i = 0; i < NBINS; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_small(tbin, i, 0, tcache); + + if (config_stats && tbin->tstats.nrequests != 0) { + arena_t *arena = tcache->arena; + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(&bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(&bin->lock); + } + } + + for (; i < nhbins; i++) { + tcache_bin_t *tbin = &tcache->tbins[i]; + tcache_bin_flush_large(tbin, i, 0, tcache); + + if (config_stats && tbin->tstats.nrequests != 0) { + arena_t *arena = tcache->arena; + malloc_mutex_lock(&arena->lock); + arena->stats.nrequests_large += tbin->tstats.nrequests; + arena->stats.lstats[i - NBINS].nrequests += + tbin->tstats.nrequests; + malloc_mutex_unlock(&arena->lock); + } + } + + if (config_prof && tcache->prof_accumbytes > 0) { + malloc_mutex_lock(&tcache->arena->lock); + arena_prof_accum(tcache->arena, tcache->prof_accumbytes); + malloc_mutex_unlock(&tcache->arena->lock); + } + + tcache_size = arena_salloc(tcache, false); + if (tcache_size <= SMALL_MAXCLASS) { + arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); + arena_t *arena = chunk->arena; + size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >> + LG_PAGE; + arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias]; + arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + + (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) << + LG_PAGE)); + arena_bin_t *bin = run->bin; + + malloc_mutex_lock(&bin->lock); + arena_dalloc_bin(arena, chunk, tcache, mapelm); + malloc_mutex_unlock(&bin->lock); + } else if (tcache_size <= tcache_maxclass) { + arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); + arena_t *arena = chunk->arena; + + malloc_mutex_lock(&arena->lock); + arena_dalloc_large(arena, chunk, tcache); + malloc_mutex_unlock(&arena->lock); + } else + idalloc(tcache); +} + +void +tcache_thread_cleanup(void *arg) +{ + tcache_t *tcache = *(tcache_t **)arg; + + if (tcache == TCACHE_STATE_DISABLED) { + /* Do nothing. */ + } else if (tcache == TCACHE_STATE_REINCARNATED) { + /* + * Another destructor called an allocator function after this + * destructor was called. Reset tcache to + * TCACHE_STATE_PURGATORY in order to receive another callback. + */ + tcache = TCACHE_STATE_PURGATORY; + tcache_tsd_set(&tcache); + } else if (tcache == TCACHE_STATE_PURGATORY) { + /* + * The previous time this destructor was called, we set the key + * to TCACHE_STATE_PURGATORY so that other destructors wouldn't + * cause re-creation of the tcache. This time, do nothing, so + * that the destructor will not be called again. 
+ */ + } else if (tcache != NULL) { + assert(tcache != TCACHE_STATE_PURGATORY); + tcache_destroy(tcache); + tcache = TCACHE_STATE_PURGATORY; + tcache_tsd_set(&tcache); + } +} + +void +tcache_stats_merge(tcache_t *tcache, arena_t *arena) +{ + unsigned i; + + /* Merge and reset tcache stats. */ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + tcache_bin_t *tbin = &tcache->tbins[i]; + malloc_mutex_lock(&bin->lock); + bin->stats.nrequests += tbin->tstats.nrequests; + malloc_mutex_unlock(&bin->lock); + tbin->tstats.nrequests = 0; + } + + for (; i < nhbins; i++) { + malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; + tcache_bin_t *tbin = &tcache->tbins[i]; + arena->stats.nrequests_large += tbin->tstats.nrequests; + lstats->nrequests += tbin->tstats.nrequests; + tbin->tstats.nrequests = 0; + } +} + +bool +tcache_boot0(void) +{ + unsigned i; + + /* + * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is + * known. + */ + if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) + tcache_maxclass = SMALL_MAXCLASS; + else if ((1U << opt_lg_tcache_max) > arena_maxclass) + tcache_maxclass = arena_maxclass; + else + tcache_maxclass = (1U << opt_lg_tcache_max); + + nhbins = NBINS + (tcache_maxclass >> LG_PAGE); + + /* Initialize tcache_bin_info. */ + tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * + sizeof(tcache_bin_info_t)); + if (tcache_bin_info == NULL) + return (true); + stack_nelms = 0; + for (i = 0; i < NBINS; i++) { + if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { + tcache_bin_info[i].ncached_max = + (arena_bin_info[i].nregs << 1); + } else { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MAX; + } + stack_nelms += tcache_bin_info[i].ncached_max; + } + for (; i < nhbins; i++) { + tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE; + stack_nelms += tcache_bin_info[i].ncached_max; + } + + return (false); +} + +bool +tcache_boot1(void) +{ + + if (tcache_tsd_boot() || tcache_enabled_tsd_boot()) + return (true); + + return (false); +} Property changes on: contrib/jemalloc/src/tcache.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/huge.c =================================================================== --- contrib/jemalloc/src/huge.c (revision 0) +++ contrib/jemalloc/src/huge.c (working copy) @@ -0,0 +1,306 @@ +#define JEMALLOC_HUGE_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +uint64_t huge_nmalloc; +uint64_t huge_ndalloc; +size_t huge_allocated; + +malloc_mutex_t huge_mtx; + +/******************************************************************************/ + +/* Tree of chunks that are stand-alone huge allocations. */ +static extent_tree_t huge; + +void * +huge_malloc(size_t size, bool zero) +{ + + return (huge_palloc(size, chunksize, zero)); +} + +void * +huge_palloc(size_t size, size_t alignment, bool zero) +{ + void *ret; + size_t csize; + extent_node_t *node; + + /* Allocate one or more contiguous chunks for this request. */ + + csize = CHUNK_CEILING(size); + if (csize == 0) { + /* size is large enough to cause size_t wrap-around. */ + return (NULL); + } + + /* Allocate an extent node with which to track the chunk. 
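huge_palloc() above rounds the request up to a whole number of chunks and treats a result of zero as evidence that the addition wrapped around size_t. The sketch below shows the same check with a hypothetical 4 MiB chunk size; it is not part of the imported sources.

#include <stdbool.h>
#include <stddef.h>

#define CHUNKSIZE_SK	((size_t)1 << 22)

static bool
chunk_ceiling_wraps(size_t size)
{
	/* Rounding a size near the top of size_t up to a chunk boundary wraps to 0. */
	size_t csize = (size + CHUNKSIZE_SK - 1) & ~(CHUNKSIZE_SK - 1);

	return (csize == 0 && size != 0);
}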
*/ + node = base_node_alloc(); + if (node == NULL) + return (NULL); + + ret = chunk_alloc(csize, alignment, false, &zero); + if (ret == NULL) { + base_node_dealloc(node); + return (NULL); + } + + /* Insert node into huge. */ + node->addr = ret; + node->size = csize; + + malloc_mutex_lock(&huge_mtx); + extent_tree_ad_insert(&huge, node); + if (config_stats) { + stats_cactive_add(csize); + huge_nmalloc++; + huge_allocated += csize; + } + malloc_mutex_unlock(&huge_mtx); + + if (config_fill && zero == false) { + if (opt_junk) + memset(ret, 0xa5, csize); + else if (opt_zero) + memset(ret, 0, csize); + } + + return (ret); +} + +void * +huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) +{ + + /* + * Avoid moving the allocation if the size class can be left the same. + */ + if (oldsize > arena_maxclass + && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) + && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { + assert(CHUNK_CEILING(oldsize) == oldsize); + if (config_fill && opt_junk && size < oldsize) { + memset((void *)((uintptr_t)ptr + size), 0x5a, + oldsize - size); + } + return (ptr); + } + + /* Reallocation would require a move. */ + return (NULL); +} + +void * +huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero) +{ + void *ret; + size_t copysize; + + /* Try to avoid moving the allocation. */ + ret = huge_ralloc_no_move(ptr, oldsize, size, extra); + if (ret != NULL) + return (ret); + + /* + * size and oldsize are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + if (alignment > chunksize) + ret = huge_palloc(size + extra, alignment, zero); + else + ret = huge_malloc(size + extra, zero); + + if (ret == NULL) { + if (extra == 0) + return (NULL); + /* Try again, this time without extra. */ + if (alignment > chunksize) + ret = huge_palloc(size, alignment, zero); + else + ret = huge_malloc(size, zero); + + if (ret == NULL) + return (NULL); + } + + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? size : oldsize; + + /* + * Use mremap(2) if this is a huge-->huge reallocation, and neither the + * source nor the destination are in dss. + */ +#ifdef JEMALLOC_MREMAP_FIXED + if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr) + == false && chunk_in_dss(ret) == false))) { + size_t newsize = huge_salloc(ret); + + /* + * Remove ptr from the tree of huge allocations before + * performing the remap operation, in order to avoid the + * possibility of another thread acquiring that mapping before + * this one removes it from the tree. + */ + huge_dalloc(ptr, false); + if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED, + ret) == MAP_FAILED) { + /* + * Assuming no chunk management bugs in the allocator, + * the only documented way an error can occur here is + * if the application changed the map type for a + * portion of the old allocation. This is firmly in + * undefined behavior territory, so write a diagnostic + * message, and optionally abort. 
+ */ + char buf[BUFERROR_BUF]; + + buferror(errno, buf, sizeof(buf)); + malloc_printf(": Error in mremap(): %s\n", + buf); + if (opt_abort) + abort(); + memcpy(ret, ptr, copysize); + chunk_dealloc_mmap(ptr, oldsize); + } + } else +#endif + { + memcpy(ret, ptr, copysize); + iqalloc(ptr); + } + return (ret); +} + +void +huge_dalloc(void *ptr, bool unmap) +{ + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. */ + key.addr = ptr; + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + assert(node->addr == ptr); + extent_tree_ad_remove(&huge, node); + + if (config_stats) { + stats_cactive_sub(node->size); + huge_ndalloc++; + huge_allocated -= node->size; + } + + malloc_mutex_unlock(&huge_mtx); + + if (unmap && config_fill && config_dss && opt_junk) + memset(node->addr, 0x5a, node->size); + + chunk_dealloc(node->addr, node->size, unmap); + + base_node_dealloc(node); +} + +size_t +huge_salloc(const void *ptr) +{ + size_t ret; + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. */ + key.addr = __DECONST(void *, ptr); + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + + ret = node->size; + + malloc_mutex_unlock(&huge_mtx); + + return (ret); +} + +prof_ctx_t * +huge_prof_ctx_get(const void *ptr) +{ + prof_ctx_t *ret; + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. */ + key.addr = __DECONST(void *, ptr); + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + + ret = node->prof_ctx; + + malloc_mutex_unlock(&huge_mtx); + + return (ret); +} + +void +huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) +{ + extent_node_t *node, key; + + malloc_mutex_lock(&huge_mtx); + + /* Extract from tree of huge allocations. */ + key.addr = __DECONST(void *, ptr); + node = extent_tree_ad_search(&huge, &key); + assert(node != NULL); + + node->prof_ctx = ctx; + + malloc_mutex_unlock(&huge_mtx); +} + +bool +huge_boot(void) +{ + + /* Initialize chunks data. */ + if (malloc_mutex_init(&huge_mtx)) + return (true); + extent_tree_ad_new(&huge); + + if (config_stats) { + huge_nmalloc = 0; + huge_ndalloc = 0; + huge_allocated = 0; + } + + return (false); +} + +void +huge_prefork(void) +{ + + malloc_mutex_prefork(&huge_mtx); +} + +void +huge_postfork_parent(void) +{ + + malloc_mutex_postfork_parent(&huge_mtx); +} + +void +huge_postfork_child(void) +{ + + malloc_mutex_postfork_child(&huge_mtx); +} Property changes on: contrib/jemalloc/src/huge.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/ckh.c =================================================================== --- contrib/jemalloc/src/ckh.c (revision 0) +++ contrib/jemalloc/src/ckh.c (working copy) @@ -0,0 +1,609 @@ +/* + ******************************************************************************* + * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each + * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash + * functions are employed. The original cuckoo hashing algorithm was described + * in: + * + * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms + * 51(2):122-144. + * + * Generalization of cuckoo hashing was discussed in: + * + * Erlingsson, U., M. Manasse, F. 
McSherry (2006) A cool and practical + * alternative to traditional hash tables. In Proceedings of the 7th + * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA, + * January 2006. + * + * This implementation uses precisely two hash functions because that is the + * fewest that can work, and supporting multiple hashes is an implementation + * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006) + * that shows approximate expected maximum load factors for various + * configurations: + * + * | #cells/bucket | + * #hashes | 1 | 2 | 4 | 8 | + * --------+-------+-------+-------+-------+ + * 1 | 0.006 | 0.006 | 0.03 | 0.12 | + * 2 | 0.49 | 0.86 |>0.93< |>0.96< | + * 3 | 0.91 | 0.97 | 0.98 | 0.999 | + * 4 | 0.97 | 0.99 | 0.999 | | + * + * The number of cells per bucket is chosen such that a bucket fits in one cache + * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing, + * respectively. + * + ******************************************************************************/ +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static bool ckh_grow(ckh_t *ckh); +static void ckh_shrink(ckh_t *ckh); + +/******************************************************************************/ + +/* + * Search bucket for key and return the cell number if found; SIZE_T_MAX + * otherwise. + */ +JEMALLOC_INLINE size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) +{ + ckhc_t *cell; + unsigned i; + + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + if (cell->key != NULL && ckh->keycomp(key, cell->key)) + return ((bucket << LG_CKH_BUCKET_CELLS) + i); + } + + return (SIZE_T_MAX); +} + +/* + * Search table for key and return cell number if found; SIZE_T_MAX otherwise. + */ +JEMALLOC_INLINE size_t +ckh_isearch(ckh_t *ckh, const void *key) +{ + size_t hash1, hash2, bucket, cell; + + assert(ckh != NULL); + + ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2); + + /* Search primary bucket. */ + bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + if (cell != SIZE_T_MAX) + return (cell); + + /* Search secondary bucket. */ + bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1); + cell = ckh_bucket_search(ckh, bucket, key); + return (cell); +} + +JEMALLOC_INLINE bool +ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, + const void *data) +{ + ckhc_t *cell; + unsigned offset, i; + + /* + * Cycle through the cells in the bucket, starting at a random position. + * The randomness avoids worst-case search overhead as buckets fill up. + */ + prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; + if (cell->key == NULL) { + cell->key = key; + cell->data = data; + ckh->count++; + return (false); + } + } + + return (true); +} + +/* + * No space is available in bucket. Randomly evict an item, then try to find an + * alternate location for that item. Iteratively repeat this + * eviction/relocation procedure until either success or detection of an + * eviction/relocation bucket cycle. 
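+ *
+ * A cycle is detected when the eviction chain arrives back at the bucket it
+ * started from (argbucket).  In that case the currently displaced key/data
+ * pair is handed back through argkey/argdata so that the caller (ultimately
+ * ckh_insert()) can grow the table and retry the insertion.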
+ */ +JEMALLOC_INLINE bool +ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, + void const **argdata) +{ + const void *key, *data, *tkey, *tdata; + ckhc_t *cell; + size_t hash1, hash2, bucket, tbucket; + unsigned i; + + bucket = argbucket; + key = *argkey; + data = *argdata; + while (true) { + /* + * Choose a random item within the bucket to evict. This is + * critical to correct function, because without (eventually) + * evicting all items within a bucket during iteration, it + * would be possible to get stuck in an infinite loop if there + * were an item for which both hashes indicated the same + * bucket. + */ + prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; + assert(cell->key != NULL); + + /* Swap cell->{key,data} and {key,data} (evict). */ + tkey = cell->key; tdata = cell->data; + cell->key = key; cell->data = data; + key = tkey; data = tdata; + +#ifdef CKH_COUNT + ckh->nrelocs++; +#endif + + /* Find the alternate bucket for the evicted item. */ + ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2); + tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (tbucket == bucket) { + tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1); + /* + * It may be that (tbucket == bucket) still, if the + * item's hashes both indicate this bucket. However, + * we are guaranteed to eventually escape this bucket + * during iteration, assuming pseudo-random item + * selection (true randomness would make infinite + * looping a remote possibility). The reason we can + * never get trapped forever is that there are two + * cases: + * + * 1) This bucket == argbucket, so we will quickly + * detect an eviction cycle and terminate. + * 2) An item was evicted to this bucket from another, + * which means that at least one item in this bucket + * has hashes that indicate distinct buckets. + */ + } + /* Check for a cycle. */ + if (tbucket == argbucket) { + *argkey = key; + *argdata = data; + return (true); + } + + bucket = tbucket; + if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) + return (false); + } +} + +JEMALLOC_INLINE bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) +{ + size_t hash1, hash2, bucket; + const void *key = *argkey; + const void *data = *argdata; + + ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2); + + /* Try to insert in primary bucket. */ + bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) + return (false); + + /* Try to insert in secondary bucket. */ + bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1); + if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) + return (false); + + /* + * Try to find a place for this item via iterative eviction/relocation. + */ + return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); +} + +/* + * Try to rebuild the hash table from scratch by inserting all items from the + * old table into the new. 
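+ *
+ * Returns false on success.  On failure the original item count is restored
+ * and true is returned, so that ckh_grow()/ckh_shrink() can swap the old
+ * table back in and leave the hash unchanged.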
+ */ +JEMALLOC_INLINE bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) +{ + size_t count, i, nins; + const void *key, *data; + + count = ckh->count; + ckh->count = 0; + for (i = nins = 0; nins < count; i++) { + if (aTab[i].key != NULL) { + key = aTab[i].key; + data = aTab[i].data; + if (ckh_try_insert(ckh, &key, &data)) { + ckh->count = count; + return (true); + } + nins++; + } + } + + return (false); +} + +static bool +ckh_grow(ckh_t *ckh) +{ + bool ret; + ckhc_t *tab, *ttab; + size_t lg_curcells; + unsigned lg_prevbuckets; + +#ifdef CKH_COUNT + ckh->ngrows++; +#endif + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table will have to be doubled more than once in order to create a + * usable table. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS; + while (true) { + size_t usize; + + lg_curcells++; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (usize == 0) { + ret = true; + goto label_return; + } + tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + if (tab == NULL) { + ret = true; + goto label_return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (ckh_rebuild(ckh, tab) == false) { + idalloc(tab); + break; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloc(ckh->tab); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; + } + + ret = false; +label_return: + return (ret); +} + +static void +ckh_shrink(ckh_t *ckh) +{ + ckhc_t *tab, *ttab; + size_t lg_curcells, usize; + unsigned lg_prevbuckets; + + /* + * It is possible (though unlikely, given well behaved hashes) that the + * table rebuild will fail. + */ + lg_prevbuckets = ckh->lg_curbuckets; + lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; + usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (usize == 0) + return; + tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + if (tab == NULL) { + /* + * An OOM error isn't worth propagating, since it doesn't + * prevent this or future operations from proceeding. + */ + return; + } + /* Swap in new table. */ + ttab = ckh->tab; + ckh->tab = tab; + tab = ttab; + ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; + + if (ckh_rebuild(ckh, tab) == false) { + idalloc(tab); +#ifdef CKH_COUNT + ckh->nshrinks++; +#endif + return; + } + + /* Rebuilding failed, so back out partially rebuilt table. */ + idalloc(ckh->tab); + ckh->tab = tab; + ckh->lg_curbuckets = lg_prevbuckets; +#ifdef CKH_COUNT + ckh->nshrinkfails++; +#endif +} + +bool +ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) +{ + bool ret; + size_t mincells, usize; + unsigned lg_mincells; + + assert(minitems > 0); + assert(hash != NULL); + assert(keycomp != NULL); + +#ifdef CKH_COUNT + ckh->ngrows = 0; + ckh->nshrinks = 0; + ckh->nshrinkfails = 0; + ckh->ninserts = 0; + ckh->nrelocs = 0; +#endif + ckh->prng_state = 42; /* Value doesn't really matter. */ + ckh->count = 0; + + /* + * Find the minimum power of 2 that is large enough to fit aBaseCount + * entries. We are using (2+,2) cuckoo hashing, which has an expected + * maximum load factor of at least ~0.86, so 0.75 is a conservative load + * factor that will typically allow 2^aLgMinItems to fit without ever + * growing the table. 
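+	 *
+	 * For illustration with the minitems argument (values are only an
+	 * example, not from the original source): minitems == 64 gives
+	 * mincells == ((64 + 2) / 3) << 2 == 88, which the loop below rounds
+	 * up to the next power of two, 128 cells, for an initial load factor
+	 * of 64/128 == 0.5.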
+ */ + assert(LG_CKH_BUCKET_CELLS > 0); + mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; + for (lg_mincells = LG_CKH_BUCKET_CELLS; + (ZU(1) << lg_mincells) < mincells; + lg_mincells++) + ; /* Do nothing. */ + ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; + ckh->hash = hash; + ckh->keycomp = keycomp; + + usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (usize == 0) { + ret = true; + goto label_return; + } + ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + if (ckh->tab == NULL) { + ret = true; + goto label_return; + } + + ret = false; +label_return: + return (ret); +} + +void +ckh_delete(ckh_t *ckh) +{ + + assert(ckh != NULL); + +#ifdef CKH_VERBOSE + malloc_printf( + "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64"," + " nshrinkfails: %"PRIu64", ninserts: %"PRIu64"," + " nrelocs: %"PRIu64"\n", __func__, ckh, + (unsigned long long)ckh->ngrows, + (unsigned long long)ckh->nshrinks, + (unsigned long long)ckh->nshrinkfails, + (unsigned long long)ckh->ninserts, + (unsigned long long)ckh->nrelocs); +#endif + + idalloc(ckh->tab); +#ifdef JEMALLOC_DEBUG + memset(ckh, 0x5a, sizeof(ckh_t)); +#endif +} + +size_t +ckh_count(ckh_t *ckh) +{ + + assert(ckh != NULL); + + return (ckh->count); +} + +bool +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) +{ + size_t i, ncells; + + for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { + if (ckh->tab[i].key != NULL) { + if (key != NULL) + *key = (void *)ckh->tab[i].key; + if (data != NULL) + *data = (void *)ckh->tab[i].data; + *tabind = i + 1; + return (false); + } + } + + return (true); +} + +bool +ckh_insert(ckh_t *ckh, const void *key, const void *data) +{ + bool ret; + + assert(ckh != NULL); + assert(ckh_search(ckh, key, NULL, NULL)); + +#ifdef CKH_COUNT + ckh->ninserts++; +#endif + + while (ckh_try_insert(ckh, &key, &data)) { + if (ckh_grow(ckh)) { + ret = true; + goto label_return; + } + } + + ret = false; +label_return: + return (ret); +} + +bool +ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) +{ + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + ckh->tab[cell].key = NULL; + ckh->tab[cell].data = NULL; /* Not necessary. */ + + ckh->count--; + /* Try to halve the table if it is less than 1/4 full. */ + if (ckh->count < (ZU(1) << (ckh->lg_curbuckets + + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets + > ckh->lg_minbuckets) { + /* Ignore error due to OOM. */ + ckh_shrink(ckh); + } + + return (false); + } + + return (true); +} + +bool +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) +{ + size_t cell; + + assert(ckh != NULL); + + cell = ckh_isearch(ckh, searchkey); + if (cell != SIZE_T_MAX) { + if (key != NULL) + *key = (void *)ckh->tab[cell].key; + if (data != NULL) + *data = (void *)ckh->tab[cell].data; + return (false); + } + + return (true); +} + +void +ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2) +{ + size_t ret1, ret2; + uint64_t h; + + assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64)); + assert(hash1 != NULL); + assert(hash2 != NULL); + + h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea)); + if (minbits <= 32) { + /* + * Avoid doing multiple hashes, since a single hash provides + * enough bits. 
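+		 * The single 64-bit hash is simply split into two 32-bit
+		 * halves, which serve as the two cuckoo hashes.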
+ */ + ret1 = h & ZU(0xffffffffU); + ret2 = h >> 32; + } else { + ret1 = h; + ret2 = hash(key, strlen((const char *)key), + UINT64_C(0x8432a476666bbc13)); + } + + *hash1 = ret1; + *hash2 = ret2; +} + +bool +ckh_string_keycomp(const void *k1, const void *k2) +{ + + assert(k1 != NULL); + assert(k2 != NULL); + + return (strcmp((char *)k1, (char *)k2) ? false : true); +} + +void +ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1, + size_t *hash2) +{ + size_t ret1, ret2; + uint64_t h; + union { + const void *v; + uint64_t i; + } u; + + assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64)); + assert(hash1 != NULL); + assert(hash2 != NULL); + + assert(sizeof(u.v) == sizeof(u.i)); +#if (LG_SIZEOF_PTR != LG_SIZEOF_INT) + u.i = 0; +#endif + u.v = key; + h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082)); + if (minbits <= 32) { + /* + * Avoid doing multiple hashes, since a single hash provides + * enough bits. + */ + ret1 = h & ZU(0xffffffffU); + ret2 = h >> 32; + } else { + assert(SIZEOF_PTR == 8); + ret1 = h; + ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d)); + } + + *hash1 = ret1; + *hash2 = ret2; +} + +bool +ckh_pointer_keycomp(const void *k1, const void *k2) +{ + + return ((k1 == k2) ? true : false); +} Property changes on: contrib/jemalloc/src/ckh.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/prof.c =================================================================== --- contrib/jemalloc/src/prof.c (revision 0) +++ contrib/jemalloc/src/prof.c (working copy) @@ -0,0 +1,1243 @@ +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_internal.h" +/******************************************************************************/ + +#ifdef JEMALLOC_PROF_LIBUNWIND +#define UNW_LOCAL_ONLY +#include +#endif + +#ifdef JEMALLOC_PROF_LIBGCC +#include +#endif + +/******************************************************************************/ +/* Data. */ + +malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL) + +bool opt_prof = false; +bool opt_prof_active = true; +size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; +ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; +bool opt_prof_gdump = false; +bool opt_prof_leak = false; +bool opt_prof_accum = true; +char opt_prof_prefix[PATH_MAX + 1]; + +uint64_t prof_interval; +bool prof_promote; + +/* + * Table of mutexes that are shared among ctx's. These are leaf locks, so + * there is no problem with using them for more than one ctx at the same time. + * The primary motivation for this sharing though is that ctx's are ephemeral, + * and destroying mutexes causes complications for systems that allocate when + * creating/destroying mutexes. + */ +static malloc_mutex_t *ctx_locks; +static unsigned cum_ctxs; /* Atomic counter. */ + +/* + * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data + * structure that knows about all backtraces currently captured. + */ +static ckh_t bt2ctx; +static malloc_mutex_t bt2ctx_mtx; + +static malloc_mutex_t prof_dump_seq_mtx; +static uint64_t prof_dump_seq; +static uint64_t prof_dump_iseq; +static uint64_t prof_dump_mseq; +static uint64_t prof_dump_useq; + +/* + * This buffer is rather large for stack allocation, so use a single buffer for + * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since + * it must be locked anyway during dumping. 
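+ * (prof_dump() brackets every dump with prof_enter()/prof_leave(), which
+ * acquire and release bt2ctx_mtx, so writers to this buffer are always
+ * serialized.)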
+ */ +static char prof_dump_buf[PROF_DUMP_BUFSIZE]; +static unsigned prof_dump_buf_end; +static int prof_dump_fd; + +/* Do not dump any profiles until bootstrapping is complete. */ +static bool prof_booted = false; + +static malloc_mutex_t enq_mtx; +static bool enq; +static bool enq_idump; +static bool enq_gdump; + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static prof_bt_t *bt_dup(prof_bt_t *bt); +static void bt_destroy(prof_bt_t *bt); +#ifdef JEMALLOC_PROF_LIBGCC +static _Unwind_Reason_Code prof_unwind_init_callback( + struct _Unwind_Context *context, void *arg); +static _Unwind_Reason_Code prof_unwind_callback( + struct _Unwind_Context *context, void *arg); +#endif +static bool prof_flush(bool propagate_err); +static bool prof_write(bool propagate_err, const char *s); +static bool prof_printf(bool propagate_err, const char *format, ...) + JEMALLOC_ATTR(format(printf, 2, 3)); +static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, + size_t *leak_nctx); +static void prof_ctx_destroy(prof_ctx_t *ctx); +static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt); +static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, + prof_bt_t *bt); +static bool prof_dump_maps(bool propagate_err); +static bool prof_dump(bool propagate_err, const char *filename, + bool leakcheck); +static void prof_dump_filename(char *filename, char v, int64_t vseq); +static void prof_fdump(void); +static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, + size_t *hash2); +static bool prof_bt_keycomp(const void *k1, const void *k2); +static malloc_mutex_t *prof_ctx_mutex_choose(void); + +/******************************************************************************/ + +void +bt_init(prof_bt_t *bt, void **vec) +{ + + cassert(config_prof); + + bt->vec = vec; + bt->len = 0; +} + +static void +bt_destroy(prof_bt_t *bt) +{ + + cassert(config_prof); + + idalloc(bt); +} + +static prof_bt_t * +bt_dup(prof_bt_t *bt) +{ + prof_bt_t *ret; + + cassert(config_prof); + + /* + * Create a single allocation that has space for vec immediately + * following the prof_bt_t structure. The backtraces that get + * stored in the backtrace caches are copied from stack-allocated + * temporary variables, so size is known at creation time. Making this + * a contiguous object improves cache locality. 
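+	 *
+	 * Resulting layout (illustrative):
+	 *
+	 *   [prof_bt_t, padded to QUANTUM | vec[0] ... vec[len-1]]
+	 *
+	 * with ret->vec pointing just past the padded header.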
+ */ + ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) + + (bt->len * sizeof(void *))); + if (ret == NULL) + return (NULL); + ret->vec = (void **)((uintptr_t)ret + + QUANTUM_CEILING(sizeof(prof_bt_t))); + memcpy(ret->vec, bt->vec, bt->len * sizeof(void *)); + ret->len = bt->len; + + return (ret); +} + +static inline void +prof_enter(void) +{ + + cassert(config_prof); + + malloc_mutex_lock(&enq_mtx); + enq = true; + malloc_mutex_unlock(&enq_mtx); + + malloc_mutex_lock(&bt2ctx_mtx); +} + +static inline void +prof_leave(void) +{ + bool idump, gdump; + + cassert(config_prof); + + malloc_mutex_unlock(&bt2ctx_mtx); + + malloc_mutex_lock(&enq_mtx); + enq = false; + idump = enq_idump; + enq_idump = false; + gdump = enq_gdump; + enq_gdump = false; + malloc_mutex_unlock(&enq_mtx); + + if (idump) + prof_idump(); + if (gdump) + prof_gdump(); +} + +#ifdef JEMALLOC_PROF_LIBUNWIND +void +prof_backtrace(prof_bt_t *bt, unsigned nignore) +{ + unw_context_t uc; + unw_cursor_t cursor; + unsigned i; + int err; + + cassert(config_prof); + assert(bt->len == 0); + assert(bt->vec != NULL); + + unw_getcontext(&uc); + unw_init_local(&cursor, &uc); + + /* Throw away (nignore+1) stack frames, if that many exist. */ + for (i = 0; i < nignore + 1; i++) { + err = unw_step(&cursor); + if (err <= 0) + return; + } + + /* + * Iterate over stack frames until there are no more, or until no space + * remains in bt. + */ + for (i = 0; i < PROF_BT_MAX; i++) { + unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); + bt->len++; + err = unw_step(&cursor); + if (err <= 0) + break; + } +} +#elif (defined(JEMALLOC_PROF_LIBGCC)) +static _Unwind_Reason_Code +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) +{ + + cassert(config_prof); + + return (_URC_NO_REASON); +} + +static _Unwind_Reason_Code +prof_unwind_callback(struct _Unwind_Context *context, void *arg) +{ + prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + + cassert(config_prof); + + if (data->nignore > 0) + data->nignore--; + else { + data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context); + data->bt->len++; + if (data->bt->len == data->max) + return (_URC_END_OF_STACK); + } + + return (_URC_NO_REASON); +} + +void +prof_backtrace(prof_bt_t *bt, unsigned nignore) +{ + prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX}; + + cassert(config_prof); + + _Unwind_Backtrace(prof_unwind_callback, &data); +} +#elif (defined(JEMALLOC_PROF_GCC)) +void +prof_backtrace(prof_bt_t *bt, unsigned nignore) +{ +#define BT_FRAME(i) \ + if ((i) < nignore + PROF_BT_MAX) { \ + void *p; \ + if (__builtin_frame_address(i) == 0) \ + return; \ + p = __builtin_return_address(i); \ + if (p == NULL) \ + return; \ + if (i >= nignore) { \ + bt->vec[(i) - nignore] = p; \ + bt->len = (i) - nignore + 1; \ + } \ + } else \ + return; + + cassert(config_prof); + assert(nignore <= 3); + + BT_FRAME(0) + BT_FRAME(1) + BT_FRAME(2) + BT_FRAME(3) + BT_FRAME(4) + BT_FRAME(5) + BT_FRAME(6) + BT_FRAME(7) + BT_FRAME(8) + BT_FRAME(9) + + BT_FRAME(10) + BT_FRAME(11) + BT_FRAME(12) + BT_FRAME(13) + BT_FRAME(14) + BT_FRAME(15) + BT_FRAME(16) + BT_FRAME(17) + BT_FRAME(18) + BT_FRAME(19) + + BT_FRAME(20) + BT_FRAME(21) + BT_FRAME(22) + BT_FRAME(23) + BT_FRAME(24) + BT_FRAME(25) + BT_FRAME(26) + BT_FRAME(27) + BT_FRAME(28) + BT_FRAME(29) + + BT_FRAME(30) + BT_FRAME(31) + BT_FRAME(32) + BT_FRAME(33) + BT_FRAME(34) + BT_FRAME(35) + BT_FRAME(36) + BT_FRAME(37) + BT_FRAME(38) + BT_FRAME(39) + + BT_FRAME(40) + BT_FRAME(41) + BT_FRAME(42) + BT_FRAME(43) + BT_FRAME(44) + BT_FRAME(45) 
+ BT_FRAME(46) + BT_FRAME(47) + BT_FRAME(48) + BT_FRAME(49) + + BT_FRAME(50) + BT_FRAME(51) + BT_FRAME(52) + BT_FRAME(53) + BT_FRAME(54) + BT_FRAME(55) + BT_FRAME(56) + BT_FRAME(57) + BT_FRAME(58) + BT_FRAME(59) + + BT_FRAME(60) + BT_FRAME(61) + BT_FRAME(62) + BT_FRAME(63) + BT_FRAME(64) + BT_FRAME(65) + BT_FRAME(66) + BT_FRAME(67) + BT_FRAME(68) + BT_FRAME(69) + + BT_FRAME(70) + BT_FRAME(71) + BT_FRAME(72) + BT_FRAME(73) + BT_FRAME(74) + BT_FRAME(75) + BT_FRAME(76) + BT_FRAME(77) + BT_FRAME(78) + BT_FRAME(79) + + BT_FRAME(80) + BT_FRAME(81) + BT_FRAME(82) + BT_FRAME(83) + BT_FRAME(84) + BT_FRAME(85) + BT_FRAME(86) + BT_FRAME(87) + BT_FRAME(88) + BT_FRAME(89) + + BT_FRAME(90) + BT_FRAME(91) + BT_FRAME(92) + BT_FRAME(93) + BT_FRAME(94) + BT_FRAME(95) + BT_FRAME(96) + BT_FRAME(97) + BT_FRAME(98) + BT_FRAME(99) + + BT_FRAME(100) + BT_FRAME(101) + BT_FRAME(102) + BT_FRAME(103) + BT_FRAME(104) + BT_FRAME(105) + BT_FRAME(106) + BT_FRAME(107) + BT_FRAME(108) + BT_FRAME(109) + + BT_FRAME(110) + BT_FRAME(111) + BT_FRAME(112) + BT_FRAME(113) + BT_FRAME(114) + BT_FRAME(115) + BT_FRAME(116) + BT_FRAME(117) + BT_FRAME(118) + BT_FRAME(119) + + BT_FRAME(120) + BT_FRAME(121) + BT_FRAME(122) + BT_FRAME(123) + BT_FRAME(124) + BT_FRAME(125) + BT_FRAME(126) + BT_FRAME(127) + + /* Extras to compensate for nignore. */ + BT_FRAME(128) + BT_FRAME(129) + BT_FRAME(130) +#undef BT_FRAME +} +#else +void +prof_backtrace(prof_bt_t *bt, unsigned nignore) +{ + + cassert(config_prof); + assert(false); +} +#endif + +prof_thr_cnt_t * +prof_lookup(prof_bt_t *bt) +{ + union { + prof_thr_cnt_t *p; + void *v; + } ret; + prof_tdata_t *prof_tdata; + + cassert(config_prof); + + prof_tdata = *prof_tdata_tsd_get(); + if (prof_tdata == NULL) { + prof_tdata = prof_tdata_init(); + if (prof_tdata == NULL) + return (NULL); + } + + if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) { + union { + prof_bt_t *p; + void *v; + } btkey; + union { + prof_ctx_t *p; + void *v; + } ctx; + bool new_ctx; + + /* + * This thread's cache lacks bt. Look for it in the global + * cache. + */ + prof_enter(); + if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) { + /* bt has never been seen before. Insert it. */ + ctx.v = imalloc(sizeof(prof_ctx_t)); + if (ctx.v == NULL) { + prof_leave(); + return (NULL); + } + btkey.p = bt_dup(bt); + if (btkey.v == NULL) { + prof_leave(); + idalloc(ctx.v); + return (NULL); + } + ctx.p->bt = btkey.p; + ctx.p->lock = prof_ctx_mutex_choose(); + memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t)); + ql_new(&ctx.p->cnts_ql); + if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) { + /* OOM. */ + prof_leave(); + idalloc(btkey.v); + idalloc(ctx.v); + return (NULL); + } + /* + * Artificially raise curobjs, in order to avoid a race + * condition with prof_ctx_merge()/prof_ctx_destroy(). + * + * No locking is necessary for ctx here because no other + * threads have had the opportunity to fetch it from + * bt2ctx yet. + */ + ctx.p->cnt_merged.curobjs++; + new_ctx = true; + } else { + /* + * Artificially raise curobjs, in order to avoid a race + * condition with prof_ctx_merge()/prof_ctx_destroy(). + */ + malloc_mutex_lock(ctx.p->lock); + ctx.p->cnt_merged.curobjs++; + malloc_mutex_unlock(ctx.p->lock); + new_ctx = false; + } + prof_leave(); + + /* Link a prof_thd_cnt_t into ctx for this thread. */ + if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) { + assert(ckh_count(&prof_tdata->bt2cnt) > 0); + /* + * Flush the least recently used cnt in order to keep + * bt2cnt from becoming too large. 
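+			 * The evicted counter's stats are folded into its ctx
+			 * via prof_ctx_merge(), and its storage is then
+			 * recycled for the new backtrace instead of allocating
+			 * a fresh prof_thr_cnt_t.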
+ */ + ret.p = ql_last(&prof_tdata->lru_ql, lru_link); + assert(ret.v != NULL); + if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, + NULL, NULL)) + assert(false); + ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); + prof_ctx_merge(ret.p->ctx, ret.p); + /* ret can now be re-used. */ + } else { + assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX); + /* Allocate and partially initialize a new cnt. */ + ret.v = imalloc(sizeof(prof_thr_cnt_t)); + if (ret.p == NULL) { + if (new_ctx) + prof_ctx_destroy(ctx.p); + return (NULL); + } + ql_elm_new(ret.p, cnts_link); + ql_elm_new(ret.p, lru_link); + } + /* Finish initializing ret. */ + ret.p->ctx = ctx.p; + ret.p->epoch = 0; + memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); + if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) { + if (new_ctx) + prof_ctx_destroy(ctx.p); + idalloc(ret.v); + return (NULL); + } + ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); + malloc_mutex_lock(ctx.p->lock); + ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link); + ctx.p->cnt_merged.curobjs--; + malloc_mutex_unlock(ctx.p->lock); + } else { + /* Move ret to the front of the LRU. */ + ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); + ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); + } + + return (ret.p); +} + +static bool +prof_flush(bool propagate_err) +{ + bool ret = false; + ssize_t err; + + cassert(config_prof); + + err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); + if (err == -1) { + if (propagate_err == false) { + malloc_write(": write() failed during heap " + "profile flush\n"); + if (opt_abort) + abort(); + } + ret = true; + } + prof_dump_buf_end = 0; + + return (ret); +} + +static bool +prof_write(bool propagate_err, const char *s) +{ + unsigned i, slen, n; + + cassert(config_prof); + + i = 0; + slen = strlen(s); + while (i < slen) { + /* Flush the buffer if it is full. */ + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) + if (prof_flush(propagate_err) && propagate_err) + return (true); + + if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { + /* Finish writing. */ + n = slen - i; + } else { + /* Write as much of s as will fit. */ + n = PROF_DUMP_BUFSIZE - prof_dump_buf_end; + } + memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n); + prof_dump_buf_end += n; + i += n; + } + + return (false); +} + +JEMALLOC_ATTR(format(printf, 2, 3)) +static bool +prof_printf(bool propagate_err, const char *format, ...) +{ + bool ret; + va_list ap; + char buf[PROF_PRINTF_BUFSIZE]; + + va_start(ap, format); + malloc_vsnprintf(buf, sizeof(buf), format, ap); + va_end(ap); + ret = prof_write(propagate_err, buf); + + return (ret); +} + +static void +prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx) +{ + prof_thr_cnt_t *thr_cnt; + prof_cnt_t tcnt; + + cassert(config_prof); + + malloc_mutex_lock(ctx->lock); + + memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t)); + ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) { + volatile unsigned *epoch = &thr_cnt->epoch; + + while (true) { + unsigned epoch0 = *epoch; + + /* Make sure epoch is even. */ + if (epoch0 & 1U) + continue; + + memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t)); + + /* Terminate if epoch didn't change while reading. */ + if (*epoch == epoch0) + break; + } + + ctx->cnt_summed.curobjs += tcnt.curobjs; + ctx->cnt_summed.curbytes += tcnt.curbytes; + if (opt_prof_accum) { + ctx->cnt_summed.accumobjs += tcnt.accumobjs; + ctx->cnt_summed.accumbytes += tcnt.accumbytes; + } + } + + if (ctx->cnt_summed.curobjs != 0) + (*leak_nctx)++; + + /* Add to cnt_all. 
*/ + cnt_all->curobjs += ctx->cnt_summed.curobjs; + cnt_all->curbytes += ctx->cnt_summed.curbytes; + if (opt_prof_accum) { + cnt_all->accumobjs += ctx->cnt_summed.accumobjs; + cnt_all->accumbytes += ctx->cnt_summed.accumbytes; + } + + malloc_mutex_unlock(ctx->lock); +} + +static void +prof_ctx_destroy(prof_ctx_t *ctx) +{ + + cassert(config_prof); + + /* + * Check that ctx is still unused by any thread cache before destroying + * it. prof_lookup() artificially raises ctx->cnt_merge.curobjs in + * order to avoid a race condition with this function, as does + * prof_ctx_merge() in order to avoid a race between the main body of + * prof_ctx_merge() and entry into this function. + */ + prof_enter(); + malloc_mutex_lock(ctx->lock); + if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) { + assert(ctx->cnt_merged.curbytes == 0); + assert(ctx->cnt_merged.accumobjs == 0); + assert(ctx->cnt_merged.accumbytes == 0); + /* Remove ctx from bt2ctx. */ + if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL)) + assert(false); + prof_leave(); + /* Destroy ctx. */ + malloc_mutex_unlock(ctx->lock); + bt_destroy(ctx->bt); + idalloc(ctx); + } else { + /* + * Compensate for increment in prof_ctx_merge() or + * prof_lookup(). + */ + ctx->cnt_merged.curobjs--; + malloc_mutex_unlock(ctx->lock); + prof_leave(); + } +} + +static void +prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt) +{ + bool destroy; + + cassert(config_prof); + + /* Merge cnt stats and detach from ctx. */ + malloc_mutex_lock(ctx->lock); + ctx->cnt_merged.curobjs += cnt->cnts.curobjs; + ctx->cnt_merged.curbytes += cnt->cnts.curbytes; + ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs; + ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes; + ql_remove(&ctx->cnts_ql, cnt, cnts_link); + if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL && + ctx->cnt_merged.curobjs == 0) { + /* + * Artificially raise ctx->cnt_merged.curobjs in order to keep + * another thread from winning the race to destroy ctx while + * this one has ctx->lock dropped. Without this, it would be + * possible for another thread to: + * + * 1) Sample an allocation associated with ctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_ctx_destroy(ctx). + * + * The result would be that ctx no longer exists by the time + * this thread accesses it in prof_ctx_destroy(). 
+ */ + ctx->cnt_merged.curobjs++; + destroy = true; + } else + destroy = false; + malloc_mutex_unlock(ctx->lock); + if (destroy) + prof_ctx_destroy(ctx); +} + +static bool +prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt) +{ + unsigned i; + + cassert(config_prof); + + if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) { + assert(ctx->cnt_summed.curbytes == 0); + assert(ctx->cnt_summed.accumobjs == 0); + assert(ctx->cnt_summed.accumbytes == 0); + return (false); + } + + if (prof_printf(propagate_err, "%"PRId64": %"PRId64 + " [%"PRIu64": %"PRIu64"] @", + ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes, + ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) + return (true); + + for (i = 0; i < bt->len; i++) { + if (prof_printf(propagate_err, " %#"PRIxPTR, + (uintptr_t)bt->vec[i])) + return (true); + } + + if (prof_write(propagate_err, "\n")) + return (true); + + return (false); +} + +static bool +prof_dump_maps(bool propagate_err) +{ + int mfd; + char filename[PATH_MAX + 1]; + + cassert(config_prof); + + malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps", + (int)getpid()); + mfd = open(filename, O_RDONLY); + if (mfd != -1) { + ssize_t nread; + + if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") && + propagate_err) + return (true); + nread = 0; + do { + prof_dump_buf_end += nread; + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + /* Make space in prof_dump_buf before read(). */ + if (prof_flush(propagate_err) && propagate_err) + return (true); + } + nread = read(mfd, &prof_dump_buf[prof_dump_buf_end], + PROF_DUMP_BUFSIZE - prof_dump_buf_end); + } while (nread > 0); + close(mfd); + } else + return (true); + + return (false); +} + +static bool +prof_dump(bool propagate_err, const char *filename, bool leakcheck) +{ + prof_cnt_t cnt_all; + size_t tabind; + union { + prof_bt_t *p; + void *v; + } bt; + union { + prof_ctx_t *p; + void *v; + } ctx; + size_t leak_nctx; + + cassert(config_prof); + + prof_enter(); + prof_dump_fd = creat(filename, 0644); + if (prof_dump_fd == -1) { + if (propagate_err == false) { + malloc_printf( + ": creat(\"%s\"), 0644) failed\n", + filename); + if (opt_abort) + abort(); + } + goto label_error; + } + + /* Merge per thread profile stats, and sum them in cnt_all. */ + memset(&cnt_all, 0, sizeof(prof_cnt_t)); + leak_nctx = 0; + for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;) + prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx); + + /* Dump profile header. */ + if (opt_lg_prof_sample == 0) { + if (prof_printf(propagate_err, + "heap profile: %"PRId64": %"PRId64 + " [%"PRIu64": %"PRIu64"] @ heapprofile\n", + cnt_all.curobjs, cnt_all.curbytes, + cnt_all.accumobjs, cnt_all.accumbytes)) + goto label_error; + } else { + if (prof_printf(propagate_err, + "heap profile: %"PRId64": %"PRId64 + " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n", + cnt_all.curobjs, cnt_all.curbytes, + cnt_all.accumobjs, cnt_all.accumbytes, + ((uint64_t)1U << opt_lg_prof_sample))) + goto label_error; + } + + /* Dump per ctx profile stats. */ + for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v) + == false;) { + if (prof_dump_ctx(propagate_err, ctx.p, bt.p)) + goto label_error; + } + + /* Dump /proc//maps if possible. 
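+	 * (prof_dump_maps() builds the path as "/proc/%d/maps" using
+	 * getpid(); if that file cannot be opened, it reports failure and
+	 * the dump is abandoned via label_error.)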
*/ + if (prof_dump_maps(propagate_err)) + goto label_error; + + if (prof_flush(propagate_err)) + goto label_error; + close(prof_dump_fd); + prof_leave(); + + if (leakcheck && cnt_all.curbytes != 0) { + malloc_printf(": Leak summary: %"PRId64" byte%s, %" + PRId64" object%s, %zu context%s\n", + cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "", + cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "", + leak_nctx, (leak_nctx != 1) ? "s" : ""); + malloc_printf( + ": Run pprof on \"%s\" for leak detail\n", + filename); + } + + return (false); +label_error: + prof_leave(); + return (true); +} + +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +static void +prof_dump_filename(char *filename, char v, int64_t vseq) +{ + + cassert(config_prof); + + if (vseq != UINT64_C(0xffffffffffffffff)) { + /* "...v.heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"PRIu64".%c%"PRId64".heap", + opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); + } else { + /* "....heap" */ + malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, + "%s.%d.%"PRIu64".%c.heap", + opt_prof_prefix, (int)getpid(), prof_dump_seq, v); + } +} + +static void +prof_fdump(void) +{ + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + + if (prof_booted == false) + return; + + if (opt_prof_prefix[0] != '\0') { + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff)); + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(false, filename, opt_prof_leak); + } +} + +void +prof_idump(void) +{ + char filename[PATH_MAX + 1]; + + cassert(config_prof); + + if (prof_booted == false) + return; + malloc_mutex_lock(&enq_mtx); + if (enq) { + enq_idump = true; + malloc_mutex_unlock(&enq_mtx); + return; + } + malloc_mutex_unlock(&enq_mtx); + + if (opt_prof_prefix[0] != '\0') { + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'i', prof_dump_iseq); + prof_dump_iseq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(false, filename, false); + } +} + +bool +prof_mdump(const char *filename) +{ + char filename_buf[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + + if (opt_prof == false || prof_booted == false) + return (true); + + if (filename == NULL) { + /* No filename specified, so automatically generate one. 
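+		 * Generated names have the form
+		 * "<prefix>.<pid>.<seq>.m<mseq>.heap", as produced by
+		 * prof_dump_filename() with v == 'm'.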
*/ + if (opt_prof_prefix[0] == '\0') + return (true); + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename_buf, 'm', prof_dump_mseq); + prof_dump_mseq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + filename = filename_buf; + } + return (prof_dump(true, filename, false)); +} + +void +prof_gdump(void) +{ + char filename[DUMP_FILENAME_BUFSIZE]; + + cassert(config_prof); + + if (prof_booted == false) + return; + malloc_mutex_lock(&enq_mtx); + if (enq) { + enq_gdump = true; + malloc_mutex_unlock(&enq_mtx); + return; + } + malloc_mutex_unlock(&enq_mtx); + + if (opt_prof_prefix[0] != '\0') { + malloc_mutex_lock(&prof_dump_seq_mtx); + prof_dump_filename(filename, 'u', prof_dump_useq); + prof_dump_useq++; + malloc_mutex_unlock(&prof_dump_seq_mtx); + prof_dump(false, filename, false); + } +} + +static void +prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2) +{ + size_t ret1, ret2; + uint64_t h; + prof_bt_t *bt = (prof_bt_t *)key; + + cassert(config_prof); + assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64)); + assert(hash1 != NULL); + assert(hash2 != NULL); + + h = hash(bt->vec, bt->len * sizeof(void *), + UINT64_C(0x94122f335b332aea)); + if (minbits <= 32) { + /* + * Avoid doing multiple hashes, since a single hash provides + * enough bits. + */ + ret1 = h & ZU(0xffffffffU); + ret2 = h >> 32; + } else { + ret1 = h; + ret2 = hash(bt->vec, bt->len * sizeof(void *), + UINT64_C(0x8432a476666bbc13)); + } + + *hash1 = ret1; + *hash2 = ret2; +} + +static bool +prof_bt_keycomp(const void *k1, const void *k2) +{ + const prof_bt_t *bt1 = (prof_bt_t *)k1; + const prof_bt_t *bt2 = (prof_bt_t *)k2; + + cassert(config_prof); + + if (bt1->len != bt2->len) + return (false); + return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); +} + +static malloc_mutex_t * +prof_ctx_mutex_choose(void) +{ + unsigned nctxs = atomic_add_u(&cum_ctxs, 1); + + return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]); +} + +prof_tdata_t * +prof_tdata_init(void) +{ + prof_tdata_t *prof_tdata; + + cassert(config_prof); + + /* Initialize an empty cache for this thread. */ + prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t)); + if (prof_tdata == NULL) + return (NULL); + + if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS, + prof_bt_hash, prof_bt_keycomp)) { + idalloc(prof_tdata); + return (NULL); + } + ql_new(&prof_tdata->lru_ql); + + prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX); + if (prof_tdata->vec == NULL) { + ckh_delete(&prof_tdata->bt2cnt); + idalloc(prof_tdata); + return (NULL); + } + + prof_tdata->prng_state = 0; + prof_tdata->threshold = 0; + prof_tdata->accum = 0; + + prof_tdata_tsd_set(&prof_tdata); + + return (prof_tdata); +} + +void +prof_tdata_cleanup(void *arg) +{ + prof_thr_cnt_t *cnt; + prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg; + + cassert(config_prof); + + /* + * Delete the hash table. All of its contents can still be iterated + * over via the LRU. + */ + ckh_delete(&prof_tdata->bt2cnt); + + /* Iteratively merge cnt's into the global stats and delete them. 
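+	 * prof_ctx_merge() may in turn destroy a ctx if it becomes unused
+	 * (and opt_prof_accum is disabled) once this thread's counters are
+	 * folded in.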
*/ + while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) { + ql_remove(&prof_tdata->lru_ql, cnt, lru_link); + prof_ctx_merge(cnt->ctx, cnt); + idalloc(cnt); + } + + idalloc(prof_tdata->vec); + + idalloc(prof_tdata); + prof_tdata = NULL; + prof_tdata_tsd_set(&prof_tdata); +} + +void +prof_boot0(void) +{ + + cassert(config_prof); + + memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, + sizeof(PROF_PREFIX_DEFAULT)); +} + +void +prof_boot1(void) +{ + + cassert(config_prof); + + /* + * opt_prof and prof_promote must be in their final state before any + * arenas are initialized, so this function must be executed early. + */ + + if (opt_prof_leak && opt_prof == false) { + /* + * Enable opt_prof, but in such a way that profiles are never + * automatically dumped. + */ + opt_prof = true; + opt_prof_gdump = false; + prof_interval = 0; + } else if (opt_prof) { + if (opt_lg_prof_interval >= 0) { + prof_interval = (((uint64_t)1U) << + opt_lg_prof_interval); + } else + prof_interval = 0; + } + + prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE); +} + +bool +prof_boot2(void) +{ + + cassert(config_prof); + + if (opt_prof) { + unsigned i; + + if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) + return (true); + if (malloc_mutex_init(&bt2ctx_mtx)) + return (true); + if (prof_tdata_tsd_boot()) { + malloc_write( + ": Error in pthread_key_create()\n"); + abort(); + } + + if (malloc_mutex_init(&prof_dump_seq_mtx)) + return (true); + + if (malloc_mutex_init(&enq_mtx)) + return (true); + enq = false; + enq_idump = false; + enq_gdump = false; + + if (atexit(prof_fdump) != 0) { + malloc_write(": Error in atexit()\n"); + if (opt_abort) + abort(); + } + + ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * + sizeof(malloc_mutex_t)); + if (ctx_locks == NULL) + return (true); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + if (malloc_mutex_init(&ctx_locks[i])) + return (true); + } + } + +#ifdef JEMALLOC_PROF_LIBGCC + /* + * Cause the backtracing machinery to allocate its internal state + * before enabling profiling. + */ + _Unwind_Backtrace(prof_unwind_init_callback, NULL); +#endif + + prof_booted = true; + + return (false); +} + +/******************************************************************************/ Property changes on: contrib/jemalloc/src/prof.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/quarantine.c =================================================================== --- contrib/jemalloc/src/quarantine.c (revision 0) +++ contrib/jemalloc/src/quarantine.c (working copy) @@ -0,0 +1,163 @@ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +typedef struct quarantine_s quarantine_t; + +struct quarantine_s { + size_t curbytes; + size_t curobjs; + size_t first; +#define LG_MAXOBJS_INIT 10 + size_t lg_maxobjs; + void *objs[1]; /* Dynamically sized ring buffer. */ +}; + +static void quarantine_cleanup(void *arg); + +malloc_tsd_data(static, quarantine, quarantine_t *, NULL) +malloc_tsd_funcs(JEMALLOC_INLINE, quarantine, quarantine_t *, NULL, + quarantine_cleanup) + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. 
*/ + +static quarantine_t *quarantine_init(size_t lg_maxobjs); +static quarantine_t *quarantine_grow(quarantine_t *quarantine); +static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound); + +/******************************************************************************/ + +static quarantine_t * +quarantine_init(size_t lg_maxobjs) +{ + quarantine_t *quarantine; + + quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) + + ((ZU(1) << lg_maxobjs) * sizeof(void *))); + if (quarantine == NULL) + return (NULL); + quarantine->curbytes = 0; + quarantine->curobjs = 0; + quarantine->first = 0; + quarantine->lg_maxobjs = lg_maxobjs; + + quarantine_tsd_set(&quarantine); + + return (quarantine); +} + +static quarantine_t * +quarantine_grow(quarantine_t *quarantine) +{ + quarantine_t *ret; + + ret = quarantine_init(quarantine->lg_maxobjs + 1); + if (ret == NULL) + return (quarantine); + + ret->curbytes = quarantine->curbytes; + if (quarantine->first + quarantine->curobjs < (ZU(1) << + quarantine->lg_maxobjs)) { + /* objs ring buffer data are contiguous. */ + memcpy(ret->objs, &quarantine->objs[quarantine->first], + quarantine->curobjs * sizeof(void *)); + ret->curobjs = quarantine->curobjs; + } else { + /* objs ring buffer data wrap around. */ + size_t ncopy = (ZU(1) << quarantine->lg_maxobjs) - + quarantine->first; + memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy * + sizeof(void *)); + ret->curobjs = ncopy; + if (quarantine->curobjs != 0) { + memcpy(&ret->objs[ret->curobjs], quarantine->objs, + quarantine->curobjs - ncopy); + } + } + + return (ret); +} + +static void +quarantine_drain(quarantine_t *quarantine, size_t upper_bound) +{ + + while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) { + void *ptr = quarantine->objs[quarantine->first]; + size_t usize = isalloc(ptr, config_prof); + idalloc(ptr); + quarantine->curbytes -= usize; + quarantine->curobjs--; + quarantine->first = (quarantine->first + 1) & ((ZU(1) << + quarantine->lg_maxobjs) - 1); + } +} + +void +quarantine(void *ptr) +{ + quarantine_t *quarantine; + size_t usize = isalloc(ptr, config_prof); + + assert(config_fill); + assert(opt_quarantine); + + quarantine = *quarantine_tsd_get(); + if (quarantine == NULL && (quarantine = + quarantine_init(LG_MAXOBJS_INIT)) == NULL) { + idalloc(ptr); + return; + } + /* + * Drain one or more objects if the quarantine size limit would be + * exceeded by appending ptr. + */ + if (quarantine->curbytes + usize > opt_quarantine) { + size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine + - usize : 0; + quarantine_drain(quarantine, upper_bound); + } + /* Grow the quarantine ring buffer if it's full. */ + if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) + quarantine = quarantine_grow(quarantine); + /* quarantine_grow() must free a slot if it fails to grow. */ + assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); + /* Append ptr if its size doesn't exceed the quarantine size. 
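+	 * When opt_junk is also enabled, the quarantined region is filled
+	 * with 0x5a so that use-after-free reads are easy to recognize; an
+	 * object larger than the entire quarantine is freed immediately
+	 * instead.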
*/ + if (quarantine->curbytes + usize <= opt_quarantine) { + size_t offset = (quarantine->first + quarantine->curobjs) & + ((ZU(1) << quarantine->lg_maxobjs) - 1); + quarantine->objs[offset] = ptr; + quarantine->curbytes += usize; + quarantine->curobjs++; + if (opt_junk) + memset(ptr, 0x5a, usize); + } else { + assert(quarantine->curbytes == 0); + idalloc(ptr); + } +} + +static void +quarantine_cleanup(void *arg) +{ + quarantine_t *quarantine = *(quarantine_t **)arg; + + if (quarantine != NULL) { + quarantine_drain(quarantine, 0); + idalloc(quarantine); + } +} + +bool +quarantine_boot(void) +{ + + assert(config_fill); + + if (quarantine_tsd_boot()) + return (true); + + return (false); +} Property changes on: contrib/jemalloc/src/quarantine.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/chunk.c =================================================================== --- contrib/jemalloc/src/chunk.c (revision 0) +++ contrib/jemalloc/src/chunk.c (working copy) @@ -0,0 +1,304 @@ +#define JEMALLOC_CHUNK_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +size_t opt_lg_chunk = LG_CHUNK_DEFAULT; + +malloc_mutex_t chunks_mtx; +chunk_stats_t stats_chunks; + +/* + * Trees of chunks that were previously allocated (trees differ only in node + * ordering). These are used when allocating chunks, in an attempt to re-use + * address space. Depending on function, different tree orderings are needed, + * which is why there are two trees with the same contents. + */ +static extent_tree_t chunks_szad; +static extent_tree_t chunks_ad; + +rtree_t *chunks_rtree; + +/* Various chunk-related settings. */ +size_t chunksize; +size_t chunksize_mask; /* (chunksize - 1). */ +size_t chunk_npages; +size_t map_bias; +size_t arena_maxclass; /* Max size class for arenas. */ + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static void *chunk_recycle(size_t size, size_t alignment, bool *zero); +static void chunk_record(void *chunk, size_t size); + +/******************************************************************************/ + +static void * +chunk_recycle(size_t size, size_t alignment, bool *zero) +{ + void *ret; + extent_node_t *node; + extent_node_t key; + size_t alloc_size, leadsize, trailsize; + + alloc_size = size + alignment - chunksize; + /* Beware size_t wrap-around. */ + if (alloc_size < size) + return (NULL); + key.addr = NULL; + key.size = alloc_size; + malloc_mutex_lock(&chunks_mtx); + node = extent_tree_szad_nsearch(&chunks_szad, &key); + if (node == NULL) { + malloc_mutex_unlock(&chunks_mtx); + return (NULL); + } + leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) - + (uintptr_t)node->addr; + assert(alloc_size >= leadsize + size); + trailsize = alloc_size - leadsize - size; + ret = (void *)((uintptr_t)node->addr + leadsize); + /* Remove node from the tree. */ + extent_tree_szad_remove(&chunks_szad, node); + extent_tree_ad_remove(&chunks_ad, node); + if (leadsize != 0) { + /* Insert the leading space as a smaller chunk. 
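+		 * The node that tracked the original extent is reused here,
+		 * so no allocation (and thus no dropping of chunks_mtx) is
+		 * needed for the leading run; the trailing run below may
+		 * require a fresh node instead.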
*/ + node->size = leadsize; + extent_tree_szad_insert(&chunks_szad, node); + extent_tree_ad_insert(&chunks_ad, node); + node = NULL; + } + if (trailsize != 0) { + /* Insert the trailing space as a smaller chunk. */ + if (node == NULL) { + /* + * An additional node is required, but + * base_node_alloc() can cause a new base chunk to be + * allocated. Drop chunks_mtx in order to avoid + * deadlock, and if node allocation fails, deallocate + * the result before returning an error. + */ + malloc_mutex_unlock(&chunks_mtx); + node = base_node_alloc(); + if (node == NULL) { + chunk_dealloc(ret, size, true); + return (NULL); + } + malloc_mutex_lock(&chunks_mtx); + } + node->addr = (void *)((uintptr_t)(ret) + size); + node->size = trailsize; + extent_tree_szad_insert(&chunks_szad, node); + extent_tree_ad_insert(&chunks_ad, node); + node = NULL; + } + malloc_mutex_unlock(&chunks_mtx); + + if (node != NULL) + base_node_dealloc(node); +#ifdef JEMALLOC_PURGE_MADVISE_FREE + if (*zero) { + VALGRIND_MAKE_MEM_UNDEFINED(ret, size); + memset(ret, 0, size); + } +#endif + return (ret); +} + +/* + * If the caller specifies (*zero == false), it is still possible to receive + * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc() + * takes advantage of this to avoid demanding zeroed chunks, but taking + * advantage of them if they are returned. + */ +void * +chunk_alloc(size_t size, size_t alignment, bool base, bool *zero) +{ + void *ret; + + assert(size != 0); + assert((size & chunksize_mask) == 0); + assert((alignment & chunksize_mask) == 0); + + ret = chunk_recycle(size, alignment, zero); + if (ret != NULL) + goto label_return; + if (config_dss) { + ret = chunk_alloc_dss(size, alignment, zero); + if (ret != NULL) + goto label_return; + } + ret = chunk_alloc_mmap(size, alignment); + if (ret != NULL) { + *zero = true; + goto label_return; + } + + /* All strategies for allocation failed. */ + ret = NULL; +label_return: + if (config_ivsalloc && base == false && ret != NULL) { + if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) { + chunk_dealloc(ret, size, true); + return (NULL); + } + } + if ((config_stats || config_prof) && ret != NULL) { + bool gdump; + malloc_mutex_lock(&chunks_mtx); + if (config_stats) + stats_chunks.nchunks += (size / chunksize); + stats_chunks.curchunks += (size / chunksize); + if (stats_chunks.curchunks > stats_chunks.highchunks) { + stats_chunks.highchunks = stats_chunks.curchunks; + if (config_prof) + gdump = true; + } else if (config_prof) + gdump = false; + malloc_mutex_unlock(&chunks_mtx); + if (config_prof && opt_prof && opt_prof_gdump && gdump) + prof_gdump(); + } + + assert(CHUNK_ADDR2BASE(ret) == ret); + return (ret); +} + +static void +chunk_record(void *chunk, size_t size) +{ + extent_node_t *xnode, *node, *prev, key; + + madvise(chunk, size, JEMALLOC_MADV_PURGE); + + xnode = NULL; + malloc_mutex_lock(&chunks_mtx); + while (true) { + key.addr = (void *)((uintptr_t)chunk + size); + node = extent_tree_ad_nsearch(&chunks_ad, &key); + /* Try to coalesce forward. */ + if (node != NULL && node->addr == key.addr) { + /* + * Coalesce chunk with the following address range. + * This does not change the position within chunks_ad, + * so only remove/insert from/into chunks_szad. 
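+			 * (chunks_szad is keyed primarily on size, which grows
+			 * here, hence the remove/insert pair; the
+			 * address-ordered position in chunks_ad is unchanged
+			 * because no other extent can lie between chunk and
+			 * the node's old start address.)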
+ */ + extent_tree_szad_remove(&chunks_szad, node); + node->addr = chunk; + node->size += size; + extent_tree_szad_insert(&chunks_szad, node); + break; + } else if (xnode == NULL) { + /* + * It is possible that base_node_alloc() will cause a + * new base chunk to be allocated, so take care not to + * deadlock on chunks_mtx, and recover if another thread + * deallocates an adjacent chunk while this one is busy + * allocating xnode. + */ + malloc_mutex_unlock(&chunks_mtx); + xnode = base_node_alloc(); + if (xnode == NULL) + return; + malloc_mutex_lock(&chunks_mtx); + } else { + /* Coalescing forward failed, so insert a new node. */ + node = xnode; + xnode = NULL; + node->addr = chunk; + node->size = size; + extent_tree_ad_insert(&chunks_ad, node); + extent_tree_szad_insert(&chunks_szad, node); + break; + } + } + /* Discard xnode if it ended up unused due to a race. */ + if (xnode != NULL) + base_node_dealloc(xnode); + + /* Try to coalesce backward. */ + prev = extent_tree_ad_prev(&chunks_ad, node); + if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == + chunk) { + /* + * Coalesce chunk with the previous address range. This does + * not change the position within chunks_ad, so only + * remove/insert node from/into chunks_szad. + */ + extent_tree_szad_remove(&chunks_szad, prev); + extent_tree_ad_remove(&chunks_ad, prev); + + extent_tree_szad_remove(&chunks_szad, node); + node->addr = prev->addr; + node->size += prev->size; + extent_tree_szad_insert(&chunks_szad, node); + + base_node_dealloc(prev); + } + malloc_mutex_unlock(&chunks_mtx); +} + +void +chunk_dealloc(void *chunk, size_t size, bool unmap) +{ + + assert(chunk != NULL); + assert(CHUNK_ADDR2BASE(chunk) == chunk); + assert(size != 0); + assert((size & chunksize_mask) == 0); + + if (config_ivsalloc) + rtree_set(chunks_rtree, (uintptr_t)chunk, NULL); + if (config_stats || config_prof) { + malloc_mutex_lock(&chunks_mtx); + stats_chunks.curchunks -= (size / chunksize); + malloc_mutex_unlock(&chunks_mtx); + } + + if (unmap) { + if (chunk_dealloc_mmap(chunk, size) == false) + return; + chunk_record(chunk, size); + } +} + +bool +chunk_boot0(void) +{ + + /* Set variables according to the value of opt_lg_chunk. 
*/ + chunksize = (ZU(1) << opt_lg_chunk); + assert(chunksize >= PAGE); + chunksize_mask = chunksize - 1; + chunk_npages = (chunksize >> LG_PAGE); + + if (config_stats || config_prof) { + if (malloc_mutex_init(&chunks_mtx)) + return (true); + memset(&stats_chunks, 0, sizeof(chunk_stats_t)); + } + if (config_dss && chunk_dss_boot()) + return (true); + extent_tree_szad_new(&chunks_szad); + extent_tree_ad_new(&chunks_ad); + if (config_ivsalloc) { + chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - + opt_lg_chunk); + if (chunks_rtree == NULL) + return (true); + } + + return (false); +} + +bool +chunk_boot1(void) +{ + + if (chunk_mmap_boot()) + return (true); + + return (false); +} Property changes on: contrib/jemalloc/src/chunk.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/atomic.c =================================================================== --- contrib/jemalloc/src/atomic.c (revision 0) +++ contrib/jemalloc/src/atomic.c (working copy) @@ -0,0 +1,2 @@ +#define JEMALLOC_ATOMIC_C_ +#include "jemalloc/internal/jemalloc_internal.h" Property changes on: contrib/jemalloc/src/atomic.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/bitmap.c =================================================================== --- contrib/jemalloc/src/bitmap.c (revision 0) +++ contrib/jemalloc/src/bitmap.c (working copy) @@ -0,0 +1,90 @@ +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Function prototypes for non-inline static functions. */ + +static size_t bits2groups(size_t nbits); + +/******************************************************************************/ + +static size_t +bits2groups(size_t nbits) +{ + + return ((nbits >> LG_BITMAP_GROUP_NBITS) + + !!(nbits & BITMAP_GROUP_NBITS_MASK)); +} + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) +{ + unsigned i; + size_t group_count; + + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + /* + * Compute the number of groups necessary to store nbits bits, and + * progressively work upward through the levels until reaching a level + * that requires only one group. + */ + binfo->levels[0].group_offset = 0; + group_count = bits2groups(nbits); + for (i = 1; group_count > 1; i++) { + assert(i < BITMAP_MAX_LEVELS); + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + group_count = bits2groups(group_count); + } + binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + + group_count; + binfo->nlevels = i; + binfo->nbits = nbits; +} + +size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) +{ + + return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); +} + +size_t +bitmap_size(size_t nbits) +{ + bitmap_info_t binfo; + + bitmap_info_init(&binfo, nbits); + return (bitmap_info_ngroups(&binfo)); +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) +{ + size_t extra; + unsigned i; + + /* + * Bits are actually inverted with regard to the external bitmap + * interface, so the bitmap starts out with all 1 bits, except for + * trailing unused bits (if any). 
Note that each group uses bit 0 to + * correspond to the first logical bit in the group, so extra bits + * are the most significant bits of the last group. + */ + memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << + LG_SIZEOF_BITMAP); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[1].group_offset - 1] >>= extra; + for (i = 1; i < binfo->nlevels; i++) { + size_t group_count = binfo->levels[i].group_offset - + binfo->levels[i-1].group_offset; + extra = (BITMAP_GROUP_NBITS - (group_count & + BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) + bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } +} Property changes on: contrib/jemalloc/src/bitmap.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/src/tsd.c =================================================================== --- contrib/jemalloc/src/tsd.c (revision 0) +++ contrib/jemalloc/src/tsd.c (working copy) @@ -0,0 +1,72 @@ +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_internal.h" + +/******************************************************************************/ +/* Data. */ + +static unsigned ncleanups; +static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; + +/******************************************************************************/ + +void * +malloc_tsd_malloc(size_t size) +{ + + /* Avoid choose_arena() in order to dodge bootstrapping issues. */ + return arena_malloc(arenas[0], size, false, false); +} + +void +malloc_tsd_dalloc(void *wrapper) +{ + + idalloc(wrapper); +} + +void +malloc_tsd_no_cleanup(void *arg) +{ + + not_reached(); +} + +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +void +_malloc_thread_cleanup(void) +{ + bool pending[ncleanups], again; + unsigned i; + + for (i = 0; i < ncleanups; i++) + pending[i] = true; + + do { + again = false; + for (i = 0; i < ncleanups; i++) { + if (pending[i]) { + pending[i] = cleanups[i].f(cleanups[i].arg); + if (pending[i]) + again = true; + } + } + } while (again); +} +#endif + +void +malloc_tsd_cleanup_register(bool (*f)(void *), void *arg) +{ + + assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); + cleanups[ncleanups].f = f; + cleanups[ncleanups].arg = arg; + ncleanups++; +} + +void +malloc_tsd_boot(void) +{ + + ncleanups = 0; +} Property changes on: contrib/jemalloc/src/tsd.c ___________________________________________________________________ Added: svn:mime-type ## -0,0 +1 ## +text/plain Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H Added: svn:eol-style ## -0,0 +1 ## +native Index: contrib/jemalloc/FREEBSD-upgrade =================================================================== --- contrib/jemalloc/FREEBSD-upgrade (revision 0) +++ contrib/jemalloc/FREEBSD-upgrade (working copy) @@ -0,0 +1,122 @@ +#!/bin/sh +# $FreeBSD$ +# +# Usage: cd /usr/src/contrib/jemalloc +# ./FREEBSD-upgrade [args] +# +# At least the following ports are required when importing jemalloc: +# - devel/autoconf +# - devel/git +# - devel/gmake +# - textproc/docbook-xsl +# +# The normal workflow for importing a new release is: +# +# cd /usr/src/contrib/jemalloc +# +# Merge local changes that were made since the previous import: +# +# ./FREEBSD-upgrade merge-changes +# ./FREEBSD-upgrade rediff +# +# Extract latest jemalloc release. 
+# +# ./FREEBSD-upgrade extract +# +# Fix patch conflicts as necessary, then regenerate diffs to update line +# offsets: +# +# ./FREEBSD-upgrade rediff +# ./FREEBSD-upgrade extract +# +# Do multiple buildworld/installworld rounds. If problems arise and patches +# are needed, edit the code in ${work} as necessary, then: +# +# ./FREEBSD-upgrade rediff +# ./FREEBSD-upgrade extract +# +# The rediff/extract order is important because rediff saves the local +# changes, then extract blows away the work tree and re-creates it with the +# diffs applied. +# +# Finally, to clean up: +# +# ./FREEBSD-upgrade clean + +set -e + +if [ ! -x "FREEBSD-upgrade" ] ; then + echo "Run from within src/contrib/jemalloc/" >&2 + exit 1 +fi + +src=`pwd` +workname="jemalloc.git" +work="${src}/../${workname}" # merge-changes expects ${workname} in "..". +changes="${src}/FREEBSD-changes" + +do_extract() { + local rev=$1 + # Clone. + rm -rf ${work} + git clone git://canonware.com/jemalloc.git ${work} + ( + cd ${work} + if [ "x${rev}" != "x" ] ; then + # Use optional rev argument to check out a revision other than HEAD on + # master. + git checkout ${rev} + fi + # Apply diffs before generating files. + patch -p1 < "${src}/FREEBSD-diffs" + find . -name '*.orig' -delete + # Generate various files. + ./autogen.sh --enable-cc-silence --enable-dss --enable-xmalloc \ + --enable-utrace --with-xslroot=/usr/local/share/xsl/docbook + gmake dist + ) +} + +do_diff() { + (cd ${work}; git add -A; git diff --cached) > FREEBSD-diffs +} + +command=$1 +shift +case "${command}" in + merge-changes) # Merge local changes that were made since the previous import. + rev=`cat VERSION |tr 'g' ' ' |awk '{print $2}'` + # Extract code corresponding to most recent import. + do_extract ${rev} + # Compute local differences to the upstream+patches and apply them. + ( + cd .. + diff -ru -X ${src}/FREEBSD-Xlist ${workname} jemalloc > ${changes} || true + ) + ( + cd ${work} + patch -p1 < ${changes} + find . -name '*.orig' -delete + ) + # Update diff. + do_diff + ;; + extract) # Extract upstream sources, apply patches, copy to contrib/jemalloc. + rev=$1 + do_extract ${rev} + # Delete existing files so that cruft doesn't silently remain. + rm -rf COPYING VERSION doc include src + # Copy files over. + tar cf - -C ${work} -X FREEBSD-Xlist . |tar xvf - + ;; + rediff) # Regenerate diffs based on working tree. + do_diff + ;; + clean) # Remove working tree and temporary files. + rm -rf ${work} ${changes} + ;; + *) + echo "Unsupported command: \"${command}\"" >&2 + exit 1 + ;; +esac Property changes on: contrib/jemalloc/FREEBSD-upgrade ___________________________________________________________________ Added: svn:executable ## -0,0 +1 ## +* Index: contrib/jemalloc/FREEBSD-diffs =================================================================== --- contrib/jemalloc/FREEBSD-diffs (revision 0) +++ contrib/jemalloc/FREEBSD-diffs (working copy) @@ -0,0 +1,247 @@ +diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in +index ee60c98..9593ea7 100644 +--- a/doc/jemalloc.xml.in ++++ b/doc/jemalloc.xml.in +@@ -51,12 +51,23 @@ + This manual describes jemalloc @jemalloc_version@. More information + can be found at the jemalloc website. ++ ++ The following configuration options are enabled in libc's built-in ++ jemalloc: , ++ , , ++ , , ++ , , ++ , and . ++ Additionally, is enabled in development ++ versions of FreeBSD (controlled by the ++ MALLOC_PRODUCTION make ++ variable). 
+ + + SYNOPSIS + + #include <stdlib.h> +-#include <jemalloc/jemalloc.h> ++#include <malloc_np.h> + + Standard API + +@@ -2070,4 +2081,16 @@ malloc_conf = "lg_chunk:24";]]> + The posix_memalign function conforms + to IEEE Std 1003.1-2001 (“POSIX.1”). + ++ ++ HISTORY ++ The malloc_usable_size and ++ posix_memalign functions first appeared in ++ FreeBSD 7.0. ++ ++ The aligned_alloc, ++ malloc_stats_print, ++ mallctl*, and ++ *allocm functions first appeared in ++ FreeBSD 10.0. ++ + +diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in +index aa21aa5..e0f5fed 100644 +--- a/include/jemalloc/internal/jemalloc_internal.h.in ++++ b/include/jemalloc/internal/jemalloc_internal.h.in +@@ -1,3 +1,6 @@ ++#include "libc_private.h" ++#include "namespace.h" ++ + #include + #include + #include +@@ -33,6 +36,9 @@ + #include + #include + ++#include "un-namespace.h" ++#include "libc_private.h" ++ + #define JEMALLOC_NO_DEMANGLE + #include "../jemalloc@install_suffix@.h" + +diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h +index c46feee..d7133f4 100644 +--- a/include/jemalloc/internal/mutex.h ++++ b/include/jemalloc/internal/mutex.h +@@ -39,8 +39,6 @@ struct malloc_mutex_s { + + #ifdef JEMALLOC_LAZY_LOCK + extern bool isthreaded; +-#else +-# define isthreaded true + #endif + + bool malloc_mutex_init(malloc_mutex_t *mutex); +diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in +index f0581db..f26d8bc 100644 +--- a/include/jemalloc/jemalloc.h.in ++++ b/include/jemalloc/jemalloc.h.in +@@ -15,6 +15,7 @@ extern "C" { + #define JEMALLOC_VERSION_GID "@jemalloc_version_gid@" + + #include "jemalloc_defs@install_suffix@.h" ++#include "jemalloc_FreeBSD.h" + + #ifdef JEMALLOC_EXPERIMENTAL + #define ALLOCM_LG_ALIGN(la) (la) +diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h +new file mode 100644 +index 0000000..2c5797f +--- /dev/null ++++ b/include/jemalloc/jemalloc_FreeBSD.h +@@ -0,0 +1,76 @@ ++/* ++ * Override settings that were generated in jemalloc_defs.h as necessary. ++ */ ++ ++#undef JEMALLOC_OVERRIDE_VALLOC ++ ++#ifndef MALLOC_PRODUCTION ++#define JEMALLOC_DEBUG ++#endif ++ ++/* ++ * The following are architecture-dependent, so conditionally define them for ++ * each supported architecture. ++ */ ++#undef CPU_SPINWAIT ++#undef JEMALLOC_TLS_MODEL ++#undef STATIC_PAGE_SHIFT ++#undef LG_SIZEOF_PTR ++#undef LG_SIZEOF_INT ++#undef LG_SIZEOF_LONG ++#undef LG_SIZEOF_INTMAX_T ++ ++#ifdef __i386__ ++# define LG_SIZEOF_PTR 2 ++# define CPU_SPINWAIT __asm__ volatile("pause") ++# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) ++#endif ++#ifdef __ia64__ ++# define LG_SIZEOF_PTR 3 ++#endif ++#ifdef __sparc64__ ++# define LG_SIZEOF_PTR 3 ++# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) ++#endif ++#ifdef __amd64__ ++# define LG_SIZEOF_PTR 3 ++# define CPU_SPINWAIT __asm__ volatile("pause") ++# define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec"))) ++#endif ++#ifdef __arm__ ++# define LG_SIZEOF_PTR 2 ++#endif ++#ifdef __mips__ ++# define LG_SIZEOF_PTR 2 ++#endif ++#ifdef __powerpc64__ ++# define LG_SIZEOF_PTR 3 ++#elif defined(__powerpc__) ++# define LG_SIZEOF_PTR 2 ++#endif ++ ++#ifndef JEMALLOC_TLS_MODEL ++# define JEMALLOC_TLS_MODEL /* Default. */ ++#endif ++#ifdef __clang__ ++# undef JEMALLOC_TLS_MODEL ++# define JEMALLOC_TLS_MODEL /* clang does not support tls_model yet. 
*/ ++#endif ++ ++#define STATIC_PAGE_SHIFT PAGE_SHIFT ++#define LG_SIZEOF_INT 2 ++#define LG_SIZEOF_LONG LG_SIZEOF_PTR ++#define LG_SIZEOF_INTMAX_T 3 ++ ++/* Disable lazy-lock machinery, mangle isthreaded, and adjust its type. */ ++#undef JEMALLOC_LAZY_LOCK ++extern int __isthreaded; ++#define isthreaded ((bool)__isthreaded) ++ ++/* Mangle. */ ++#define open _open ++#define read _read ++#define write _write ++#define close _close ++#define pthread_mutex_lock _pthread_mutex_lock ++#define pthread_mutex_unlock _pthread_mutex_unlock +diff --git a/src/jemalloc.c b/src/jemalloc.c +index 0decd8a..73fad29 100644 +--- a/src/jemalloc.c ++++ b/src/jemalloc.c +@@ -8,6 +8,9 @@ malloc_tsd_data(, arenas, arena_t *, NULL) + malloc_tsd_data(, thread_allocated, thread_allocated_t, + THREAD_ALLOCATED_INITIALIZER) + ++const char *__malloc_options_1_0; ++__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); ++ + /* Runtime configuration options. */ + const char *je_malloc_conf JEMALLOC_ATTR(visibility("default")); + #ifdef JEMALLOC_DEBUG +@@ -401,7 +404,8 @@ malloc_conf_init(void) + #endif + ; + +- if ((opts = getenv(envname)) != NULL) { ++ if (issetugid() == 0 && (opts = getenv(envname)) != ++ NULL) { + /* + * Do nothing; opts is already initialized to + * the value of the MALLOC_CONF environment +diff --git a/src/mutex.c b/src/mutex.c +index 4b8ce57..7be5fc9 100644 +--- a/src/mutex.c ++++ b/src/mutex.c +@@ -63,6 +63,17 @@ pthread_create(pthread_t *__restrict thread, + #ifdef JEMALLOC_MUTEX_INIT_CB + int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, + void *(calloc_cb)(size_t, size_t)); ++ ++__weak_reference(_pthread_mutex_init_calloc_cb_stub, ++ _pthread_mutex_init_calloc_cb); ++ ++int ++_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex, ++ void *(calloc_cb)(size_t, size_t)) ++{ ++ ++ return (0); ++} + #endif + + bool +diff --git a/src/util.c b/src/util.c +index 2aab61f..8b05042 100644 +--- a/src/util.c ++++ b/src/util.c +@@ -60,6 +60,22 @@ wrtmessage(void *cbopaque, const char *s) + void (*je_malloc_message)(void *, const char *s) + JEMALLOC_ATTR(visibility("default")) = wrtmessage; + ++JEMALLOC_CATTR(visibility("hidden"), static) ++void ++wrtmessage_1_0(const char *s1, const char *s2, const char *s3, ++ const char *s4) ++{ ++ ++ wrtmessage(NULL, s1); ++ wrtmessage(NULL, s2); ++ wrtmessage(NULL, s3); ++ wrtmessage(NULL, s4); ++} ++ ++void (*__malloc_message_1_0)(const char *s1, const char *s2, const char *s3, ++ const char *s4) = wrtmessage_1_0; ++__sym_compat(_malloc_message, __malloc_message_1_0, FBSD_1.0); ++ + /* + * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so + * provide a wrapper. 
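The util.c hunk above bridges the old four-string _malloc_message hook to jemalloc's single-string callback. For reference, a minimal sketch (not part of the patch) of driving the new-style interface from an application, assuming the malloc_stats_print() prototype and the <malloc_np.h> header shown in the manual page below:

#include <stdio.h>
#include <malloc_np.h>

/*
 * Sketch only: emit allocator statistics through a caller-supplied
 * callback, per the malloc_stats_print() interface documented below.
 */
static void
write_cb(void *cbopaque, const char *s)
{

    fputs(s, (FILE *)cbopaque);
}

int
main(void)
{

    /* "g" omits general information that never changes during execution. */
    malloc_stats_print(write_cb, stderr, "g");
    return (0);
}

Passing NULL for write_cb would instead route output through malloc_message, as described under DIAGNOSTIC MESSAGES in the manual page below.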
Index: contrib/jemalloc/doc/jemalloc.3 =================================================================== --- contrib/jemalloc/doc/jemalloc.3 (revision 0) +++ contrib/jemalloc/doc/jemalloc.3 (working copy) @@ -0,0 +1,1457 @@ +'\" t +.\" Title: JEMALLOC +.\" Author: Jason Evans +.\" Generator: DocBook XSL Stylesheets v1.76.1 +.\" Date: 04/13/2012 +.\" Manual: User Manual +.\" Source: jemalloc 1.0.0-254-g7ca0fdfb85b2a9fc7a112e158892c098e004385b +.\" Language: English +.\" +.TH "JEMALLOC" "3" "04/13/2012" "jemalloc 1.0.0-254-g7ca0fdfb85" "User Manual" +.\" ----------------------------------------------------------------- +.\" * Define some portability stuff +.\" ----------------------------------------------------------------- +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.\" http://bugs.debian.org/507673 +.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.ie \n(.g .ds Aq \(aq +.el .ds Aq ' +.\" ----------------------------------------------------------------- +.\" * set default formatting +.\" ----------------------------------------------------------------- +.\" disable hyphenation +.nh +.\" disable justification (adjust text to left margin only) +.ad l +.\" ----------------------------------------------------------------- +.\" * MAIN CONTENT STARTS HERE * +.\" ----------------------------------------------------------------- +.SH "NAME" +jemalloc \- general purpose memory allocation functions +.SH "LIBRARY" +.PP +This manual describes jemalloc 1\&.0\&.0\-254\-g7ca0fdfb85b2a9fc7a112e158892c098e004385b\&. More information can be found at the +\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&. +.PP +The following configuration options are enabled in libc\*(Aqs built\-in jemalloc: +\fB\-\-enable\-dss\fR, +\fB\-\-enable\-experimental\fR, +\fB\-\-enable\-fill\fR, +\fB\-\-enable\-lazy\-lock\fR, +\fB\-\-enable\-stats\fR, +\fB\-\-enable\-tcache\fR, +\fB\-\-enable\-tls\fR, +\fB\-\-enable\-utrace\fR, and +\fB\-\-enable\-xmalloc\fR\&. Additionally, +\fB\-\-enable\-debug\fR +is enabled in development versions of FreeBSD (controlled by the +\fBMALLOC_PRODUCTION\fR +make variable)\&. 
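Because the option set above differs from a stock jemalloc build, it can be worth confirming at run time which features libc's jemalloc was compiled with. A hedged sketch using the read-only "config.*" mallctls documented later in this page, assuming the mallctl() prototype from the SYNOPSIS below:

#include <stdbool.h>
#include <stdio.h>
#include <malloc_np.h>

/*
 * Sketch only: report which build-time options are compiled into the
 * allocator, via the read-only "config.*" mallctls.
 */
int
main(void)
{
    const char *names[] = { "config.debug", "config.fill", "config.stats",
        "config.tcache", "config.utrace", "config.xmalloc" };
    unsigned i;

    for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
        bool on;
        size_t len = sizeof(on);

        if (mallctl(names[i], &on, &len, NULL, 0) == 0)
            printf("%s: %s\n", names[i], on ? "true" : "false");
    }
    return (0);
}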
+.SH "SYNOPSIS" +.sp +.ft B +.nf +#include +#include +.fi +.ft +.SS "Standard API" +.HP \w'void\ *malloc('u +.BI "void *malloc(size_t\ " "size" ");" +.HP \w'void\ *calloc('u +.BI "void *calloc(size_t\ " "number" ", size_t\ " "size" ");" +.HP \w'int\ posix_memalign('u +.BI "int posix_memalign(void\ **" "ptr" ", size_t\ " "alignment" ", size_t\ " "size" ");" +.HP \w'void\ *aligned_alloc('u +.BI "void *aligned_alloc(size_t\ " "alignment" ", size_t\ " "size" ");" +.HP \w'void\ *realloc('u +.BI "void *realloc(void\ *" "ptr" ", size_t\ " "size" ");" +.HP \w'void\ free('u +.BI "void free(void\ *" "ptr" ");" +.SS "Non\-standard API" +.HP \w'size_t\ malloc_usable_size('u +.BI "size_t malloc_usable_size(const\ void\ *" "ptr" ");" +.HP \w'void\ malloc_stats_print('u +.BI "void malloc_stats_print(void\ " "(*write_cb)" "\ (void\ *,\ const\ char\ *), void\ *" "cbopaque" ", const\ char\ *" "opts" ");" +.HP \w'int\ mallctl('u +.BI "int mallctl(const\ char\ *" "name" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" +.HP \w'int\ mallctlnametomib('u +.BI "int mallctlnametomib(const\ char\ *" "name" ", size_t\ *" "mibp" ", size_t\ *" "miblenp" ");" +.HP \w'int\ mallctlbymib('u +.BI "int mallctlbymib(const\ size_t\ *" "mib" ", size_t\ " "miblen" ", void\ *" "oldp" ", size_t\ *" "oldlenp" ", void\ *" "newp" ", size_t\ " "newlen" ");" +.HP \w'void\ (*malloc_message)('u +.BI "void (*malloc_message)(void\ *" "cbopaque" ", const\ char\ *" "s" ");" +.PP +const char *\fImalloc_conf\fR; +.SS "Experimental API" +.HP \w'int\ allocm('u +.BI "int allocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");" +.HP \w'int\ rallocm('u +.BI "int rallocm(void\ **" "ptr" ", size_t\ *" "rsize" ", size_t\ " "size" ", size_t\ " "extra" ", int\ " "flags" ");" +.HP \w'int\ sallocm('u +.BI "int sallocm(const\ void\ *" "ptr" ", size_t\ *" "rsize" ", int\ " "flags" ");" +.HP \w'int\ dallocm('u +.BI "int dallocm(void\ *" "ptr" ", int\ " "flags" ");" +.HP \w'int\ nallocm('u +.BI "int nallocm(size_t\ *" "rsize" ", size_t\ " "size" ", int\ " "flags" ");" +.SH "DESCRIPTION" +.SS "Standard API" +.PP +The +\fBmalloc\fR\fB\fR +function allocates +\fIsize\fR +bytes of uninitialized memory\&. The allocated space is suitably aligned (after possible pointer coercion) for storage of any type of object\&. +.PP +The +\fBcalloc\fR\fB\fR +function allocates space for +\fInumber\fR +objects, each +\fIsize\fR +bytes in length\&. The result is identical to calling +\fBmalloc\fR\fB\fR +with an argument of +\fInumber\fR +* +\fIsize\fR, with the exception that the allocated memory is explicitly initialized to zero bytes\&. +.PP +The +\fBposix_memalign\fR\fB\fR +function allocates +\fIsize\fR +bytes of memory such that the allocation\*(Aqs base address is an even multiple of +\fIalignment\fR, and returns the allocation in the value pointed to by +\fIptr\fR\&. The requested +\fIalignment\fR +must be a power of 2 at least as large as +sizeof(\fBvoid *\fR)\&. +.PP +The +\fBaligned_alloc\fR\fB\fR +function allocates +\fIsize\fR +bytes of memory such that the allocation\*(Aqs base address is an even multiple of +\fIalignment\fR\&. The requested +\fIalignment\fR +must be a power of 2\&. Behavior is undefined if +\fIsize\fR +is not an integral multiple of +\fIalignment\fR\&. +.PP +The +\fBrealloc\fR\fB\fR +function changes the size of the previously allocated memory referenced by +\fIptr\fR +to +\fIsize\fR +bytes\&. The contents of the memory are unchanged up to the lesser of the new and old sizes\&. 
If the new size is larger, the contents of the newly allocated portion of the memory are undefined\&. Upon success, the memory referenced by +\fIptr\fR +is freed and a pointer to the newly allocated memory is returned\&. Note that +\fBrealloc\fR\fB\fR +may move the memory allocation, resulting in a different return value than +\fIptr\fR\&. If +\fIptr\fR +is +\fBNULL\fR, the +\fBrealloc\fR\fB\fR +function behaves identically to +\fBmalloc\fR\fB\fR +for the specified size\&. +.PP +The +\fBfree\fR\fB\fR +function causes the allocated memory referenced by +\fIptr\fR +to be made available for future allocations\&. If +\fIptr\fR +is +\fBNULL\fR, no action occurs\&. +.SS "Non\-standard API" +.PP +The +\fBmalloc_usable_size\fR\fB\fR +function returns the usable size of the allocation pointed to by +\fIptr\fR\&. The return value may be larger than the size that was requested during allocation\&. The +\fBmalloc_usable_size\fR\fB\fR +function is not a mechanism for in\-place +\fBrealloc\fR\fB\fR; rather it is provided solely as a tool for introspection purposes\&. Any discrepancy between the requested allocation size and the size reported by +\fBmalloc_usable_size\fR\fB\fR +should not be depended on, since such behavior is entirely implementation\-dependent\&. +.PP +The +\fBmalloc_stats_print\fR\fB\fR +function writes human\-readable summary statistics via the +\fIwrite_cb\fR +callback function pointer and +\fIcbopaque\fR +data passed to +\fIwrite_cb\fR, or +\fBmalloc_message\fR\fB\fR +if +\fIwrite_cb\fR +is +\fBNULL\fR\&. This function can be called repeatedly\&. General information that never changes during execution can be omitted by specifying "g" as a character within the +\fIopts\fR +string\&. Note that +\fBmalloc_message\fR\fB\fR +uses the +\fBmallctl*\fR\fB\fR +functions internally, so inconsistent statistics can be reported if multiple threads use these functions simultaneously\&. If +\fB\-\-enable\-stats\fR +is specified during configuration, \(lqm\(rq and \(lqa\(rq can be specified to omit merged arena and per arena statistics, respectively; \(lqb\(rq and \(lql\(rq can be specified to omit per size class statistics for bins and large objects, respectively\&. Unrecognized characters are silently ignored\&. Note that thread caching may prevent some statistics from being completely up to date, since extra locking would be required to merge counters that track thread cache operations\&. +.PP +The +\fBmallctl\fR\fB\fR +function provides a general interface for introspecting the memory allocator, as well as setting modifiable parameters and triggering actions\&. The period\-separated +\fIname\fR +argument specifies a location in a tree\-structured namespace; see the +MALLCTL NAMESPACE +section for documentation on the tree contents\&. To read a value, pass a pointer via +\fIoldp\fR +to adequate space to contain the value, and a pointer to its length via +\fIoldlenp\fR; otherwise pass +\fBNULL\fR +and +\fBNULL\fR\&. Similarly, to write a value, pass a pointer to the value via +\fInewp\fR, and its length via +\fInewlen\fR; otherwise pass +\fBNULL\fR +and +\fB0\fR\&. +.PP +The +\fBmallctlnametomib\fR\fB\fR +function provides a way to avoid repeated name lookups for applications that repeatedly query the same portion of the namespace, by translating a name to a \(lqManagement Information Base\(rq (MIB) that can be passed repeatedly to +\fBmallctlbymib\fR\fB\fR\&. 
Upon successful return from +\fBmallctlnametomib\fR\fB\fR, +\fImibp\fR +contains an array of +\fI*miblenp\fR +integers, where +\fI*miblenp\fR +is the lesser of the number of components in +\fIname\fR +and the input value of +\fI*miblenp\fR\&. Thus it is possible to pass a +\fI*miblenp\fR +that is smaller than the number of period\-separated name components, which results in a partial MIB that can be used as the basis for constructing a complete MIB\&. For name components that are integers (e\&.g\&. the 2 in +"arenas\&.bin\&.2\&.size"), the corresponding MIB component will always be that integer\&. Therefore, it is legitimate to construct code like the following: +.sp +.if n \{\ +.RS 4 +.\} +.nf +unsigned nbins, i; + +int mib[4]; +size_t len, miblen; + +len = sizeof(nbins); +mallctl("arenas\&.nbins", &nbins, &len, NULL, 0); + +miblen = 4; +mallnametomib("arenas\&.bin\&.0\&.size", mib, &miblen); +for (i = 0; i < nbins; i++) { + size_t bin_size; + + mib[2] = i; + len = sizeof(bin_size); + mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0); + /* Do something with bin_size\&.\&.\&. */ +} +.fi +.if n \{\ +.RE +.\} +.SS "Experimental API" +.PP +The experimental API is subject to change or removal without regard for backward compatibility\&. If +\fB\-\-disable\-experimental\fR +is specified during configuration, the experimental API is omitted\&. +.PP +The +\fBallocm\fR\fB\fR, +\fBrallocm\fR\fB\fR, +\fBsallocm\fR\fB\fR, +\fBdallocm\fR\fB\fR, and +\fBnallocm\fR\fB\fR +functions all have a +\fIflags\fR +argument that can be used to specify options\&. The functions only check the options that are contextually relevant\&. Use bitwise or (|) operations to specify one or more of the following: +.PP +\fBALLOCM_LG_ALIGN(\fR\fB\fIla\fR\fR\fB) \fR +.RS 4 +Align the memory allocation to start at an address that is a multiple of +(1 << \fIla\fR)\&. This macro does not validate that +\fIla\fR +is within the valid range\&. +.RE +.PP +\fBALLOCM_ALIGN(\fR\fB\fIa\fR\fR\fB) \fR +.RS 4 +Align the memory allocation to start at an address that is a multiple of +\fIa\fR, where +\fIa\fR +is a power of two\&. This macro does not validate that +\fIa\fR +is a power of 2\&. +.RE +.PP +\fBALLOCM_ZERO\fR +.RS 4 +Initialize newly allocated memory to contain zero bytes\&. In the growing reallocation case, the real size prior to reallocation defines the boundary between untouched bytes and those that are initialized to contain zero bytes\&. If this option is absent, newly allocated memory is uninitialized\&. +.RE +.PP +\fBALLOCM_NO_MOVE\fR +.RS 4 +For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&. +.RE +.PP +The +\fBallocm\fR\fB\fR +function allocates at least +\fIsize\fR +bytes of memory, sets +\fI*ptr\fR +to the base address of the allocation, and sets +\fI*rsize\fR +to the real size of the allocation if +\fIrsize\fR +is not +\fBNULL\fR\&. Behavior is undefined if +\fIsize\fR +is +\fB0\fR\&. +.PP +The +\fBrallocm\fR\fB\fR +function resizes the allocation at +\fI*ptr\fR +to be at least +\fIsize\fR +bytes, sets +\fI*ptr\fR +to the base address of the allocation if it moved, and sets +\fI*rsize\fR +to the real size of the allocation if +\fIrsize\fR +is not +\fBNULL\fR\&. If +\fIextra\fR +is non\-zero, an attempt is made to resize the allocation to be at least +\fIsize\fR + \fIextra\fR) +bytes, though inability to allocate the extra byte(s) will not by itself result in failure\&. 
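As a concrete illustration of the flags above, the following sketch grows an allocation only if that can be done without moving it. It assumes the experimental prototypes are exposed via <malloc_np.h> and that ALLOCM_SUCCESS is the success status; neither assumption is taken from this page.

#include <stddef.h>
#include <malloc_np.h>

/*
 * Sketch only: allocate 4 KiB, then try to grow the allocation in place.
 * ALLOCM_NO_MOVE makes rallocm() report failure rather than relocate the
 * object; ALLOCM_SUCCESS is assumed to be the success status.
 */
int
main(void)
{
    void *p;
    size_t rsize;
    int grown;

    if (allocm(&p, &rsize, 4096, 0) != ALLOCM_SUCCESS)
        return (1);
    /* Ask for at least 8192 bytes, preferably 8192 + 4096, in place. */
    grown = (rallocm(&p, &rsize, 8192, 4096, ALLOCM_NO_MOVE) ==
        ALLOCM_SUCCESS);
    dallocm(p, 0);
    return (grown ? 0 : 1);
}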
Behavior is undefined if +\fIsize\fR +is +\fB0\fR, or if +(\fIsize\fR + \fIextra\fR > \fBSIZE_T_MAX\fR)\&. +.PP +The +\fBsallocm\fR\fB\fR +function sets +\fI*rsize\fR +to the real size of the allocation\&. +.PP +The +\fBdallocm\fR\fB\fR +function causes the memory referenced by +\fIptr\fR +to be made available for future allocations\&. +.PP +The +\fBnallocm\fR\fB\fR +function allocates no memory, but it performs the same size computation as the +\fBallocm\fR\fB\fR +function, and if +\fIrsize\fR +is not +\fBNULL\fR +it sets +\fI*rsize\fR +to the real size of the allocation that would result from the equivalent +\fBallocm\fR\fB\fR +function call\&. Behavior is undefined if +\fIsize\fR +is +\fB0\fR\&. +.SH "TUNING" +.PP +Once, when the first call is made to one of the memory allocation routines, the allocator initializes its internals based in part on various options that can be specified at compile\- or run\-time\&. +.PP +The string pointed to by the global variable +\fImalloc_conf\fR, the \(lqname\(rq of the file referenced by the symbolic link named +/etc/malloc\&.conf, and the value of the environment variable +\fBMALLOC_CONF\fR, will be interpreted, in that order, from left to right as options\&. +.PP +An options string is a comma\-separated list of option:value pairs\&. There is one key corresponding to each +"opt\&.*" +mallctl (see the +MALLCTL NAMESPACE +section for options documentation)\&. For example, +abort:true,narenas:1 +sets the +"opt\&.abort" +and +"opt\&.narenas" +options\&. Some options have boolean values (true/false), others have integer values (base 8, 10, or 16, depending on prefix), and yet others have raw string values\&. +.SH "IMPLEMENTATION NOTES" +.PP +Traditionally, allocators have used +\fBsbrk\fR(2) +to obtain memory, which is suboptimal for several reasons, including race conditions, increased fragmentation, and artificial limitations on maximum usable memory\&. If +\fB\-\-enable\-dss\fR +is specified during configuration, this allocator uses both +\fBsbrk\fR(2) +and +\fBmmap\fR(2), in that order of preference; otherwise only +\fBmmap\fR(2) +is used\&. +.PP +This allocator uses multiple arenas in order to reduce lock contention for threaded programs on multi\-processor systems\&. This works well with regard to threading scalability, but incurs some costs\&. There is a small fixed per\-arena overhead, and additionally, arenas manage memory completely independently of each other, which means a small fixed increase in overall memory fragmentation\&. These overheads are not generally an issue, given the number of arenas normally used\&. Note that using substantially more arenas than the default is not likely to improve performance, mainly due to reduced cache performance\&. However, it may make sense to reduce the number of arenas if an application does not make much use of the allocation functions\&. +.PP +In addition to multiple arenas, unless +\fB\-\-disable\-tcache\fR +is specified during configuration, this allocator supports thread\-specific caching for small and large objects, in order to make it possible to completely avoid synchronization for most allocation requests\&. Such caching allows very fast allocation in the common case, but it increases memory usage and fragmentation, since a bounded number of objects can remain allocated in each thread cache\&. +.PP +Memory is conceptually broken into equal\-sized chunks, where the chunk size is a power of two that is greater than the page size\&. Chunks are always aligned to multiples of the chunk size\&. 
This alignment makes it possible to find metadata for user objects very quickly\&. +.PP +User objects are broken into three categories according to size: small, large, and huge\&. Small objects are smaller than one page\&. Large objects are smaller than the chunk size\&. Huge objects are a multiple of the chunk size\&. Small and large objects are managed by arenas; huge objects are managed separately in a single data structure that is shared by all threads\&. Huge objects are used by applications infrequently enough that this single data structure is not a scalability issue\&. +.PP +Each chunk that is managed by an arena tracks its contents as runs of contiguous pages (unused, backing a set of small objects, or backing one large object)\&. The combination of chunk alignment and chunk page maps makes it possible to determine all metadata regarding small and large allocations in constant time\&. +.PP +Small objects are managed in groups by page runs\&. Each run maintains a frontier and free list to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least +sizeof(\fBdouble\fR)\&. All other small object size classes are multiples of the quantum, spaced such that internal fragmentation is limited to approximately 25% for all but the smallest size classes\&. Allocation requests that are larger than the maximum small size class, but small enough to fit in an arena\-managed chunk (see the +"opt\&.lg_chunk" +option), are rounded up to the nearest run size\&. Allocation requests that are too large to fit in an arena\-managed chunk are rounded up to the nearest multiple of the chunk size\&. +.PP +Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&. +.PP +Assuming 4 MiB chunks, 4 KiB pages, and a 16\-byte quantum on a 64\-bit system, the size classes in each category are as shown in +Table 1\&. +.sp +.it 1 an-trap +.nr an-no-space-flag 1 +.nr an-break-flag 1 +.br +.B Table\ \&1.\ \&Size classes +.TS +allbox tab(:); +lB rB lB. +T{ +Category +T}:T{ +Spacing +T}:T{ +Size +T} +.T& +l r l +^ r l +^ r l +^ r l +^ r l +^ r l +^ r l +l r l +l r l. +T{ +Small +T}:T{ +lg +T}:T{ +[8] +T} +:T{ +16 +T}:T{ +[16, 32, 48, \&.\&.\&., 128] +T} +:T{ +32 +T}:T{ +[160, 192, 224, 256] +T} +:T{ +64 +T}:T{ +[320, 384, 448, 512] +T} +:T{ +128 +T}:T{ +[640, 768, 896, 1024] +T} +:T{ +256 +T}:T{ +[1280, 1536, 1792, 2048] +T} +:T{ +512 +T}:T{ +[2560, 3072, 3584] +T} +T{ +Large +T}:T{ +4 KiB +T}:T{ +[4 KiB, 8 KiB, 12 KiB, \&.\&.\&., 4072 KiB] +T} +T{ +Huge +T}:T{ +4 MiB +T}:T{ +[4 MiB, 8 MiB, 12 MiB, \&.\&.\&.] +T} +.TE +.sp 1 +.SH "MALLCTL NAMESPACE" +.PP +The following names are defined in the namespace accessible via the +\fBmallctl*\fR\fB\fR +functions\&. Value types are specified in parentheses, their readable/writable statuses are encoded as +rw, +r\-, +\-w, or +\-\-, and required build configuration flags follow, if any\&. A name element encoded as + +or + +indicates an integer component, where the integer varies from 0 to some upper value that must be determined via introspection\&. In the case of +"stats\&.arenas\&.\&.*", + +equal to +"arenas\&.narenas" +can be used to access the summation of statistics from all arenas\&. 
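A typical statistics read first refreshes the cached data and then queries it. A minimal sketch, with statistic names taken from the list below and the "epoch" refresh idiom that the next paragraph highlights:

#include <stdint.h>
#include <stdio.h>
#include <malloc_np.h>

/*
 * Sketch only: bump the "epoch" so cached statistics are refreshed, then
 * read "stats.allocated" (available because --enable-stats is configured
 * into libc's jemalloc).
 */
int
main(void)
{
    uint64_t epoch = 1;
    size_t sz, allocated;

    sz = sizeof(epoch);
    (void)mallctl("epoch", &epoch, &sz, &epoch, sz);

    sz = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
        printf("stats.allocated: %zu bytes\n", allocated);
    return (0);
}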
Take special note of the +"epoch" +mallctl, which controls refreshing of cached dynamic statistics\&. +.PP +"version" (\fBconst char *\fR) r\- +.RS 4 +Return the jemalloc version string\&. +.RE +.PP +"epoch" (\fBuint64_t\fR) rw +.RS 4 +If a value is passed in, refresh the data from which the +\fBmallctl*\fR\fB\fR +functions report values, and increment the epoch\&. Return the current epoch\&. This is useful for detecting whether another thread caused a refresh\&. +.RE +.PP +"config\&.debug" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-debug\fR +was specified during build configuration\&. +.RE +.PP +"config\&.dss" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-dss\fR +was specified during build configuration\&. +.RE +.PP +"config\&.fill" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-fill\fR +was specified during build configuration\&. +.RE +.PP +"config\&.lazy_lock" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-lazy\-lock\fR +was specified during build configuration\&. +.RE +.PP +"config\&.prof" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-prof\fR +was specified during build configuration\&. +.RE +.PP +"config\&.prof_libgcc" (\fBbool\fR) r\- +.RS 4 +\fB\-\-disable\-prof\-libgcc\fR +was not specified during build configuration\&. +.RE +.PP +"config\&.prof_libunwind" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-prof\-libunwind\fR +was specified during build configuration\&. +.RE +.PP +"config\&.stats" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-stats\fR +was specified during build configuration\&. +.RE +.PP +"config\&.tcache" (\fBbool\fR) r\- +.RS 4 +\fB\-\-disable\-tcache\fR +was not specified during build configuration\&. +.RE +.PP +"config\&.tls" (\fBbool\fR) r\- +.RS 4 +\fB\-\-disable\-tls\fR +was not specified during build configuration\&. +.RE +.PP +"config\&.utrace" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-utrace\fR +was specified during build configuration\&. +.RE +.PP +"config\&.valgrind" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-valgrind\fR +was specified during build configuration\&. +.RE +.PP +"config\&.xmalloc" (\fBbool\fR) r\- +.RS 4 +\fB\-\-enable\-xmalloc\fR +was specified during build configuration\&. +.RE +.PP +"opt\&.abort" (\fBbool\fR) r\- +.RS 4 +Abort\-on\-warning enabled/disabled\&. If true, most warnings are fatal\&. The process will call +\fBabort\fR(3) +in these cases\&. This option is disabled by default unless +\fB\-\-enable\-debug\fR +is specified during configuration, in which case it is enabled by default\&. +.RE +.PP +"opt\&.lg_chunk" (\fBsize_t\fR) r\- +.RS 4 +Virtual memory chunk size (log base 2)\&. The default chunk size is 4 MiB (2^22)\&. +.RE +.PP +"opt\&.narenas" (\fBsize_t\fR) r\- +.RS 4 +Maximum number of arenas to use\&. The default maximum number of arenas is four times the number of CPUs, or one if there is a single CPU\&. +.RE +.PP +"opt\&.lg_dirty_mult" (\fBssize_t\fR) r\- +.RS 4 +Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via +\fBmadvise\fR(2) +or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 32:1 (2^5:1); an option value of \-1 will disable dirty page purging\&. +.RE +.PP +"opt\&.stats_print" (\fBbool\fR) r\- +.RS 4 +Enable/disable statistics printing at exit\&. 
If enabled, the +\fBmalloc_stats_print\fR\fB\fR +function is called at program exit via an +\fBatexit\fR(3) +function\&. If +\fB\-\-enable\-stats\fR +is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&. +.RE +.PP +"opt\&.junk" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] +.RS 4 +Junk filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to +0xa5\&. All deallocated memory will be initialized to +0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless +\fB\-\-enable\-debug\fR +is specified during configuration, in which case it is enabled by default\&. +.RE +.PP +"opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR] +.RS 4 +Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the +"opt\&.junk" +option is enabled\&. This feature is of particular use in combination with +\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0\&. +.RE +.PP +"opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] +.RS 4 +Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the +"opt\&.junk" +option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with +\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default\&. +.RE +.PP +"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR] +.RS 4 +Zero filling enabled/disabled\&. If enabled, each byte of uninitialized allocated memory will be initialized to 0\&. Note that this initialization only happens once for each byte, so +\fBrealloc\fR\fB\fR +and +\fBrallocm\fR\fB\fR +calls do not zero memory that was previously allocated\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default\&. +.RE +.PP +"opt\&.utrace" (\fBbool\fR) r\- [\fB\-\-enable\-utrace\fR] +.RS 4 +Allocation tracing based on +\fButrace\fR(2) +enabled/disabled\&. This option is disabled by default\&. +.RE +.PP +"opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR] +.RS 4 +\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2 +support enabled/disabled\&. If enabled, several other options are automatically modified during options processing to work well with Valgrind: +"opt\&.junk" +and +"opt\&.zero" +are set to false, +"opt\&.quarantine" +is set to 16 MiB, and +"opt\&.redzone" +is set to true\&. This option is disabled by default\&. +.RE +.PP +"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR] +.RS 4 +Abort\-on\-out\-of\-memory enabled/disabled\&. 
If enabled, rather than returning failure for any allocation function, display a diagnostic message on +\fBSTDERR_FILENO\fR +and cause the program to drop core (using +\fBabort\fR(3))\&. If an application is designed to depend on this behavior, set the option at compile time by including the following in the source code: +.sp +.if n \{\ +.RS 4 +.\} +.nf +malloc_conf = "xmalloc:true"; +.fi +.if n \{\ +.RE +.\} +.sp +This option is disabled by default\&. +.RE +.PP +"opt\&.tcache" (\fBbool\fR) r\- [\fB\-\-enable\-tcache\fR] +.RS 4 +Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the +"opt\&.lg_tcache_max" +option for related tuning information\&. This option is enabled by default\&. +.RE +.PP +"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] +.RS 4 +Maximum size class (log base 2) to cache in the thread\-specific cache\&. At a minimum, all small size classes are cached, and at a maximum all large size classes are cached\&. The default maximum is 32 KiB (2^15)\&. +.RE +.PP +"opt\&.prof" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Memory profiling enabled/disabled\&. If enabled, profile memory allocation activity, and use an +\fBatexit\fR(3) +function to dump final memory usage to a file named according to the pattern +\&.\&.\&.f\&.heap, where + +is controlled by the +"opt\&.prof_prefix" +option\&. See the +"opt\&.prof_active" +option for on\-the\-fly activation/deactivation\&. See the +"opt\&.lg_prof_sample" +option for probabilistic sampling control\&. See the +"opt\&.prof_accum" +option for control of cumulative sample reporting\&. See the +"opt\&.lg_prof_interval" +option for information on interval\-triggered profile dumping, and the +"opt\&.prof_gdump" +option for information on high\-water\-triggered profile dumping\&. Profile output is compatible with the included +\fBpprof\fR +Perl script, which originates from the +\m[blue]\fBgoogle\-perftools package\fR\m[]\&\s-2\u[3]\d\s+2\&. +.RE +.PP +"opt\&.prof_prefix" (\fBconst char *\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Filename prefix for profile dumps\&. If the prefix is set to the empty string, no automatic dumps will occur; this is primarily useful for disabling the automatic final heap dump (which also disables leak reporting, if enabled)\&. The default prefix is +jeprof\&. +.RE +.PP +"opt\&.prof_active" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Profiling activated/deactivated\&. This is a secondary control mechanism that makes it possible to start the application with profiling enabled (see the +"opt\&.prof" +option) but inactive, then toggle profiling at any time during program execution with the +"prof\&.active" +mallctl\&. This option is enabled by default\&. +.RE +.PP +"opt\&.lg_prof_sample" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Average interval (log base 2) between allocation samples, as measured in bytes of allocation activity\&. Increasing the sampling interval decreases profile fidelity, but also decreases the computational overhead\&. The default sample interval is 1 (2^0) (i\&.e\&. all allocations are sampled)\&. +.RE +.PP +"opt\&.prof_accum" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Reporting of cumulative object/byte counts in profile dumps enabled/disabled\&. 
If this option is enabled, every unique backtrace must be stored for the duration of execution\&. Depending on the application, this can impose a large memory overhead, and the cumulative counts are not always of interest\&. This option is enabled by default\&. +.RE +.PP +"opt\&.lg_prof_interval" (\fBssize_t\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Average interval (log base 2) between memory profile dumps, as measured in bytes of allocation activity\&. The actual interval between dumps may be sporadic because decentralized allocation counters are used to avoid synchronization bottlenecks\&. Profiles are dumped to files named according to the pattern +\&.\&.\&.i\&.heap, where + +is controlled by the +"opt\&.prof_prefix" +option\&. By default, interval\-triggered profile dumping is disabled (encoded as \-1)\&. +.RE +.PP +"opt\&.prof_gdump" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Trigger a memory profile dump every time the total virtual memory exceeds the previous maximum\&. Profiles are dumped to files named according to the pattern +\&.\&.\&.u\&.heap, where + +is controlled by the +"opt\&.prof_prefix" +option\&. This option is disabled by default\&. +.RE +.PP +"opt\&.prof_leak" (\fBbool\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Leak reporting enabled/disabled\&. If enabled, use an +\fBatexit\fR(3) +function to report memory leaks detected by allocation sampling\&. See the +"opt\&.prof" +option for information on analyzing heap profile output\&. This option is disabled by default\&. +.RE +.PP +"thread\&.arena" (\fBunsigned\fR) rw +.RS 4 +Get or set the arena associated with the calling thread\&. The arena index must be less than the maximum number of arenas (see the +"arenas\&.narenas" +mallctl)\&. If the specified arena was not initialized beforehand (see the +"arenas\&.initialized" +mallctl), it will be automatically initialized as a side effect of calling this interface\&. +.RE +.PP +"thread\&.allocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Get the total number of bytes ever allocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. +.RE +.PP +"thread\&.allocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Get a pointer to the the value that is returned by the +"thread\&.allocated" +mallctl\&. This is useful for avoiding the overhead of repeated +\fBmallctl*\fR\fB\fR +calls\&. +.RE +.PP +"thread\&.deallocated" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Get the total number of bytes ever deallocated by the calling thread\&. This counter has the potential to wrap around; it is up to the application to appropriately interpret the counter in such cases\&. +.RE +.PP +"thread\&.deallocatedp" (\fBuint64_t *\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Get a pointer to the the value that is returned by the +"thread\&.deallocated" +mallctl\&. This is useful for avoiding the overhead of repeated +\fBmallctl*\fR\fB\fR +calls\&. +.RE +.PP +"thread\&.tcache\&.enabled" (\fBbool\fR) rw [\fB\-\-enable\-tcache\fR] +.RS 4 +Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed as a side effect of becoming disabled (see +"thread\&.tcache\&.flush")\&. +.RE +.PP +"thread\&.tcache\&.flush" (\fBvoid\fR) \-\- [\fB\-\-enable\-tcache\fR] +.RS 4 +Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. 
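For illustration, a sketch of issuing the flush from a thread that is about to go idle, assuming only the mallctl() prototype from the SYNOPSIS:

#include <malloc_np.h>

/*
 * Sketch only: release the calling thread's thread-specific cache by
 * invoking the void-typed "thread.tcache.flush" mallctl, which takes no
 * input or output arguments.
 */
static void
flush_thread_cache(void)
{

    (void)mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
}

int
main(void)
{

    flush_thread_cache();
    return (0);
}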
Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&. +.RE +.PP +"arenas\&.narenas" (\fBunsigned\fR) r\- +.RS 4 +Maximum number of arenas\&. +.RE +.PP +"arenas\&.initialized" (\fBbool *\fR) r\- +.RS 4 +An array of +"arenas\&.narenas" +booleans\&. Each boolean indicates whether the corresponding arena is initialized\&. +.RE +.PP +"arenas\&.quantum" (\fBsize_t\fR) r\- +.RS 4 +Quantum size\&. +.RE +.PP +"arenas\&.page" (\fBsize_t\fR) r\- +.RS 4 +Page size\&. +.RE +.PP +"arenas\&.tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR] +.RS 4 +Maximum thread\-cached size class\&. +.RE +.PP +"arenas\&.nbins" (\fBunsigned\fR) r\- +.RS 4 +Number of bin size classes\&. +.RE +.PP +"arenas\&.nhbins" (\fBunsigned\fR) r\- [\fB\-\-enable\-tcache\fR] +.RS 4 +Total number of thread cache bin size classes\&. +.RE +.PP +"arenas\&.bin\&.\&.size" (\fBsize_t\fR) r\- +.RS 4 +Maximum size supported by size class\&. +.RE +.PP +"arenas\&.bin\&.\&.nregs" (\fBuint32_t\fR) r\- +.RS 4 +Number of regions per page run\&. +.RE +.PP +"arenas\&.bin\&.\&.run_size" (\fBsize_t\fR) r\- +.RS 4 +Number of bytes per page run\&. +.RE +.PP +"arenas\&.nlruns" (\fBsize_t\fR) r\- +.RS 4 +Total number of large size classes\&. +.RE +.PP +"arenas\&.lrun\&.\&.size" (\fBsize_t\fR) r\- +.RS 4 +Maximum size supported by this large size class\&. +.RE +.PP +"arenas\&.purge" (\fBunsigned\fR) \-w +.RS 4 +Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&. +.RE +.PP +"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR] +.RS 4 +Control whether sampling is currently active\&. See the +"opt\&.prof_active" +option for additional information\&. +.RE +.PP +"prof\&.dump" (\fBconst char *\fR) \-w [\fB\-\-enable\-prof\fR] +.RS 4 +Dump a memory profile to the specified file, or if NULL is specified, to a file according to the pattern +\&.\&.\&.m\&.heap, where + +is controlled by the +"opt\&.prof_prefix" +option\&. +.RE +.PP +"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR] +.RS 4 +Average number of bytes allocated between inverval\-based profile dumps\&. See the +"opt\&.lg_prof_interval" +option for additional information\&. +.RE +.PP +"stats\&.cactive" (\fBsize_t *\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Pointer to a counter that contains an approximate count of the current number of bytes in active pages\&. The estimate may be high, but never low, because each arena rounds up to the nearest multiple of the chunk size when computing its contribution to the counter\&. Note that the +"epoch" +mallctl has no bearing on this counter\&. Furthermore, counter consistency is maintained via atomic operations, so it is necessary to use an atomic operation in order to guarantee a consistent read when dereferencing the pointer\&. +.RE +.PP +"stats\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Total number of bytes allocated by the application\&. +.RE +.PP +"stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to +"stats\&.allocated"\&. 
+.RE +.PP +"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Total number of bytes in chunks mapped on behalf of the application\&. This is a multiple of the chunk size, and is at least as large as +"stats\&.active"\&. This does not include inactive chunks embedded in the DSS\&. +.RE +.PP +"stats\&.chunks\&.current" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Total number of chunks actively mapped on behalf of the application\&. This does not include inactive chunks embedded in the DSS\&. +.RE +.PP +"stats\&.chunks\&.total" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of chunks allocated\&. +.RE +.PP +"stats\&.chunks\&.high" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Maximum number of active chunks at any time thus far\&. +.RE +.PP +"stats\&.huge\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of bytes currently allocated by huge objects\&. +.RE +.PP +"stats\&.huge\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of huge allocation requests\&. +.RE +.PP +"stats\&.huge\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of huge deallocation requests\&. +.RE +.PP +"stats\&.arenas\&.\&.nthreads" (\fBunsigned\fR) r\- +.RS 4 +Number of threads currently assigned to arena\&. +.RE +.PP +"stats\&.arenas\&.\&.pactive" (\fBsize_t\fR) r\- +.RS 4 +Number of pages in active runs\&. +.RE +.PP +"stats\&.arenas\&.\&.pdirty" (\fBsize_t\fR) r\- +.RS 4 +Number of pages within unused runs that are potentially dirty, and for which +\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR +or similar has not been called\&. +.RE +.PP +"stats\&.arenas\&.\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of mapped bytes\&. +.RE +.PP +"stats\&.arenas\&.\&.npurge" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of dirty page purge sweeps performed\&. +.RE +.PP +"stats\&.arenas\&.\&.nmadvise" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of +\fBmadvise\fR\fB\fI\&.\&.\&.\fR\fR\fB \fR\fB\fI\fBMADV_DONTNEED\fR\fR\fR +or similar calls made to purge dirty pages\&. +.RE +.PP +"stats\&.arenas\&.\&.npurged" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of pages purged\&. +.RE +.PP +"stats\&.arenas\&.\&.small\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of bytes currently allocated by small objects\&. +.RE +.PP +"stats\&.arenas\&.\&.small\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of allocation requests served by small bins\&. +.RE +.PP +"stats\&.arenas\&.\&.small\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of small objects returned to bins\&. +.RE +.PP +"stats\&.arenas\&.\&.small\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of small allocation requests\&. +.RE +.PP +"stats\&.arenas\&.\&.large\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Number of bytes currently allocated by large objects\&. +.RE +.PP +"stats\&.arenas\&.\&.large\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of large allocation requests served directly by the arena\&. +.RE +.PP +"stats\&.arenas\&.\&.large\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR] +.RS 4 +Cumulative number of large deallocation requests served directly by the arena\&. 
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.large\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of large allocation requests\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.allocated" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of bytes allocated by bin\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocations served by bin\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocations returned to bin\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nfills" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+.RS 4
+Cumulative number of tcache fills\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nflushes" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR \fB\-\-enable\-tcache\fR]
+.RS 4
+Cumulative number of tcache flushes\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of runs created\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.nreruns" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of times the current run from which to allocate changed\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.bins\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of runs\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nmalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests for this size class served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.ndalloc" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of deallocation requests for this size class served directly by the arena\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.nrequests" (\fBuint64_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Cumulative number of allocation requests for this size class\&.
+.RE
+.PP
+"stats\&.arenas\&.<i>\&.lruns\&.<j>\&.curruns" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Current number of runs for this size class\&.
+.RE
+.SH "DEBUGGING MALLOC PROBLEMS"
+.PP
+When debugging, it is a good idea to configure/build jemalloc with the
+\fB\-\-enable\-debug\fR
+and
+\fB\-\-enable\-fill\fR
+options, and recompile the program with suitable options and symbols for debugger support\&. When so configured, jemalloc incorporates a wide variety of run\-time assertions that catch application errors such as double\-free, write\-after\-free, etc\&.
+.PP
+Programs often accidentally depend on \(lquninitialized\(rq memory actually being filled with zero bytes\&. Junk filling (see the
+"opt\&.junk"
+option) tends to expose such bugs in the form of obviously incorrect results and/or coredumps\&. Conversely, zero filling (see the
+"opt\&.zero"
+option) eliminates the symptoms of such bugs\&. Between these two options, it is usually possible to quickly detect, diagnose, and eliminate such bugs\&.
+.PP
+This implementation does not provide much detail about the problems it detects, because the performance impact for storing such information would be prohibitive\&.
However, jemalloc does integrate with the most excellent
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
+tool if the
+\fB\-\-enable\-valgrind\fR
+configuration option is enabled and the
+"opt\&.valgrind"
+option is enabled\&.
+.SH "DIAGNOSTIC MESSAGES"
+.PP
+If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor
+\fBSTDERR_FILENO\fR\&. Errors will result in the process dumping core\&. If the
+"opt\&.abort"
+option is set, most warnings are treated as errors\&.
+.PP
+The
+\fImalloc_message\fR
+variable allows the programmer to override the function which emits the text strings forming the errors and warnings if for some reason the
+\fBSTDERR_FILENO\fR
+file descriptor is not suitable for this\&.
+\fBmalloc_message\fR\fB\fR
+takes the
+\fIcbopaque\fR
+pointer argument that is
+\fBNULL\fR
+unless overridden by the arguments in a call to
+\fBmalloc_stats_print\fR\fB\fR, followed by a string pointer\&. Please note that doing anything which tries to allocate memory in this function is likely to result in a crash or deadlock\&.
+.PP
+All messages are prefixed by \(lq<jemalloc>: \(rq\&.
+.SH "RETURN VALUES"
+.SS "Standard API"
+.PP
+The
+\fBmalloc\fR\fB\fR
+and
+\fBcalloc\fR\fB\fR
+functions return a pointer to the allocated memory if successful; otherwise a
+\fBNULL\fR
+pointer is returned and
+\fIerrno\fR
+is set to
+ENOMEM\&.
+.PP
+The
+\fBposix_memalign\fR\fB\fR
+function returns the value 0 if successful; otherwise it returns an error value\&. The
+\fBposix_memalign\fR\fB\fR
+function will fail if:
+.PP
+EINVAL
+.RS 4
+The
+\fIalignment\fR
+parameter is not a power of 2 at least as large as
+sizeof(\fBvoid *\fR)\&.
+.RE
+.PP
+ENOMEM
+.RS 4
+Memory allocation error\&.
+.RE
+.PP
+The
+\fBaligned_alloc\fR\fB\fR
+function returns a pointer to the allocated memory if successful; otherwise a
+\fBNULL\fR
+pointer is returned and
+\fIerrno\fR
+is set\&. The
+\fBaligned_alloc\fR\fB\fR
+function will fail if:
+.PP
+EINVAL
+.RS 4
+The
+\fIalignment\fR
+parameter is not a power of 2\&.
+.RE
+.PP
+ENOMEM
+.RS 4
+Memory allocation error\&.
+.RE
+.PP
+The
+\fBrealloc\fR\fB\fR
+function returns a pointer, possibly identical to
+\fIptr\fR, to the allocated memory if successful; otherwise a
+\fBNULL\fR
+pointer is returned, and
+\fIerrno\fR
+is set to
+ENOMEM
+if the error was the result of an allocation failure\&. The
+\fBrealloc\fR\fB\fR
+function always leaves the original buffer intact when an error occurs\&.
+.PP
+The
+\fBfree\fR\fB\fR
+function returns no value\&.
+.SS "Non\-standard API"
+.PP
+The
+\fBmalloc_usable_size\fR\fB\fR
+function returns the usable size of the allocation pointed to by
+\fIptr\fR\&.
+.PP
+The
+\fBmallctl\fR\fB\fR,
+\fBmallctlnametomib\fR\fB\fR, and
+\fBmallctlbymib\fR\fB\fR
+functions return 0 on success; otherwise they return an error value\&. The functions will fail if:
+.PP
+EINVAL
+.RS 4
+\fInewp\fR
+is not
+\fBNULL\fR, and
+\fInewlen\fR
+is too large or too small\&. Alternatively,
+\fI*oldlenp\fR
+is too large or too small; in this case as much data as possible are read despite the error\&.
+.RE
+.PP
+ENOMEM
+.RS 4
+\fI*oldlenp\fR
+is too short to hold the requested value\&.
+.RE
+.PP
+ENOENT
+.RS 4
+\fIname\fR
+or
+\fImib\fR
+specifies an unknown/invalid value\&.
+.RE
+.PP
+EPERM
+.RS 4
+Attempt to read or write void value, or attempt to write read\-only value\&.
+.RE
+.PP
+EAGAIN
+.RS 4
+A memory allocation failure occurred\&.
+.RE
+.PP
+EFAULT
+.RS 4
+An interface with side effects failed in some way not directly related to
+\fBmallctl*\fR\fB\fR
+read/write processing\&.
+.RE
+.SS "Experimental API"
+.PP
+The
+\fBallocm\fR\fB\fR,
+\fBrallocm\fR\fB\fR,
+\fBsallocm\fR\fB\fR,
+\fBdallocm\fR\fB\fR, and
+\fBnallocm\fR\fB\fR
+functions return
+\fBALLOCM_SUCCESS\fR
+on success; otherwise they return an error value\&. The
+\fBallocm\fR\fB\fR,
+\fBrallocm\fR\fB\fR, and
+\fBnallocm\fR\fB\fR
+functions will fail if:
+.PP
+ALLOCM_ERR_OOM
+.RS 4
+Out of memory\&. Insufficient contiguous memory was available to service the allocation request\&. The
+\fBallocm\fR\fB\fR
+function additionally sets
+\fI*ptr\fR
+to
+\fBNULL\fR, whereas the
+\fBrallocm\fR\fB\fR
+function leaves
+\fB*ptr\fR
+unmodified\&.
+.RE
+The
+\fBrallocm\fR\fB\fR
+function will also fail if:
+.PP
+ALLOCM_ERR_NOT_MOVED
+.RS 4
+\fBALLOCM_NO_MOVE\fR
+was specified, but the reallocation request could not be serviced without moving the object\&.
+.RE
+.SH "ENVIRONMENT"
+.PP
+The following environment variable affects the execution of the allocation functions:
+.PP
+\fBMALLOC_CONF\fR
+.RS 4
+If the environment variable
+\fBMALLOC_CONF\fR
+is set, the characters it contains will be interpreted as options\&.
+.RE
+.SH "EXAMPLES"
+.PP
+To dump core whenever a problem occurs:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+ln \-s \*(Aqabort:true\*(Aq /etc/malloc\&.conf
+.fi
+.if n \{\
+.RE
+.\}
+.PP
+To specify in the source a chunk size that is 16 MiB:
+.sp
+.if n \{\
+.RS 4
+.\}
+.nf
+malloc_conf = "lg_chunk:24";
+.fi
+.if n \{\
+.RE
+.\}
+.SH "SEE ALSO"
+.PP
+\fBmadvise\fR(2),
+\fBmmap\fR(2),
+\fBsbrk\fR(2),
+\fButrace\fR(2),
+\fBalloca\fR(3),
+\fBatexit\fR(3),
+\fBgetpagesize\fR(3)
+.SH "STANDARDS"
+.PP
+The
+\fBmalloc\fR\fB\fR,
+\fBcalloc\fR\fB\fR,
+\fBrealloc\fR\fB\fR, and
+\fBfree\fR\fB\fR
+functions conform to ISO/IEC 9899:1990 (\(lqISO C90\(rq)\&.
+.PP
+The
+\fBposix_memalign\fR\fB\fR
+function conforms to IEEE Std 1003\&.1\-2001 (\(lqPOSIX\&.1\(rq)\&.
+.SH "HISTORY"
+.PP
+The
+\fBmalloc_usable_size\fR\fB\fR
+and
+\fBposix_memalign\fR\fB\fR
+functions first appeared in FreeBSD 7\&.0\&.
+.PP
+The
+\fBaligned_alloc\fR\fB\fR,
+\fBmalloc_stats_print\fR\fB\fR,
+\fBmallctl*\fR\fB\fR, and
+\fB*allocm\fR\fB\fR
+functions first appeared in FreeBSD 10\&.0\&.
+.SH "AUTHOR"
+.PP
+\fBJason Evans\fR
+.RS 4
+.RE
+.SH "NOTES"
+.IP " 1." 4
+jemalloc website
+.RS 4
+\%http://www.canonware.com/jemalloc/
+.RE
+.IP " 2." 4
+Valgrind
+.RS 4
+\%http://valgrind.org/
+.RE
+.IP " 3." 4
+google-perftools package
+.RS 4
+\%http://code.google.com/p/google-perftools/
+.RE
Index: contrib/jemalloc/COPYING
===================================================================
--- contrib/jemalloc/COPYING (revision 0)
+++ contrib/jemalloc/COPYING (working copy)
@@ -0,0 +1,27 @@
+Unless otherwise specified, files in the jemalloc source distribution are
+subject to the following license:
+--------------------------------------------------------------------------------
+Copyright (C) 2002-2012 Jason Evans <jasone@canonware.com>.
+All rights reserved.
+Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
+Copyright (C) 2009-2012 Facebook, Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice(s),
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice(s),
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--------------------------------------------------------------------------------
Index: contrib/jemalloc/FREEBSD-Xlist
===================================================================
--- contrib/jemalloc/FREEBSD-Xlist (revision 0)
+++ contrib/jemalloc/FREEBSD-Xlist (working copy)
@@ -0,0 +1,24 @@
+$FreeBSD$
+.git
+.gitignore
+ChangeLog
+FREEBSD-*
+INSTALL
+Makefile*
+README
+autogen.sh
+autom4te.cache/
+bin/
+config.*
+configure*
+doc/*.in
+doc/*.xml
+doc/*.xsl
+doc/*.html
+include/jemalloc/internal/jemalloc_internal.h.in
+include/jemalloc/internal/size_classes.sh
+include/jemalloc/jemalloc.h.in
+include/jemalloc/jemalloc_defs.h.in
+install-sh
+src/zone.c
+test/
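
As a companion to the mallctl namespace documented in the jemalloc manual page above, the following minimal C sketch reads a few statistics by name and then by MIB. It assumes the declarations live in <malloc_np.h> (as on FreeBSD; standalone jemalloc uses <jemalloc/jemalloc.h>), and that the library was built with --enable-stats so the "stats.*" nodes exist; error handling is abbreviated. This is an illustrative sketch, not part of the committed patch.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <malloc_np.h>   /* assumed header for mallctl*() on FreeBSD */

    int
    main(void)
    {
        uint64_t epoch = 1;
        size_t sz, allocated, active, mapped;

        free(malloc(4096));             /* generate some allocator activity */

        /* Refresh the cached statistics snapshot via the "epoch" mallctl. */
        sz = sizeof(epoch);
        if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0)
            return (1);

        /* Read global totals by name. */
        sz = sizeof(size_t);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0 &&
            mallctl("stats.active", &active, &sz, NULL, 0) == 0 &&
            mallctl("stats.mapped", &mapped, &sz, NULL, 0) == 0) {
            printf("allocated/active/mapped: %zu/%zu/%zu\n",
                allocated, active, mapped);
        }

        /* Translate a name once, then reuse the MIB with the <i> slot patched. */
        size_t mib[4], miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) == 0) {
            size_t pactive;
            mib[2] = 0;                 /* <i>: arena index */
            sz = sizeof(pactive);
            if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) == 0)
                printf("arena 0 pactive: %zu\n", pactive);
        }
        return (0);
    }

Caching the MIB with mallctlnametomib() and patching the index component is the intended pattern for repeatedly querying per-arena or per-bin nodes such as "stats.arenas.<i>.bins.<j>.curruns".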
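The RETURN VALUES section above distinguishes three error conventions: malloc()/calloc()/realloc() return NULL and set errno, posix_memalign() returns an error number directly, and the mallctl*() functions return errno-style values such as ENOENT. The sketch below exercises each convention; it again assumes <malloc_np.h> declares mallctl(), and the specific sizes and names are arbitrary illustrations.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <malloc_np.h>   /* assumed header for mallctl() */

    int
    main(void)
    {
        void *p;
        int err;

        /* posix_memalign() reports failure through its return value, not errno. */
        err = posix_memalign(&p, 64, 1024);   /* 64 is a power of 2 >= sizeof(void *) */
        if (err != 0)
            fprintf(stderr, "posix_memalign: %s\n", strerror(err));
        else
            free(p);

        /* mallctl() also returns an error number; ENOENT means an unknown name. */
        unsigned narenas;
        size_t sz = sizeof(narenas);
        err = mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
        if (err != 0)
            fprintf(stderr, "mallctl: %s\n", strerror(err));
        else
            printf("narenas: %u\n", narenas);

        /* malloc() reports failure by returning NULL and setting errno to ENOMEM. */
        errno = 0;
        p = malloc((size_t)-1);               /* intentionally impossible request */
        if (p == NULL && errno == ENOMEM)
            fprintf(stderr, "malloc: %s\n", strerror(errno));
        free(p);
        return (0);
    }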
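The Experimental API section above describes the *allocm() return codes, including ALLOCM_ERR_NOT_MOVED when ALLOCM_NO_MOVE prevents a resize. The following sketch shows one plausible use of that contract; the header location, the ALLOCM_ALIGN()/ALLOCM_ZERO flag macros, and the function signatures are assumptions based on the jemalloc experimental API of this era, so treat it as a sketch rather than a definitive reference.

    #include <stdio.h>
    #include <stdlib.h>
    #include <malloc_np.h>   /* assumed header for the experimental *allocm() API */

    int
    main(void)
    {
        void *p = NULL;
        size_t rsize;

        /* Allocate at least 4000 zeroed bytes, 64-byte aligned; rsize gets the usable size. */
        if (allocm(&p, &rsize, 4000, ALLOCM_ALIGN(64) | ALLOCM_ZERO) != ALLOCM_SUCCESS)
            return (1);
        printf("usable size: %zu\n", rsize);

        /* Ask to double the allocation, but only if it can be done in place. */
        switch (rallocm(&p, &rsize, 2 * rsize, 0, ALLOCM_NO_MOVE)) {
        case ALLOCM_SUCCESS:
            printf("grew in place to %zu bytes\n", rsize);
            break;
        case ALLOCM_ERR_NOT_MOVED:
            /* Per the manual page, the object is left untouched in this case. */
            printf("could not grow without moving\n");
            break;
        default:
            printf("out of memory\n");
            break;
        }

        dallocm(p, 0);
        return (0);
    }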