Index: lib/libc/stdlib/malloc.c
===================================================================
RCS file: /home/ncvs/src/lib/libc/stdlib/malloc.c,v
retrieving revision 1.90.2.1
diff -u -r1.90.2.1 malloc.c
--- lib/libc/stdlib/malloc.c	18 Sep 2005 03:45:24 -0000	1.90.2.1
+++ lib/libc/stdlib/malloc.c	21 Feb 2007 20:10:15 -0000
@@ -21,6 +21,15 @@
 #undef MALLOC_EXTRA_SANITY
 
 /*
+ * Once sbrk(2) space is exhausted we spill over into anonymous mmap(2)
+ * space.  Each mmap consumes a separate VM object, which prevents the
+ * kernel from coalescing objects and vm_map_entry items, so instead of
+ * mapping every request individually we allocate large slabs with mmap
+ * and sub-allocate from them.  MALLOC_MMAP is the slab size.
+ */
+#define MALLOC_MMAP	(32 * 1024 * 1024)
+
+/*
  * What to use for Junk.  This is the byte value we use to fill with
  * when the 'J' option is enabled.
  */
@@ -277,6 +286,9 @@
 	mmap(NULL, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \
 	    MMAP_FD, (off_t)0);
 
+/* Overflow from brk() into mmap space.  1 = enabled, 0 = original behavior */
+static int malloc_mmap = 1;
+
 /*
  * Necessary function declarations
  */
@@ -320,6 +332,61 @@
 	_malloc_message(_getprogname(), malloc_func, " warning: ", p);
 }
 
+static caddr_t mmap_base;
+static caddr_t mmap_end;
+static caddr_t mmap_brk;
+
+
+static void *
+mmap_chunk(size_t pages)
+{
+    caddr_t mapbase;
+    size_t size;
+
+    /* Free up any leftovers of the previous chunk. */
+    if (mmap_brk < mmap_end) {
+	munmap(mmap_brk, mmap_end - mmap_brk);
+	mmap_end = mmap_brk;
+    }
+    size = MALLOC_MMAP;		/* default 32MB slab */
+    if (pages * malloc_pagesize > size)
+	size = pages * malloc_pagesize;
+    mapbase = MMAP(size);
+    if (mapbase == (caddr_t)-1)
+	return (NULL);
+    mmap_base = mapbase;
+    mmap_end = mapbase + size;
+    mmap_brk = mapbase;
+    return (mapbase);
+}
+
+static void *
+mmap_pages(size_t pages)
+{
+    caddr_t result, tail;
+    size_t size;
+    void *ret;
+
+    size = pages * malloc_pagesize;
+    /* Grab a new slab if the current one can't hold this request. */
+    if (mmap_base == NULL || (mmap_brk + size) > mmap_end) {
+	ret = mmap_chunk(pages);
+	if (ret == NULL)
+	    return (NULL);
+    }
+    /* Now sub-allocate from the slab. */
+    result = mmap_brk;
+    tail = result + size;
+    if (tail < result)
+	return (NULL);
+    mmap_brk = tail;
+    /* Update accounting and the page directory index. */
+    last_index = ptr2index(tail) - 1;
+    if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index))
+	return (NULL);
+    return (result);
+}
+
 /*
  * Allocate a number of pages from the OS
  */
@@ -334,6 +401,15 @@
 	return (NULL);
 
     if (brk(tail)) {
+	/*
+	 * If brk() can't grow the heap, fall back to mmap.  This may
+	 * just be one request that is too large for the remaining heap
+	 * space, so keep trying brk() for future allocations: smaller
+	 * requests may still fit, and a later free() may shrink the
+	 * heap so that it can grow again.
+	 */
+	if (errno == ENOMEM && malloc_mmap)
+	    return (mmap_pages(pages));
 #ifdef MALLOC_EXTRA_SANITY
 	wrterror("(ES): map_pages fails\n");
 #endif /* MALLOC_EXTRA_SANITY */
@@ -447,6 +523,8 @@
 	    case 'R': malloc_realloc = 1; break;
 	    case 'j': malloc_junk = 0; break;
 	    case 'J': malloc_junk = 1; break;
+	    case 'm': malloc_mmap = 0; break;
+	    case 'M': malloc_mmap = 1; break;
 #ifdef HAS_UTRACE
 	    case 'u': malloc_utrace = 0; break;
 	    case 'U': malloc_utrace = 1; break;
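
With the patch applied, the spill-over is on by default.  Like the other
knobs in this allocator it can be toggled through the standard malloc
options ('m' disables it, 'M' enables it), either per process via the
MALLOC_OPTIONS environment variable or system-wide via the
/etc/malloc.conf symlink described in malloc(3).  For example, to get the
original brk-only behavior back for a single program:

	MALLOC_OPTIONS=m ./program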
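
To exercise the new code path, a rough harness along these lines should
work (only a sketch, not part of the patch; the 4MB/64MB numbers are
arbitrary).  It caps RLIMIT_DATA so that brk(2) fails early, then keeps
allocating well past the limit.  With the patch the mallocs should keep
succeeding out of mmap slabs; with MALLOC_OPTIONS=m they should start
failing once the data segment is full:

#include <sys/types.h>
#include <sys/resource.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	struct rlimit rl;
	size_t total;
	char *p;
	int i;

	/* Cap the data segment at 4MB so brk() runs out quickly. */
	rl.rlim_cur = rl.rlim_max = 4 * 1024 * 1024;
	if (setrlimit(RLIMIT_DATA, &rl) == -1)
		err(1, "setrlimit");

	/* Allocate 64MB in 1MB pieces, far past the data limit. */
	total = 0;
	for (i = 0; i < 64; i++) {
		p = malloc(1024 * 1024);
		if (p == NULL)
			errx(1, "malloc failed after %zu bytes", total);
		memset(p, 1, 1024 * 1024);	/* force the pages in */
		total += 1024 * 1024;
	}
	printf("allocated %zu bytes past the data limit\n", total);
	return (0);
}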