/*
 * Patch summary (review note; this header precedes the diff markers and is
 * ignored by patch(1), so the patch below applies unchanged):
 *
 * Teaches phkmalloc to fall back to anonymous mmap(2) slabs when brk(2)
 * fails with ENOMEM, instead of failing the allocation outright.
 *
 *   - Adds MALLOC_MMAP (32 MB): slabs are allocated in large chunks and
 *     sub-allocated, because each separate mmap consumes its own VM pager
 *     object and prevents vm_map_entry coalescing.
 *   - Adds static state mmap_base/mmap_end/mmap_brk tracking the current
 *     slab, and mmap_chunk()/mmap_pages() to carve page runs out of it.
 *   - map_pages(): on brk() failure with errno == ENOMEM and the feature
 *     enabled, returns mmap_pages(pages). Heap allocation is still retried
 *     on later calls, since heap space may free up again.
 *   - Adds malloc option letters 'm' (disable) / 'M' (enable, default) to
 *     control the new behavior at runtime.
 *
 * NOTE(review): in mmap_pages(), mmap_brk is advanced and last_index is
 * updated *before* the extend_pgdir() check; if extend_pgdir() fails, the
 * just-reserved span is leaked and last_index is left past the directory.
 * This mirrors the ordering risk in the surrounding malloc.c and cannot be
 * reordered here without renumbering the @@ hunk counts — confirm upstream.
 *
 * NOTE(review): the whitespace columns of this diff appear mangled by
 * extraction (leading tabs lost on +/context lines); apply with care or
 * regenerate against the 1.49.2.5 source.
 */
Index: lib/libc/stdlib/malloc.c =================================================================== RCS file: /home/ncvs/src/lib/libc/stdlib/malloc.c,v retrieving revision 1.49.2.5 diff -u -r1.49.2.5 malloc.c --- lib/libc/stdlib/malloc.c 20 Apr 2004 14:43:25 -0000 1.49.2.5 +++ lib/libc/stdlib/malloc.c 21 Feb 2007 20:10:23 -0000 @@ -22,6 +22,15 @@ #endif /* + * Once sbrk(3) space is exhausted we spill over into mmap(2) space that is + * backed by mmap(MAP_ANON). Note that each mmap consumes a separate VM pager + * object which prevents coalescing of objects and vm_map_entry items, so we + * allocate slabs via mmap and sub-allocate from there. + * MALLOC_MMAP is the slab size. + */ +#define MALLOC_MMAP (32 * 1024 * 1024) + +/* * What to use for Junk. This is the byte value we use to fill with * when the 'J' option is enabled. */ @@ -262,6 +271,9 @@ mmap(0, (size), PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, \ MMAP_FD, 0); +/* Overflow from brk() into mmap space. 1 = enabled, 0 = original behavior */ +static int malloc_mmap = 1; + /* * Necessary function declarations */ @@ -296,6 +308,61 @@ _write(STDERR_FILENO, p, strlen(p)); } +static void *mmap_base; +static void *mmap_end; +static void *mmap_brk; + + +static void * +mmap_chunk(size_t pages) +{ + void *mapbase; + size_t size; + + /* Free up any leftovers of any previous last chunk */ + if (mmap_brk < mmap_end) { + munmap(mmap_brk, mmap_end - mmap_brk); + mmap_end = mmap_brk; + } + size = MALLOC_MMAP; /* 32MB chunks for extension */ + if (pages * malloc_pagesize > size) + size = pages * malloc_pagesize; + mapbase = MMAP(size); + if (mapbase == (void *)-1) + return (NULL); + mmap_base = mapbase; + mmap_end = mapbase + size; + mmap_brk = mapbase; + return (mapbase); +} + +static void * +mmap_pages(size_t pages) +{ + caddr_t result, tail; + size_t size; + void *ret; + + size = pages * malloc_pagesize; + /* Grab a new slab as needed */ + if (mmap_base == NULL || (mmap_brk + size) > mmap_end) { + ret = mmap_chunk(pages); 
+ if (ret == NULL) + return (NULL); + } + /* Now suballoc */ + result = mmap_brk; + tail = result + size; + if (tail < result) + return (NULL); + mmap_brk = tail; + /* Update accounting and page directory index */ + last_index = ptr2index(tail) - 1; + if ((last_index+1) >= malloc_ninfo && !extend_pgdir(last_index)) + return (NULL); + return (result); +} + /* * Allocate a number of pages from the OS */ @@ -310,6 +377,15 @@ return 0; if (brk(tail)) { + /* + * If we can't fit it on the heap, try mmap. Note that this might + * just be a malloc that can't fit in the heap, there might still + * be more heap space left. We can't stop checking for whether + * future operations might fit, or if something is free'd from + * the heap and it shrinks. + */ + if (errno == ENOMEM && malloc_mmap) + return (mmap_pages(pages)); #ifdef EXTRA_SANITY wrterror("(ES): map_pages fails\n"); #endif /* EXTRA_SANITY */ @@ -420,6 +496,8 @@ case 'R': malloc_realloc = 1; break; case 'j': malloc_junk = 0; break; case 'J': malloc_junk = 1; break; + case 'm': malloc_mmap = 0; break; + case 'M': malloc_mmap = 1; break; #ifdef HAS_UTRACE case 'u': malloc_utrace = 0; break; case 'U': malloc_utrace = 1; break;