Index: kern/kern_malloc.c
===================================================================
--- kern/kern_malloc.c  (revision 238451)
+++ kern/kern_malloc.c  (working copy)
@@ -739,17 +739,17 @@ kmeminit(void *dummy)
 	 * This allows for kmem map sparseness, but limits the size
 	 * to something sane. Be careful to not overflow the 32bit
 	 * ints while doing the check or the adjustment.
 	 */
 	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
 		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
 
 #ifdef DEBUG_MEMGUARD
-	tmp = memguard_fudge(vm_kmem_size, vm_kmem_size_max);
+	tmp = memguard_fudge(vm_kmem_size, kernel_map);
 #else
 	tmp = vm_kmem_size;
 #endif
 	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
 	    tmp, TRUE);
 	kmem_map->system_map = 1;
 
 #ifdef DEBUG_MEMGUARD
Index: vm/memguard.c
===================================================================
--- vm/memguard.c  (revision 238451)
+++ vm/memguard.c  (working copy)
@@ -154,49 +154,49 @@ SYSCTL_ULONG(_vm_memguard, OID_AUTO, fre
     &memguard_frequency_hits, 0, "# times MemGuard randomly chose");
 
 /*
  * Return a fudged value to be used for vm_kmem_size for allocating
  * the kmem_map.  The memguard memory will be a submap.
  */
 unsigned long
-memguard_fudge(unsigned long km_size, unsigned long km_max)
+memguard_fudge(unsigned long km_size, const struct vm_map *parent_map)
 {
-	u_long mem_pgs = cnt.v_page_count;
+	u_long mem_pgs, parent_size;
 
 	vm_memguard_divisor = 10;
 	/* CTFLAG_RDTUN doesn't work during the early boot process. */
 	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);
 
+	parent_size = vm_map_max(parent_map) - vm_map_min(parent_map) +
+	    PAGE_SIZE;
 	/* Pick a conservative value if provided value sucks. */
 	if ((vm_memguard_divisor <= 0) ||
-	    ((km_size / vm_memguard_divisor) == 0))
+	    ((parent_size / vm_memguard_divisor) == 0))
 		vm_memguard_divisor = 10;
 	/*
 	 * Limit consumption of physical pages to
 	 * 1/vm_memguard_divisor of system memory.  If the KVA is
 	 * smaller than this then the KVA limit comes into play first.
 	 * This prevents memguard's page promotions from completely
 	 * using up memory, since most malloc(9) calls are sub-page.
	 */
+	mem_pgs = cnt.v_page_count;
 	memguard_physlimit = (mem_pgs / vm_memguard_divisor) * PAGE_SIZE;
 	/*
 	 * We want as much KVA as we can take safely.  Use at most our
-	 * allotted fraction of kmem_max.  Limit this to twice the
-	 * physical memory to avoid using too much memory as pagetable
-	 * pages.
-	 */
-	memguard_mapsize = km_max / vm_memguard_divisor;
-	/* size must be multiple of PAGE_SIZE */
-	memguard_mapsize = round_page(memguard_mapsize);
-	if (memguard_mapsize == 0 ||
-	    memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
+	 * allotted fraction of the parent map's size.  Limit this to
+	 * twice the physical memory to avoid using too much memory as
+	 * pagetable pages (size must be multiple of PAGE_SIZE).
+	 */
+	memguard_mapsize = round_page(parent_size / vm_memguard_divisor);
+	if (memguard_mapsize / (2 * PAGE_SIZE) > mem_pgs)
 		memguard_mapsize = mem_pgs * 2 * PAGE_SIZE;
-	if (km_max > 0 && km_size + memguard_mapsize > km_max)
-		return (km_max);
+	if (km_size + memguard_mapsize > parent_size)
+		memguard_mapsize = 0;
 	return (km_size + memguard_mapsize);
 }
 
 /*
  * Initialize the MemGuard mock allocator.  All objects from MemGuard come
  * out of a single VM map (contiguous chunk of address space).
  */
 void
Index: vm/memguard.h
===================================================================
--- vm/memguard.h  (revision 238451)
+++ vm/memguard.h  (working copy)
@@ -30,17 +30,17 @@
 #define	_VM_MEMGUARD_H_
 
 #include "opt_vm.h"
 
 struct malloc_type;
 struct vm_map;
 
 #ifdef DEBUG_MEMGUARD
-unsigned long	memguard_fudge(unsigned long, unsigned long);
+unsigned long	memguard_fudge(unsigned long, const struct vm_map *);
 void	memguard_init(struct vm_map *);
 void	*memguard_alloc(unsigned long, int);
 void	*memguard_realloc(void *, unsigned long, struct malloc_type *, int);
 void	memguard_free(void *);
 int	memguard_cmp_mtp(struct malloc_type *, unsigned long);
 int	memguard_cmp_zone(uma_zone_t);
 int	is_memguard_addr(void *);
 #else
Index: vm/vm_map.h
===================================================================
--- vm/vm_map.h  (revision 238451)
+++ vm/vm_map.h  (working copy)
@@ -195,23 +195,23 @@ struct vm_map {
 /*
  * vm_flags_t values
  */
 #define	MAP_WIREFUTURE		0x01	/* wire all future pages */
 #define	MAP_BUSY_WAKEUP		0x02
 
 #ifdef	_KERNEL
 static __inline vm_offset_t
-vm_map_max(vm_map_t map)
+vm_map_max(const struct vm_map *map)
 {
 	return (map->max_offset);
 }
 
 static __inline vm_offset_t
-vm_map_min(vm_map_t map)
+vm_map_min(const struct vm_map *map)
 {
 	return (map->min_offset);
 }
 
 static __inline pmap_t
 vm_map_pmap(vm_map_t map)
 {
 	return (map->pmap);
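
The sizing arithmetic in the patched memguard_fudge() can be exercised outside the
kernel. The sketch below mirrors that logic under stated assumptions: the fudge_sim()
helper and all constants (4 KB pages, a 512 MB parent map, 256 MB of kmem, 1 GB of RAM)
are hypothetical stand-ins, not kernel code; the real function reads vm_map_max()/
vm_map_min(), cnt.v_page_count, and the vm.memguard.divisor tunable instead.

/*
 * Userland sketch of the patched memguard_fudge() arithmetic.
 * Illustration only; constants and the fudge_sim() name are hypothetical.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long
fudge_sim(unsigned long km_size, unsigned long parent_min,
    unsigned long parent_max, unsigned long mem_pgs, int divisor)
{
	unsigned long parent_size, mapsize;

	/* Inclusive extent of the parent map, as computed in the patch. */
	parent_size = parent_max - parent_min + PAGE_SIZE;
	if (divisor <= 0 || parent_size / divisor == 0)
		divisor = 10;	/* conservative fallback */

	/* At most our allotted fraction of the parent map's KVA ... */
	mapsize = round_page(parent_size / divisor);
	/* ... and at most twice physical memory. */
	if (mapsize / (2 * PAGE_SIZE) > mem_pgs)
		mapsize = mem_pgs * 2 * PAGE_SIZE;
	/* If the guard submap cannot fit, disable it entirely. */
	if (km_size + mapsize > parent_size)
		mapsize = 0;
	return (km_size + mapsize);
}

int
main(void)
{
	unsigned long km_size = 256UL << 20;		/* 256 MB kmem */
	unsigned long pmin = 0xc0000000UL;		/* illustrative base */
	unsigned long pmax = pmin + (512UL << 20) - PAGE_SIZE;
	unsigned long mem_pgs = (1UL << 30) / PAGE_SIZE;

	/* Prints roughly km_size plus 1/10 of the 512 MB parent map. */
	printf("fudged size: %lu MB\n",
	    fudge_sim(km_size, pmin, pmax, mem_pgs, 10) >> 20);
	return (0);
}

The behavioral change is in the final check: where the old code clamped the return
value to km_max, the new code zeroes memguard_mapsize, i.e. gives MemGuard no KVA at
all, when the guard submap would not fit inside the parent map alongside km_size.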