diff --git a/sys/i386/include/param.h b/sys/i386/include/param.h
index b494f35..2d5f4a5 100644
--- a/sys/i386/include/param.h
+++ b/sys/i386/include/param.h
@@ -140,9 +140,12 @@
  * Ceiling on size of buffer cache (really only effects write queueing,
  * the VM page cache is not effected), can be changed via
  * the kern.maxbcache /boot/loader.conf variable.
+ *
+ * The value is equal to the size of the auto-tuned buffer map for
+ * the machine with 4GB of RAM, see vfs_bio.c:kern_vfs_bio_buffer_alloc().
  */
 #ifndef VM_BCACHE_SIZE_MAX
-#define VM_BCACHE_SIZE_MAX (200 * 1024 * 1024)
+#define VM_BCACHE_SIZE_MAX (7224 * 16 * 1024)
 #endif
 
 /*
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 30f935c..14f5b3d 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -579,7 +579,7 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
         * to the amount of the buffer mapped for typical UFS load.
         *
         * Clip the buffer map to reserve space for the transient
-        * BIOs, if its extent is bigger than 90% of the maximum
+        * BIOs, if its extent is bigger than 80% of the maximum
         * buffer map extent on the platform.
         *
         * The fall-back to the maxbuf in case of maxbcache unset,
@@ -589,7 +589,7 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
        if (bio_transient_maxcnt == 0 && unmapped_buf_allowed) {
                maxbuf_sz = maxbcache != 0 ? maxbcache : maxbuf * BKVASIZE;
                buf_sz = (long)nbuf * BKVASIZE;
-               if (buf_sz < maxbuf_sz / 10 * 9) {
+               if (buf_sz < maxbuf_sz / 5 * 4) {
                        /*
                         * There is more KVA than memory. Do not
                         * adjust buffer map size, and assign the rest
@@ -599,10 +599,10 @@ kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
                } else {
                        /*
                         * Buffer map spans all KVA we could afford on
-                        * this platform. Give 10% of the buffer map
+                        * this platform. Give 20% of the buffer map
                         * to the transient bio map.
                         */
-                       biotmap_sz = buf_sz / 10;
+                       biotmap_sz = buf_sz / 5;
                        buf_sz -= biotmap_sz;
                }
                if (biotmap_sz / INT_MAX > MAXPHYS)
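
For illustration only (not part of the patch), a minimal standalone sketch of the new split follows. It assumes i386-like constants BKVASIZE = 16384 and MAXPHYS = 128 * 1024, and uses nbuf = 7224 purely as a hypothetical auto-tuned value matching the new VM_BCACHE_SIZE_MAX (7224 * 16 * 1024 bytes, about 113 MiB); it only mirrors the arithmetic of the 80% threshold and the 20% carve-out.

/*
 * Standalone userspace sketch of the transient-map split above.
 * BKVASIZE, MAXPHYS and nbuf are illustrative assumptions, not
 * values taken from this patch.
 */
#include <stdio.h>

#define BKVASIZE        16384                   /* buffer KVA slot size (assumed) */
#define MAXPHYS         (128 * 1024)            /* max I/O transfer size (assumed) */

int
main(void)
{
        long maxbuf_sz, buf_sz, biotmap_sz;
        long nbuf = 7224;                       /* hypothetical auto-tuned buffer count */

        maxbuf_sz = 7224L * 16 * 1024;          /* new VM_BCACHE_SIZE_MAX */
        buf_sz = nbuf * BKVASIZE;
        if (buf_sz < maxbuf_sz / 5 * 4) {
                /* More KVA than memory: hand the leftover KVA to the transient map. */
                biotmap_sz = maxbuf_sz - buf_sz;
        } else {
                /* Buffer map fills the available KVA: carve 20% out for transient BIOs. */
                biotmap_sz = buf_sz / 5;
                buf_sz -= biotmap_sz;
        }
        printf("buffer map %ld bytes, transient map %ld bytes (%ld BIOs)\n",
            buf_sz, biotmap_sz, biotmap_sz / MAXPHYS);
        return (0);
}

With these illustrative numbers the else branch is taken: the transient map gets about 22.6 MiB (roughly 180 BIOs at a 128 KiB MAXPHYS) and the buffer map keeps about 90.3 MiB. The real figures depend on the actual auto-tuned nbuf and on any maxbcache override.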