Index: default_pager.c =================================================================== RCS file: /home/ncvs/src/sys/vm/default_pager.c,v retrieving revision 1.28 diff -u -r1.28 default_pager.c --- default_pager.c 4 Jul 2001 16:20:26 -0000 1.28 +++ default_pager.c 5 Mar 2002 22:49:53 -0000 @@ -93,7 +93,6 @@ * the swapblk in the underlying vm_page's when we free the vm_page or * garbage collect the vm_page cache list. */ - static void default_pager_dealloc(object) vm_object_t object; @@ -108,7 +107,6 @@ * OBJT_SWAP at the time a swap-backed vm_page_t is freed, we will never * see a vm_page with assigned swap here. */ - static int default_pager_getpages(object, m, count, reqpage) vm_object_t object; @@ -125,7 +123,6 @@ * object will be converted when the written-out vm_page_t is moved from the * cache to the free list. */ - static void default_pager_putpages(object, m, c, sync, rtvals) vm_object_t object; @@ -149,7 +146,6 @@ * deal with it since it must already deal with it plus deal with swap * meta-data structures. */ - static boolean_t default_pager_haspage(object, pindex, before, after) vm_object_t object; Index: pmap.h =================================================================== RCS file: /home/ncvs/src/sys/vm/pmap.h,v retrieving revision 1.42 diff -u -r1.42 pmap.h --- pmap.h 27 Feb 2002 18:03:02 -0000 1.42 +++ pmap.h 5 Mar 2002 22:52:04 -0000 @@ -71,7 +71,6 @@ #ifndef _PMAP_VM_ #define _PMAP_VM_ - /* * Each machine dependent implementation is expected to * keep certain statistics. They may do this anyway they @@ -87,7 +86,6 @@ #include #ifdef _KERNEL - struct proc; struct thread; @@ -147,7 +145,5 @@ vm_offset_t pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size)); void *pmap_kenter_temporary __P((vm_offset_t pa, int i)); void pmap_init2 __P((void)); - #endif /* _KERNEL */ - #endif /* _PMAP_VM_ */ Index: swap_pager.c =================================================================== RCS file: /home/ncvs/src/sys/vm/swap_pager.c,v retrieving revision 1.167 diff -u -r1.167 swap_pager.c --- swap_pager.c 27 Feb 2002 19:18:10 -0000 1.167 +++ swap_pager.c 6 Mar 2002 14:56:18 -0000 @@ -109,7 +109,6 @@ * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks * in the old system. */ - extern int vm_swap_size; /* number of free swap blocks, in pages */ int swap_pager_full; /* swap space exhaustion (task killing) */ @@ -156,7 +155,6 @@ * calls hooked from other parts of the VM system and do not appear here. * (see vm/swap_pager.h). */ - static vm_object_t swap_pager_alloc __P((void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t offset)); @@ -188,7 +186,6 @@ * swap_*() routines are externally accessible. swp_*() routines are * internal. */ - int dmmax; static int dmmax_mask; int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ @@ -204,14 +201,12 @@ /* * Swap bitmap functions */ - static __inline void swp_pager_freeswapspace __P((daddr_t blk, int npages)); static __inline daddr_t swp_pager_getswapspace __P((int npages)); /* * Metadata functions */ - static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t)); static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t)); static void swp_pager_meta_free_all __P((vm_object_t)); @@ -229,7 +224,6 @@ * This routine may not block. * This routine must be called at splvm() */ - static __inline void swp_sizecheck() { @@ -254,7 +248,6 @@ * before much else so be careful what you depend on. Most of the VM * system has yet to be initialized at this point. 
*/ - static void swap_pager_init() { @@ -271,7 +264,6 @@ /* * Device Stripe, in PAGE_SIZE'd blocks */ - dmmax = SWB_NPAGES * 2; dmmax_mask = ~(dmmax - 1); } @@ -282,7 +274,6 @@ * Expected to be started from pageout process once, prior to entering * its main loop. */ - void swap_pager_swap_init() { @@ -310,7 +301,6 @@ * have one NFS swap device due to the command/ack latency over NFS. * So it all works out pretty well. */ - nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); mtx_lock(&pbuf_mtx); @@ -326,12 +316,10 @@ * can hold 16 pages, so this is probably overkill. This reservation * is typically limited to around 70MB by default. */ - n = cnt.v_page_count; if (maxswzone && n > maxswzone / sizeof(struct swblock)) n = maxswzone / sizeof(struct swblock); n2 = n; - do { swap_zone = zinit( "SWAPMETA", @@ -348,7 +336,6 @@ */ n -= ((n + 2) / 3); } while (n > 0); - if (swap_zone == NULL) panic("failed to zinit swap_zone."); if (n2 != n) @@ -363,12 +350,9 @@ * n: size of hash table, must be power of 2 * swhash_mask: hash table index mask */ - for (n = 1; n < n2 / 8; n *= 2) ; - swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO); - swhash_mask = n - 1; } @@ -388,7 +372,6 @@ * a new swap object w/handle when a default object with that handle * already exists. */ - static vm_object_t swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t offset) @@ -439,7 +422,6 @@ * * The object must be locked or unreferenceable. */ - static void swap_pager_dealloc(object) vm_object_t object; @@ -493,7 +475,6 @@ * This routine may not block * This routine must be called at splvm(). */ - static __inline daddr_t swp_pager_getswapspace(npages) int npages; @@ -514,7 +495,7 @@ swdevt[BLK2DEVIDX(blk)].sw_used += npages; swp_sizecheck(); } - return(blk); + return (blk); } /* @@ -531,7 +512,6 @@ * This routine may not block * This routine must be called at splvm(). */ - static __inline void swp_pager_freeswapspace(blk, npages) daddr_t blk; @@ -561,7 +541,6 @@ * This routine may be called at any spl. We up our spl to splvm temporarily * in order to perform the metadata removal. */ - void swap_pager_freespace(object, start, size) vm_object_t object; @@ -583,7 +562,6 @@ * * Returns 0 on success, -1 on failure. */ - int swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) { @@ -601,7 +579,7 @@ if (n == 0) { swp_pager_meta_free(object, beg, start - beg); splx(s); - return(-1); + return (-1); } } } @@ -613,7 +591,7 @@ } swp_pager_meta_free(object, start, n); splx(s); - return(0); + return (0); } /* @@ -642,7 +620,6 @@ * The source and destination objects must be locked or * inaccessible (XXX are they ?) */ - void swap_pager_copy(srcobject, dstobject, offset, destroysource) vm_object_t srcobject; @@ -660,7 +637,6 @@ * If destroysource is set, we remove the source object from the * swap_pager internal queue now. */ - if (destroysource) { mtx_lock(&sw_alloc_mtx); if (srcobject->handle == NULL) { @@ -682,7 +658,6 @@ /* * transfer source to destination. */ - for (i = 0; i < dstobject->size; ++i) { daddr_t dstaddr; @@ -692,7 +667,6 @@ * if the destination is a resident page, in which case the * source is thrown away. */ - dstaddr = swp_pager_meta_ctl(dstobject, i, 0); if (dstaddr == SWAPBLK_NONE) { @@ -726,7 +700,6 @@ * We have to revert the type to OBJT_DEFAULT so we do not accidently * double-remove the object from the swap queues. 
*/ - if (destroysource) { swp_pager_meta_free_all(srcobject); /* @@ -753,7 +726,6 @@ * (that is handled in getpages/putpages). It probably isn't worth * doing here. */ - boolean_t swap_pager_haspage(object, pindex, before, after) vm_object_t object; @@ -767,7 +739,6 @@ /* * do we have good backing store at the requested index ? */ - s = splvm(); blk0 = swp_pager_meta_ctl(object, pindex, 0); @@ -783,7 +754,6 @@ /* * find backwards-looking contiguous good backing store */ - if (before != NULL) { int i; @@ -802,7 +772,6 @@ /* * find forward-looking contiguous good backing store */ - if (after != NULL) { int i; @@ -837,7 +806,6 @@ * This routine may not block * This routine must be called at splvm() */ - static void swap_pager_unswapped(m) vm_page_t m; @@ -862,7 +830,6 @@ * sequencing when we run multiple ops in parallel to satisfy a request. * But this is swap, so we let it all hang out. */ - static void swap_pager_strategy(vm_object_t object, struct bio *bp) { @@ -884,7 +851,6 @@ /* * Clear error indication, initialize page index, count, data pointer. */ - bp->bio_error = 0; bp->bio_flags &= ~BIO_ERROR; bp->bio_resid = bp->bio_bcount; @@ -899,7 +865,6 @@ /* * Deal with BIO_DELETE */ - if (bp->bio_cmd == BIO_DELETE) { /* * FREE PAGE(s) - destroy underlying swap that is no longer @@ -942,7 +907,6 @@ * - we cross a physical disk boundry in the * stripe. */ - if ( nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk || ((nbp->b_blkno ^ blk) & dmmax_mask) @@ -966,7 +930,6 @@ * Add new swapblk to nbp, instantiating nbp if necessary. * Zero-fill reads are able to take a shortcut. */ - if (blk == SWAPBLK_NONE) { /* * We can only get here if we are reading. Since @@ -992,7 +955,6 @@ /* * Flush out last buffer */ - splx(s); if (nbp) { @@ -1010,7 +972,6 @@ /* * Wait for completion. */ - waitchainbuf(bp, 0, 1); } @@ -1033,7 +994,6 @@ * The parent has BUSY'd the pages. We should return with 'm' * left busy, but the others adjusted. */ - static int swap_pager_getpages(object, m, count, reqpage) vm_object_t object; @@ -1069,7 +1029,6 @@ * The swp_*() calls must be made at splvm(). vm_page_free() does * not need to be, but it will go a little faster if it is. */ - s = splvm(); blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); @@ -1098,7 +1057,6 @@ * free pages outside our collection range. Note: we never free * mreq, it must remain busy throughout. */ - { int k; @@ -1114,14 +1072,12 @@ * Return VM_PAGER_FAIL if we have nothing to do. Return mreq * still busy, but the others unbusied. */ - if (blk == SWAPBLK_NONE) - return(VM_PAGER_FAIL); + return (VM_PAGER_FAIL); /* * Get a swap buffer header to perform the IO */ - bp = getpbuf(&nsw_rcount); kva = (vm_offset_t) bp->b_data; @@ -1130,7 +1086,6 @@ * * NOTE: B_PAGING is set by pbgetvp() */ - pmap_qenter(kva, m + i, j - i); bp->b_iocmd = BIO_READ; @@ -1162,7 +1117,6 @@ * We still hold the lock on mreq, and our automatic completion routine * does not remove it. */ - vm_object_pip_add(mreq->object, bp->b_npages); lastpindex = m[j-1]->pindex; @@ -1185,9 +1139,7 @@ * cleared on completion. If an I/O error occurs, SWAPBLK_NONE * is set in the meta-data. */ - s = splvm(); - while ((mreq->flags & PG_SWAPINPROG) != 0) { vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED); cnt.v_intrans++; @@ -1200,19 +1152,17 @@ ); } } - splx(s); /* - * mreq is left bussied after completion, but all the other pages + * mreq is left busied after completion, but all the other pages * are freed. If we had an unrecoverable read error the page will * not be valid. 
*/ - if (mreq->valid != VM_PAGE_BITS_ALL) { - return(VM_PAGER_ERROR); + return (VM_PAGER_ERROR); } else { - return(VM_PAGER_OK); + return (VM_PAGER_OK); } /* @@ -1245,7 +1195,6 @@ * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. * We need to unbusy the rest on I/O completion. */ - void swap_pager_putpages(object, m, count, sync, rtvals) vm_object_t object; @@ -1271,7 +1220,6 @@ * check for bogus sysops * force sync if not pageout process */ - if (object->type != OBJT_SWAP) swp_pager_meta_build(object, 0, SWAPBLK_NONE); @@ -1284,7 +1232,6 @@ * Update nsw parameters from swap_async_max sysctl values. * Do not let the sysop crash the machine with bogus numbers. */ - mtx_lock(&pbuf_mtx); if (swap_async_max != nsw_wcount_async_max) { int n; @@ -1322,7 +1269,6 @@ * The page is left dirty until the pageout operation completes * successfully. */ - for (i = 0; i < count; i += n) { int s; int j; @@ -1332,7 +1278,6 @@ /* * Maximum I/O size is limited by a number of factors. */ - n = min(BLIST_MAX_ALLOC, count - i); n = min(n, nsw_cluster_max); @@ -1374,7 +1319,6 @@ * * NOTE: B_PAGING is set by pbgetvp() */ - if (sync == TRUE) { bp = getpbuf(&nsw_wcount_sync); } else { @@ -1426,7 +1370,6 @@ * * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY */ - if (sync == FALSE) { bp->b_iodone = swp_pager_async_iodone; BUF_KERNPROC(bp); @@ -1443,7 +1386,6 @@ * * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY */ - bp->b_iodone = swp_pager_sync_iodone; BUF_STRATEGY(bp); @@ -1454,19 +1396,15 @@ * double-free. */ s = splbio(); - while ((bp->b_flags & B_DONE) == 0) { tsleep(bp, PVM, "swwrt", 0); } - for (j = 0; j < n; ++j) rtvals[i+j] = VM_PAGER_PEND; - /* * Now that we are through with the bp, we can call the * normal async completion, which frees everything up. */ - swp_pager_async_iodone(bp); splx(s); } @@ -1480,7 +1418,6 @@ * * This routine may not block. This routine is called at splbio() or better. */ - static void swp_pager_sync_iodone(bp) struct buf *bp; @@ -1508,7 +1445,6 @@ * We up ourselves to splvm() as required for various vm_page related * calls. */ - static void swp_pager_async_iodone(bp) struct buf *bp; @@ -1518,13 +1454,11 @@ vm_object_t object = NULL; GIANT_REQUIRED; - bp->b_flags |= B_DONE; /* * report error */ - if (bp->b_ioflags & BIO_ERROR) { printf( "swap_pager: I/O error - %s failed; blkno %ld," @@ -1539,7 +1473,6 @@ /* * set object, raise to splvm(). */ - if (bp->b_npages) object = bp->b_pages[0]->object; s = splvm(); @@ -1557,7 +1490,6 @@ * but do not free it in the rlist. The errornous block(s) are thus * never reallocated as swap. Redirty the page and continue. */ - for (i = 0; i < bp->b_npages; ++i) { vm_page_t m = bp->b_pages[i]; @@ -1570,7 +1502,6 @@ * can never be used again. But I can't from an * interrupt. */ - if (bp->b_iocmd == BIO_READ) { /* * When reading, reqpage needs to stay @@ -1593,10 +1524,8 @@ * not legal to mess with object->memq from an * interrupt. */ - m->valid = 0; vm_page_flag_clear(m, PG_ZERO); - if (i != bp->b_pager.pg_reqpage) vm_page_free(m); else @@ -1639,7 +1568,6 @@ * vm_page_wakeup(). We do not set reqpage's * valid bits here, it is up to the caller. */ - pmap_clear_modify(m); m->valid = VM_PAGE_BITS_ALL; vm_page_undirty(m); @@ -1677,14 +1605,12 @@ * adjust pip. NOTE: the original parent may still have its own * pip refs on the object. */ - if (object) vm_object_pip_wakeupn(object, bp->b_npages); /* * release the physical I/O buffer */ - relpbuf( bp, ((bp->b_iocmd == BIO_READ) ? 
&nsw_rcount : @@ -1721,7 +1647,6 @@ * * This routine must be called at splvm(). */ - static __inline struct swblock ** swp_pager_hash(vm_object_t object, vm_pindex_t index) { @@ -1730,7 +1655,6 @@ index &= ~SWAP_META_MASK; pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask]; - while ((swap = *pswap) != NULL) { if (swap->swb_object == object && swap->swb_index == index @@ -1739,7 +1663,7 @@ } pswap = &swap->swb_hnext; } - return(pswap); + return (pswap); } /* @@ -1755,7 +1679,6 @@ * This routine must be called at splvm(), except when used to convert * an OBJT_DEFAULT object into an OBJT_SWAP object. */ - static void swp_pager_meta_build( vm_object_t object, @@ -1769,7 +1692,6 @@ /* * Convert default object to swap object if necessary */ - if (object->type != OBJT_SWAP) { object->type = OBJT_SWAP; object->un_pager.swp.swp_bcount = 0; @@ -1796,7 +1718,6 @@ * anything just return. If we run out of space in the map we wait * and, since the hash table may have changed, retry. */ - retry: pswap = swp_pager_hash(object, index); @@ -1825,7 +1746,6 @@ /* * Delete prior contents of metadata */ - index &= SWAP_META_MASK; if (swap->swb_pages[index] != SWAPBLK_NONE) { @@ -1836,7 +1756,6 @@ /* * Enter block into metadata */ - swap->swb_pages[index] = swapblk; if (swapblk != SWAPBLK_NONE) ++swap->swb_count; @@ -1854,7 +1773,6 @@ * * This routine must be called at splvm() */ - static void swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count) { @@ -1900,7 +1818,6 @@ * * This routine must be called at splvm() */ - static void swp_pager_meta_free_all(vm_object_t object) { @@ -1960,7 +1877,6 @@ * SWM_FREE remove and free swap block from metadata * SWM_POP remove from meta data but do not free.. pop it out */ - static daddr_t swp_pager_meta_ctl( vm_object_t object, @@ -1976,9 +1892,8 @@ * The meta data only exists of the object is OBJT_SWAP * and even then might not be allocated yet. */ - if (object->type != OBJT_SWAP) - return(SWAPBLK_NONE); + return (SWAPBLK_NONE); r1 = SWAPBLK_NONE; pswap = swp_pager_hash(object, index); @@ -2002,7 +1917,7 @@ } } } - return(r1); + return (r1); } /******************************************************** @@ -2022,7 +1937,6 @@ * on dealing with b_resid. Since users of these routines may issue * multiple children simultaneously, sequencing of the error can be lost. */ - static void vm_pager_chain_iodone(struct buf *nbp) { @@ -2060,7 +1974,6 @@ * I/O completes, the parent buffer will be B_SIGNAL'd. Errors are * automatically propagated to the parent */ - struct buf * getchainbuf(struct bio *bp, struct vnode *vp, int flags) { @@ -2086,7 +1999,7 @@ if (vp) pbgetvp(vp, nbp); - return(nbp); + return (nbp); } void Index: swap_pager.h =================================================================== RCS file: /home/ncvs/src/sys/vm/swap_pager.h,v retrieving revision 1.30 diff -u -r1.30 swap_pager.h --- swap_pager.h 13 Dec 2000 10:00:57 -0000 1.30 +++ swap_pager.h 6 Mar 2002 14:23:58 -0000 @@ -66,9 +66,7 @@ * * Overall memory utilization is about the same as the old swap structure. 
*/ - #define SWCORRECT(n) (sizeof(void *) * (n) / sizeof(daddr_t)) - #define SWAP_META_PAGES (SWB_NPAGES * 2) #define SWAP_META_MASK (SWAP_META_PAGES - 1) @@ -106,6 +104,5 @@ struct buf; void swstrategy __P((struct buf *bp)); /* probably needs to move elsewhere */ -#endif - +#endif /* _KERNEL */ #endif /* _SWAP_PAGER_ */ Index: vm.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm.h,v retrieving revision 1.20 diff -u -r1.20 vm.h --- vm.h 25 Jan 2002 21:33:10 -0000 1.20 +++ vm.h 6 Mar 2002 14:24:21 -0000 @@ -111,7 +111,7 @@ */ struct vm_page; typedef struct vm_page *vm_page_t; -#endif +#endif /* _KERNEL */ /* * Information passed from the machine-independant VM initialization code Index: vm_extern.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_extern.h,v retrieving revision 1.52 diff -u -r1.52 vm_extern.h --- vm_extern.h 7 Feb 2002 20:58:47 -0000 1.52 +++ vm_extern.h 6 Mar 2002 14:24:36 -0000 @@ -57,7 +57,7 @@ int sbrk __P((struct thread *, void *, int *)); int sstk __P((struct thread *, void *, int *)); int swapon __P((struct thread *, void *, int *)); -#endif +#endif /* TYPEDEF_FOR_UAP */ int grow __P((struct proc *, size_t)); int grow_stack __P((struct proc *, size_t)); @@ -97,7 +97,5 @@ void vm_object_print __P((/* db_expr_t */ long, boolean_t, /* db_expr_t */ long, char *)); int vm_fault_quick __P((caddr_t v, int prot)); - #endif /* _KERNEL */ - #endif /* !_VM_EXTERN_H_ */ Index: vm_fault.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_fault.c,v retrieving revision 1.128 diff -u -r1.128 vm_fault.c --- vm_fault.c 19 Feb 2002 18:34:02 -0000 1.128 +++ vm_fault.c 6 Mar 2002 14:19:18 -0000 @@ -72,7 +72,6 @@ /* * Page fault handling module. */ - #include #include #include @@ -162,7 +161,6 @@ * * default objects are zero-fill, there is no real pager. */ - #define TRYPAGER (fs.object->type != OBJT_DEFAULT && \ (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) @@ -294,15 +292,12 @@ /* * Search for the page at object/offset. */ - fs.object = fs.first_object; fs.pindex = fs.first_pindex; - while (TRUE) { /* * If the object is dead, we stop here */ - if (fs.object->flags & OBJ_DEAD) { unlock_and_deallocate(&fs); return (KERN_PROTECTION_FAILURE); @@ -311,7 +306,6 @@ /* * See if page is resident */ - fs.m = vm_page_lookup(fs.object, fs.pindex); if (fs.m != NULL) { int queue, s; @@ -338,8 +332,8 @@ vm_object_deallocate(fs.first_object); goto RetryFault; } - queue = fs.m->queue; + s = splvm(); vm_pageq_remove_nowakeup(fs.m); splx(s); @@ -357,7 +351,6 @@ * (readable), jump to readrest, else break-out ( we * found the page ). */ - vm_page_busy(fs.m); if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) && fs.m->object != kernel_object && fs.m->object != kmem_object) { @@ -371,7 +364,6 @@ * Page is not resident, If this is the search termination * or the pager might contain the page, allocate a new page. */ - if (TRYPAGER || fs.object == fs.first_object) { if (fs.pindex >= fs.object->size) { unlock_and_deallocate(&fs); @@ -403,7 +395,6 @@ * pager has it, and potentially fault in additional pages * at the same time. */ - if (TRYPAGER) { int rv; int reqpage; @@ -441,12 +432,12 @@ * included in the lookahead - NFS piecemeal * writes will barf on it badly. 
*/ - - for(tmppindex = fs.first_pindex - 1; + for (tmppindex = fs.first_pindex - 1; tmppindex >= firstpindex; --tmppindex) { vm_page_t mt; - mt = vm_page_lookup( fs.first_object, tmppindex); + + mt = vm_page_lookup(fs.first_object, tmppindex); if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL)) break; if (mt->busy || @@ -514,7 +505,7 @@ * if moved. */ fs.m = vm_page_lookup(fs.object, fs.pindex); - if(!fs.m) { + if (!fs.m) { unlock_and_deallocate(&fs); goto RetryFault; } @@ -535,7 +526,6 @@ * past us, and inserting the page in that object at * the same time that we are. */ - if (rv == VM_PAGER_ERROR) printf("vm_fault: pager read error, pid %d (%s)\n", curproc->p_pid, curproc->p_comm); @@ -575,7 +565,6 @@ * Move on to the next object. Lock the next object before * unlocking the current one. */ - fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset); next_object = fs.object->backing_object; if (next_object == NULL) { @@ -626,12 +615,10 @@ * top-level object, we have to copy it into a new page owned by the * top-level object. */ - if (fs.object != fs.first_object) { /* * We only really need to copy if we want to write it. */ - if (fault_type & VM_PROT_WRITE) { /* * This allows pages to be virtually copied from a @@ -709,13 +696,11 @@ * fs.object != fs.first_object due to above * conditional */ - vm_object_pip_wakeup(fs.object); /* * Only use the new page below... */ - cnt.v_cow_faults++; fs.m = fs.first_m; fs.object = fs.first_object; @@ -730,7 +715,6 @@ * We must verify that the maps have not changed since our last * lookup. */ - if (!fs.lookup_still_valid && (fs.map->timestamp != map_generation)) { vm_object_t retry_object; @@ -747,7 +731,6 @@ * avoid a deadlock between the inode and exec_map that can * occur due to locks being obtained in different orders. */ - if (fs.vp != NULL) { vput(fs.vp); fs.vp = NULL; @@ -776,7 +759,6 @@ * list (the easiest thing to do here). If no one needs it, * pageout will grab it eventually. */ - if (result != KERN_SUCCESS) { release_page(&fs); unlock_and_deallocate(&fs); @@ -845,28 +827,22 @@ /* * Page had better still be busy */ - KASSERT(fs.m->flags & PG_BUSY, ("vm_fault: page %p not busy!", fs.m)); - unlock_things(&fs); /* * Sanity check: page must be completely valid or it is not fit to * map into user space. vm_pager_get_pages() ensures this. */ - if (fs.m->valid != VM_PAGE_BITS_ALL) { vm_page_zero_invalid(fs.m, TRUE); printf("Warning: page %p partially invalid on fault\n", fs.m); } - pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired); - if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) { pmap_prefault(fs.map->pmap, vaddr, fs.entry); } - vm_page_flag_clear(fs.m, PG_ZERO); vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED); if (fault_flags & VM_FAULT_HOLD) @@ -876,7 +852,6 @@ * If the page is not wired down, then put it where the pageout daemon * can find it. */ - if (fault_flags & VM_FAULT_WIRE_MASK) { if (wired) vm_page_wire(fs.m); @@ -899,10 +874,8 @@ /* * Unlock everything, and return */ - vm_page_wakeup(fs.m); vm_object_deallocate(fs.first_object); - return (KERN_SUCCESS); } @@ -928,14 +901,12 @@ * Inform the physical mapping system that the range of addresses may * not fault, so that page tables and such can be locked down as well. */ - pmap_pageable(pmap, start, end, FALSE); /* * We simulate a fault to get the page and enter it in the physical * map. 
*/ - for (va = start; va < end; va += PAGE_SIZE) { rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, VM_FAULT_CHANGE_WIRING); @@ -973,7 +944,6 @@ * Inform the physical mapping system that the range of addresses may * not fault, so that page tables and such can be locked down as well. */ - pmap_pageable(pmap, start, end, FALSE); /* @@ -1012,7 +982,6 @@ * Since the pages are wired down, we must be able to get their * mappings from the physical map system. */ - for (va = start; va < end; va += PAGE_SIZE) { pa = pmap_extract(pmap, va); if (pa != (vm_offset_t) 0) { @@ -1025,7 +994,6 @@ * Inform the physical mapping system that the range of addresses may * fault, so that page tables and such may be unwired themselves. */ - pmap_pageable(pmap, start, end, TRUE); } @@ -1041,7 +1009,6 @@ * The source map entry must be wired down (or be a sharing map * entry corresponding to a main map entry that is wired down). */ - void vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry) vm_map_t dst_map; @@ -1112,7 +1079,6 @@ /* * Enter it in the pmap... */ - vm_page_flag_clear(dst_m, PG_ZERO); pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE); vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED); @@ -1173,7 +1139,6 @@ /* * if the requested page is not available, then give up now */ - if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) { return 0; } @@ -1214,8 +1179,8 @@ startpindex = pindex - rbehind; } - for ( tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) { - if (vm_page_lookup( object, tpindex)) { + for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) { + if (vm_page_lookup(object, tpindex)) { startpindex = tpindex + 1; break; } @@ -1223,7 +1188,7 @@ break; } - for(i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) { + for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) { rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL); if (rtm == NULL) { @@ -1256,7 +1221,7 @@ if (endpindex > object->size) endpindex = object->size; - for( ; tpindex < endpindex; i++, tpindex++) { + for (; tpindex < endpindex; i++, tpindex++) { if (vm_page_lookup(object, tpindex)) { break; Index: vm_glue.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_glue.c,v retrieving revision 1.126 diff -u -r1.126 vm_glue.c --- vm_glue.c 26 Feb 2002 01:01:37 -0000 1.126 +++ vm_glue.c 5 Mar 2002 23:46:14 -0000 @@ -99,7 +99,6 @@ * * Note: proc0 from proc.h */ - static void vm_init_limits __P((void *)); SYSINIT(vm_limits, SI_SUB_VM_CONF, SI_ORDER_FIRST, vm_init_limits, &proc0) @@ -413,7 +412,6 @@ if ((p->p_sflag & PS_SWAPINREQ) == 0) { pri -= kg->kg_nice * 8; } - /* * if this ksegrp is higher priority Index: vm_init.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_init.c,v retrieving revision 1.29 diff -u -r1.29 vm_init.c --- vm_init.c 22 Aug 2001 04:07:27 -0000 1.29 +++ vm_init.c 5 Mar 2002 23:46:42 -0000 @@ -90,7 +90,6 @@ /* * System initialization */ - static void vm_mem_init __P((void *)); SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL) @@ -100,7 +99,6 @@ * * The start and end address of physical memory is passed in. 
*/ - /* ARGSUSED*/ static void vm_mem_init(dummy) Index: vm_kern.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_kern.c,v retrieving revision 1.75 diff -u -r1.75 vm_kern.c --- vm_kern.c 9 Mar 2002 16:24:27 -0000 1.75 +++ vm_kern.c 10 Mar 2002 15:15:53 -0000 @@ -97,7 +97,6 @@ * Allocate pageable memory to the kernel's address map. * "map" must be kernel_map or a submap of kernel_map. */ - vm_offset_t kmem_alloc_pageable(map, size) vm_map_t map; @@ -123,7 +122,6 @@ * * Same as kmem_alloc_pageable, except that it create a nofault entry. */ - vm_offset_t kmem_alloc_nofault(map, size) vm_map_t map; @@ -199,7 +197,6 @@ * We're intentionally not activating the pages we allocate to prevent a * race with page-out. vm_map_pageable will wire the pages. */ - for (i = 0; i < size; i += PAGE_SIZE) { vm_page_t mem; @@ -215,7 +212,6 @@ /* * And finally, mark the data as non-pageable. */ - (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE); return (addr); @@ -443,7 +439,6 @@ * * This routine may block. */ - vm_offset_t kmem_alloc_wait(map, size) vm_map_t map; @@ -504,7 +499,6 @@ * new map will thus map the range between VM_MIN_KERNEL_ADDRESS and * `start' as allocated, and the range between `start' and `end' as free. */ - void kmem_init(start, end) vm_offset_t start, end; Index: vm_map.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_map.c,v retrieving revision 1.213 diff -u -r1.213 vm_map.c --- vm_map.c 7 Mar 2002 03:54:56 -0000 1.213 +++ vm_map.c 10 Mar 2002 15:15:53 -0000 @@ -191,7 +191,7 @@ } static __inline void -vmspace_dofree( struct vmspace *vm) +vmspace_dofree(struct vmspace *vm) { CTR1(KTR_VM, "vmspace_free: %p", vm); /* @@ -260,7 +260,7 @@ } } } - return(count); + return (count); } u_char @@ -320,7 +320,7 @@ int vm_map_lock_upgrade(vm_map_t map) { - return(_vm_map_lock_upgrade(map, curthread)); + return (_vm_map_lock_upgrade(map, curthread)); } void @@ -349,19 +349,19 @@ vm_offset_t vm_map_min(vm_map_t map) { - return(map->min_offset); + return (map->min_offset); } vm_offset_t vm_map_max(vm_map_t map) { - return(map->max_offset); + return (map->max_offset); } struct pmap * vm_map_pmap(vm_map_t map) { - return(map->pmap); + return (map->pmap); } struct pmap * @@ -454,7 +454,7 @@ kmapentzone : mapentzone); if (new_entry == NULL) panic("vm_map_entry_create: kernel resources exhausted"); - return(new_entry); + return (new_entry); } /* @@ -524,7 +524,6 @@ /* * Start looking either from the head of the list, or from the hint. */ - cur = map->hint; if (cur == &map->header) @@ -556,7 +555,6 @@ /* * Search linearly */ - while (cur != last) { if (cur->end > address) { if (address >= cur->start) { @@ -564,7 +562,6 @@ * Save this lookup for future hints, and * return */ - *entry = cur; SAVE_HINT(map, cur); return (TRUE); @@ -605,7 +602,6 @@ /* * Check that the start and end points are not bogus. */ - if ((start < map->min_offset) || (end > map->max_offset) || (start >= end)) return (KERN_INVALID_ADDRESS); @@ -614,7 +610,6 @@ * Find the entry prior to the proposed starting address; if it's part * of an existing entry, this range is bogus. */ - if (vm_map_lookup_entry(map, start, &temp_entry)) return (KERN_NO_SPACE); @@ -623,7 +618,6 @@ /* * Assert that the next entry doesn't overlap the end point. 
*/ - if ((prev_entry->next != &map->header) && (prev_entry->next->start < end)) return (KERN_NO_SPACE); @@ -698,7 +692,6 @@ /* * Create a new entry */ - new_entry = vm_map_entry_create(map); new_entry->start = start; new_entry->end = end; @@ -716,7 +709,6 @@ /* * Insert the new entry into the list */ - vm_map_entry_link(map, prev_entry, new_entry); map->size += new_entry->end - new_entry->start; @@ -958,7 +950,6 @@ * entry BEFORE this one, so that this entry has the specified * starting address. */ - vm_map_simplify_entry(map, entry); /* @@ -968,7 +959,6 @@ * map. This is a bit of a hack, but is also about the best place to * put this improvement. */ - if (entry->object.vm_object == NULL && !map->system_map) { vm_object_t object; object = vm_object_allocate(OBJT_DEFAULT, @@ -998,7 +988,6 @@ * the specified address; if necessary, * it splits the entry into two. */ - #define vm_map_clip_end(map, entry, endaddr) \ { \ if (endaddr < entry->end) \ @@ -1021,7 +1010,6 @@ * map. This is a bit of a hack, but is also about the best place to * put this improvement. */ - if (entry->object.vm_object == NULL && !map->system_map) { vm_object_t object; object = vm_object_allocate(OBJT_DEFAULT, @@ -1033,7 +1021,6 @@ /* * Create a new entry and insert it AFTER the specified entry */ - new_entry = vm_map_entry_create(map); *new_entry = *entry; @@ -1145,7 +1132,6 @@ /* * Make a first pass to check for protection violations. */ - current = entry; while ((current != &map->header) && (current->start < end)) { if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { @@ -1163,9 +1149,7 @@ * Go back and fix up protections. [Note that clipping is not * necessary the second time.] */ - current = entry; - while ((current != &map->header) && (current->start < end)) { vm_prot_t old_prot; @@ -1183,22 +1167,17 @@ * Update physical map if necessary. Worry about copy-on-write * here -- CHECK THIS XXX */ - if (current->protection != old_prot) { #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ VM_PROT_ALL) - pmap_protect(map->pmap, current->start, current->end, current->protection & MASK(current)); #undef MASK } - vm_map_simplify_entry(map, current); - current = current->next; } - vm_map_unlock(map); return (KERN_SUCCESS); } @@ -1211,7 +1190,6 @@ * the vm_map_entry structure, or those effecting the underlying * objects. */ - int vm_map_madvise( vm_map_t map, @@ -1230,7 +1208,6 @@ * various clipping operations. Otherwise we only need a read-lock * on the map. */ - switch(behav) { case MADV_NORMAL: case MADV_SEQUENTIAL: @@ -1254,7 +1231,6 @@ /* * Locate starting entry and clip if necessary. */ - VM_MAP_RANGE_CHECK(map, start, end); if (vm_map_lookup_entry(map, start, &entry)) { @@ -1358,7 +1334,7 @@ } vm_map_unlock_read(map); } - return(0); + return (0); } @@ -1593,7 +1569,6 @@ * changing the pageability for the entire region. We do so before * making any changes. */ - if (vm_map_lookup_entry(map, start, &start_entry) == FALSE) { vm_map_unlock(map); return (KERN_INVALID_ADDRESS); @@ -1604,9 +1579,7 @@ * Actions are rather different for wiring and unwiring, so we have * two separate cases. */ - if (new_pageable) { - vm_map_clip_start(map, entry, start); /* @@ -1614,7 +1587,6 @@ * really wired down and that there are no holes. 
*/ while ((entry != &map->header) && (entry->start < end)) { - if (entry->wired_count == 0 || (entry->end < end && (entry->next == &map->header || @@ -1889,7 +1861,7 @@ while (object && object->backing_object) { object = object->backing_object; offset += object->backing_object_offset; - if (object->size < OFF_TO_IDX( offset + size)) + if (object->size < OFF_TO_IDX(offset + size)) size = IDX_TO_OFF(object->size) - offset; } if (object && (object->type == OBJT_VNODE) && @@ -1984,7 +1956,6 @@ /* * Find the start of the region, and clip it */ - if (!vm_map_lookup_entry(map, start, &first_entry)) entry = first_entry->next; else { @@ -2000,7 +1971,6 @@ /* * Save the free space hint */ - if (entry == &map->header) { map->first_free = &map->header; } else if (map->first_free->start >= start) { @@ -2010,7 +1980,6 @@ /* * Step through all entries in this region */ - while ((entry != &map->header) && (entry->start < end)) { vm_map_entry_t next; vm_offset_t s, e; @@ -2123,19 +2092,16 @@ /* * No holes allowed! */ - if (start < entry->start) { return (FALSE); } /* * Check protection associated with entry. */ - if ((entry->protection & protection) != protection) { return (FALSE); } /* go to next entry */ - start = entry->end; entry = entry->next; } @@ -2389,14 +2355,12 @@ * Insert the entry into the new map -- we know we're * inserting at the end of the new map. */ - vm_map_entry_link(new_map, new_map->header.prev, new_entry); /* * Update the physical map */ - pmap_copy(new_map->pmap, old_map->pmap, new_entry->start, (old_entry->end - old_entry->start), @@ -2643,7 +2607,6 @@ * Unshare the specified VM space for exec. If other processes are * mapped to it, then create a new one. The new vmspace is null. */ - void vmspace_exec(struct proc *p) { @@ -2673,7 +2636,6 @@ * Unshare the specified VM space for forcing COW. This * is called by rfork, for the (RFMEM|RFPROC) == 0 case. */ - void vmspace_unshare(struct proc *p) { @@ -2690,7 +2652,6 @@ if (p == curthread->td_proc) /* XXXKSE ? */ pmap_activate(curthread); } - /* * vm_map_lookup: @@ -2731,28 +2692,23 @@ GIANT_REQUIRED; RetryLookup:; - /* * Lookup the faulting address. */ vm_map_lock_read(map); - #define RETURN(why) \ { \ vm_map_unlock_read(map); \ - return(why); \ + return (why); \ } /* * If the map has an interesting hint, try it before calling full * blown lookup routine. */ - entry = map->hint; - *out_entry = entry; - if ((entry == &map->header) || (vaddr < entry->start) || (vaddr >= entry->end)) { vm_map_entry_t tmp_entry; @@ -2771,7 +2727,6 @@ /* * Handle submaps. */ - if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { vm_map_t old_map = map; @@ -2786,17 +2741,14 @@ * pages with an override. This is to implement a forced * COW for debuggers. */ - if (fault_type & VM_PROT_OVERRIDE_WRITE) prot = entry->max_protection; else prot = entry->protection; - fault_type &= (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE); if ((fault_type & prot) != fault_type) { RETURN(KERN_PROTECTION_FAILURE); } - if ((entry->eflags & MAP_ENTRY_USER_WIRED) && (entry->eflags & MAP_ENTRY_COW) && (fault_type & VM_PROT_WRITE) && @@ -2808,7 +2760,6 @@ * If this page is not pageable, we have to get it for all possible * accesses. */ - *wired = (entry->wired_count != 0); if (*wired) prot = fault_type = entry->protection; @@ -2816,7 +2767,6 @@ /* * If the entry was copy-on-write, we either ... 
*/ - if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { /* * If we want to write the page, we may as well handle that @@ -2825,7 +2775,6 @@ * If we don't need to write the page, we just demote the * permissions allowed. */ - if (fault_type & VM_PROT_WRITE) { /* * Make a new object, and place it in the object @@ -2833,15 +2782,12 @@ * -- one just moved from the map to the new * object. */ - if (vm_map_lock_upgrade(map)) goto RetryLookup; - vm_object_shadow( &entry->object.vm_object, &entry->offset, atop(entry->end - entry->start)); - entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; vm_map_lock_downgrade(map); } else { @@ -2849,7 +2795,6 @@ * We're attempting to read a copy-on-write page -- * don't allow writes. */ - prot &= ~VM_PROT_WRITE; } } @@ -2861,7 +2806,6 @@ !map->system_map) { if (vm_map_lock_upgrade(map)) goto RetryLookup; - entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, atop(entry->end - entry->start)); entry->offset = 0; @@ -2872,14 +2816,12 @@ * Return the object/offset from this entry. If the entry was * copy-on-write or empty, it has been fixed up. */ - *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); *object = entry->object.vm_object; /* * Return whether this is the only map sharing this data. */ - *out_prot = prot; return (KERN_SUCCESS); @@ -2892,7 +2834,6 @@ * Releases locks acquired by a vm_map_lookup * (according to the handle returned by that lookup). */ - void vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) { @@ -3052,18 +2993,17 @@ } /* - * Force copy on write for mmaped regions - */ + * Force copy on write for mmaped regions + */ vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize); /* - * Point the object appropriately - */ + * Point the object appropriately + */ if (oldobject != srcobject) { - - /* - * Set the object optimization hint flag - */ + /* + * Set the object optimization hint flag + */ vm_object_set_flag(srcobject, OBJ_OPT); vm_object_reference(srcobject); @@ -3157,7 +3097,7 @@ if (object->shadow_count > object->ref_count) panic("vm_freeze_copyopts: sc > rc"); - while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) { + while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) { vm_pindex_t bo_pindex; vm_page_t m_in, m_out; Index: vm_map.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_map.h,v retrieving revision 1.69 diff -u -r1.69 vm_map.h --- vm_map.h 5 Feb 2002 21:23:05 -0000 1.69 +++ vm_map.h 6 Mar 2002 14:25:19 -0000 @@ -67,7 +67,6 @@ /* * Virtual memory map module definitions. */ - #ifndef _VM_MAP_ #define _VM_MAP_ @@ -94,7 +93,6 @@ * another map (called a "sharing map") which denotes read-write * sharing with other maps. 
*/ - union vm_map_object { struct vm_object *vm_object; /* object object */ struct vm_map *sub_map; /* belongs to another map */ @@ -193,7 +191,6 @@ }; #ifdef _KERNEL - u_char vm_map_entry_behavior(struct vm_map_entry *entry); void vm_map_entry_set_behavior(struct vm_map_entry *entry, u_char behavior); @@ -214,7 +211,7 @@ lockmgr(&(map)->lock, LK_DRAIN|LK_INTERLOCK, \ &(map)->ref_lock, curthread); \ (map)->timestamp++; \ - } while(0) + } while (0) #endif void vm_map_lock(vm_map_t map); @@ -231,7 +228,6 @@ struct pmap *vmspace_pmap(struct vmspace *vmspace); long vmspace_resident_count(struct vmspace *vmspace); - #endif /* _KERNEL */ @@ -293,5 +289,5 @@ int vm_map_growstack (struct proc *p, vm_offset_t addr); int vmspace_swap_count (struct vmspace *vmspace); -#endif +#endif /* _KERNEL */ #endif /* _VM_MAP_ */ Index: vm_mmap.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_mmap.c,v retrieving revision 1.133 diff -u -r1.133 vm_mmap.c --- vm_mmap.c 27 Feb 2002 18:32:23 -0000 1.133 +++ vm_mmap.c 6 Mar 2002 14:57:03 -0000 @@ -93,7 +93,6 @@ * if attacked from compromised user account but generous enough such that * multi-threaded processes are not unduly inconvenienced. */ - static void vmmapentry_rsrc_init __P((void *)); SYSINIT(vmmersrc, SI_SUB_KVM_RSRC, SI_ORDER_FIRST, vmmapentry_rsrc_init, NULL) @@ -380,7 +379,6 @@ * we're at securelevel < 1, to allow the XIG X server * to continue to work. */ - if ((flags & MAP_SHARED) != 0 || (vp->v_type == VCHR && disablexworkaround)) { if ((fp->f_flag & FWRITE) != 0) { @@ -517,7 +515,7 @@ size += pageoff; size = (vm_size_t) round_page(size); if (addr + size < addr) - return(EINVAL); + return (EINVAL); if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE)) return (EINVAL); @@ -558,7 +556,7 @@ switch (rv) { case KERN_SUCCESS: - return(0); + return (0); case KERN_INVALID_ADDRESS: return (EINVAL); /* Sun returns ENOMEM? */ case KERN_FAILURE: @@ -594,7 +592,7 @@ size += pageoff; size = (vm_size_t) round_page(size); if (addr + size < addr) - return(EINVAL); + return (EINVAL); if (size == 0) return (0); @@ -672,7 +670,7 @@ size += pageoff; size = (vm_size_t) round_page(size); if (addr + size < addr) - return(EINVAL); + return (EINVAL); mtx_lock(&Giant); ret = vm_map_protect(&td->td_proc->p_vmspace->vm_map, addr, @@ -716,7 +714,7 @@ size += pageoff; size = (vm_size_t) round_page(size); if (addr + size < addr) - return(EINVAL); + return (EINVAL); mtx_lock(&Giant); ret = vm_map_inherit(&td->td_proc->p_vmspace->vm_map, addr, addr+size, @@ -921,7 +919,7 @@ * the byte vector is zeroed for those skipped entries. 
*/ while ((lastvecindex + 1) < vecindex) { - error = subyte( vec + lastvecindex, 0); + error = subyte(vec + lastvecindex, 0); if (error) { error = EFAULT; goto done2; @@ -932,7 +930,7 @@ /* * Pass the page information to the user */ - error = subyte( vec + vecindex, mincoreinfo); + error = subyte(vec + vecindex, mincoreinfo); if (error) { error = EFAULT; goto done2; @@ -962,7 +960,7 @@ */ vecindex = OFF_TO_IDX(end - first_addr); while ((lastvecindex + 1) < vecindex) { - error = subyte( vec + lastvecindex, 0); + error = subyte(vec + lastvecindex, 0); if (error) { error = EFAULT; goto done2; Index: vm_object.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_object.c,v retrieving revision 1.201 diff -u -r1.201 vm_object.c --- vm_object.c 6 Mar 2002 02:42:56 -0000 1.201 +++ vm_object.c 10 Mar 2002 15:16:25 -0000 @@ -174,7 +174,7 @@ object->resident_page_count = 0; object->shadow_count = 0; object->pg_color = next_index; - if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1)) + if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1)) incr = PQ_L2_SIZE / 3 + PQ_PRIME1; else incr = size; @@ -308,7 +308,6 @@ * * Returns a new object with the given size. */ - vm_object_t vm_object_allocate(objtype_t type, vm_size_t size) { @@ -589,7 +588,6 @@ * * The object must be locked. */ - void vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags) { @@ -695,7 +693,6 @@ * stay dirty so do not mess with the page and do not clear the * object flags. */ - clearobjflags = 1; TAILQ_FOREACH(p, &object->memq, listq) { @@ -839,7 +836,7 @@ (tp->flags & PG_CLEANCHK) == 0 || (tp->busy != 0)) break; - if((tp->queue - tp->pc) == PQ_CACHE) { + if ((tp->queue - tp->pc) == PQ_CACHE) { vm_page_flag_clear(tp, PG_CLEANCHK); break; } @@ -900,7 +897,6 @@ * NOTE: If the page is already at VM_PROT_NONE, calling * vm_page_protect will have no effect. */ - void vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end) { @@ -981,7 +977,6 @@ /* * Locate and adjust resident pages */ - for (; pindex < end; pindex += 1) { relookup: tobject = object; @@ -1076,7 +1071,6 @@ * The new object and offset into that object * are returned in the source parameters. */ - void vm_object_shadow( vm_object_t *object, /* IN/OUT */ @@ -1092,7 +1086,6 @@ /* * Don't create the new object if the old object isn't shared. */ - if (source != NULL && source->ref_count == 1 && source->handle == NULL && @@ -1128,13 +1121,11 @@ * Store the offset into the source object, and fix up the offset into * the new object. */ - result->backing_object_offset = *offset; /* * Return the new things */ - *offset = 0; *object = result; } @@ -1161,7 +1152,6 @@ /* * Initial conditions */ - if (op & OBSC_TEST_ALL_SHADOWED) { /* * We do not want to have to test for the existence of @@ -1174,7 +1164,7 @@ */ if (backing_object->type != OBJT_DEFAULT) { splx(s); - return(0); + return (0); } } if (op & OBSC_COLLAPSE_WAIT) { @@ -1184,7 +1174,6 @@ /* * Our scan */ - p = TAILQ_FIRST(&backing_object->memq); while (p) { vm_page_t next = TAILQ_NEXT(p, listq); @@ -1201,7 +1190,6 @@ * note that we do not busy the backing object's * page. */ - if ( p->pindex < backing_offset_index || new_pindex >= object->size @@ -1233,7 +1221,6 @@ /* * Check for busy page */ - if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) { vm_page_t pp; @@ -1331,7 +1318,7 @@ p = next; } splx(s); - return(r); + return (r); } @@ -1416,19 +1403,16 @@ * vm_object_backing_scan fails the shadowing test in this * case. 
*/ - if (backing_object->ref_count == 1) { /* * If there is exactly one reference to the backing * object, we can collapse it into the parent. */ - vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT); /* * Move the pager from backing_object to object. */ - if (backing_object->type == OBJT_SWAP) { vm_object_pip_add(backing_object, 1); @@ -1440,7 +1424,6 @@ * new swapper is able to optimize the * destroy-source case. */ - vm_object_pip_add(object, 1); swap_pager_copy( backing_object, @@ -1456,7 +1439,6 @@ * backing_object->backing_object moves from within * backing_object to within object. */ - TAILQ_REMOVE( &object->backing_object->shadow_head, object, @@ -1514,7 +1496,6 @@ * If we do not entirely shadow the backing object, * there is nothing we can do so we give up. */ - if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) { break; } @@ -1524,7 +1505,6 @@ * chain. Deallocating backing_object will not remove * it, since its reference count is at least 2. */ - TAILQ_REMOVE( &backing_object->shadow_head, object, @@ -1611,7 +1591,6 @@ * The busy flags are only cleared at * interrupt -- minimize the spl transitions */ - if (vm_page_sleep_busy(p, TRUE, "vmopar")) goto again; @@ -1714,7 +1693,6 @@ * another object . has a copy elsewhere (any of which mean that the * pages not mapped to prev_entry may be in use anyway) */ - if (prev_object->backing_object != NULL) { return (FALSE); } @@ -1789,7 +1767,7 @@ tmpe = map->header.next; entcount = map->nentries; while (entcount-- && (tmpe != &map->header)) { - if( _vm_object_in_map(map, object, tmpe)) { + if (_vm_object_in_map(map, object, tmpe)) { return 1; } tmpe = tmpe->next; @@ -1799,14 +1777,14 @@ tmpe = tmpm->header.next; entcount = tmpm->nentries; while (entcount-- && tmpe != &tmpm->header) { - if( _vm_object_in_map(tmpm, object, tmpe)) { + if (_vm_object_in_map(tmpm, object, tmpe)) { return 1; } tmpe = tmpe->next; } } else if ((obj = entry->object.vm_object) != NULL) { for (; obj; obj = obj->backing_object) - if( obj == object) { + if (obj == object) { return 1; } } @@ -1820,21 +1798,21 @@ /* sx_slock(&allproc_lock); */ LIST_FOREACH(p, &allproc, p_list) { - if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) + if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) continue; - if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { + if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { /* sx_sunlock(&allproc_lock); */ return 1; } } /* sx_sunlock(&allproc_lock); */ - if( _vm_object_in_map( kernel_map, object, 0)) + if (_vm_object_in_map(kernel_map, object, 0)) return 1; - if( _vm_object_in_map( kmem_map, object, 0)) + if (_vm_object_in_map(kmem_map, object, 0)) return 1; - if( _vm_object_in_map( pager_map, object, 0)) + if (_vm_object_in_map(pager_map, object, 0)) return 1; - if( _vm_object_in_map( buffer_map, object, 0)) + if (_vm_object_in_map(buffer_map, object, 0)) return 1; return 0; } @@ -1949,7 +1927,7 @@ vm_page_t m; db_printf("new object: %p\n", (void *)object); - if ( nl > 18) { + if (nl > 18) { c = cngetc(); if (c != ' ') return; @@ -1967,7 +1945,7 @@ if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, (long)pa); - if ( nl > 18) { + if (nl > 18) { c = cngetc(); if (c != ' ') return; @@ -1997,7 +1975,7 @@ db_printf(" index(%ld)run(%d)pa(0x%lx)", (long)fidx, rcount, (long)pa); db_printf("pd(%ld)\n", (long)padiff); - if ( nl > 18) { + if (nl > 18) { c = cngetc(); if (c != ' ') return; @@ -2012,7 +1990,7 @@ if (rcount) { db_printf(" index(%ld)run(%d)pa(0x%lx)\n", (long)fidx, rcount, 
(long)pa); - if ( nl > 18) { + if (nl > 18) { c = cngetc(); if (c != ' ') return; Index: vm_page.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_page.c,v retrieving revision 1.179 diff -u -r1.179 vm_page.c --- vm_page.c 4 Mar 2002 18:55:26 -0000 1.179 +++ vm_page.c 6 Mar 2002 15:02:13 -0000 @@ -123,7 +123,6 @@ * Associated with page of user-allocatable memory is a * page structure. */ - static struct vm_page **vm_page_buckets; /* Array of buckets */ static int vm_page_bucket_count; /* How big is array? */ static int vm_page_hash_mask; /* Mask for hash function */ @@ -160,7 +159,6 @@ * for the object/offset-to-page hash table headers. * Each page cell is initialized and placed on the free list. */ - vm_offset_t vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr) { @@ -207,7 +205,6 @@ * Initialize the queue headers for the free queue, the active queue * and the inactive queue. */ - vm_pageq_init(); /* @@ -255,13 +252,10 @@ * use (taking into account the overhead of a page structure per * page). */ - first_page = phys_avail[0] / PAGE_SIZE; - page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page; npages = (total - (page_range * sizeof(struct vm_page)) - (end - new_end)) / PAGE_SIZE; - end = new_end; /* @@ -317,7 +311,7 @@ { int i = ((uintptr_t)object + pindex) ^ object->hash_rand; - return(i & vm_page_hash_mask); + return (i & vm_page_hash_mask); } void @@ -347,7 +341,6 @@ * * wakeup anyone waiting for the page. */ - void vm_page_flash(vm_page_t m) { @@ -364,7 +357,6 @@ * page. * */ - void vm_page_wakeup(vm_page_t m) { @@ -377,7 +369,6 @@ * * */ - void vm_page_io_start(vm_page_t m) { @@ -424,7 +415,6 @@ * protection and therefore can be safely called if the page is already * at VM_PROT_NONE (it will be a NOP effectively ). */ - void vm_page_protect(vm_page_t mem, int prot) { @@ -507,7 +497,6 @@ * PG_BUSY to m->busy or vise versa (which would create a timing * window). */ - int vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg) { @@ -522,17 +511,16 @@ tsleep(m, PVM, msg, 0); } splx(s); - return(TRUE); + return (TRUE); /* not reached */ } - return(FALSE); + return (FALSE); } /* * vm_page_dirty: * * make page all dirty */ - void vm_page_dirty(vm_page_t m) { @@ -546,7 +534,6 @@ * * Set page to not be dirty. Note: does not clear pmap modify bits */ - void vm_page_undirty(vm_page_t m) { @@ -566,7 +553,6 @@ * The object and page must be locked, and must be splhigh. * This routine may not block. */ - void vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex) { @@ -580,14 +566,12 @@ /* * Record the object/offset pair in this page */ - m->object = object; m->pindex = pindex; /* * Insert it into the object_object/offset hash table */ - bucket = &vm_page_buckets[vm_page_hash(object, pindex)]; m->hnext = *bucket; *bucket = m; @@ -596,14 +580,12 @@ /* * Now link into the object's list of backed pages. */ - TAILQ_INSERT_TAIL(&object->memq, m, listq); object->generation++; /* * show that the object has one more resident page. */ - object->resident_page_count++; /* @@ -626,7 +608,6 @@ * The underlying pmap entry (if any) is NOT removed here. * This routine may not block. */ - void vm_page_remove(vm_page_t m) { @@ -644,7 +625,6 @@ /* * Basically destroy the page. */ - vm_page_wakeup(m); object = m->object; @@ -656,7 +636,6 @@ * Note: we must NULL-out m->hnext to prevent loops in detached * buffers with vm_page_lookup(). 
*/ - { struct vm_page **bucket; @@ -674,13 +653,11 @@ /* * Now remove from the object's list of backed pages. */ - TAILQ_REMOVE(&object->memq, m, listq); /* * And show that the object has one fewer resident page. */ - object->resident_page_count--; object->generation++; @@ -702,7 +679,6 @@ * This routine may not block. * This is a critical path routine */ - vm_page_t vm_page_lookup(vm_object_t object, vm_pindex_t pindex) { @@ -713,7 +689,6 @@ /* * Search the hash table for this object/offset pair */ - retry: generation = vm_page_bucket_generation; bucket = &vm_page_buckets[vm_page_hash(object, pindex)]; @@ -752,7 +727,6 @@ * or vm_page_dirty() will panic. Dirty pages are not allowed * on the cache. */ - void vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex) { @@ -806,7 +780,6 @@ * This routine must be called at splvm(). * This routine may not block. */ - static __inline vm_page_t vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero) { @@ -817,7 +790,7 @@ (pindex + object->pg_color) & PQ_L2_MASK, prefer_zero ); - return(m); + return (m); } /* @@ -838,7 +811,6 @@ * interrupt (VM_ALLOC_INTERRUPT). We are not allowed to mess with * the page cache in this case. */ - vm_page_t vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req) { @@ -853,7 +825,6 @@ /* * The pager is allowed to eat deeper into the free page list. */ - if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) { page_req = VM_ALLOC_SYSTEM; }; @@ -930,7 +901,6 @@ /* * Initialize structure. Only the PG_ZERO flag is inherited. */ - if (m->flags & PG_ZERO) { vm_page_zero_count--; m->flags = PG_ZERO | PG_BUSY; @@ -950,7 +920,6 @@ * could cause us to block allocating memory). We cannot block * anywhere. */ - vm_page_insert(m, object, pindex); /* @@ -961,7 +930,6 @@ pagedaemon_wakeup(); splx(s); - return (m); } @@ -971,7 +939,6 @@ * Block until free pages are available for allocation * - Called in various places before memory allocations. */ - void vm_wait(void) { @@ -1001,7 +968,6 @@ * processes will be able to grab memory first. Do not change * this balance without careful testing first. */ - void vm_waitpfault(void) { @@ -1033,13 +999,10 @@ GIANT_REQUIRED; s = splvm(); - if (m->queue != PQ_ACTIVE) { if ((m->queue - m->pc) == PQ_CACHE) cnt.v_reactivated++; - vm_pageq_remove(m); - if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) { if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; @@ -1049,7 +1012,6 @@ if (m->act_count < ACT_INIT) m->act_count = ACT_INIT; } - splx(s); } @@ -1124,7 +1086,6 @@ * callback routine until after we've put the page on the * appropriate free queue. */ - vm_pageq_remove_nowakeup(m); vm_page_remove(m); @@ -1132,7 +1093,6 @@ * If fictitious remove object association and * return, otherwise delay object association removal. */ - if ((m->flags & PG_FICTITIOUS) != 0) { splx(s); return; @@ -1153,7 +1113,6 @@ * If we've exhausted the object's resident pages we want to free * it up. */ - if (object && (object->type == OBJT_VNODE) && ((object->flags & OBJ_DEAD) == 0) @@ -1167,12 +1126,11 @@ /* * Clear the UNMANAGED flag when freeing an unmanaged page. */ - if (m->flags & PG_UNMANAGED) { - m->flags &= ~PG_UNMANAGED; + m->flags &= ~PG_UNMANAGED; } else { #ifdef __alpha__ - pmap_page_is_free(m); + pmap_page_is_free(m); #endif } @@ -1189,16 +1147,13 @@ * Put zero'd pages on the end ( where we look for zero'd pages * first ) and non-zerod pages at the head. 
*/ - if (m->flags & PG_ZERO) { TAILQ_INSERT_TAIL(&pq->pl, m, pageq); ++vm_page_zero_count; } else { TAILQ_INSERT_HEAD(&pq->pl, m, pageq); } - vm_page_free_wakeup(); - splx(s); } @@ -1220,7 +1175,6 @@ * will eventually be extended to support 4MB unmanaged physical * mappings. */ - void vm_page_unmanage(vm_page_t m) { @@ -1379,13 +1333,13 @@ if (m->dirty || m->hold_count || m->busy || m->wire_count || (m->flags & (PG_BUSY|PG_UNMANAGED))) { - return(0); + return (0); } vm_page_test_dirty(m); if (m->dirty) - return(0); + return (0); vm_page_cache(m); - return(1); + return (1); } /* @@ -1399,15 +1353,15 @@ { if (m->dirty || m->hold_count || m->busy || m->wire_count || (m->flags & (PG_BUSY|PG_UNMANAGED))) { - return(0); + return (0); } vm_page_test_dirty(m); if (m->dirty) - return(0); + return (0); vm_page_busy(m); vm_page_protect(m, VM_PROT_NONE); vm_page_free(m); - return(1); + return (1); } /* @@ -1434,7 +1388,6 @@ * Remove all pmaps and indicate that the page is not * writeable or mapped. */ - vm_page_protect(m, VM_PROT_NONE); if (m->dirty != 0) { panic("vm_page_cache: caching a dirty page, pindex: %ld", @@ -1468,7 +1421,6 @@ * space from active. The idea is to not force this to happen too * often. */ - void vm_page_dontneed(vm_page_t m) { @@ -1482,7 +1434,6 @@ /* * occassionally leave the page alone */ - if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE || m->queue - m->pc == PQ_CACHE @@ -1565,7 +1516,6 @@ * * Inputs are required to range within a page. */ - __inline int vm_page_bits(int base, int size) { @@ -1578,7 +1528,7 @@ ); if (size == 0) /* handle degenerate case */ - return(0); + return (0); first_bit = base >> DEV_BSHIFT; last_bit = (base + size - 1) >> DEV_BSHIFT; @@ -1614,7 +1564,6 @@ * bit is clear, we have to zero out a portion of the * first block. */ - if ((frag = base & ~(DEV_BSIZE - 1)) != base && (m->valid & (1 << (base >> DEV_BSHIFT))) == 0 ) { @@ -1630,9 +1579,7 @@ * valid bit is clear, we have to zero out a portion of * the last block. */ - endoff = base + size; - if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff && (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0 ) { @@ -1654,7 +1601,6 @@ * clear dirty bits for DEV_BSIZE chunks that are fully within * the range. */ - pagebits = vm_page_bits(base, size); m->valid |= pagebits; #if 0 /* NOT YET */ @@ -1722,7 +1668,6 @@ * Pages are most often semi-valid when the end of a file is mapped * into memory and the file's size is not page aligned. */ - void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid) { @@ -1735,7 +1680,6 @@ * valid bit may be set ) have already been zerod by * vm_page_set_validclean(). */ - for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) { if (i == (PAGE_SIZE / DEV_BSIZE) || (m->valid & (1 << i)) @@ -1756,7 +1700,6 @@ * as being valid. We can do this if there are no cache consistancy * issues. e.g. it is ok to do with UFS, but not ok to do with NFS. */ - if (setvalid) m->valid = VM_PAGE_BITS_ALL; } @@ -1770,7 +1713,6 @@ * * May not block. */ - int vm_page_is_valid(vm_page_t m, int base, int size) { @@ -1785,7 +1727,6 @@ /* * update dirty bits from pmap/mmu. May not block. 
*/ - void vm_page_test_dirty(vm_page_t m) { Index: vm_page.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_page.h,v retrieving revision 1.96 diff -u -r1.96 vm_page.h --- vm_page.h 4 Mar 2002 18:55:26 -0000 1.96 +++ vm_page.h 6 Mar 2002 14:29:03 -0000 @@ -173,7 +173,7 @@ #else #define PQ_CACHESIZE 128 #endif -#endif +#endif /* !defined(PQ_CACHESIZE) */ #if PQ_CACHESIZE >= 1024 #define PQ_PRIME1 31 /* Prime number somewhat less than PQ_HASH_SIZE */ @@ -225,7 +225,7 @@ extern struct vpgqueues vm_page_queues[PQ_COUNT]; -#endif +#endif /* !defined(KLD_MODULE) */ /* * These are the flags defined for vm_page. @@ -256,7 +256,6 @@ /* * Misc constants. */ - #define ACT_DECLINE 1 #define ACT_ADVANCE 3 #define ACT_INIT 5 @@ -372,6 +371,5 @@ void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid); void vm_page_free_toq(vm_page_t m); void vm_page_zero_idle_wakeup(void); - #endif /* _KERNEL */ #endif /* !_VM_PAGE_ */ Index: vm_pageout.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_pageout.c,v retrieving revision 1.189 diff -u -r1.189 vm_pageout.c --- vm_pageout.c 27 Feb 2002 18:03:02 -0000 1.189 +++ vm_pageout.c 6 Mar 2002 14:31:05 -0000 @@ -219,7 +219,6 @@ * block. Note the careful timing, however, the busy bit isn't set till * late and we cannot do anything that will mess with the page. */ - static int vm_pageout_clean(m) vm_page_t m; @@ -276,7 +275,6 @@ * first and attempt to align our cluster, then do a * forward scan if room remains. */ - more: while (ib && pageout_count < vm_pageout_page_count) { vm_page_t p; @@ -359,7 +357,6 @@ * the parent to do more sophisticated things we may have to change * the ordering. */ - int vm_pageout_flush(mc, count, flags) vm_page_t *mc; @@ -382,7 +379,6 @@ * NOTE! mc[i]->dirty may be partial or fragmented due to an * edge case with file fragments. */ - for (i = 0; i < count; i++) { KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count)); vm_page_io_start(mc[i]); @@ -479,9 +475,9 @@ remove_mode = map_remove_only; if (object->shadow_count > 1) remove_mode = 1; - /* - * scan the objects entire memory queue - */ + /* + * scan the objects entire memory queue + */ rcount = object->resident_page_count; p = TAILQ_FIRST(&object->memq); while (p && (rcount-- > 0)) { @@ -606,14 +602,13 @@ vm_map_unlock(map); return; } -#endif +#endif /* !defined(NO_SWAPPING) */ /* * Don't try to be fancy - being fancy can lead to VOP_LOCK's and therefore * to vnode deadlocks. We only do it for OBJT_DEFAULT and OBJT_SWAP objects * which we know can be trivially freed. */ - void vm_pageout_page_free(vm_page_t m) { vm_object_t object = m->object; @@ -690,12 +685,10 @@ * daemon cannot clean enough pages in the first pass, we let it go * all out in succeeding passes. */ - if ((maxlaunder = vm_max_launder) <= 1) maxlaunder = 1; if (pass) maxlaunder = 10000; - rescan0: addl_page_shortage = addl_page_shortage_init; maxscan = cnt.v_inactive_count; @@ -727,7 +720,7 @@ continue; } /* - * Dont mess with busy pages, keep in the front of the + * Don't mess with busy pages, keep in the front of the * queue, most likely are being paged out. */ if (m->busy || (m->flags & PG_BUSY)) { @@ -972,7 +965,6 @@ * track the per-page activity counter and use it to locate * deactivation candidates. 
*/ - pcount = cnt.v_active_count; m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl); @@ -1061,7 +1053,6 @@ * are considered basically 'free', moving pages from cache to free * does not effect other calculations. */ - while (cnt.v_free_count < cnt.v_free_reserved) { static int cache_rover = 0; m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE); @@ -1305,7 +1296,6 @@ return 1; } - /* * vm_pageout is the high level pageout daemon. */ @@ -1319,7 +1309,6 @@ /* * Initialize some paging parameters. */ - cnt.v_interrupt_free_min = 2; if (cnt.v_page_count < 2000) vm_pageout_page_count = 8; @@ -1367,7 +1356,6 @@ vm_pageout_stats_interval = 5; if (vm_pageout_full_stats_interval == 0) vm_pageout_full_stats_interval = vm_pageout_stats_interval * 4; - /* * Set maximum free per pass @@ -1469,7 +1457,6 @@ * scan the processes for exceeding their rlimits or if * process is swapped out -- deactivate pages */ - sx_slock(&allproc_lock); LIST_FOREACH(p, &allproc, p_list) { vm_pindex_t limit, size; @@ -1515,4 +1502,4 @@ sx_sunlock(&allproc_lock); } } -#endif +#endif /* !defined(NO_SWAPPING) */ Index: vm_pageout.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_pageout.h,v retrieving revision 1.28 diff -u -r1.28 vm_pageout.h --- vm_pageout.h 19 Feb 2002 18:34:02 -0000 1.28 +++ vm_pageout.h 6 Mar 2002 14:31:24 -0000 @@ -109,7 +109,5 @@ void vm_pageout_cluster __P((vm_page_t, vm_object_t)); int vm_pageout_flush __P((vm_page_t *, int, int)); void vm_pageout_page_free __P((vm_page_t)); - -#endif - #endif +#endif /* _VM_VM_PAGEOUT_H_ */ Index: vm_pageq.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_pageq.c,v retrieving revision 1.3 diff -u -r1.3 vm_pageq.c --- vm_pageq.c 4 Mar 2002 18:55:26 -0000 1.3 +++ vm_pageq.c 6 Mar 2002 15:02:25 -0000 @@ -58,7 +58,7 @@ mtx_lock(&vm_pageq_mtx[queue]); #endif } - return(vpq); + return (vpq); } void @@ -127,7 +127,6 @@ * This routine must be called at splhigh(). * This routine may not block. */ - void vm_pageq_remove_nowakeup(vm_page_t m) { @@ -150,7 +149,6 @@ * This routine must be called at splhigh(). * This routine may not block. */ - void vm_pageq_remove(vm_page_t m) { @@ -190,7 +188,6 @@ * This routine may only be called from the vm_page_list_find() macro * in vm_page.h */ - static __inline vm_page_t _vm_pageq_find(int basequeue, int index) { @@ -206,18 +203,16 @@ * same place. Even though this is not totally optimal, we've already * blown it by missing the cache case so we do not care. */ - - for(i = PQ_L2_SIZE / 2; i > 0; --i) { + for (i = PQ_L2_SIZE / 2; i > 0; --i) { if ((m = TAILQ_FIRST(&pq[(index + i) & PQ_L2_MASK].pl)) != NULL) break; if ((m = TAILQ_FIRST(&pq[(index - i) & PQ_L2_MASK].pl)) != NULL) break; } - return(m); + return (m); } - -#endif +#endif /* PQ_L2_SIZE > 1 */ vm_page_t vm_pageq_find(int basequeue, int index, boolean_t prefer_zero) @@ -242,6 +237,6 @@ m = TAILQ_FIRST(&vm_page_queues[basequeue].pl); } #endif - return(m); + return (m); } Index: vm_pager.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_pager.c,v retrieving revision 1.77 diff -u -r1.77 vm_pager.c --- vm_pager.c 5 Mar 2002 18:20:57 -0000 1.77 +++ vm_pager.c 6 Mar 2002 14:32:09 -0000 @@ -272,7 +272,6 @@ * called with no specific spl * Execute strategy routine directly to pager. 
*/ - void vm_pager_strategy(vm_object_t object, struct bio *bp) { @@ -310,7 +309,6 @@ if (pgops && ((*pgops)->pgo_sync != NULL)) (*(*pgops)->pgo_sync) (); } - #endif vm_offset_t Index: vm_pager.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_pager.h,v retrieving revision 1.33 diff -u -r1.33 vm_pager.h --- vm_pager.h 4 Jul 2001 16:20:28 -0000 1.33 +++ vm_pager.h 6 Mar 2002 15:02:30 -0000 @@ -83,7 +83,6 @@ #define VM_PAGER_PUT_INVAL 0x2 #ifdef _KERNEL - #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_VMPGDATA); #endif @@ -114,7 +113,6 @@ * ( or into VM space somewhere ). If the pagein was successful, we * must fully validate it. */ - static __inline int vm_pager_get_pages( vm_object_t object, @@ -130,7 +128,7 @@ if (r == VM_PAGER_OK && m[reqpage]->valid != VM_PAGE_BITS_ALL) { vm_page_zero_invalid(m[reqpage], TRUE); } - return(r); + return (r); } static __inline void @@ -156,7 +154,6 @@ * * This routine does not have to be called at any particular spl. */ - static __inline boolean_t vm_pager_has_page( vm_object_t object, @@ -179,7 +176,6 @@ * * This function may not block. */ - static __inline void vm_pager_page_unswapped(vm_page_t m) { @@ -188,6 +184,5 @@ (*pagertab[m->object->type]->pgo_pageunswapped)(m); } -#endif - +#endif /* _KERNEL */ #endif /* _VM_PAGER_ */ Index: vm_swap.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_swap.c,v retrieving revision 1.113 diff -u -r1.113 vm_swap.c --- vm_swap.c 27 Feb 2002 18:32:23 -0000 1.113 +++ vm_swap.c 6 Mar 2002 03:15:40 -0000 @@ -82,7 +82,6 @@ * * The bp is expected to be locked and *not* B_DONE on call. */ - static int swapdev_strategy(ap) struct vop_strategy_args /* { Index: vnode_pager.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vnode_pager.c,v retrieving revision 1.141 diff -u -r1.141 vnode_pager.c --- vnode_pager.c 27 Feb 2002 18:32:23 -0000 1.141 +++ vnode_pager.c 6 Mar 2002 14:20:24 -0000 @@ -229,7 +229,7 @@ after, before); if (err) return TRUE; - if ( bn == -1) + if (bn == -1) return FALSE; if (pagesperblock > 0) { poff = pindex - (reqblock * pagesperblock); @@ -393,7 +393,7 @@ rtaddress = -1; else { rtaddress = block + voffset / DEV_BSIZE; - if( run) { + if (run) { *run += 1; *run *= bsize/PAGE_SIZE; *run -= voffset/PAGE_SIZE; @@ -681,7 +681,6 @@ * clean up and return. Otherwise we have to re-read the * media. */ - if (m[reqpage]->valid == VM_PAGE_BITS_ALL) { for (i = 0; i < count; i++) { if (i != reqpage) @@ -694,12 +693,12 @@ /* * here on direct device I/O */ - firstaddr = -1; + /* * calculate the run that includes the required page */ - for(first = 0, i = 0; i < count; i = runend) { + for (first = 0, i = 0; i < count; i = runend) { firstaddr = vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &runpg); if (firstaddr == -1) { @@ -920,7 +919,6 @@ /* * Call device-specific putpages function */ - vp = object->handle; if (vp->v_type != VREG) mp = NULL; Index: vnode_pager.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vnode_pager.h,v retrieving revision 1.15 diff -u -r1.15 vnode_pager.h --- vnode_pager.h 19 Apr 2001 06:18:22 -0000 1.15 +++ vnode_pager.h 6 Mar 2002 14:33:30 -0000 @@ -55,6 +55,5 @@ int vnode_pager_generic_putpages __P((struct vnode *vp, vm_page_t *m, int count, boolean_t sync, int *rtvals)); -#endif - +#endif /* _KERNEL */ #endif /* _VNODE_PAGER_ */
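
The vm_page.c hunks above pass through vm_page_bits(), whose comment notes that inputs are required to range within a page and whose visible context computes the first and last DEV_BSIZE chunk touched by a (base, size) byte range. The following is a minimal userland sketch of that chunk-mask calculation, offered only to illustrate the idea; the constants, the helper name page_bits(), and the final mask construction are assumptions for illustration and are not taken verbatim from the kernel sources.

/*
 * Sketch of the DEV_BSIZE chunk-mask computation described in the
 * vm_page_bits() context above.  Assumed values: 4K pages, 512-byte
 * device blocks.  The mask-building expression at the end is one
 * plausible encoding (one bit per chunk), not the kernel's code.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE   4096            /* assumed page size */
#define DEV_BSIZE   512             /* assumed device block size */
#define DEV_BSHIFT  9               /* log2(DEV_BSIZE) */

static int
page_bits(int base, int size)
{
        int first_bit, last_bit;

        /* Inputs are required to range within a single page. */
        assert(base + size <= PAGE_SIZE);

        if (size == 0)              /* degenerate case: no chunks touched */
                return (0);

        first_bit = base >> DEV_BSHIFT;              /* first chunk index */
        last_bit = (base + size - 1) >> DEV_BSHIFT;  /* last chunk index */

        /* Set one bit per chunk in [first_bit, last_bit]. */
        return ((2 << last_bit) - (1 << first_bit));
}

int
main(void)
{
        /* A 1KB range starting 100 bytes into the page covers chunks 0-2. */
        printf("0x%x\n", page_bits(100, 1024));      /* prints 0x7 */
        return (0);
}

This also mirrors how vm_page_set_validclean() (context above) decides whether the first or last block is only partially covered: when base or base + size is not DEV_BSIZE-aligned and the corresponding valid bit is clear, the partial block has to be zeroed before the valid bits are set.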