Index: device_pager.c =================================================================== RCS file: /home/ncvs/src/sys/vm/device_pager.c,v retrieving revision 1.52 diff -u -r1.52 device_pager.c --- device_pager.c 4 Jul 2001 16:20:26 -0000 1.52 +++ device_pager.c 6 Mar 2002 17:15:42 -0000 @@ -113,11 +113,11 @@ */ dev = handle; mapfunc = devsw(dev)->d_mmap; - if (mapfunc == NULL || mapfunc == (d_mmap_t *)nullop) { - printf("obsolete map function %p\n", (void *)mapfunc); - return (NULL); - } - + /* XXX Should these move to a generic invariant function for devs? */ + KASSERT(mapfunc != NULL, + ("Missing map function for device %p", dev)); + KASSERT(mapfunc != (d_mmap_t *)nullop, + ("nullop (obsolete) map function for device %p", dev)); /* * Offset should be page aligned. */ @@ -220,10 +220,11 @@ * all of the original pages. */ page = dev_pager_getfake(paddr); + if (page == NULL) + return (VM_PAGER_FAIL); TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq); - for (i = 0; i < count; i++) { + for (i = 0; i < count; i++) vm_page_free(m[i]); - } vm_page_insert(page, object, offset); return (VM_PAGER_OK); @@ -261,7 +262,10 @@ vm_page_t m; m = zalloc(fakepg_zone); - + if (m == NULL) { + printf("dev_pager_getfake: Unable to allocate page header\n"); + return (NULL); + } m->flags = PG_BUSY | PG_FICTITIOUS; m->valid = VM_PAGE_BITS_ALL; m->dirty = 0; Index: pmap.h =================================================================== RCS file: /home/ncvs/src/sys/vm/pmap.h,v retrieving revision 1.42 diff -u -r1.42 pmap.h --- pmap.h 27 Feb 2002 18:03:02 -0000 1.42 +++ pmap.h 6 Mar 2002 03:00:37 -0000 @@ -93,6 +93,8 @@ #ifdef __alpha__ void pmap_page_is_free __P((vm_page_t m)); +#else +#define pmap_page_is_free(m) #endif void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t)); void pmap_clear_modify __P((vm_page_t m)); Index: swap_pager.c =================================================================== RCS file: /home/ncvs/src/sys/vm/swap_pager.c,v retrieving revision 1.167 diff -u -r1.167 swap_pager.c --- swap_pager.c 27 Feb 2002 19:18:10 -0000 1.167 +++ swap_pager.c 6 Mar 2002 16:43:54 -0000 @@ -595,6 +595,8 @@ s = splvm(); while (size) { if (n == 0) { + /* XXX I am not certain I like this control structure, + * reading-wise */ n = BLIST_MAX_ALLOC; while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) { n >>= 1; @@ -874,13 +876,9 @@ GIANT_REQUIRED; - /* XXX: KASSERT instead ? */ - if (bp->bio_bcount & PAGE_MASK) { - biofinish(bp, NULL, EINVAL); - printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount); - return; - } - + KASSERT((bp->bio_bcount & PAGE_MASK) == 0, + ("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", + bp, (int)bp->bio_pblkno, (int)bp->bio_bcount)); /* * Clear error indication, initialize page index, count, data pointer. */ @@ -1192,6 +1190,7 @@ vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED); cnt.v_intrans++; if (tsleep(mreq, PSWP, "swread", hz*20)) { + /* XXX panic? Ask user to contact us? 
*/ printf( "swap_pager: indefinite wait buffer: device:" " %s, blkno: %ld, size: %ld\n", @@ -1729,16 +1728,11 @@ struct swblock *swap; index &= ~SWAP_META_MASK; - pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask]; - - while ((swap = *pswap) != NULL) { - if (swap->swb_object == object && - swap->swb_index == index - ) { + for (pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask]; + (swap = *pswap); + pswap = &swap->swb_hnext) + if (swap->swb_object == object && swap->swb_index == index) break; - } - pswap = &swap->swb_hnext; - } return(pswap); } Index: vm_fault.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_fault.c,v retrieving revision 1.128 diff -u -r1.128 vm_fault.c --- vm_fault.c 19 Feb 2002 18:34:02 -0000 1.128 +++ vm_fault.c 6 Mar 2002 16:44:56 -0000 @@ -331,7 +331,7 @@ * around with a vm_page_t->busy page except, perhaps, * to pmap it. */ - if ((fs.m->flags & PG_BUSY) || fs.m->busy) { + if (VM_PAGE_ISBUSY(fs.m)) { unlock_things(&fs); (void)vm_page_sleep_busy(fs.m, TRUE, "vmpfw"); cnt.v_intrans++; @@ -449,8 +449,8 @@ mt = vm_page_lookup( fs.first_object, tmppindex); if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL)) break; - if (mt->busy || - (mt->flags & (PG_BUSY | PG_FICTITIOUS | PG_UNMANAGED)) || + if (VM_PAGE_ISBUSY(mt) || + (mt->flags & (PG_FICTITIOUS | PG_UNMANAGED)) || mt->hold_count || mt->wire_count) continue; @@ -547,8 +547,8 @@ * around having the machine panic on a kernel space * fault w/ I/O error. */ - if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) || - (rv == VM_PAGER_BAD)) { + if ((fs.map != kernel_map && rv == VM_PAGER_ERROR) || + rv == VM_PAGER_BAD) { vm_page_free(fs.m); fs.m = NULL; unlock_and_deallocate(&fs); @@ -854,6 +854,7 @@ /* * Sanity check: page must be completely valid or it is not fit to * map into user space. vm_pager_get_pages() ensures this. + * XXX KASSERT? */ if (fs.m->valid != VM_PAGE_BITS_ALL) { @@ -863,9 +864,8 @@ pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired); - if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) { + if (!(fault_flags & VM_FAULT_WIRE_MASK) && !wired) pmap_prefault(fs.map->pmap, vaddr, fs.entry); - } vm_page_flag_clear(fs.m, PG_ZERO); vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED); @@ -882,17 +882,15 @@ vm_page_wire(fs.m); else vm_page_unwire(fs.m, 1); - } else { + } else vm_page_activate(fs.m); - } mtx_lock_spin(&sched_lock); if (curproc && (curproc->p_sflag & PS_INMEM) && curproc->p_stats) { - if (hardfault) { + if (hardfault) curproc->p_stats->p_ru.ru_majflt++; - } else { + else curproc->p_stats->p_ru.ru_minflt++; - } } mtx_unlock_spin(&sched_lock); Index: vm_glue.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_glue.c,v retrieving revision 1.126 diff -u -r1.126 vm_glue.c --- vm_glue.c 26 Feb 2002 01:01:37 -0000 1.126 +++ vm_glue.c 6 Mar 2002 16:45:20 -0000 @@ -269,6 +269,7 @@ */ p2->p_stats = &up->u_stats; if (p2->p_sigacts == NULL) { + /* XXX KASSERT? */ if (p2->p_procsig->ps_refcnt != 1) printf ("PID:%d NULL sigacts with refcnt not 1!\n",p2->p_pid); p2->p_sigacts = &up->u_sigacts; Index: vm_map.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_map.c,v retrieving revision 1.213 diff -u -r1.213 vm_map.c --- vm_map.c 7 Mar 2002 03:54:56 -0000 1.213 +++ vm_map.c 10 Mar 2002 14:57:28 -0000 @@ -1146,8 +1146,9 @@ * Make a first pass to check for protection violations. 
*/ - current = entry; - while ((current != &map->header) && (current->start < end)) { + for (current = entry; + current != &map->header && current->start < end; + current = current->next) { if (current->eflags & MAP_ENTRY_IS_SUB_MAP) { vm_map_unlock(map); return (KERN_INVALID_ARGUMENT); @@ -1156,7 +1157,6 @@ vm_map_unlock(map); return (KERN_PROTECTION_FAILURE); } - current = current->next; } /* @@ -1164,9 +1164,9 @@ * necessary the second time.] */ - current = entry; - - while ((current != &map->header) && (current->start < end)) { + for (current = entry; + current != &map->header && current->start < end; + current = current->next) { vm_prot_t old_prot; vm_map_clip_end(map, current, end); @@ -1195,8 +1195,6 @@ } vm_map_simplify_entry(map, current); - - current = current->next; } vm_map_unlock(map); @@ -1398,14 +1396,12 @@ } else entry = temp_entry->next; - while ((entry != &map->header) && (entry->start < end)) { + for (; + entry != &map->header && entry->start < end; + entry = entry->next) { vm_map_clip_end(map, entry, end); - entry->inheritance = new_inheritance; - vm_map_simplify_entry(map, entry); - - entry = entry->next; } vm_map_unlock(map); @@ -1437,16 +1433,16 @@ } if (new_pageable) { - - entry = start_entry; - vm_map_clip_start(map, entry, start); + vm_map_clip_start(map, start_entry, start); /* * Now decrement the wiring count for each region. If a region * becomes completely unwired, unwire its physical pages and * mappings. */ - while ((entry != &map->header) && (entry->start < end)) { + for (entry = start_entry; + entry != &map->header && entry->start < end; + entry = entry->next) { if (entry->eflags & MAP_ENTRY_USER_WIRED) { vm_map_clip_end(map, entry, end); entry->eflags &= ~MAP_ENTRY_USER_WIRED; @@ -1455,45 +1451,33 @@ vm_fault_unwire(map, entry->start, entry->end); } vm_map_simplify_entry(map,entry); - entry = entry->next; } } else { - - entry = start_entry; - - while ((entry != &map->header) && (entry->start < end)) { - - if (entry->eflags & MAP_ENTRY_USER_WIRED) { - entry = entry->next; + for (entry = start_entry; + entry != &map->header && entry->start < end; + entry = entry->next) { + if (entry->eflags & MAP_ENTRY_USER_WIRED) continue; - } - if (entry->wired_count != 0) { entry->wired_count++; entry->eflags |= MAP_ENTRY_USER_WIRED; - entry = entry->next; continue; } /* Here on entry being newly wired */ - - if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { - int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; - if (copyflag && ((entry->protection & VM_PROT_WRITE) != 0)) { - + if (!(entry->eflags & MAP_ENTRY_IS_SUB_MAP)) { + if (entry->eflags & MAP_ENTRY_NEEDS_COPY && + entry->protection & VM_PROT_WRITE) { vm_object_shadow(&entry->object.vm_object, &entry->offset, atop(entry->end - entry->start)); entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; - } else if (entry->object.vm_object == NULL && !map->system_map) { - entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT, atop(entry->end - entry->start)); entry->offset = (vm_offset_t) 0; - } } @@ -1549,7 +1533,7 @@ return (KERN_INVALID_ADDRESS); } } - vm_map_simplify_entry(map,entry); + vm_map_simplify_entry(map, entry); } } map->timestamp++; @@ -1598,7 +1582,6 @@ vm_map_unlock(map); return (KERN_INVALID_ADDRESS); } - entry = start_entry; /* * Actions are rather different for wiring and unwiring, so we have @@ -1606,15 +1589,14 @@ */ if (new_pageable) { - - vm_map_clip_start(map, entry, start); - + vm_map_clip_start(map, start_entry, start); /* * Unwiring. 
First ensure that the range to be unwired is * really wired down and that there are no holes. */ - while ((entry != &map->header) && (entry->start < end)) { - + for (entry = start_entry; + entry != &map->header && entry->start < end; + entry = entry->next) if (entry->wired_count == 0 || (entry->end < end && (entry->next == &map->header || @@ -1622,25 +1604,19 @@ vm_map_unlock(map); return (KERN_INVALID_ARGUMENT); } - entry = entry->next; - } - /* * Now decrement the wiring count for each region. If a region * becomes completely unwired, unwire its physical pages and * mappings. */ - entry = start_entry; - while ((entry != &map->header) && (entry->start < end)) { + for (entry = start_entry; + entry != &map->header && entry->start < end; + entry = entry->next) { vm_map_clip_end(map, entry, end); - entry->wired_count--; if (entry->wired_count == 0) vm_fault_unwire(map, entry->start, entry->end); - vm_map_simplify_entry(map, entry); - - entry = entry->next; } } else { /* @@ -1671,9 +1647,10 @@ /* * Pass 1. */ - while ((entry != &map->header) && (entry->start < end)) { + for (entry = start_entry; + entry != &map->header && entry->start < end; + entry = entry->next) { if (entry->wired_count == 0) { - /* * Perform actions of vm_map_lookup that need * the write lock on the map: create a shadow @@ -1685,10 +1662,8 @@ * hold the lock on the sub map. */ if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { - int copyflag = entry->eflags & MAP_ENTRY_NEEDS_COPY; - if (copyflag && - ((entry->protection & VM_PROT_WRITE) != 0)) { - + if (entry->eflags & MAP_ENTRY_NEEDS_COPY && + entry->protection & VM_PROT_WRITE) { vm_object_shadow(&entry->object.vm_object, &entry->offset, atop(entry->end - entry->start)); @@ -1705,7 +1680,6 @@ vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); entry->wired_count++; - /* * Check for holes */ @@ -1717,14 +1691,13 @@ * need to be undone, but the wired counts * need to be restored. */ - while (entry != &map->header && entry->end > start) { + for (; + entry != &map->header && entry->end > start; + entry = entry->prev) entry->wired_count--; - entry = entry->prev; - } vm_map_unlock(map); return (KERN_INVALID_ARGUMENT); } - entry = entry->next; } /* @@ -1750,8 +1723,9 @@ } rv = 0; - entry = start_entry; - while (entry != &map->header && entry->start < end) { + for (entry = start_entry; + entry != &map->header && entry->start < end; + entry = entry->next) { /* * If vm_fault_wire fails for any page we need to undo * what has been done. We decrement the wiring count @@ -1770,7 +1744,6 @@ entry->wired_count--; } } - entry = entry->next; } if (vm_map_pmap(map) == kernel_pmap) { @@ -1978,6 +1951,7 @@ vm_object_t object; vm_map_entry_t entry; vm_map_entry_t first_entry; + vm_map_entry_t next; GIANT_REQUIRED; @@ -2008,11 +1982,12 @@ } /* - * Step through all entries in this region + * Step through and delete all entries in this region */ - - while ((entry != &map->header) && (entry->start < end)) { - vm_map_entry_t next; + for (; + (next = entry->next), + (entry != &map->header && entry->start < end); + entry = next) { vm_offset_t s, e; vm_pindex_t offidxstart, offidxend, count; @@ -2063,7 +2038,6 @@ * modify bits will be set in the wrong object!) 
*/ vm_map_entry_delete(map, entry); - entry = next; } return (KERN_SUCCESS); } @@ -2110,34 +2084,19 @@ vm_map_entry_t tmp_entry; GIANT_REQUIRED; - - if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if (!vm_map_lookup_entry(map, start, &tmp_entry)) return (FALSE); - } - entry = tmp_entry; - - while (start < end) { - if (entry == &map->header) { + for (entry = tmp_entry; + start < end; + (start = entry->end), (entry = entry->next)) { + if (entry == &map->header) return (FALSE); - } - /* - * No holes allowed! - */ - - if (start < entry->start) { + /* No holes allowed! */ + if (start < entry->start) return (FALSE); - } - /* - * Check protection associated with entry. - */ - - if ((entry->protection & protection) != protection) { + /* Check protection associated with entry. */ + if ((entry->protection & protection) != protection) return (FALSE); - } - /* go to next entry */ - - start = entry->end; - entry = entry->next; } return (TRUE); } @@ -2337,9 +2296,9 @@ new_map = &vm2->vm_map; /* XXX */ new_map->timestamp = 1; - old_entry = old_map->header.next; - - while (old_entry != &old_map->header) { + for (old_entry = old_map->header.next; + old_entry != &old_map->header; + old_entry = old_entry->next) { if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP) panic("vm_map_fork: encountered a submap"); @@ -2418,7 +2377,6 @@ new_entry); break; } - old_entry = old_entry->next; } new_map->size = old_map->size; @@ -3157,7 +3115,7 @@ if (object->shadow_count > object->ref_count) panic("vm_freeze_copyopts: sc > rc"); - while((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) { + while ((robject = TAILQ_FIRST(&object->shadow_head))) { vm_pindex_t bo_pindex; vm_page_t m_in, m_out; Index: vm_map.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_map.h,v retrieving revision 1.69 diff -u -r1.69 vm_map.h --- vm_map.h 5 Feb 2002 21:23:05 -0000 1.69 +++ vm_map.h 6 Mar 2002 15:08:56 -0000 @@ -140,7 +140,7 @@ #define MAP_ENTRY_NOCOREDUMP 0x0400 /* don't include in a core */ /* - * Maps are doubly-linked lists of map entries, kept sorted + * Maps are circular doubly-linked lists of map entries, kept sorted * by address. A single hint is provided to start * searches again from the last successful search, * insertion, or removal. @@ -163,6 +163,7 @@ unsigned int timestamp; /* Version number */ vm_map_entry_t first_free; /* First free space hint */ struct pmap *pmap; /* Physical map */ +/* XXX Kill? */ #define min_offset header.start #define max_offset header.end }; @@ -178,6 +179,7 @@ int vm_refcnt; /* number of references */ caddr_t vm_shm; /* SYS5 shared memory private data XXX */ /* we copy from vm_startcopy to the end of the structure on fork */ +/* XXX Kill? */ #define vm_startcopy vm_rssize segsz_t vm_rssize; /* current resident set size in pages */ segsz_t vm_swrss; /* resident set size before last swap */ @@ -188,6 +190,7 @@ caddr_t vm_daddr; /* user virtual address of data XXX */ caddr_t vm_maxsaddr; /* user VA at max stack growth */ caddr_t vm_minsaddr; /* user VA at max stack growth */ +/* XXX Kill? 
*/ #define vm_endcopy vm_freer struct proc *vm_freer; /* vm freed on whose behalf */ }; Index: vm_mmap.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_mmap.c,v retrieving revision 1.133 diff -u -r1.133 vm_mmap.c --- vm_mmap.c 27 Feb 2002 18:32:23 -0000 1.133 +++ vm_mmap.c 6 Mar 2002 22:47:51 -0000 @@ -1150,6 +1150,10 @@ * cause pmap inconsistencies...so we want to be sure to * disallow this in all cases. */ + /* XXX Do we really want to allow calls here which are not page + * aligned? Probably a KASSERT *and* this or a similar check for the + * normal case (WITH printf, methinks) would be better? + */ if (foff & PAGE_MASK) return (EINVAL); Index: vm_object.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_object.c,v retrieving revision 1.201 diff -u -r1.201 vm_object.c --- vm_object.c 6 Mar 2002 02:42:56 -0000 1.201 +++ vm_object.c 10 Mar 2002 15:04:48 -0000 @@ -343,11 +343,9 @@ #endif object->ref_count++; - if (object->type == OBJT_VNODE) { - while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curthread)) { + if (object->type == OBJT_VNODE) + while (vget((struct vnode *)object->handle, LK_RETRY|LK_NOOBJ, curthread)) printf("vm_object_reference: delay in getting object\n"); - } - } } /* @@ -437,10 +435,8 @@ robject->ref_count++; - while ( - robject->paging_in_progress || - object->paging_in_progress - ) { + while (robject->paging_in_progress || + object->paging_in_progress) { vm_object_pip_sleep(robject, "objde1"); vm_object_pip_sleep(object, "objde2"); } @@ -542,18 +538,16 @@ * remove them from the object. */ s = splvm(); - while ((p = TAILQ_FIRST(&object->memq)) != NULL) { - KASSERT(!p->busy && (p->flags & PG_BUSY) == 0, + while ((p = TAILQ_FIRST(&object->memq))) { + KASSERT(!VM_PAGE_ISBUSY(p), ("vm_object_terminate: freeing busy page %p " "p->busy = %d, p->flags %x\n", p, p->busy, p->flags)); + vm_page_busy(p); if (p->wire_count == 0) { - vm_page_busy(p); vm_page_free(p); cnt.v_pfree++; - } else { - vm_page_busy(p); + } else vm_page_remove(p); - } } splx(s); @@ -615,11 +609,7 @@ vm_object_set_flag(object, OBJ_CLEANING); tstart = start; - if (end == 0) { - tend = object->size; - } else { - tend = end; - } + tend = end == 0 ? object->size : end; /* * If the caller is smart and only msync()s a range he knows is @@ -839,12 +829,15 @@ (tp->flags & PG_CLEANCHK) == 0 || (tp->busy != 0)) break; - if((tp->queue - tp->pc) == PQ_CACHE) { + if (VM_PAGE_ISBUSY(tp) || + !(tp->flags & PG_CLEANCHK)) + break; + if ((tp->queue - tp->pc) == PQ_CACHE) { vm_page_flag_clear(tp, PG_CLEANCHK); break; } vm_page_test_dirty(tp); - if ((tp->dirty & tp->valid) == 0) { + if (!(tp->dirty & tp->valid)) { vm_page_flag_clear(tp, PG_CLEANCHK); break; } @@ -1151,6 +1144,7 @@ vm_page_t p; vm_object_t backing_object; vm_pindex_t backing_offset_index; + vm_page_t next; s = splvm(); GIANT_REQUIRED; @@ -1185,10 +1179,11 @@ * Our scan */ - p = TAILQ_FIRST(&backing_object->memq); - while (p) { - vm_page_t next = TAILQ_NEXT(p, listq); + for (p = TAILQ_FIRST(&backing_object->memq); + p != NULL; + p = next) { vm_pindex_t new_pindex = p->pindex - backing_offset_index; + next = TAILQ_NEXT(p, listq); if (op & OBSC_TEST_ALL_SHADOWED) { vm_page_t pp; @@ -1202,13 +1197,9 @@ * page. 
*/ - if ( - p->pindex < backing_offset_index || - new_pindex >= object->size - ) { - p = next; + if (p->pindex < backing_offset_index || + new_pindex >= object->size) continue; - } /* * See if the parent has the page or if the parent's @@ -1221,10 +1212,8 @@ */ pp = vm_page_lookup(object, new_pindex); - if ( - (pp == NULL || pp->valid == 0) && - !vm_pager_has_page(object, new_pindex, NULL, NULL) - ) { + if ((pp == NULL || pp->valid == 0) && + !vm_pager_has_page(object, new_pindex, NULL, NULL)) { r = 0; break; } @@ -1238,16 +1227,11 @@ vm_page_t pp; if (op & OBSC_COLLAPSE_NOWAIT) { - if ( - (p->flags & PG_BUSY) || + if (VM_PAGE_ISBUSY(p) || !p->valid || p->hold_count || - p->wire_count || - p->busy - ) { - p = next; + p->wire_count) continue; - } } else if (op & OBSC_COLLAPSE_WAIT) { if (vm_page_sleep_busy(p, TRUE, "vmocol")) { /* @@ -1257,7 +1241,7 @@ * should not have changed so we * just restart our scan. */ - p = TAILQ_FIRST(&backing_object->memq); + next = TAILQ_FIRST(&backing_object->memq); continue; } } @@ -1267,10 +1251,8 @@ */ vm_page_busy(p); - KASSERT( - p->object == backing_object, - ("vm_object_qcollapse(): object mismatch") - ); + KASSERT(p->object == backing_object, + ("vm_object_qcollapse(): object mismatch")); /* * Destroy any associated swap @@ -1293,7 +1275,6 @@ */ vm_page_protect(p, VM_PROT_NONE); vm_page_free(p); - p = next; continue; } @@ -1311,7 +1292,6 @@ */ vm_page_protect(p, VM_PROT_NONE); vm_page_free(p); - p = next; continue; } @@ -1328,7 +1308,6 @@ vm_page_rename(p, object, new_pindex); /* page automatically made dirty by rename */ } - p = next; } splx(s); return(r); @@ -1369,7 +1348,7 @@ { GIANT_REQUIRED; - while (TRUE) { + for (;;) { vm_object_t backing_object; /* @@ -1627,40 +1606,31 @@ } } } else { - while (size > 0) { - if ((p = vm_page_lookup(object, start)) != 0) { - - if (p->wire_count != 0) { - vm_page_protect(p, VM_PROT_NONE); - if (!clean_only) - p->valid = 0; - start += 1; - size -= 1; + for (; size > 0; start++, size--) { + p = vm_page_lookup(object, start); + if (!p) + continue; + if (p->wire_count != 0) { + vm_page_protect(p, VM_PROT_NONE); + if (!clean_only) + p->valid = 0; + continue; + } + /* + * The busy flags are only cleared at + * interrupt -- minimize the spl transitions + */ + if (vm_page_sleep_busy(p, TRUE, "vmopar")) + goto again; + if (clean_only && p->valid) { + vm_page_test_dirty(p); + if (p->valid & p->dirty) { continue; } - - /* - * The busy flags are only cleared at - * interrupt -- minimize the spl transitions - */ - if (vm_page_sleep_busy(p, TRUE, "vmopar")) - goto again; - - if (clean_only && p->valid) { - vm_page_test_dirty(p); - if (p->valid & p->dirty) { - start += 1; - size -= 1; - continue; - } - } - - vm_page_busy(p); - vm_page_protect(p, VM_PROT_NONE); - vm_page_free(p); } - start += 1; - size -= 1; + vm_page_busy(p); + vm_page_protect(p, VM_PROT_NONE); + vm_page_free(p); } } vm_object_pip_wakeup(object); @@ -1789,27 +1759,22 @@ tmpe = map->header.next; entcount = map->nentries; while (entcount-- && (tmpe != &map->header)) { - if( _vm_object_in_map(map, object, tmpe)) { + if (_vm_object_in_map(map, object, tmpe)) return 1; - } tmpe = tmpe->next; } } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { tmpm = entry->object.sub_map; tmpe = tmpm->header.next; - entcount = tmpm->nentries; - while (entcount-- && tmpe != &tmpm->header) { - if( _vm_object_in_map(tmpm, object, tmpe)) { + for (entcount = tmpm->nentries; + (entcount-- && tmpe != &tmpm->header); + tmpe = tmpe->next) + if (_vm_object_in_map(tmpm, object, tmpe)) return 
1; - } - tmpe = tmpe->next; - } - } else if ((obj = entry->object.vm_object) != NULL) { + } else if ((obj = entry->object.vm_object) != NULL) for (; obj; obj = obj->backing_object) - if( obj == object) { + if (obj == object) return 1; - } - } return 0; } @@ -1820,21 +1785,21 @@ /* sx_slock(&allproc_lock); */ LIST_FOREACH(p, &allproc, p_list) { - if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) + if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) continue; - if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { + if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { /* sx_sunlock(&allproc_lock); */ return 1; } } /* sx_sunlock(&allproc_lock); */ - if( _vm_object_in_map( kernel_map, object, 0)) + if (_vm_object_in_map(kernel_map, object, 0)) return 1; - if( _vm_object_in_map( kmem_map, object, 0)) + if (_vm_object_in_map(kmem_map, object, 0)) return 1; - if( _vm_object_in_map( pager_map, object, 0)) + if (_vm_object_in_map(pager_map, object, 0)) return 1; - if( _vm_object_in_map( buffer_map, object, 0)) + if (_vm_object_in_map(buffer_map, object, 0)) return 1; return 0; } @@ -1850,10 +1815,9 @@ TAILQ_FOREACH(object, &vm_object_list, object_list) { if (object->handle == NULL && (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { - if (object->ref_count == 0) { + if (object->ref_count == 0) db_printf("vmochk: internal obj has zero ref count: %ld\n", (long)object->size); - } if (!vm_object_in_map(object)) { db_printf( "vmochk: internal obj is not in a map: " Index: vm_object.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_object.h,v retrieving revision 1.75 diff -u -r1.75 vm_object.h --- vm_object.h 26 Oct 2001 00:08:05 -0000 1.75 +++ vm_object.h 6 Mar 2002 01:59:30 -0000 @@ -164,10 +164,6 @@ extern vm_object_t kernel_object; /* the single kernel object */ extern vm_object_t kmem_object; -#endif /* _KERNEL */ - -#ifdef _KERNEL - void vm_object_set_flag(vm_object_t object, u_short bits); void vm_object_clear_flag(vm_object_t object, u_short bits); void vm_object_pip_add(vm_object_t object, short i); @@ -196,5 +192,4 @@ void vm_object_madvise (vm_object_t, vm_pindex_t, int, int); void vm_object_init2 (void); #endif /* _KERNEL */ - #endif /* _VM_OBJECT_ */ Index: vm_page.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_page.c,v retrieving revision 1.179 diff -u -r1.179 vm_page.c --- vm_page.c 4 Mar 2002 18:55:26 -0000 1.179 +++ vm_page.c 8 Mar 2002 11:27:02 -0000 @@ -247,7 +247,7 @@ *bucket = NULL; bucket++; } - for (i = 0; i < BUCKET_HASH_SIZE; ++i) + for (i = 0; i < BUCKET_HASH_SIZE; i++) mtx_init(&vm_buckets_mtx[i], "vm buckets hash mutexes", MTX_DEF); /* @@ -287,16 +287,17 @@ */ cnt.v_page_count = 0; cnt.v_free_count = 0; + /* XXX Can npages be < 0 here? If not, the below checks for > 0 can + * be replaced with != 0, which makes the code simpler. Then the + * npages-- can be moved to be part of the last part of the iterator, + * for a cleaner structure. + */ for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) { - pa = phys_avail[i]; - if (i == biggestone) - last_pa = new_end; - else - last_pa = phys_avail[i + 1]; - while (pa < last_pa && npages-- > 0) { + for (pa = phys_avail[i], + last_pa = (i == biggestone ? 
new_end : phys_avail[i + 1]);
+		    pa < last_pa && npages-- > 0;
+		    pa += PAGE_SIZE)
 			vm_pageq_add_new_page(pa);
-			pa += PAGE_SIZE;
-		}
 	}
 	return (vaddr);
 }
@@ -497,7 +498,7 @@
 /*
  *	vm_page_sleep_busy:
  *
- *	Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
+ *	Wait until page is no longer PG_BUSY and (if also_m_busy is TRUE)
  *	m->busy is zero.  Returns TRUE if it had to sleep ( including if
  *	it almost had to sleep and made temporary spl*() mods), FALSE
  *	otherwise.
@@ -514,12 +515,18 @@
 	GIANT_REQUIRED;
 	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
 		int s = splvm();
-		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
+		while ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
 			/*
 			 * Page is busy. Wait and retry.
 			 */
 			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
 			tsleep(m, PVM, msg, 0);
+			/* XXX This KASSERT *triggers* - hence the change to
+			 * a while loop above
+			KASSERT(!(m->flags & PG_BUSY) &&
+			    !(also_m_busy && m->busy),
+			    ("vm_page_sleep_busy: Wakeup on buffer that is still busy"));
+			 */
 		}
 		splx(s);
 		return(TRUE);
@@ -574,6 +581,7 @@
 
 	GIANT_REQUIRED;
 
+	/* XXX This happens on RELENG_4 - phoenix */
 	if (m->object != NULL)
 		panic("vm_page_insert: already inserted");
 
@@ -660,12 +668,18 @@
 {
 	struct vm_page **bucket;
 
-	bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
-	while (*bucket != m) {
+	for (bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
+	    *bucket != m;
+	    bucket = &(*bucket)->hnext)
 		if (*bucket == NULL)
 			panic("vm_page_remove(): page not found in hash");
-		bucket = &(*bucket)->hnext;
-	}
+	/*
+	 * The following code is assumed to be runnable while a
+	 * vm_page_lookup is in progress; however, it is NOT OK for a
+	 * vm_page_lookup to actually execute code between the two
+	 * lines below.  I.e., it cannot run on another CPU, or in a
+	 * thread that can be switched with this one.
+	 */
 	*bucket = m->hnext;
 	m->hnext = NULL;
 	vm_page_bucket_generation++;
@@ -707,13 +721,14 @@
 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {
 	vm_page_t m;
-	struct vm_page **bucket;
 	int generation;
 
 	/*
 	 * Search the hash table for this object/offset pair
 	 */
+#if 0
+	struct vm_page **bucket;
retry:
 	generation = vm_page_bucket_generation;
 	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
@@ -727,6 +742,22 @@
 	if (vm_page_bucket_generation != generation)
 		goto retry;
 	return (NULL);
+#else
+	/*
+	 * EE: I suspect the following code is better, but I have no way
+	 * to test it.  It results in one more branch when the page is
+	 * found, but less cache usage.  It is at least more readable...
+ */ + do { + generation = vm_page_bucket_generation; + for (m = vm_page_buckets[vm_page_hash(object, pindex)]; + m != NULL; + m = m->hnext) + if (m->object == object && m->pindex == pindex) + break; + } while (vm_page_bucket_generation != generation); + return (m); +#endif } /* @@ -783,14 +814,14 @@ vm_page_t m; GIANT_REQUIRED; - while (TRUE) { - m = vm_pageq_find( - PQ_CACHE, + for (;;) { + m = vm_pageq_find(PQ_CACHE, (pindex + object->pg_color) & PQ_L2_MASK, - FALSE - ); - if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || - m->hold_count || m->wire_count)) { + FALSE); + if (m == NULL) + return NULL; + if (VM_PAGE_ISBUSY(m) || (m->flags & PG_UNMANAGED) || + m->hold_count || m->wire_count) { vm_page_deactivate(m); continue; } @@ -810,14 +841,10 @@ static __inline vm_page_t vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero) { - vm_page_t m; - - m = vm_pageq_find( - PQ_FREE, - (pindex + object->pg_color) & PQ_L2_MASK, - prefer_zero - ); - return(m); + return vm_pageq_find( + PQ_FREE, + (pindex + object->pg_color) & PQ_L2_MASK, + prefer_zero); } /* @@ -854,9 +881,8 @@ * The pager is allowed to eat deeper into the free page list. */ - if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) { + if (curproc == pageproc && page_req != VM_ALLOC_INTERRUPT) page_req = VM_ALLOC_SYSTEM; - }; s = splvm(); @@ -866,10 +892,7 @@ * Allocate from the free queue if there are plenty of pages * in it. */ - if (page_req == VM_ALLOC_ZERO) - m = vm_page_select_free(object, pindex, TRUE); - else - m = vm_page_select_free(object, pindex, FALSE); + m = vm_page_select_free(object, pindex, page_req == VM_ALLOC_ZERO); } else if ( (page_req == VM_ALLOC_SYSTEM && cnt.v_cache_count == 0 && @@ -889,10 +912,8 @@ m = vm_page_select_cache(object, pindex); if (m == NULL) { splx(s); -#if defined(DIAGNOSTIC) - if (cnt.v_cache_count > 0) - printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count); -#endif + KASSERT(cnt.v_cache_count <= 0, + ("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count)); vm_pageout_deficit++; pagedaemon_wakeup(); return (NULL); @@ -916,10 +937,8 @@ * At this point we had better have found a good page. */ - KASSERT( - m != NULL, - ("vm_page_alloc(): missing page on free queue\n") - ); + KASSERT(m != NULL, + ("vm_page_alloc(): missing page on free queue\n")); /* * Remove from free queue @@ -1168,13 +1187,10 @@ * Clear the UNMANAGED flag when freeing an unmanaged page. 
*/ - if (m->flags & PG_UNMANAGED) { + if (m->flags & PG_UNMANAGED) m->flags &= ~PG_UNMANAGED; - } else { -#ifdef __alpha__ + else pmap_page_is_free(m); -#endif - } if (m->hold_count != 0) { m->flags &= ~PG_ZERO; @@ -1377,10 +1393,9 @@ { GIANT_REQUIRED; - if (m->dirty || m->hold_count || m->busy || m->wire_count || - (m->flags & (PG_BUSY|PG_UNMANAGED))) { + if (m->dirty || m->hold_count || m->wire_count || + VM_PAGE_ISBUSY(m) || m->flags & PG_UNMANAGED) return(0); - } vm_page_test_dirty(m); if (m->dirty) return(0); @@ -1397,10 +1412,9 @@ int vm_page_try_to_free(vm_page_t m) { - if (m->dirty || m->hold_count || m->busy || m->wire_count || - (m->flags & (PG_BUSY|PG_UNMANAGED))) { + if (m->dirty || m->hold_count || m->wire_count || + VM_PAGE_ISBUSY(m) || m->flags & PG_UNMANAGED) return(0); - } vm_page_test_dirty(m); if (m->dirty) return(0); @@ -1423,10 +1437,13 @@ int s; GIANT_REQUIRED; - if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) { - printf("vm_page_cache: attempting to cache busy page\n"); - return; - } + KASSERT(!VM_PAGE_ISBUSY(m), + ("vm_page_cache: attempting to cache busy page\n")); + KASSERT(!(m->flags & PG_UNMANAGED), + ("vm_page_cache: attempting to cache unmanaged page\n")); + KASSERT(m->wire_count == 0, + ("vm_page_cache: attempting to cache wired page\n")); + if ((m->queue - m->pc) == PQ_CACHE) return; @@ -1497,7 +1514,7 @@ if (m->dirty || (dnw & 0x0070) == 0) { /* - * Deactivate the page 3 times out of 32. + * Deactivate the page 4 times out of 32. */ head = 0; } else { @@ -1524,36 +1541,32 @@ vm_page_t m; int s, generation; + KASSERT(allocflags & VM_ALLOC_RETRY, + ("vm_page_grab: Will always retry")); GIANT_REQUIRED; retrylookup: if ((m = vm_page_lookup(object, pindex)) != NULL) { - if (m->busy || (m->flags & PG_BUSY)) { + if (!VM_PAGE_ISBUSY(m)) { + vm_page_busy(m); + return m; + } else { generation = object->generation; s = splvm(); while ((object->generation == generation) && - (m->busy || (m->flags & PG_BUSY))) { + VM_PAGE_ISBUSY(m)) { vm_page_flag_set(m, PG_WANTED | PG_REFERENCED); tsleep(m, PVM, "pgrbwt", 0); - if ((allocflags & VM_ALLOC_RETRY) == 0) { - splx(s); - return NULL; - } } splx(s); goto retrylookup; - } else { - vm_page_busy(m); - return m; } - } - - m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY); - if (m == NULL) { - VM_WAIT; - if ((allocflags & VM_ALLOC_RETRY) == 0) - return NULL; - goto retrylookup; + } else { + m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY); + if (m == NULL) { + VM_WAIT; + goto retrylookup; + } } return m; Index: vm_page.h =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_page.h,v retrieving revision 1.96 diff -u -r1.96 vm_page.h --- vm_page.h 4 Mar 2002 18:55:26 -0000 1.96 +++ vm_page.h 6 Mar 2002 15:40:08 -0000 @@ -132,6 +132,8 @@ #elif PAGE_SIZE == 8192 u_short valid; /* map of valid DEV_BSIZE chunks */ u_short dirty; /* map of dirty DEV_BSIZE chunks */ +#else +# error "Invalid page size for vm_page; edit vm_page.h appropriately" #endif }; @@ -254,6 +256,12 @@ #define PG_MARKER 0x1000 /* special queue marker page */ /* + * Macro to check if a page is busy (to avoid repeating the same semantics all + * over the code) + */ +#define VM_PAGE_ISBUSY(m) ((m)->busy || ((m)->flags & PG_BUSY)) + +/* * Misc constants. 
*/ @@ -306,10 +314,10 @@ #if PAGE_SIZE == 4096 #define VM_PAGE_BITS_ALL 0xff -#endif - -#if PAGE_SIZE == 8192 +#elif PAGE_SIZE == 8192 #define VM_PAGE_BITS_ALL 0xffff +#else +#error "Invalid page size for vm_page; edit vm_page.h appropriately" #endif #define VM_ALLOC_NORMAL 0 Index: vm_pageout.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_pageout.c,v retrieving revision 1.189 diff -u -r1.189 vm_pageout.c --- vm_pageout.c 27 Feb 2002 18:03:02 -0000 1.189 +++ vm_pageout.c 6 Mar 2002 16:53:02 -0000 @@ -246,10 +246,8 @@ /* * Don't mess with the page if it's busy, held, or special */ - if ((m->hold_count != 0) || - ((m->busy != 0) || (m->flags & (PG_BUSY|PG_UNMANAGED)))) { + if (m->hold_count || VM_PAGE_ISBUSY(m) || m->flags & PG_UNMANAGED) return 0; - } mc[vm_pageout_page_count] = m; pageout_count = 1; @@ -290,8 +288,8 @@ ib = 0; break; } - if (((p->queue - p->pc) == PQ_CACHE) || - (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { + if ((p->queue - p->pc) == PQ_CACHE || VM_PAGE_ISBUSY(p) || + p->flags & PG_UNMANAGED) { ib = 0; break; } @@ -320,8 +318,8 @@ if ((p = vm_page_lookup(object, pindex + is)) == NULL) break; - if (((p->queue - p->pc) == PQ_CACHE) || - (p->flags & (PG_BUSY|PG_UNMANAGED)) || p->busy) { + if ((p->queue - p->pc) == PQ_CACHE || VM_PAGE_ISBUSY(p) || + p->flags & PG_UNMANAGED) { break; } vm_page_test_dirty(p); @@ -483,29 +481,28 @@ * scan the objects entire memory queue */ rcount = object->resident_page_count; - p = TAILQ_FIRST(&object->memq); - while (p && (rcount-- > 0)) { + for (p = TAILQ_FIRST(&object->memq); + p && (rcount-- > 0); + p = next) { int actcount; + if (pmap_resident_count(vm_map_pmap(map)) <= desired) return; + /* XXX If gcc optimizes the case correctly, the next + * line should be moved before the if () above for + * clarity. + */ next = TAILQ_NEXT(p, listq); cnt.v_pdpages++; - if (p->wire_count != 0 || - p->hold_count != 0 || - p->busy != 0 || - (p->flags & (PG_BUSY|PG_UNMANAGED)) || - !pmap_page_exists_quick(vm_map_pmap(map), p)) { - p = next; + if (p->wire_count || p->hold_count || VM_PAGE_ISBUSY(p) || + p->flags & PG_UNMANAGED || + !pmap_page_exists_quick(vm_map_pmap(map), p)) continue; - } - actcount = pmap_ts_referenced(p); - if (actcount) { + if (actcount) vm_page_flag_set(p, PG_REFERENCED); - } else if (p->flags & PG_REFERENCED) { + else if (p->flags & PG_REFERENCED) actcount = 1; - } - if ((p->queue != PQ_ACTIVE) && (p->flags & PG_REFERENCED)) { vm_page_activate(p); @@ -517,9 +514,8 @@ if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) { vm_page_protect(p, VM_PROT_NONE); vm_page_deactivate(p); - } else { + } else vm_pageq_requeue(p); - } } else { vm_page_activate(p); vm_page_flag_clear(p, PG_REFERENCED); @@ -527,10 +523,8 @@ p->act_count += ACT_ADVANCE; vm_pageq_requeue(p); } - } else if (p->queue == PQ_INACTIVE) { + } else if (p->queue == PQ_INACTIVE) vm_page_protect(p, VM_PROT_NONE); - } - p = next; } object = object->backing_object; } @@ -730,7 +724,7 @@ * Dont mess with busy pages, keep in the front of the * queue, most likely are being paged out. */ - if (m->busy || (m->flags & PG_BUSY)) { + if (VM_PAGE_ISBUSY(m)) { addl_page_shortage++; continue; } @@ -907,7 +901,7 @@ * page back onto the end of the queue so that * statistics are more correct if we don't. 
 */
-		if (m->busy || (m->flags & PG_BUSY)) {
+		if (VM_PAGE_ISBUSY(m)) {
 			vput(vp);
 			vn_finished_write(mp);
 			continue;
 		}
@@ -974,27 +968,17 @@
 	 */
 	pcount = cnt.v_active_count;
-	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
-
-	while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
-
-		/*
-		 * This is a consistency check, and should likely be a panic
-		 * or warning.
-		 */
-		if (m->queue != PQ_ACTIVE) {
-			break;
-		}
-
+	for (m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
+	    m && pcount-- > 0 && page_shortage > 0;
+	    m = next) {
+		KASSERT(m->queue == PQ_ACTIVE,
+		    ("vm_pageout_scan: Page in PQ_ACTIVE queue claims not to be"));
 		next = TAILQ_NEXT(m, pageq);
 
 		/*
 		 * Don't deactivate pages that are busy.
 		 */
-		if ((m->busy != 0) ||
-		    (m->flags & PG_BUSY) ||
-		    (m->hold_count != 0)) {
+		if (VM_PAGE_ISBUSY(m) || m->hold_count) {
 			vm_pageq_requeue(m);
-			m = next;
 			continue;
 		}
@@ -1009,9 +993,8 @@
 		 */
 		actcount = 0;
 		if (m->object->ref_count != 0) {
-			if (m->flags & PG_REFERENCED) {
-				actcount += 1;
-			}
+			if (m->flags & PG_REFERENCED)
+				actcount++;
 			actcount += pmap_ts_referenced(m);
 			if (actcount) {
 				m->act_count += ACT_ADVANCE + actcount;
@@ -1043,14 +1026,11 @@
 						vm_page_cache(m);
 					else
 						vm_page_deactivate(m);
-				} else {
+				} else
 					vm_page_deactivate(m);
-				}
-			} else {
+			} else
 				vm_pageq_requeue(m);
-			}
 		}
-		m = next;
 	}
 
 	s = splvm();
@@ -1065,18 +1045,16 @@
 	while (cnt.v_free_count < cnt.v_free_reserved) {
 		static int cache_rover = 0;
 		m = vm_pageq_find(PQ_CACHE, cache_rover, FALSE);
-		if (!m)
+		if (m == NULL)
 			break;
-		if ((m->flags & (PG_BUSY|PG_UNMANAGED)) ||
-		    m->busy ||
-		    m->hold_count ||
-		    m->wire_count) {
-#ifdef INVARIANTS
-			printf("Warning: busy page %p found in cache\n", m);
-#endif
-			vm_page_deactivate(m);
-			continue;
-		}
+		KASSERT(!VM_PAGE_ISBUSY(m),
+		    ("Busy page %p found in cache", m));
+		KASSERT((m->flags & PG_UNMANAGED) == 0,
+		    ("Unmanaged page %p found in cache", m));
+		KASSERT(m->hold_count == 0,
+		    ("Held page %p found in cache", m));
+		KASSERT(m->wire_count == 0,
+		    ("Wired page %p found in cache", m));
 		cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
 		vm_pageout_page_free(m);
 		cnt.v_dfree++;
@@ -1221,27 +1199,22 @@
 		tpcount = (vm_pageout_stats_max * cnt.v_active_count) / cnt.v_page_count;
 		if (pcount > tpcount)
 			pcount = tpcount;
-	} else {
+	} else
 		fullintervalcount = 0;
-	}
-	m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
-	while ((m != NULL) && (pcount-- > 0)) {
+	for (m = TAILQ_FIRST(&vm_page_queues[PQ_ACTIVE].pl);
+	    m && (pcount-- > 0);
+	    m = next) {
 		int actcount;
 
-		if (m->queue != PQ_ACTIVE) {
-			break;
-		}
-
+		KASSERT(m->queue == PQ_ACTIVE,
+		    ("vm_pageout_page_stats: Page in PQ_ACTIVE queue claims not to be"));
 		next = TAILQ_NEXT(m, pageq);
 
 		/*
 		 * Don't deactivate pages that are busy.
 		 */
-		if ((m->busy != 0) ||
-		    (m->flags & PG_BUSY) ||
-		    (m->hold_count != 0)) {
+		if (VM_PAGE_ISBUSY(m) || m->hold_count) {
 			vm_pageq_requeue(m);
-			m = next;
 			continue;
 		}
@@ -1275,8 +1248,6 @@
 			vm_pageq_requeue(m);
 			}
 		}
-
-		m = next;
 	}
 	splx(s0);
 }
Index: vm_pageq.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_pageq.c,v
retrieving revision 1.3
diff -u -r1.3 vm_pageq.c
--- vm_pageq.c	4 Mar 2002 18:55:26 -0000	1.3
+++ vm_pageq.c	6 Mar 2002 03:13:08 -0000
@@ -226,6 +226,10 @@
 
 	GIANT_REQUIRED;
 
+	/* XXX Test if constant propagation is good enough that we could
+	 * just set index = 0 and use an ifdef'ed conditional return
+	 * instead of messing around with completely different code.
+ */ #if PQ_L2_SIZE > 1 if (prefer_zero) { m = TAILQ_LAST(&vm_page_queues[basequeue+index].pl, pglist); Index: vm_swap.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_swap.c,v retrieving revision 1.113 diff -u -r1.113 vm_swap.c --- vm_swap.c 27 Feb 2002 18:32:23 -0000 1.113 +++ vm_swap.c 6 Mar 2002 03:16:18 -0000 @@ -95,6 +95,8 @@ struct vnode *vp; struct buf *bp; + /* XXX Need KASSERT() for the bp being locked */ + /* XXX Need KASSERT() for the bp not being B_DONE */ bp = ap->a_bp; sz = howmany(bp->b_bcount, PAGE_SIZE); Index: vnode_pager.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vnode_pager.c,v retrieving revision 1.141 diff -u -r1.141 vnode_pager.c --- vnode_pager.c 27 Feb 2002 18:32:23 -0000 1.141 +++ vnode_pager.c 6 Mar 2002 16:52:15 -0000 @@ -874,9 +874,8 @@ } } } - if (error) { + if (error) printf("vnode_pager_getpages: I/O read error\n"); - } return (error ? VM_PAGER_ERROR : VM_PAGER_OK); } @@ -961,19 +960,16 @@ int ioflags; GIANT_REQUIRED; + KASSERT((int)m[0]->pindex >= 0, + ("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n", + (long)m[0]->pindex, m[0]->dirty)); + object = vp->v_object; count = bytecount / PAGE_SIZE; for (i = 0; i < count; i++) rtvals[i] = VM_PAGER_AGAIN; - if ((int) m[0]->pindex < 0) { - printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%x)\n", - (long)m[0]->pindex, m[0]->dirty); - rtvals[0] = VM_PAGER_BAD; - return VM_PAGER_BAD; - } - maxsize = count * PAGE_SIZE; ncount = count; @@ -1006,9 +1002,8 @@ ncount = 0; } if (ncount < count) { - for (i = ncount; i < count; i++) { + for (i = ncount; i < count; i++) rtvals[i] = VM_PAGER_BAD; - } } } @@ -1034,16 +1029,13 @@ cnt.v_vnodeout++; cnt.v_vnodepgsout += ncount; - if (error) { + if (error) printf("vnode_pager_putpages: I/O error %d\n", error); - } - if (auio.uio_resid) { + if (auio.uio_resid) printf("vnode_pager_putpages: residual I/O %d at %lu\n", auio.uio_resid, (u_long)m[0]->pindex); - } - for (i = 0; i < ncount; i++) { + for (i = 0; i < ncount; i++) rtvals[i] = VM_PAGER_OK; - } return rtvals[0]; }
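
Notes for reviewers, on three idioms the patch leans on (none of this is part of the diff).

First, the vm_page_lookup() rewrite: it tolerates a remove happening between scans by re-reading vm_page_bucket_generation and rescanning if it moved. Here is a minimal userland sketch of that pattern; struct page, page_hash(), NBUCKETS and friends are illustrative stand-ins, not the kernel definitions:

/*
 * Userland sketch of the generation-checked lookup in the
 * vm_page_lookup() rewrite.  Names are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

struct page {
	struct page *hnext;		/* hash chain link */
	int object;			/* stand-in for the object pointer */
	int pindex;
};

#define NBUCKETS 8
static struct page *buckets[NBUCKETS];
static volatile int generation;		/* bumped by every remove */

static int
page_hash(int object, int pindex)
{
	return ((object ^ pindex) & (NBUCKETS - 1));
}

static struct page *
page_lookup(int object, int pindex)
{
	struct page *m;
	int gen;

	/*
	 * If a remove bumped the generation while we scanned, the
	 * chain may have been spliced under us; rescan from scratch.
	 */
	do {
		gen = generation;
		for (m = buckets[page_hash(object, pindex)];
		    m != NULL;
		    m = m->hnext)
			if (m->object == object && m->pindex == pindex)
				break;
	} while (generation != gen);
	return (m);
}

int
main(void)
{
	struct page p = { NULL, 1, 42 };

	buckets[page_hash(1, 42)] = &p;
	printf("found: %p\n", (void *)page_lookup(1, 42));
	return (0);
}

As the new comment in vm_page_remove() stresses, this is only sound because a remove cannot interleave with the body of a lookup scan; the generation check catches a remove that completed around the scan, not one running mid-scan on another CPU.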
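Second, swp_pager_hash() and vm_page_remove() both use the pointer-to-a-pointer walk: the loop carries the address of the link to patch rather than the node itself, so unlinking needs no prev pointer and no special case for the head of the chain. A sketch under the same caveat (the names mimic the kernel's swblock but this is not the kernel code):

#include <stddef.h>
#include <stdio.h>

struct swblock {
	struct swblock *swb_hnext;
	int swb_index;
};

/*
 * Return the address of the link that points at the matching node,
 * or at the terminating NULL if there is no match.  The caller can
 * then unlink with a single store.
 */
static struct swblock **
swp_hash_lookup(struct swblock **head, int index)
{
	struct swblock **pswap, *swap;

	for (pswap = head; (swap = *pswap) != NULL;
	    pswap = &swap->swb_hnext)
		if (swap->swb_index == index)
			break;
	return (pswap);
}

int
main(void)
{
	struct swblock b = { NULL, 7 };
	struct swblock a = { &b, 3 };
	struct swblock *head = &a;
	struct swblock **pswap;

	pswap = swp_hash_lookup(&head, 7);
	if (*pswap != NULL)
		*pswap = (*pswap)->swb_hnext;	/* unlink, no prev needed */
	printf("head: index %d, next %p\n",
	    head->swb_index, (void *)head->swb_hnext);
	return (0);
}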
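Finally, most of the vm_map.c loop conversions iterate a circular doubly-linked list whose header doubles as the sentinel, which is what the updated comment in vm_map.h means by "circular". A toy version, showing why "entry != &map->header" plus the range test is the only termination check the converted for loops need:

/*
 * Toy version of the map entry list: a circular doubly-linked list
 * whose header is the sentinel.  Illustrative only.
 */
#include <stdio.h>

struct entry {
	struct entry *next, *prev;
	int start, end;
};

/* An empty list: the header is its own neighbor in both directions. */
static struct entry header = { &header, &header, 0, 0 };

static void
insert_tail(struct entry *e)
{
	e->next = &header;
	e->prev = header.prev;
	header.prev->next = e;
	header.prev = e;
}

int
main(void)
{
	struct entry a = { NULL, NULL, 0, 10 };
	struct entry b = { NULL, NULL, 10, 20 };
	struct entry *cur;
	int end = 15;

	insert_tail(&a);
	insert_tail(&b);
	/* The same shape as the converted loops in vm_map.c. */
	for (cur = header.next;
	    cur != &header && cur->start < end;
	    cur = cur->next)
		printf("[%d, %d)\n", cur->start, cur->end);
	return (0);
}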