Index: alpha/alpha/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/alpha/alpha/pmap.c,v
retrieving revision 1.96
diff -u -r1.96 pmap.c
--- alpha/alpha/pmap.c	7 Jul 2002 23:05:10 -0000	1.96
+++ alpha/alpha/pmap.c	8 Jul 2002 01:26:43 -0000
@@ -2525,6 +2525,21 @@
 	bzero((char *)(caddr_t)va + off, size);
 }
 
+
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents.  This is for the vm_pagezero idle process.
+ */
+
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t va = ALPHA_PHYS_TO_K0SEG(VM_PAGE_TO_PHYS(m));
+	bzero((caddr_t) va, PAGE_SIZE);
+}
+
+
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
Index: i386/i386/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/pmap.c,v
retrieving revision 1.334
diff -u -r1.334 pmap.c
--- i386/i386/pmap.c	8 Jul 2002 00:17:43 -0000	1.334
+++ i386/i386/pmap.c	8 Jul 2002 01:32:27 -0000
@@ -175,9 +175,9 @@
  * All those kernel PT submaps that BSD is so fond of
  */
 pt_entry_t *CMAP1 = 0;
-static pt_entry_t *CMAP2, *ptmmap;
+static pt_entry_t *CMAP2, *CMAP3, *ptmmap;
 caddr_t CADDR1 = 0, ptvmmap = 0;
-static caddr_t CADDR2;
+static caddr_t CADDR2, CADDR3;
 static pt_entry_t *msgbufmap;
 struct msgbuf *msgbufp = 0;
 
@@ -326,9 +326,11 @@
 
 	/*
 	 * CMAP1/CMAP2 are used for zeroing and copying pages.
+	 * CMAP3 is used for the idle process page zeroing.
 	 */
 	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
 	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
+	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
 
 	/*
 	 * Crashdump maps.
@@ -2683,6 +2685,38 @@
 #endif
 	bzero((char *)CADDR2 + off, size);
 	*CMAP2 = 0;
+}
+
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents.  This
+ * is intended to be called from the vm_pagezero process only and
+ * outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t phys = VM_PAGE_TO_PHYS(m);
+
+	if (*CMAP3)
+		panic("pmap_zero_page_idle: CMAP3 busy");
+
+	*CMAP3 = PG_V | PG_RW | phys | PG_A | PG_M;
+#ifdef SMP
+	mtx_lock(&Giant);	/* IPI sender not MPSAFE */
+#endif
+	invltlb_1pg((vm_offset_t)CADDR3);
+#ifdef SMP
+	mtx_unlock(&Giant);
+#endif
+
+#if defined(I686_CPU)
+	if (cpu_class == CPUCLASS_686)
+		i686_pagezero(CADDR3);
+	else
+#endif
+		bzero(CADDR3, PAGE_SIZE);
+	*CMAP3 = 0;
 }
 
 /*
Index: ia64/ia64/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/ia64/ia64/pmap.c,v
retrieving revision 1.67
diff -u -r1.67 pmap.c
--- ia64/ia64/pmap.c	7 Jul 2002 23:05:16 -0000	1.67
+++ ia64/ia64/pmap.c	8 Jul 2002 01:26:43 -0000
@@ -2035,6 +2035,21 @@
 	bzero((char *)(caddr_t)va + off, size);
 }
 
+
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by
+ * mapping it into virtual memory and using bzero to clear
+ * its contents.  This is for the vm_pagezero idle process.
+ */
+
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m));
+	bzero((caddr_t) va, PAGE_SIZE);
+}
+
+
 /*
  * pmap_copy_page copies the specified (machine independent)
  * page by mapping the page into virtual memory and using
Index: powerpc/powerpc/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/powerpc/powerpc/pmap.c,v
retrieving revision 1.41
diff -u -r1.41 pmap.c
--- powerpc/powerpc/pmap.c	7 Jul 2002 23:05:27 -0000	1.41
+++ powerpc/powerpc/pmap.c	8 Jul 2002 01:36:20 -0000
@@ -909,6 +909,17 @@
 	TODO;
 }
 
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+
+	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
+	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
+	mtx_lock(&Giant);
+	pmap_zero_page(m);
+	mtx_unlock(&Giant);
+}
+
 /*
  * Map the given physical page at the specified virtual address in the
  * target pmap with the protection requested.  If specified the page
Index: sparc64/sparc64/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/pmap.c,v
retrieving revision 1.67
diff -u -r1.67 pmap.c
--- sparc64/sparc64/pmap.c	7 Jul 2002 23:05:27 -0000	1.67
+++ sparc64/sparc64/pmap.c	8 Jul 2002 01:33:46 -0000
@@ -1429,6 +1429,22 @@
 	aszero(ASI_PHYS_USE_EC, pa + off, size);
 }
 
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
+
+	CTR1(KTR_PMAP, "pmap_zero_page_idle: pa=%#lx", pa);
+#ifdef SMP
+	mtx_lock(&Giant);
+#endif
+	dcache_inval_phys(pa, pa + PAGE_SIZE - 1);
+#ifdef SMP
+	mtx_unlock(&Giant);
+#endif
+	aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
+}
+
 /*
  * Copy a page of physical memory by temporarily mapping it into the tlb.
  */
Index: vm/pmap.h
===================================================================
RCS file: /home/ncvs/src/sys/vm/pmap.h,v
retrieving revision 1.47
diff -u -r1.47 pmap.h
--- vm/pmap.h	7 Jul 2002 23:05:27 -0000	1.47
+++ vm/pmap.h	8 Jul 2002 01:26:43 -0000
@@ -126,6 +126,7 @@
 void		 pmap_remove_pages(pmap_t, vm_offset_t, vm_offset_t);
 void		 pmap_zero_page(vm_page_t);
 void		 pmap_zero_page_area(vm_page_t, int off, int size);
+void		 pmap_zero_page_idle(vm_page_t);
 void		 pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
 int		 pmap_mincore(pmap_t pmap, vm_offset_t addr);
 void		 pmap_new_thread(struct thread *td);
Index: vm/vm_zeroidle.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_zeroidle.c,v
retrieving revision 1.10
diff -u -r1.10 vm_zeroidle.c
--- vm/vm_zeroidle.c	7 Jul 2002 19:27:57 -0000	1.10
+++ vm/vm_zeroidle.c	8 Jul 2002 01:37:53 -0000
@@ -30,7 +30,7 @@
 SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
     &cnt_prezero, 0, "");
 
-static int idlezero_enable = 0;
+static int idlezero_enable = 1;
 SYSCTL_INT(_vm, OID_AUTO, idlezero_enable, CTLFLAG_RW, &idlezero_enable, 0, "");
 TUNABLE_INT("vm.idlezero_enable", &idlezero_enable);
 
@@ -83,9 +83,9 @@
 		TAILQ_REMOVE(&vm_page_queues[m->queue].pl, m, pageq);
 		m->queue = PQ_NONE;
 		mtx_unlock(&vm_page_queue_free_mtx);
-		/* maybe drop out of Giant here */
-		pmap_zero_page(m);
-		/* and return here */
+		mtx_unlock(&Giant);
+		pmap_zero_page_idle(m);
+		mtx_lock(&Giant);
 		mtx_lock(&vm_page_queue_free_mtx);
 		vm_page_flag_set(m, PG_ZERO);
 		m->queue = PQ_FREE + m->pc;
@@ -109,7 +109,7 @@
 vm_page_zero_idle_wakeup(void)
 {
 
-	if (vm_page_zero_check())
+	if (idlezero_enable && vm_page_zero_check())
 		wakeup(&zero_state);
 }
 
@@ -119,17 +119,19 @@
 	struct thread *td = curthread;
 	struct rtprio rtp;
 	int pages = 0;
+	int pri;
 
 	rtp.prio = RTP_PRIO_MAX;
 	rtp.type = RTP_PRIO_IDLE;
 	mtx_lock_spin(&sched_lock);
 	rtp_to_pri(&rtp, td->td_ksegrp);
+	pri = td->td_priority;
 	mtx_unlock_spin(&sched_lock);
 
 	for (;;) {
 		if (vm_page_zero_check()) {
 			pages += vm_page_zero_idle();
-			if (pages > idlezero_maxrun) {
+			if (pages > idlezero_maxrun || procrunnable()) {
 				mtx_lock_spin(&sched_lock);
 				td->td_proc->p_stats->p_ru.ru_nvcsw++;
 				mi_switch();
@@ -137,7 +139,7 @@
 				pages = 0;
 			}
 		} else {
-			tsleep(&zero_state, PPAUSE, "pgzero", hz * 300);
+			tsleep(&zero_state, pri, "pgzero", hz * 300);
 			pages = 0;
 		}
 	}
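
For a port not touched by this patch, the simplest way to satisfy the new pmap_zero_page_idle() interface is the same conservative fallback the powerpc hunk above uses: reacquire Giant around the existing pmap_zero_page() until the port grows a dedicated kernel mapping for idle zeroing (the way i386 adds CMAP3/CADDR3). The sketch below is illustrative only and is not part of the patch; "newarch" is a placeholder name, not a real sys/ subdirectory.

/* newarch/newarch/pmap.c -- illustrative fallback, not in this patch */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

/*
 * Called from the vm_pagezero idle process with Giant dropped.
 * Until this port has a dedicated mapping for idle zeroing,
 * serialize on Giant and reuse the ordinary pmap_zero_page().
 */
void
pmap_zero_page_idle(vm_page_t m)
{

	mtx_lock(&Giant);
	pmap_zero_page(m);
	mtx_unlock(&Giant);
}

Ports whose pmap_zero_page() already works through a direct physical mapping (alpha's K0SEG, ia64's region 7) can skip the Giant dance entirely, as the alpha and ia64 hunks do.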