Index: sparc64/include/tsb.h
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/include/tsb.h,v
retrieving revision 1.15
diff -u -r1.15 tsb.h
--- sparc64/include/tsb.h	4 Jun 2002 19:40:45 -0000	1.15
+++ sparc64/include/tsb.h	10 Aug 2002 19:49:05 -0000
@@ -43,12 +43,9 @@
 	(TSB_BSHIFT - TSB_BUCKET_SHIFT - TTE_SHIFT)
 #define	TSB_BUCKET_MASK		((1 << TSB_BUCKET_ADDRESS_BITS) - 1)
 
-#define	TSB_KERNEL_SIZE \
-	((KVA_PAGES * PAGE_SIZE_4M) / sizeof(struct tte))
-#define	TSB_KERNEL_MASK		(TSB_KERNEL_SIZE - 1)
-#define	TSB_KERNEL_VA_MASK	(TSB_KERNEL_MASK << TTE_SHIFT)
-
 extern struct tte *tsb_kernel;
+extern vm_size_t tsb_kernel_mask;
+extern vm_size_t tsb_kernel_size;
 extern vm_offset_t tsb_kernel_phys;
 
 static __inline struct tte *
@@ -66,7 +63,7 @@
 static __inline struct tte *
 tsb_kvpntotte(vm_offset_t vpn)
 {
-	return (&tsb_kernel[vpn & TSB_KERNEL_MASK]);
+	return (&tsb_kernel[vpn & tsb_kernel_mask]);
 }
 
 static __inline struct tte *
Index: sparc64/include/vmparam.h
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/include/vmparam.h,v
retrieving revision 1.11
diff -u -r1.11 vmparam.h
--- sparc64/include/vmparam.h	13 Jul 2002 03:29:10 -0000	1.11
+++ sparc64/include/vmparam.h	10 Aug 2002 19:49:05 -0000
@@ -88,9 +88,9 @@
  * that if this moves above the va hole, we will have to deal with sign
  * extension of virtual addresses.
  */
-#define	VM_MAXUSER_ADDRESS	((vm_offset_t)0x7fe00000000)
+#define	VM_MAXUSER_ADDRESS	(0x7fe00000000UL)
 
-#define	VM_MIN_ADDRESS		((vm_offset_t)0)
+#define	VM_MIN_ADDRESS		(0UL)
 #define	VM_MAX_ADDRESS		(VM_MAXUSER_ADDRESS)
 
 /*
@@ -116,19 +116,6 @@
 #endif
 
 /*
- * Number of 4 meg pages to use for the kernel tsb.
- */
-#ifndef KVA_PAGES
-#define	KVA_PAGES	(1)
-#endif
-
-/*
- * Range of kernel virtual addresses.  max = min + range.
- */
-#define	KVA_RANGE \
-	((KVA_PAGES * PAGE_SIZE_4M) << (PAGE_SHIFT - TTE_SHIFT))
-
-/*
  * Lowest kernel virtual address, where the kernel is loaded. This is also
  * arbitrary. We pick a resonably low address, which allows all of kernel
  * text, data and bss to be below the 4 gigabyte mark, yet still high enough
@@ -136,17 +123,19 @@
  * same as for x86 with default KVA_PAGES...
  */
 #define	VM_MIN_KERNEL_ADDRESS	(0xc0000000)
-#define	VM_MAX_KERNEL_ADDRESS	(VM_MIN_KERNEL_ADDRESS + KVA_RANGE - PAGE_SIZE)
-#define	KERNBASE	(VM_MIN_KERNEL_ADDRESS)
-
 #define	VM_MIN_PROM_ADDRESS	(0xf0000000)
 #define	VM_MAX_PROM_ADDRESS	(0xffffe000)
 
+#define	KERNBASE	(VM_MIN_KERNEL_ADDRESS)
+#define	VM_MAX_KERNEL_ADDRESS	(vm_max_kernel_address)
+
 /*
  * Initial pagein size of beginning of executable file.
  */
 #ifndef VM_INITIAL_PAGEIN
 #define	VM_INITIAL_PAGEIN	16
 #endif
+
+extern vm_offset_t vm_max_kernel_address;
 
 #endif /* !_MACHINE_VMPARAM_H_ */
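A note on the tsb.h change above: the compile-time TSB_KERNEL_MASK becomes the
variable tsb_kernel_mask, but the lookup itself is unchanged. Below is a
minimal user-space sketch of that index computation, assuming sparc64's 8K
base pages (PAGE_SHIFT of 13) and a mask value chosen purely for illustration;
tsb_index is a hypothetical name for what the kernel does in tsb_kvpntotte().

    #include <stdio.h>

    #define PAGE_SHIFT 13                  /* sparc64 8K base pages */

    /* Stand-in for the new kernel global; the value is illustrative. */
    static unsigned long tsb_kernel_mask = (1UL << 17) - 1;  /* 128K ttes */

    /*
     * The kernel tsb is direct-mapped, so a virtual page number picks
     * a slot by simple masking, exactly as in tsb_kvpntotte().
     */
    static unsigned long
    tsb_index(unsigned long va)
    {
            unsigned long vpn = va >> PAGE_SHIFT;  /* virtual page number */

            return (vpn & tsb_kernel_mask);
    }

    int
    main(void)
    {
            printf("va 0xc0081000 -> tsb slot %lu\n",
                tsb_index(0xc0081000UL));
            return (0);
    }

Since the mask is now a variable, the number of slots can grow with the
machine instead of being frozen at build time by KVA_PAGES.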
Index: sparc64/sparc64/exception.S
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/exception.S,v
retrieving revision 1.37
diff -u -r1.37 exception.S
--- sparc64/sparc64/exception.S	27 Jul 2002 21:57:38 -0000	1.37
+++ sparc64/sparc64/exception.S	10 Aug 2002 19:49:06 -0000
@@ -67,6 +67,9 @@
 
 #include "assym.s"
 
+#define	TSB_KERNEL_MASK	0x0
+#define	TSB_KERNEL	0x0
+
 	.register %g2,#ignore
 	.register %g3,#ignore
 	.register %g6,#ignore
@@ -1360,15 +1363,22 @@
 	srlx	%g6, TAR_VPN_SHIFT, %g6
 
 	/*
-	 * Find the index into the kernel tsb.
+	 * Find the index into the kernel tsb.  The tsb mask gets patched at
+	 * startup.
 	 */
-	set	TSB_KERNEL_MASK, %g4
+	.globl	tl1_immu_miss_load_tsb_mask
+tl1_immu_miss_load_tsb_mask:
+	sethi	%hi(TSB_KERNEL_MASK), %g4
+	or	%g4, %lo(TSB_KERNEL_MASK), %g4
 	and	%g6, %g4, %g3
 
 	/*
-	 * Compute the tte address.
+	 * Compute the tte address.  The address of the kernel tsb gets
+	 * patched at startup.
 	 */
-	ldxa	[%g0 + AA_IMMU_TSB] %asi, %g4
+	.globl	tl1_immu_miss_load_tsb
+tl1_immu_miss_load_tsb:
+	sethi	%hi(TSB_KERNEL), %g4
 	sllx	%g3, TTE_SHIFT, %g3
 	add	%g3, %g4, %g3
 
@@ -1449,16 +1459,23 @@
 	EMPTY
 
 	/*
-	 * Find the index into the kernel tsb.
+	 * Find the index into the kernel tsb.  The tsb mask gets patched at
+	 * startup.
 	 */
-	set	TSB_KERNEL_MASK, %g4
+	.globl	tl1_dmmu_miss_load_tsb_mask
+tl1_dmmu_miss_load_tsb_mask:
+	sethi	%hi(TSB_KERNEL_MASK), %g4
+	or	%g4, %lo(TSB_KERNEL_MASK), %g4
 	srlx	%g6, TAR_VPN_SHIFT, %g6
 	and	%g6, %g4, %g3
 
 	/*
-	 * Compute the tte address.
+	 * Compute the tte address.  The address of the kernel tsb gets
+	 * patched at startup.
 	 */
-	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g4
+	.globl	tl1_dmmu_miss_load_tsb
+tl1_dmmu_miss_load_tsb:
+	sethi	%hi(TSB_KERNEL), %g4
 	sllx	%g3, TTE_SHIFT, %g3
 	add	%g3, %g4, %g3
 
@@ -1606,16 +1623,23 @@
 	mov	%g6, %g2
 
 	/*
-	 * Find the index into the kernel tsb.
+	 * Find the index into the kernel tsb.  The tsb mask gets patched at
+	 * startup.
 	 */
-	set	TSB_KERNEL_MASK, %g4
+	.globl	tl1_dmmu_prot_load_tsb_mask
+tl1_dmmu_prot_load_tsb_mask:
+	sethi	%hi(TSB_KERNEL_MASK), %g4
+	or	%g4, %lo(TSB_KERNEL_MASK), %g4
 	srlx	%g6, TAR_VPN_SHIFT, %g6
 	and	%g6, %g4, %g5
 
 	/*
-	 * Compute the tte address.
+	 * Compute the tte address.  The address of the kernel tsb gets
+	 * patched at startup.
 	 */
-	ldxa	[%g0 + AA_DMMU_TSB] %asi, %g4
+	.globl	tl1_dmmu_prot_load_tsb
+tl1_dmmu_prot_load_tsb:
+	sethi	%hi(TSB_KERNEL), %g4
 	sllx	%g5, TTE_SHIFT, %g5
 	add	%g4, %g5, %g3
Index: sparc64/sparc64/genassym.c
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/genassym.c,v
retrieving revision 1.39
diff -u -r1.39 genassym.c
--- sparc64/sparc64/genassym.c	29 Jul 2002 00:37:05 -0000	1.39
+++ sparc64/sparc64/genassym.c	10 Aug 2002 19:49:06 -0000
@@ -100,7 +100,6 @@
 
 ASSYM(TSB_BUCKET_ADDRESS_BITS, TSB_BUCKET_ADDRESS_BITS);
 ASSYM(TSB_BUCKET_SHIFT, TSB_BUCKET_SHIFT);
-ASSYM(TSB_KERNEL_MASK, TSB_KERNEL_MASK);
 
 ASSYM(INT_SHIFT, INT_SHIFT);
 ASSYM(PTR_SHIFT, PTR_SHIFT);
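Why sethi/or pairs: sethi sets bits 31..10 of a register and zeroes the low
10 bits, so the 4 meg aligned tsb base needs only the sethi, while the mask
also needs the or to supply its low 10 bits. The 0x0 placeholders just make
the assembler emit instruction words of the right shape for pmap_bootstrap()
to overwrite (see the EIF_* macros in the pmap.c hunk below). Here is a
hedged user-space sketch of the two encodings, assuming the standard SPARC
V9 sethi and or-immediate formats; sethi_hi and or_lo are hypothetical
helper names, not kernel interfaces.

    #include <stdint.h>
    #include <stdio.h>

    /* sethi %hi(val), %rd: op=0, op2=4, imm22 = val >> 10. */
    static uint32_t
    sethi_hi(uint32_t rd, uint32_t val)
    {
            return ((rd << 25) | (4u << 22) | ((val >> 10) & 0x3fffffu));
    }

    /* or %rd, %lo(val), %rd: op=2, op3=2, i=1, simm13 = val & 0x3ff. */
    static uint32_t
    or_lo(uint32_t rd, uint32_t val)
    {
            return ((2u << 30) | (rd << 25) | (2u << 19) | (rd << 14) |
                (1u << 13) | (val & 0x3ffu));
    }

    int
    main(void)
    {
            uint32_t mask = 0x1ffff;        /* example tsb_kernel_mask */

            /* The pair leaves the full 32-bit constant in %g4. */
            printf("sethi %%hi(0x%x), %%g4 -> 0x%08x\n", mask,
                sethi_hi(4, mask));
            printf("or %%g4, %%lo(0x%x), %%g4 -> 0x%08x\n", mask,
                or_lo(4, mask));
            return (0);
    }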
Index: sparc64/sparc64/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/sparc64/sparc64/pmap.c,v
retrieving revision 1.75
diff -u -r1.75 pmap.c
--- sparc64/sparc64/pmap.c	5 Aug 2002 00:04:18 -0000	1.75
+++ sparc64/sparc64/pmap.c	10 Aug 2002 19:49:07 -0000
@@ -93,6 +93,7 @@
 
 #include
 #include
+#include
 #include
 #include
 #include
@@ -146,6 +147,8 @@
 vm_offset_t virtual_end;
 vm_offset_t kernel_vm_end;
 
+vm_offset_t vm_max_kernel_address;
+
 /*
  * Kernel pmap.
  */
@@ -160,6 +163,13 @@
 
 static vm_offset_t pmap_map_direct(vm_page_t m);
 
+extern int tl1_immu_miss_load_tsb[];
+extern int tl1_immu_miss_load_tsb_mask[];
+extern int tl1_dmmu_miss_load_tsb[];
+extern int tl1_dmmu_miss_load_tsb_mask[];
+extern int tl1_dmmu_prot_load_tsb[];
+extern int tl1_dmmu_prot_load_tsb_mask[];
+
 /*
  * If user pmap is processed with pmap_remove and the
  * resident count drops to 0, there are no more pages to remove, so we
@@ -267,6 +277,7 @@
 	vm_offset_t pa;
 	vm_offset_t va;
 	vm_size_t physsz;
+	vm_size_t virtsz;
 	ihandle_t pmem;
 	ihandle_t vmem;
 	int sz;
@@ -274,13 +285,6 @@
 	int j;
 
 	/*
-	 * Set the start and end of kva.  The kernel is loaded at the first
-	 * available 4 meg super page, so round up to the end of the page.
-	 */
-	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
-	virtual_end = VM_MAX_KERNEL_ADDRESS;
-
-	/*
 	 * Find out what physical memory is available from the prom and
 	 * initialize the phys_avail array.  This must be done before
 	 * pmap_bootstrap_alloc is called.
@@ -309,17 +313,64 @@
 	}
 	physmem = btoc(physsz);
 
+	virtsz = roundup(physsz / 2, 1024 * 1024 * 1024);
+	vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
+	tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
+	tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
+
+	/*
+	 * Set the start and end of kva.  The kernel is loaded at the first
+	 * available 4 meg super page, so round up to the end of the page.
+	 */
+	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
+	virtual_end = vm_max_kernel_address;
+
 	/*
-	 * Allocate the kernel tsb and lock it in the tlb.
+	 * Allocate the kernel tsb.
 	 */
-	pa = pmap_bootstrap_alloc(KVA_PAGES * PAGE_SIZE_4M);
+	pa = pmap_bootstrap_alloc(tsb_kernel_size);
 	if (pa & PAGE_MASK_4M)
 		panic("pmap_bootstrap: tsb unaligned\n");
 	tsb_kernel_phys = pa;
 	tsb_kernel = (struct tte *)virtual_avail;
-	virtual_avail += KVA_PAGES * PAGE_SIZE_4M;
+	virtual_avail += tsb_kernel_size;
+
+	/*
+	 * Patch the virtual address and the tsb mask into the trap table.
+	 */
+#define	SETHI_G4(x) \
+	EIF_OP(IOP_FORM2) | EIF_F2_RD(4) | EIF_F2_OP2(INS0_SETHI) | \
+	EIF_IMM((x) >> 10, 22)
+#define	OR_G4_I_G4(x) \
+	EIF_OP(IOP_MISC) | EIF_F3_RD(4) | EIF_F3_OP3(INS2_OR) | \
+	EIF_F3_RS1(4) | EIF_F3_I(1) | EIF_IMM(x, 10)
+
+	tl1_immu_miss_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
+	tl1_immu_miss_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
+	tl1_immu_miss_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
+	flush(tl1_immu_miss_load_tsb);
+	flush(tl1_immu_miss_load_tsb_mask);
+	flush(tl1_immu_miss_load_tsb_mask + 1);
+
+	tl1_dmmu_miss_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
+	tl1_dmmu_miss_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
+	tl1_dmmu_miss_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
+	flush(tl1_dmmu_miss_load_tsb);
+	flush(tl1_dmmu_miss_load_tsb_mask);
+	flush(tl1_dmmu_miss_load_tsb_mask + 1);
+
+	tl1_dmmu_prot_load_tsb[0] = SETHI_G4((vm_offset_t)tsb_kernel);
+	tl1_dmmu_prot_load_tsb_mask[0] = SETHI_G4(tsb_kernel_mask);
+	tl1_dmmu_prot_load_tsb_mask[1] = OR_G4_I_G4(tsb_kernel_mask);
+	flush(tl1_dmmu_prot_load_tsb);
+	flush(tl1_dmmu_prot_load_tsb_mask);
+	flush(tl1_dmmu_prot_load_tsb_mask + 1);
+
+	/*
+	 * Lock it in the tlb.
+	 */
 	pmap_map_tsb();
-	bzero(tsb_kernel, KVA_PAGES * PAGE_SIZE_4M);
+	bzero(tsb_kernel, tsb_kernel_size);
 
 	/*
 	 * Enter fake 8k pages for the 4MB kernel pages, so that
@@ -431,9 +482,9 @@
 	/*
 	 * Map the 4mb tsb pages.
	 */
-	for (i = 0; i < KVA_PAGES; i++) {
-		va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M;
-		pa = tsb_kernel_phys + i * PAGE_SIZE_4M;
+	for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
+		va = (vm_offset_t)tsb_kernel + i;
+		pa = tsb_kernel_phys + i;
 		/* XXX - cheetah */
 		data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
 		    TD_P | TD_W;
@@ -441,14 +492,6 @@
 		    TLB_TAR_CTX(TLB_CTX_KERNEL));
 		stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
 	}
-
-	/*
-	 * Load the tsb registers.
-	 */
-	stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel);
-	stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel);
-	membar(Sync);
-	flush(tsb_kernel);
 
 	/*
 	 * Set the secondary context to be the kernel context (needed for
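The sizing policy above replaces the fixed KVA_PAGES tsb: the kva range is
sized to half of physical memory, rounded up to a gigabyte, and the tsb
scales with it at one 16-byte tte per 8K page, i.e. 1/512th of the kva
range. A quick sanity check of that arithmetic, assuming the sparc64 values
PAGE_SHIFT of 13 and TTE_SHIFT of 4; the 512MB figure is just an example.

    #include <stdio.h>

    #define PAGE_SHIFT      13              /* 8K base pages */
    #define TTE_SHIFT       4               /* 16-byte ttes */
    #define GB              (1024UL * 1024 * 1024)

    /* Same rounding the kernel's roundup() does for a 1GB multiple. */
    #define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

    int
    main(void)
    {
            unsigned long physsz = 512UL * 1024 * 1024;  /* 512MB RAM */
            unsigned long virtsz, size, mask;

            virtsz = ROUNDUP(physsz / 2, GB);            /* kva range */
            size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);   /* tsb bytes */
            mask = (size >> TTE_SHIFT) - 1;              /* tte index mask */

            printf("kva %luMB, tsb %luKB, %lu ttes\n",
                virtsz >> 20, size >> 10, mask + 1);
            return (0);
    }

With 512MB of RAM this gives a 1GB kva range backed by a 2MB tsb, where the
old code always used one 4 meg page of tsb no matter how big the machine was.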
- */ - stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel); - stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel); - membar(Sync); - flush(tsb_kernel); /* * Set the secondary context to be the kernel context (needed for Index: sparc64/sparc64/tsb.c =================================================================== RCS file: /home/ncvs/src/sys/sparc64/sparc64/tsb.c,v retrieving revision 1.25 diff -u -r1.25 tsb.c --- sparc64/sparc64/tsb.c 26 Jul 2002 15:54:04 -0000 1.25 +++ sparc64/sparc64/tsb.c 10 Aug 2002 19:49:07 -0000 @@ -92,6 +92,8 @@ #endif struct tte *tsb_kernel; +vm_size_t tsb_kernel_mask; +vm_size_t tsb_kernel_size; vm_offset_t tsb_kernel_phys; struct tte *