--- //depot/vendor/freebsd/src/sys/amd64/acpica/madt.c	2006/03/27 16:00:19
+++ //depot/user/jhb/pat/amd64/acpica/madt.c	2006/04/14 18:07:20
@@ -203,15 +203,15 @@
 
 	/*
 	 * Map in the RSDP.  Since ACPI uses AcpiOsMapMemory() which in turn
-	 * calls pmap_mapdev() to find the RSDP, we assume that we can use
-	 * pmap_mapdev() to map the RSDP.
+	 * calls pmap_mapbios() to find the RSDP, we assume that we can use
+	 * pmap_mapbios() to map the RSDP.
 	 */
 	if (AcpiOsGetRootPointer(ACPI_LOGICAL_ADDRESSING, &rsdp_ptr) != AE_OK)
 		return (ENXIO);
 #ifdef __i386__
 	KASSERT(rsdp_ptr.Pointer.Physical < KERNLOAD, ("RSDP too high"));
 #endif
-	rsdp = pmap_mapdev(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
+	rsdp = pmap_mapbios(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
 	if (rsdp == NULL) {
 		if (bootverbose)
 			printf("MADT: Failed to map RSDP\n");
@@ -261,7 +261,7 @@
 			break;
 		madt_unmap_table(rsdt);
 	}
-	pmap_unmapdev((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
+	pmap_unmapbios((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
 	if (madt_physaddr == 0) {
 		if (bootverbose)
 			printf("MADT: No MADT table found\n");
@@ -335,7 +335,7 @@
 madt_setup_local(void)
 {
 
-	madt = pmap_mapdev(madt_physaddr, madt_length);
+	madt = pmap_mapbios(madt_physaddr, madt_length);
 	lapic_init((uintptr_t)madt->LocalApicAddress);
 	printf("ACPI APIC Table: <%.*s %.*s>\n",
 	    (int)sizeof(madt->OemId), madt->OemId,

--- //depot/vendor/freebsd/src/sys/amd64/amd64/pmap.c	2006/07/20 17:51:02
+++ //depot/user/jhb/pat/amd64/amd64/pmap.c	2006/07/27 20:58:45
@@ -665,6 +665,84 @@
  * Low level helper routines.....
  ***************************************************/
+/*
+ * Determine the appropriate bits to set in a PTE or PDE for a specified
+ * caching mode.
+ */
+static int
+pmap_cache_bits(int mode, boolean_t is_pde)
+{
+	int pat_flag, pat_index, cache_bits;
+
+	/* The PAT bit is different for PTE's and PDE's. */
+	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
+
+	/* If we don't support PAT, map extended modes to older ones. */
+	if (!(cpu_feature & CPUID_PAT)) {
+		switch (mode) {
+		case PAT_UNCACHEABLE:
+		case PAT_WRITE_THROUGH:
+		case PAT_WRITE_BACK:
+			break;
+		case PAT_UNCACHED:
+		case PAT_WRITE_COMBINING:
+		case PAT_WRITE_PROTECTED:
+			mode = PAT_UNCACHEABLE;
+			break;
+		}
+	}
+
+	/* Map the caching mode to a PAT index. */
+	switch (mode) {
+#ifdef PAT_WORKS
+	case PAT_UNCACHEABLE:
+		pat_index = 3;
+		break;
+	case PAT_WRITE_THROUGH:
+		pat_index = 1;
+		break;
+	case PAT_WRITE_BACK:
+		pat_index = 0;
+		break;
+	case PAT_UNCACHED:
+		pat_index = 2;
+		break;
+	case PAT_WRITE_COMBINING:
+		pat_index = 5;
+		break;
+	case PAT_WRITE_PROTECTED:
+		pat_index = 4;
+		break;
+#else
+	case PAT_UNCACHED:
+	case PAT_UNCACHEABLE:
+	case PAT_WRITE_PROTECTED:
+		pat_index = 3;
+		break;
+	case PAT_WRITE_THROUGH:
+		pat_index = 1;
+		break;
+	case PAT_WRITE_BACK:
+		pat_index = 0;
+		break;
+	case PAT_WRITE_COMBINING:
+		pat_index = 2;
+		break;
+#endif
+	default:
+		panic("Unknown caching mode %d\n", mode);
+	}
+
+	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
+	cache_bits = 0;
+	if (pat_index & 0x4)
+		cache_bits |= pat_flag;
+	if (pat_index & 0x2)
+		cache_bits |= PG_NC_PCD;
+	if (pat_index & 0x1)
+		cache_bits |= PG_NC_PWT;
+	return (cache_bits);
+}
 
 #ifdef SMP
 /*
  * For SMP, these functions have to use the IPI mechanism for coherence.
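The table above is easiest to sanity-check outside the kernel.  What follows is a
minimal userland harness, not kernel code: it hard-codes the PAT_* and PG_*
constant values this patch assumes from specialreg.h and pmap.h, and replays the
!PAT_WORKS half of the switch.  Note that indices 0-3 are exactly the power-on
PAT defaults (WB, WT, UC-, UC), which is why the !PAT_WORKS path can be used
without reprogramming the PAT MSR.

/*
 * Userland sketch of pmap_cache_bits() for the !PAT_WORKS table.  The
 * constant values mirror what the patch assumes; verify them against
 * the tree before relying on this.
 */
#include <stdio.h>

#define	PAT_UNCACHEABLE		0x00
#define	PAT_WRITE_COMBINING	0x01
#define	PAT_WRITE_THROUGH	0x04
#define	PAT_WRITE_PROTECTED	0x05
#define	PAT_WRITE_BACK		0x06
#define	PAT_UNCACHED		0x07

#define	PG_NC_PWT	0x008		/* PWT: write-through */
#define	PG_NC_PCD	0x010		/* PCD: cache disable */
#define	PG_PTE_PAT	0x080		/* PAT bit in a 4KB PTE */
#define	PG_PDE_PAT	0x1000		/* PAT bit in a 2/4MB PDE */

static int
cache_bits(int mode, int is_pde)
{
	int pat_flag, pat_index, bits;

	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
	switch (mode) {
	case PAT_UNCACHED:
	case PAT_UNCACHEABLE:
	case PAT_WRITE_PROTECTED:
		pat_index = 3;		/* power-on PAT3 = UC */
		break;
	case PAT_WRITE_THROUGH:
		pat_index = 1;		/* power-on PAT1 = WT */
		break;
	case PAT_WRITE_BACK:
		pat_index = 0;		/* power-on PAT0 = WB */
		break;
	case PAT_WRITE_COMBINING:
		pat_index = 2;		/* power-on PAT2 = UC- */
		break;
	default:
		return (-1);
	}
	bits = 0;
	if (pat_index & 0x4)
		bits |= pat_flag;
	if (pat_index & 0x2)
		bits |= PG_NC_PCD;
	if (pat_index & 0x1)
		bits |= PG_NC_PWT;
	return (bits);
}

int
main(void)
{
	static const int modes[] = { PAT_UNCACHEABLE, PAT_WRITE_COMBINING,
	    PAT_WRITE_THROUGH, PAT_WRITE_PROTECTED, PAT_WRITE_BACK,
	    PAT_UNCACHED };
	static const char *names[] = { "UC", "WC", "WT", "WP", "WB", "UC-" };
	int i;

	for (i = 0; i < 6; i++)
		printf("%-3s pte 0x%03x pde 0x%04x\n", names[i],
		    cache_bits(modes[i], 0), cache_bits(modes[i], 1));
	return (0);
}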
@@ -962,6 +1040,15 @@
 	pte_store(pte, pa | PG_RW | PG_V | PG_G);
 }
 
+PMAP_INLINE void
+pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
+{
+	pt_entry_t *pte;
+
+	pte = vtopte(va);
+	pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0));
+}
+
 /*
  * Remove a page from the kernel pagetables.
  * Note: not SMP coherent.
@@ -2281,6 +2368,10 @@
 	if (pmap == kernel_pmap)
 		newpte |= PG_G;
 
+	/* Preserve any caching attributes. */
+	/* XXX: Should this be conditional on something? */
+	newpte |= (origpte & (PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT));
+
 	/*
 	 * if the mapping or permission bits are different, we need
 	 * to update the pte.
@@ -3119,6 +3210,46 @@
  *	Miscellaneous support routines follow
  */
 
+/* Adjust the cache mode for a 4KB page mapped via a PTE. */
+static __inline void
+pmap_pte_attr(vm_offset_t va, int mode)
+{
+	pt_entry_t *pte;
+	u_int opte, npte;
+
+	pte = vtopte(va);
+
+	/*
+	 * The cache mode bits are all in the low 32-bits of the
+	 * PTE, so we can just spin on updating the low 32-bits.
+	 */
+	do {
+		opte = *(u_int *)pte;
+		npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
+		npte |= pmap_cache_bits(mode, 0);
+	} while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
+}
+
+/* Adjust the cache mode for a 2MB page mapped via a PDE. */
+static __inline void
+pmap_pde_attr(vm_offset_t va, int mode)
+{
+	pd_entry_t *pde;
+	u_int opde, npde;
+
+	pde = pmap_pde(kernel_pmap, va);
+
+	/*
+	 * The cache mode bits are all in the low 32-bits of the
+	 * PDE, so we can just spin on updating the low 32-bits.
+	 */
+	do {
+		opde = *(u_int *)pde;
+		npde = opde & ~(PG_PDE_PAT | PG_NC_PCD | PG_NC_PWT);
+		npde |= pmap_cache_bits(mode, 1);
+	} while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
+}
+
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space. Return a pointer to where it is mapped. This
@@ -3126,12 +3257,15 @@
  * NOT real memory.
  */
 void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 {
 	vm_offset_t va, tmpva, offset;
 
-	/* If this fits within the direct map window, use it */
-	if (pa < dmaplimit && (pa + size) < dmaplimit)
+	/*
+	 * If this fits within the direct map window and uses WB caching
+	 * mode, use the direct map.
+	 */
+	if (pa < dmaplimit && (pa + size) < dmaplimit && mode == PAT_WRITE_BACK)
 		return ((void *)PHYS_TO_DMAP(pa));
 	offset = pa & PAGE_MASK;
 	size = roundup(offset + size, PAGE_SIZE);
@@ -3140,15 +3274,30 @@
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 	pa = trunc_page(pa);
 	for (tmpva = va; size > 0; ) {
-		pmap_kenter(tmpva, pa);
+		pmap_kenter_attr(tmpva, pa, mode);
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
 		pa += PAGE_SIZE;
 	}
 	pmap_invalidate_range(kernel_pmap, va, tmpva);
+	pmap_invalidate_cache();
 	return ((void *)(va + offset));
 }
 
+void *
+pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+{
+
+	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+}
+
+void *
+pmap_mapbios(vm_paddr_t pa, vm_size_t size)
+{
+
+	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
+}
+
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
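For reviewers, here is how a consumer is expected to split its mappings once
this goes in.  This is a hypothetical driver fragment, not part of the patch;
the softc and field names are invented.  No error handling is shown because
pmap_mapdev_attr() panics rather than returning NULL when kernel VA space is
exhausted, and on CPUs without PAT pmap_cache_bits() silently degrades the WC
request to UC, so the fragment is safe on older hardware.

/*
 * Hypothetical usage sketch: frame buffers want write-combining,
 * control registers stay uncacheable.
 */
struct mydev_softc {
	void	*fb_va;			/* frame buffer, WC */
	void	*regs_va;		/* registers, UC */
};

static void
mydev_map(struct mydev_softc *sc, vm_paddr_t fb_pa, vm_size_t fb_size,
    vm_paddr_t regs_pa, vm_size_t regs_size)
{

	/* Streaming stores to the frame buffer benefit from WC. */
	sc->fb_va = pmap_mapdev_attr(fb_pa, fb_size, PAT_WRITE_COMBINING);

	/* pmap_mapdev() keeps its historical uncacheable behavior. */
	sc->regs_va = pmap_mapdev(regs_pa, regs_size);
}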
@@ -3166,6 +3315,70 @@
 	kmem_free(kernel_map, base, size);
 }
 
+int
+pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
+{
+	vm_offset_t base, offset, tmpva;
+	pd_entry_t *pde;
+	pt_entry_t *pte;
+
+	base = va & PG_FRAME;
+	offset = va & PAGE_MASK;
+	size = roundup(offset + size, PAGE_SIZE);
+
+	/* Only supported on kernel virtual addresses. */
+	if (base <= VM_MAXUSER_ADDRESS)
+		return (EINVAL);
+
+	/*
+	 * XXX: We have to support tearing 2MB pages down into 4k pages if
+	 * needed here.
+	 */
+	/* Pages that aren't mapped aren't supported. */
+	for (tmpva = base; tmpva < (base + size); ) {
+		pde = pmap_pde(kernel_pmap, tmpva);
+		if (*pde == 0)
+			return (EINVAL);
+		if (*pde & PG_PS) {
+			/* Handle 2MB pages that are completely contained. */
+			if (size >= NBPDR) {
+				tmpva += NBPDR;
+				continue;
+			}
+			return (EINVAL);
+		}
+		pte = vtopte(tmpva);
+		if (*pte == 0)
+			return (EINVAL);
+		tmpva += PAGE_SIZE;
+	}
+
+	/*
+	 * Ok, all the pages exist, so run through them updating their
+	 * cache mode.
+	 */
+	for (tmpva = base; size > 0; ) {
+		pde = pmap_pde(kernel_pmap, tmpva);
+		if (*pde & PG_PS) {
+			pmap_pde_attr(tmpva, mode);
+			tmpva += NBPDR;
+			size -= NBPDR;
+		} else {
+			pmap_pte_attr(tmpva, mode);
+			tmpva += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	}
+
+	/*
+	 * Flush CPU caches to make sure any data isn't cached that shouldn't
+	 * be, etc.
+	 */
+	pmap_invalidate_range(kernel_pmap, base, tmpva);
+	pmap_invalidate_cache();
+	return (0);
+}
+
 /*
  * perform the pmap work for mincore
  */

--- //depot/vendor/freebsd/src/sys/amd64/include/pmap.h	2006/05/01 22:10:22
+++ //depot/user/jhb/pat/amd64/include/pmap.h	2006/07/27 20:58:45
@@ -300,14 +300,19 @@
 extern vm_offset_t virtual_end;
 
 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
+#define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
 
 void	pmap_bootstrap(vm_paddr_t *);
+int	pmap_change_attr(vm_offset_t, vm_size_t, int);
 void	pmap_init_pat(void);
 void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
 vm_paddr_t pmap_kextract(vm_offset_t);
 void	pmap_kremove(vm_offset_t);
+void	*pmap_mapbios(vm_paddr_t, vm_size_t);
 void	*pmap_mapdev(vm_paddr_t, vm_size_t);
+void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
 void	pmap_unmapdev(vm_offset_t, vm_size_t);
 void	pmap_invalidate_page(pmap_t, vm_offset_t);
 void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);

--- //depot/vendor/freebsd/src/sys/dev/acpica/Osd/OsdMemory.c	2005/09/11 18:40:38
+++ //depot/user/jhb/pat/dev/acpica/Osd/OsdMemory.c	2006/07/27 21:09:58
@@ -58,7 +58,7 @@
 AcpiOsMapMemory(ACPI_PHYSICAL_ADDRESS PhysicalAddress, ACPI_SIZE Length,
     void **LogicalAddress)
 {
-    *LogicalAddress = pmap_mapdev((vm_offset_t)PhysicalAddress, Length);
+    *LogicalAddress = pmap_mapbios((vm_offset_t)PhysicalAddress, Length);
     if (*LogicalAddress == NULL)
	return (AE_BAD_ADDRESS);
     return (AE_OK);
@@ -67,7 +67,7 @@
 void
 AcpiOsUnmapMemory(void *LogicalAddress, ACPI_SIZE Length)
 {
-    pmap_unmapdev((vm_offset_t)LogicalAddress, Length);
+    pmap_unmapbios((vm_offset_t)LogicalAddress, Length);
 }
 
 ACPI_STATUS

--- //depot/vendor/freebsd/src/sys/i386/acpica/acpi_machdep.c	2006/06/10 08:11:24
+++ //depot/user/jhb/pat/i386/acpica/acpi_machdep.c	2006/07/27 21:20:38
@@ -347,9 +347,9 @@
 	int year;
 
 	/* BIOS address 0xffff5 contains the date in the format mm/dd/yy. */
-	va = pmap_mapdev(0xffff0, 16);
+	va = pmap_mapbios(0xffff0, 16);
 	sscanf(va + 11, "%2d", &year);
-	pmap_unmapdev((vm_offset_t)va, 16);
+	pmap_unmapbios((vm_offset_t)va, 16);
 
 	/*
 	 * Date must be >= 1/1/1999 or we don't trust ACPI.  Note that this
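pmap_change_attr() validates the entire range before it modifies any PTE, so a
failure leaves the old attributes fully intact.  A hypothetical caller (the
names buf_va, buf_len, and the driver prefix are invented, not from this patch)
would look like the fragment below.  As with the mapping functions, WC quietly
becomes UC on pre-PAT CPUs, so callers cannot assume the final mode matches the
requested one.

	int error;

	/* Sketch: upgrade an already-mapped buffer to WC, tolerating failure. */
	error = pmap_change_attr(buf_va, buf_len, PAT_WRITE_COMBINING);
	if (error != 0) {
		/*
		 * EINVAL: the range is not entirely mapped, is not a
		 * kernel address, or overlaps a 2MB mapping that the
		 * request does not fully cover.  Nothing was changed.
		 */
		printf("mydev: buffer stays in its current mode (%d)\n",
		    error);
	}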
--- //depot/vendor/freebsd/src/sys/i386/acpica/madt.c	2006/03/27 16:00:19
+++ //depot/user/jhb/pat/i386/acpica/madt.c	2006/04/14 18:07:20
@@ -203,15 +203,15 @@
 
 	/*
 	 * Map in the RSDP.  Since ACPI uses AcpiOsMapMemory() which in turn
-	 * calls pmap_mapdev() to find the RSDP, we assume that we can use
-	 * pmap_mapdev() to map the RSDP.
+	 * calls pmap_mapbios() to find the RSDP, we assume that we can use
+	 * pmap_mapbios() to map the RSDP.
 	 */
 	if (AcpiOsGetRootPointer(ACPI_LOGICAL_ADDRESSING, &rsdp_ptr) != AE_OK)
 		return (ENXIO);
 #ifdef __i386__
 	KASSERT(rsdp_ptr.Pointer.Physical < KERNLOAD, ("RSDP too high"));
 #endif
-	rsdp = pmap_mapdev(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
+	rsdp = pmap_mapbios(rsdp_ptr.Pointer.Physical, sizeof(RSDP_DESCRIPTOR));
 	if (rsdp == NULL) {
 		if (bootverbose)
 			printf("MADT: Failed to map RSDP\n");
@@ -261,7 +261,7 @@
 			break;
 		madt_unmap_table(rsdt);
 	}
-	pmap_unmapdev((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
+	pmap_unmapbios((vm_offset_t)rsdp, sizeof(RSDP_DESCRIPTOR));
 	if (madt_physaddr == 0) {
 		if (bootverbose)
 			printf("MADT: No MADT table found\n");
@@ -335,7 +335,7 @@
 madt_setup_local(void)
 {
 
-	madt = pmap_mapdev(madt_physaddr, madt_length);
+	madt = pmap_mapbios(madt_physaddr, madt_length);
 	lapic_init((uintptr_t)madt->LocalApicAddress);
 	printf("ACPI APIC Table: <%.*s %.*s>\n",
 	    (int)sizeof(madt->OemId), madt->OemId,

--- //depot/vendor/freebsd/src/sys/i386/i386/pmap.c	2006/07/20 17:51:02
+++ //depot/user/jhb/pat/i386/i386/pmap.c	2006/07/27 20:58:45
@@ -630,6 +630,84 @@
  * Low level helper routines.....
  ***************************************************/
+/*
+ * Determine the appropriate bits to set in a PTE or PDE for a specified
+ * caching mode.
+ */
+static int
+pmap_cache_bits(int mode, boolean_t is_pde)
+{
+	int pat_flag, pat_index, cache_bits;
+
+	/* The PAT bit is different for PTE's and PDE's. */
+	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
+
+	/* If we don't support PAT, map extended modes to older ones. */
+	if (!(cpu_feature & CPUID_PAT)) {
+		switch (mode) {
+		case PAT_UNCACHEABLE:
+		case PAT_WRITE_THROUGH:
+		case PAT_WRITE_BACK:
+			break;
+		case PAT_UNCACHED:
+		case PAT_WRITE_COMBINING:
+		case PAT_WRITE_PROTECTED:
+			mode = PAT_UNCACHEABLE;
+			break;
+		}
+	}
+
+	/* Map the caching mode to a PAT index. */
+	switch (mode) {
+#ifdef PAT_WORKS
+	case PAT_UNCACHEABLE:
+		pat_index = 3;
+		break;
+	case PAT_WRITE_THROUGH:
+		pat_index = 1;
+		break;
+	case PAT_WRITE_BACK:
+		pat_index = 0;
+		break;
+	case PAT_UNCACHED:
+		pat_index = 2;
+		break;
+	case PAT_WRITE_COMBINING:
+		pat_index = 5;
+		break;
+	case PAT_WRITE_PROTECTED:
+		pat_index = 4;
+		break;
+#else
+	case PAT_UNCACHED:
+	case PAT_UNCACHEABLE:
+	case PAT_WRITE_PROTECTED:
+		pat_index = 3;
+		break;
+	case PAT_WRITE_THROUGH:
+		pat_index = 1;
+		break;
+	case PAT_WRITE_BACK:
+		pat_index = 0;
+		break;
+	case PAT_WRITE_COMBINING:
+		pat_index = 2;
+		break;
+#endif
+	default:
+		panic("Unknown caching mode %d\n", mode);
+	}
+
+	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
+	cache_bits = 0;
+	if (pat_index & 0x4)
+		cache_bits |= pat_flag;
+	if (pat_index & 0x2)
+		cache_bits |= PG_NC_PCD;
+	if (pat_index & 0x1)
+		cache_bits |= PG_NC_PWT;
+	return (cache_bits);
+}
 
 #ifdef SMP
 /*
  * For SMP, these functions have to use the IPI mechanism for coherence.
@@ -1002,6 +1080,15 @@
 	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
 }
 
+PMAP_INLINE void
+pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
+{
+	pt_entry_t *pte;
+
+	pte = vtopte(va);
+	pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
+}
+
 /*
  * Remove a page from the kernel pagetables.
  * Note: not SMP coherent.
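A note on the #ifdef PAT_WORKS table repeated above: indices 4 and 5 are only
meaningful once the PAT MSR has been reprogrammed, since the power-on values of
entries 4-7 merely mirror entries 0-3 (WB, WT, UC-, UC).  The headers touched
here already declare pmap_init_pat(); the sketch below is an assumption about
what that setup must look like for the table to be valid, not code from this
diff.  It spells out the helper macros in case specialreg.h does not provide
them, and it relies on the PAT_* constants being the architectural memory-type
encodings (UC=0x00, WC=0x01, WT=0x04, WP=0x05, WB=0x06, UC-=0x07).

/*
 * Assumed companion setup for the PAT_WORKS table: leave entries 0-3
 * at their power-on values and redefine entries 4 and 5 as WP and WC.
 */
#define	PAT_VALUE(i, m)	((uint64_t)(m) << (8 * (i)))
#define	PAT_MASK(i)	PAT_VALUE((i), 0xff)

void
pmap_init_pat(void)
{
	uint64_t pat_msr;

	/* CPUs without PAT keep their MTRR-only behavior. */
	if (!(cpu_feature & CPUID_PAT))
		return;

	pat_msr = rdmsr(MSR_PAT);
	pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
	pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
	    PAT_VALUE(5, PAT_WRITE_COMBINING);
	wrmsr(MSR_PAT, pat_msr);
}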
@@ -2356,6 +2443,10 @@
 	if (pmap == kernel_pmap)
 		newpte |= pgeflag;
 
+	/* Preserve any caching attributes. */
+	/* XXX: Should this be conditional on something? */
+	newpte |= (origpte & (PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT));
+
 	/*
 	 * if the mapping or permission bits are different, we need
 	 * to update the pte.
@@ -3243,7 +3334,7 @@
  * NOT real memory.
  */
 void *
-pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
 {
 	vm_offset_t va, tmpva, offset;
 
@@ -3259,15 +3350,30 @@
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
 	for (tmpva = va; size > 0; ) {
-		pmap_kenter(tmpva, pa);
+		pmap_kenter_attr(tmpva, pa, mode);
 		size -= PAGE_SIZE;
 		tmpva += PAGE_SIZE;
 		pa += PAGE_SIZE;
 	}
 	pmap_invalidate_range(kernel_pmap, va, tmpva);
+	pmap_invalidate_cache();
 	return ((void *)(va + offset));
 }
 
+void *
+pmap_mapdev(vm_paddr_t pa, vm_size_t size)
+{
+
+	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
+}
+
+void *
+pmap_mapbios(vm_paddr_t pa, vm_size_t size)
+{
+
+	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
+}
+
 void
 pmap_unmapdev(vm_offset_t va, vm_size_t size)
 {
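The pmap_mapdev()/pmap_mapbios() split matters most for code that walks
firmware tables byte by byte, as the ACPI and MADT changes in this patch do.
A hypothetical helper (the name and signature are invented for illustration)
shows the intended pattern; the WB mapping lets the scan run at cache speed
where the old UC mapping issued an uncached read per byte.

/*
 * Hypothetical sketch: scan a firmware region for a table signature
 * through a cacheable BIOS mapping.
 */
static vm_paddr_t
bios_sigsearch(vm_paddr_t start, vm_size_t len, const char *sig,
    size_t siglen, int step)
{
	char *base, *p;
	vm_paddr_t pa;

	pa = 0;
	base = pmap_mapbios(start, len);
	for (p = base; p + siglen <= base + len; p += step) {
		if (memcmp(p, sig, siglen) == 0) {
			pa = start + (p - base);
			break;
		}
	}
	pmap_unmapbios((vm_offset_t)base, len);
	return (pa);
}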
@@ -3284,6 +3390,64 @@
 	kmem_free(kernel_map, base, size);
 }
 
+int
+pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
+{
+	vm_offset_t base, offset, tmpva;
+	pt_entry_t *pte;
+	u_int opte, npte;
+	pd_entry_t *pde;
+
+	base = va & PG_FRAME;
+	offset = va & PAGE_MASK;
+	size = roundup(offset + size, PAGE_SIZE);
+
+	/* Only supported on kernel virtual addresses. */
+	if (base <= VM_MAXUSER_ADDRESS)
+		return (EINVAL);
+
+	/* 4MB pages and pages that aren't mapped aren't supported. */
+	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
+		pde = pmap_pde(kernel_pmap, tmpva);
+		if (*pde & PG_PS)
+			return (EINVAL);
+		if (*pde == 0)
+			return (EINVAL);
+		pte = vtopte(tmpva);
+		if (*pte == 0)
+			return (EINVAL);
+	}
+
+	/*
+	 * Ok, all the pages exist and are 4k, so run through them updating
+	 * their cache mode.
+	 */
+	for (tmpva = base; size > 0; ) {
+		pte = vtopte(tmpva);
+
+		/*
+		 * The cache mode bits are all in the low 32-bits of the
+		 * PTE, so we can just spin on updating the low 32-bits.
+		 */
+		do {
+			opte = *(u_int *)pte;
+			npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
+			npte |= pmap_cache_bits(mode, 0);
+		} while (npte != opte &&
+		    !atomic_cmpset_int((u_int *)pte, opte, npte));
+		tmpva += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	/*
+	 * Flush CPU caches to make sure any data isn't cached that shouldn't
+	 * be, etc.
+	 */
+	pmap_invalidate_range(kernel_pmap, base, tmpva);
+	pmap_invalidate_cache();
+	return (0);
+}
+
 /*
  * perform the pmap work for mincore
 */

--- //depot/vendor/freebsd/src/sys/i386/include/pmap.h	2006/05/01 22:10:22
+++ //depot/user/jhb/pat/i386/include/pmap.h	2006/07/27 20:58:45
@@ -368,13 +368,18 @@
 extern vm_offset_t virtual_end;
 
 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
+#define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
 
 void	pmap_bootstrap(vm_paddr_t, vm_paddr_t);
+int	pmap_change_attr(vm_offset_t, vm_size_t, int);
 void	pmap_init_pat(void);
 void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
 void	pmap_kremove(vm_offset_t);
+void	*pmap_mapbios(vm_paddr_t, vm_size_t);
 void	*pmap_mapdev(vm_paddr_t, vm_size_t);
+void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
 void	pmap_unmapdev(vm_offset_t, vm_size_t);
 pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
 void	pmap_set_pg(void);

--- //depot/vendor/freebsd/src/sys/ia64/include/pmap.h	2005/12/06 21:11:21
+++ //depot/user/jhb/pat/ia64/include/pmap.h	2006/07/27 21:09:58
@@ -119,6 +119,8 @@
 extern int pmap_vhpt_log2size;
 
 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
+#define	pmap_mapbios(pa, sz)	pmap_mapdev(pa, sz)
+#define	pmap_unmapbios(va, sz)	pmap_unmapdev(va, sz)
 
 vm_offset_t pmap_steal_memory(vm_size_t);
 void	pmap_bootstrap(void);
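ia64 has no PAT, so the new names simply collapse to the existing
device-mapping routines there.  That keeps machine-independent consumers such
as the OsdMemory.c change above free of #ifdefs; for example, a hypothetical
MI debugging helper (dump_table() is invented for illustration) compiles
unchanged on amd64, i386, and ia64:

/*
 * Hypothetical MI consumer of the pmap_mapbios() interface.
 */
static void
dump_table(vm_paddr_t pa, vm_size_t len)
{
	unsigned char *p;
	vm_size_t i;

	p = pmap_mapbios(pa, len);
	for (i = 0; i < len; i++)
		printf("%02x%c", p[i], (i % 16 == 15) ? '\n' : ' ');
	printf("\n");
	pmap_unmapbios((vm_offset_t)p, len);
}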