/*-
 * Copyright (c) 2007 Ariff Abdullah
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 * "Little-pmap": Lost Technology for RELENG_5/RELENG_6.
 *
 */

#ifndef _LPMAP_C_
#define _LPMAP_C_

#if !(defined(__i386__) || defined(__amd64__))
#error "Not applicable for non-i386/non-amd64"
#endif

/* For cpu_feature, CPUID_PAT, VM_MAXUSER_ADDRESS */
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/vmparam.h>

#ifndef PG_PTE_PAT
#define PG_PTE_PAT		0x0080
#endif

#ifndef PG_PDE_PAT
#define PG_PDE_PAT		0x1000
#endif

#ifndef PAT_UNCACHEABLE
#define PAT_UNCACHEABLE		0x00
#endif

#ifndef PAT_WRITE_COMBINING
#define PAT_WRITE_COMBINING	0x01
#endif

#ifndef PAT_WRITE_THROUGH
#define PAT_WRITE_THROUGH	0x04
#endif

#ifndef PAT_WRITE_PROTECTED
#define PAT_WRITE_PROTECTED	0x05
#endif

#ifndef PAT_WRITE_BACK
#define PAT_WRITE_BACK		0x06
#endif

#ifndef PAT_UNCACHED
#define PAT_UNCACHED		0x07
#endif

/* Debugging printf, normally compiled out. */
#define lpmap_p(x, y...)	/*printf("%s(): "x, __func__, y)*/
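
/*
 * Everything below routes the including driver's pmap_change_attr() calls
 * to the local lpmap_change_attr(), which changes the caching mode of an
 * existing kernel mapping by rewriting the PCD/PWT bits (and, on newer
 * kernels, clearing the PAT bit) in the page table entries backing it.
 * Requests for write-combining, write-protected or uncached modes fall
 * back to plain uncacheable on CPUs without PAT support.
 */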
/*printf("%s(): "x, __func__, y)*/ #undef pmap_change_attr #define pmap_change_attr lpmap_change_attr #if defined(__i386__) #define lpmap_pde(m, v) (&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT])) #elif defined(__amd64__) static __inline vm_pindex_t lpmap_pde_index(vm_offset_t va) { return ((va >> PDRSHIFT) & ((1UL << NPDEPGSHIFT) - 1)); } static __inline vm_pindex_t lpmap_pdpe_index(vm_offset_t va) { return ((va >> PDPSHIFT) & ((1UL << NPDPEPGSHIFT) - 1)); } static __inline vm_pindex_t lpmap_pml4e_index(vm_offset_t va) { return ((va >> PML4SHIFT) & ((1UL << NPML4EPGSHIFT) - 1)); } static __inline pd_entry_t * lpmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va) { pd_entry_t *pde; pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME); return (&pde[lpmap_pde_index(va)]); } static __inline pml4_entry_t * lpmap_pml4e(pmap_t pmap, vm_offset_t va) { if (pmap == NULL) return (NULL); return (&pmap->pm_pml4[lpmap_pml4e_index(va)]); } static __inline pdp_entry_t * lpmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va) { pdp_entry_t *pdpe; pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME); return (&pdpe[lpmap_pdpe_index(va)]); } static __inline pdp_entry_t * lpmap_pdpe(pmap_t pmap, vm_offset_t va) { pml4_entry_t *pml4e; pml4e = lpmap_pml4e(pmap, va); if (pml4e == NULL || (*pml4e & PG_V) == 0) return (NULL); return (lpmap_pml4e_to_pdpe(pml4e, va)); } static __inline pd_entry_t * lpmap_pde(pmap_t pmap, vm_offset_t va) { pdp_entry_t *pdpe; pdpe = lpmap_pdpe(pmap, va); if (pdpe == NULL || (*pdpe & PG_V) == 0) return (NULL); return (lpmap_pdpe_to_pde(pdpe, va)); } #endif static __inline int lpmap_validate_range(vm_offset_t base, vm_size_t size) { vm_offset_t va; pt_entry_t *pte; pd_entry_t *pde; if (base <= VM_MAXUSER_ADDRESS) { lpmap_p("base <= VM_MAXUSER_ADDRESS : base=0x%jx\n", (uintmax_t)base); return (EINVAL); } va = base; while (va < (base + size)) { pde = lpmap_pde(kernel_pmap, va); if (pde == NULL || *pde == 0) { lpmap_p("Failed: %s : va=0x%jx\n", (pde == NULL) ? "pde == NULL" : "*pde == 0", (uintmax_t)va); return (EINVAL); } if (*pde & PG_PS) { #if defined(__amd64__) if (size < NBPDR) { lpmap_p("Failed: size < NBPDR : va=0x%jx\n", (uintmax_t)va); return (EINVAL); } va += NBPDR; #else lpmap_p("Failed: (*pde & PG_PS) != 0 : va=0x%jx\n", (uintmax_t)va); return (EINVAL); #endif } else { pte = vtopte(va); if (pte == NULL || *pte == 0) { lpmap_p("Failed: %s : va=0x%jx\n", (pte == NULL) ? 
"pte == NULL" : "*pte == 0", (uintmax_t)va); return (EINVAL); } va += PAGE_SIZE; } } return (0); } #if (__FreeBSD_version >= 800000 && __FreeBSD_version < 800005) || \ __FreeBSD_version < 700055 extern int osreldate; #else #include #endif static int lpmap_change_attr(vm_offset_t va, vm_size_t size, int mode) { vm_offset_t base, offset; pt_entry_t *pte; #if defined(__amd64__) pd_entry_t *pde; #endif u_int opxe, npxe, ptefl, pdefl, attr; int err, flushtlb; switch (mode) { case PAT_UNCACHED: case PAT_WRITE_COMBINING: case PAT_WRITE_PROTECTED: if (!(cpu_feature & CPUID_PAT)) mode = PAT_UNCACHEABLE; break; default: break; } attr = 0; switch (mode) { case PAT_UNCACHED: case PAT_UNCACHEABLE: case PAT_WRITE_PROTECTED: attr |= PG_NC_PCD; case PAT_WRITE_THROUGH: attr |= PG_NC_PWT; break; case PAT_WRITE_COMBINING: attr |= PG_NC_PCD; break; case PAT_WRITE_BACK: break; default: lpmap_p("Unsupported mode=0x%08x\n", mode); return (EINVAL); break; } ptefl = PG_NC_PCD | PG_NC_PWT; pdefl = PG_NC_PCD | PG_NC_PWT; if (osreldate >= 602110) { ptefl |= PG_PTE_PAT; pdefl |= PG_PDE_PAT; } base = va & PG_FRAME; offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); err = lpmap_validate_range(base, size); if (err != 0) { lpmap_p("Validation failed! " "vm_offset_t=0x%jx vm_size_t=%ju attr=0x%04x\n", (uintmax_t)va, (uintmax_t)size, attr); return (err); } lpmap_p("vm_offset_t=0x%jx vm_size_t=%ju attr=0x%04x\n", (uintmax_t)va, (uintmax_t)size, attr); flushtlb = 0; while (size > 0) { #if defined(__amd64__) pde = lpmap_pde(kernel_pmap, base); if (*pde & PG_PS) { do { opxe = *(u_int *)pde; npxe = opxe & ~pdefl; npxe |= attr; } while (npxe != opxe && ++flushtlb && !atomic_cmpset_int((u_int *)pde, opxe, npxe)); size -= NBPDR; base += NBPDR; } else { #endif pte = vtopte(base); do { opxe = *(u_int *)pte; npxe = opxe & ~ptefl; npxe |= attr; } while (npxe != opxe && ++flushtlb && !atomic_cmpset_int((u_int *)pte, opxe, npxe)); size -= PAGE_SIZE; base += PAGE_SIZE; #if defined(__amd64__) } #endif } /* XXX Gross!! */ if (flushtlb != 0) { lpmap_p("flushtlb=%d\n", flushtlb); pmap_invalidate_all(kernel_pmap); } return (0); } #endif /* !_LPMAP_C_ */