Index: sys/amd64/vmm/intel/vmcs.c
===================================================================
--- sys/amd64/vmm/intel/vmcs.c	(revision 279210)
+++ sys/amd64/vmm/intel/vmcs.c	(working copy)
@@ -342,18 +342,6 @@
 	 */
 	VMPTRLD(vmcs);
 
-	/* Initialize guest IA32_PAT MSR with the default value */
-	pat = PAT_VALUE(0, PAT_WRITE_BACK) |
-	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
-	    PAT_VALUE(2, PAT_UNCACHED) |
-	    PAT_VALUE(3, PAT_UNCACHEABLE) |
-	    PAT_VALUE(4, PAT_WRITE_BACK) |
-	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
-	    PAT_VALUE(6, PAT_UNCACHED) |
-	    PAT_VALUE(7, PAT_UNCACHEABLE);
-	if ((error = vmwrite(VMCS_GUEST_IA32_PAT, pat)) != 0)
-		goto done;
-
 	/* Host state */
 
 	/* Initialize host IA32_PAT MSR */
Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c	(revision 279210)
+++ sys/amd64/vmm/intel/vmx.c	(working copy)
@@ -100,13 +100,11 @@
 	(VM_EXIT_HOST_LMA			|	\
 	VM_EXIT_SAVE_EFER			|	\
 	VM_EXIT_LOAD_EFER			|	\
-	VM_EXIT_ACKNOWLEDGE_INTERRUPT		|	\
-	VM_EXIT_SAVE_PAT			|	\
-	VM_EXIT_LOAD_PAT)
+	VM_EXIT_ACKNOWLEDGE_INTERRUPT)
 
 #define	VM_EXIT_CTLS_ZERO_SETTING	VM_EXIT_SAVE_DEBUG_CONTROLS
 
-#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER | VM_ENTRY_LOAD_PAT)
+#define	VM_ENTRY_CTLS_ONE_SETTING	(VM_ENTRY_LOAD_EFER)
 
 #define	VM_ENTRY_CTLS_ZERO_SETTING					\
 	(VM_ENTRY_LOAD_DEBUG_CONTROLS		|	\
@@ -859,10 +857,6 @@
 	 * VM exit and entry respectively. It is also restored from the
 	 * host VMCS area on a VM exit.
 	 *
-	 * MSR_PAT is saved and restored in the guest VMCS are on a VM exit
-	 * and entry respectively. It is also restored from the host VMCS
-	 * area on a VM exit.
-	 *
 	 * The TSC MSR is exposed read-only. Writes are disallowed as that
 	 * will impact the host TSC.
 	 * XXX Writes would be implemented with a wrmsr trap, and
@@ -874,7 +868,6 @@
 	    guest_msr_rw(vmx, MSR_SYSENTER_ESP_MSR) ||
 	    guest_msr_rw(vmx, MSR_SYSENTER_EIP_MSR) ||
 	    guest_msr_rw(vmx, MSR_EFER) ||
-	    guest_msr_rw(vmx, MSR_PAT) ||
 	    guest_msr_ro(vmx, MSR_TSC))
 		panic("vmx_vminit: error setting guest msr access");
 
Index: sys/amd64/vmm/intel/vmx.h
===================================================================
--- sys/amd64/vmm/intel/vmx.h	(revision 279210)
+++ sys/amd64/vmm/intel/vmx.h	(working copy)
@@ -103,6 +103,7 @@
 	IDX_MSR_STAR,
 	IDX_MSR_SF_MASK,
 	IDX_MSR_KGSBASE,
+	IDX_MSR_PAT,
 	GUEST_MSR_NUM		/* must be the last enumeration */
 };
 
Index: sys/amd64/vmm/intel/vmx_msr.c
===================================================================
--- sys/amd64/vmm/intel/vmx_msr.c	(revision 279210)
+++ sys/amd64/vmm/intel/vmx_msr.c	(working copy)
@@ -230,6 +230,25 @@
 	return (false);
 }
 
+static bool
+pat_valid(uint64_t val)
+{
+	int i, pa;
+
+	/*
+	 * From Intel SDM: Table "Memory Types That Can Be Encoded With PAT"
+	 *
+	 * Extract PA0 through PA7 and validate that each one encodes a
+	 * valid memory type.
+	 */
+	for (i = 0; i < 8; i++) {
+		pa = (val >> (i * 8)) & 0xff;
+		if (pa == 2 || pa == 3 || pa >= 8)
+			return (false);
+	}
+	return (true);
+}
+
 void
 vmx_msr_init(void)
 {
@@ -302,6 +321,10 @@
 void
 vmx_msr_guest_init(struct vmx *vmx, int vcpuid)
 {
+	uint64_t *guest_msrs;
+
+	guest_msrs = vmx->guest_msrs[vcpuid];
+
 	/*
 	 * The permissions bitmap is shared between all vcpus so initialize it
 	 * once when initializing the vBSP.
@@ -313,6 +336,19 @@
 		guest_msr_rw(vmx, MSR_SF_MASK);
 		guest_msr_rw(vmx, MSR_KGSBASE);
 	}
+
+	/*
+	 * Initialize guest IA32_PAT MSR with default value after reset.
+	 */
+	guest_msrs[IDX_MSR_PAT] = PAT_VALUE(0, PAT_WRITE_BACK) |
+	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
+	    PAT_VALUE(2, PAT_UNCACHED) |
+	    PAT_VALUE(3, PAT_UNCACHEABLE) |
+	    PAT_VALUE(4, PAT_WRITE_BACK) |
+	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
+	    PAT_VALUE(6, PAT_UNCACHED) |
+	    PAT_VALUE(7, PAT_UNCACHEABLE);
+
 	return;
 }
 
@@ -353,8 +389,12 @@
 int
 vmx_rdmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t *val, bool *retu)
 {
-	int error = 0;
+	const uint64_t *guest_msrs;
+	int error;
 
+	guest_msrs = vmx->guest_msrs[vcpuid];
+	error = 0;
+
 	switch (num) {
 	case MSR_IA32_MISC_ENABLE:
 		*val = misc_enable;
@@ -366,6 +406,9 @@
 	case MSR_TURBO_RATIO_LIMIT1:
 		*val = turbo_ratio_limit;
 		break;
+	case MSR_PAT:
+		*val = guest_msrs[IDX_MSR_PAT];
+		break;
 	default:
 		error = EINVAL;
 		break;
@@ -376,10 +419,13 @@
 int
 vmx_wrmsr(struct vmx *vmx, int vcpuid, u_int num, uint64_t val, bool *retu)
 {
+	uint64_t *guest_msrs;
 	uint64_t changed;
 	int error;
 
+	guest_msrs = vmx->guest_msrs[vcpuid];
 	error = 0;
+
 	switch (num) {
 	case MSR_IA32_MISC_ENABLE:
 		changed = val ^ misc_enable;
@@ -401,6 +447,12 @@
 		error = EINVAL;
 		break;
 
+	case MSR_PAT:
+		if (pat_valid(val))
+			guest_msrs[IDX_MSR_PAT] = val;
+		else
+			vm_inject_gp(vmx->vm, vcpuid);
+		break;
 	default:
 		error = EINVAL;
 		break;
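
Notes (not part of the patch): with this change the guest's IA32_PAT is no
longer loaded/saved through the VMCS; it lives in guest_msrs[IDX_MSR_PAT] and
is read and written through vmx_rdmsr()/vmx_wrmsr(), with pat_valid()
rejecting reserved encodings by injecting #GP.  As a quick sanity check, the
standalone userland sketch below mirrors the same validation rule and the
reset-default initializer.  The PAT_VALUE()/PAT_* macros are redefined
locally so the fragment compiles outside the kernel; they are intended to
mirror the <machine/specialreg.h> encodings from the Intel SDM (0 = UC,
4 = WT, 6 = WB, 7 = UC-), and the whole program is illustrative only.

/* Standalone sketch of the pat_valid() rule and the reset-default PAT. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PAT_VALUE(i, m)		((uint64_t)(m) << ((i) * 8))
#define	PAT_UNCACHEABLE		0x00
#define	PAT_WRITE_THROUGH	0x04
#define	PAT_WRITE_BACK		0x06
#define	PAT_UNCACHED		0x07

static bool
pat_valid(uint64_t val)
{
	int i, pa;

	/* PA0..PA7 are one byte each; 2, 3 and anything >= 8 are reserved. */
	for (i = 0; i < 8; i++) {
		pa = (val >> (i * 8)) & 0xff;
		if (pa == 2 || pa == 3 || pa >= 8)
			return (false);
	}
	return (true);
}

int
main(void)
{
	uint64_t def;

	/* Same initializer the patch installs in guest_msrs[IDX_MSR_PAT]. */
	def = PAT_VALUE(0, PAT_WRITE_BACK) |
	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
	    PAT_VALUE(2, PAT_UNCACHED) |
	    PAT_VALUE(3, PAT_UNCACHEABLE) |
	    PAT_VALUE(4, PAT_WRITE_BACK) |
	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
	    PAT_VALUE(6, PAT_UNCACHED) |
	    PAT_VALUE(7, PAT_UNCACHEABLE);

	/* Expected: default PAT = 0x0007040600070406, valid = 1 */
	printf("default PAT = 0x%016jx, valid = %d\n",
	    (uintmax_t)def, pat_valid(def));

	/* A reserved encoding in PA0 (0x02) is rejected (valid = 0). */
	printf("PA0 = 0x02 -> valid = %d\n",
	    pat_valid((def & ~(uint64_t)0xff) | 0x02));
	return (0);
}

With PAT handled in software this way, a malformed guest wrmsr value is
caught before it ever reaches hardware, which is what the new MSR_PAT case
in vmx_wrmsr() does via vm_inject_gp().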