Index: sys/amd64/include/vmm_instruction_emul.h =================================================================== --- sys/amd64/include/vmm_instruction_emul.h (revision 271417) +++ sys/amd64/include/vmm_instruction_emul.h (working copy) @@ -93,7 +93,7 @@ int vmm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging, uint64_t gla, int prot, uint64_t *gpa); -void vie_init(struct vie *vie); +void vie_init(struct vie *vie, const char *inst_bytes, int inst_length); /* * Decode the instruction fetched into 'vie' so it can be emulated. Index: sys/amd64/vmm/amd/svm.c =================================================================== --- sys/amd64/vmm/amd/svm.c (revision 271419) +++ sys/amd64/vmm/amd/svm.c (working copy) @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -67,6 +68,9 @@ #include "svm_softc.h" #include "npt.h" +SYSCTL_DECL(_hw_vmm); +SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); + /* * SVM CPUID function 0x8000_000A, edx bit decoding. */ @@ -96,9 +100,17 @@ static int svm_getdesc(void *arg, int vcpu, int type, struct seg_desc *desc); static uint32_t svm_feature; /* AMD SVM features. */ +SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RD, &svm_feature, 0, + "SVM features advertised by CPUID.8000000AH:EDX"); +static int disable_npf_assist; +SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, + &disable_npf_assist, 0, NULL); + /* Maximum ASIDs supported by the processor */ static uint32_t nasid; +SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RD, &nasid, 0, + "Number of ASIDs supported by this processor"); /* Current ASID generation for each host cpu */ static struct asid asid[MAXCPU]; @@ -218,6 +230,12 @@ return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); } +static __inline int +decode_assist(void) +{ + return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); +} + /* * Enable SVM for a CPU. 
*/ @@ -792,8 +810,13 @@ { struct vm_guest_paging *paging; struct vmcb_segment *seg; + struct vmcb_ctrl *ctrl; + char *inst_bytes; + int inst_len; + ctrl = &vmcb->ctrl; paging = &vmexit->u.inst_emul.paging; + vmexit->exitcode = VM_EXITCODE_INST_EMUL; vmexit->u.inst_emul.gpa = gpa; vmexit->u.inst_emul.gla = VIE_INVALID_GLA; @@ -800,11 +823,9 @@ svm_paging_info(vmcb, paging); /* - * If DecodeAssist SVM feature doesn't exist, we don't have NPF - * instuction length. RIP will be calculated based on the length - * determined by instruction emulation. + * The inst_length will be determined by decoding the instruction. */ - vmexit->inst_length = VIE_INST_SIZE; + vmexit->inst_length = 0; seg = vmcb_seg(vmcb, VM_REG_GUEST_CS); switch(paging->cpu_mode) { @@ -820,6 +841,18 @@ vmexit->u.inst_emul.cs_d = 0; break; } + + /* + * Copy the instruction bytes into 'vie' if available. + */ + if (decode_assist() && !disable_npf_assist) { + inst_len = ctrl->inst_len; + inst_bytes = ctrl->inst_bytes; + } else { + inst_len = 0; + inst_bytes = NULL; + } + vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); } /* @@ -1182,7 +1215,7 @@ loop = false; break; default: - /* Return to user space. */ + /* Return to user space. */ loop = false; update_rip = false; VCPU_CTR3(svm_sc->vm, vcpu, "VMEXIT=0x%lx" @@ -1190,7 +1223,7 @@ ctrl->exitcode, info1, info2); VCPU_CTR3(svm_sc->vm, vcpu, "SVM:RIP: 0x%lx nRIP:0x%lx" " Inst decoder len:%d\n", state->rip, - ctrl->nrip, ctrl->inst_decode_size); + ctrl->nrip, ctrl->inst_len); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); break; } Index: sys/amd64/vmm/amd/vmcb.h =================================================================== --- sys/amd64/vmm/amd/vmcb.h (revision 271419) +++ sys/amd64/vmm/amd/vmcb.h (working copy) @@ -218,8 +218,8 @@ uint32_t vmcb_clean; /* 0xC0: VMCB clean bits for caching */ uint32_t :32; /* 0xC4: Reserved */ uint64_t nrip; /* 0xC8: Guest next nRIP. 
*/ - uint8_t inst_decode_size; /* 0xD0: Instruction decode */ - uint8_t inst_decode_bytes[15]; + uint8_t inst_len; /* 0xD0: #NPF decode assist */ + uint8_t inst_bytes[15]; uint8_t padd6[0x320]; } __attribute__ ((__packed__)); CTASSERT(sizeof(struct vmcb_ctrl) == 1024); Index: sys/amd64/vmm/intel/vmx.c =================================================================== --- sys/amd64/vmm/intel/vmx.c (revision 271417) +++ sys/amd64/vmm/intel/vmx.c (working copy) @@ -1847,6 +1847,7 @@ vmexit->u.inst_emul.cs_d = 0; break; } + vie_init(&vmexit->u.inst_emul.vie, NULL, 0); } static int Index: sys/amd64/vmm/vmm.c =================================================================== --- sys/amd64/vmm/vmm.c (revision 271417) +++ sys/amd64/vmm/vmm.c (working copy) @@ -1216,7 +1216,7 @@ mem_region_read_t mread; mem_region_write_t mwrite; enum vm_cpu_mode cpu_mode; - int cs_d, error; + int cs_d, error, length; vcpu = &vm->vcpu[vcpuid]; vme = &vcpu->exitinfo; @@ -1228,11 +1228,21 @@ paging = &vme->u.inst_emul.paging; cpu_mode = paging->cpu_mode; - vie_init(vie); - /* Fetch, decode and emulate the faulting instruction */ - error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip, - vme->inst_length, vie); + if (vie->num_valid == 0) { + /* + * If the instruction length is not known then assume a + * maximum size instruction. + */ + length = vme->inst_length ? vme->inst_length : VIE_INST_SIZE; + error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip, + length, vie); + } else { + /* + * The instruction bytes have already been copied into 'vie' + */ + error = 0; + } if (error == 1) return (0); /* Resume guest to handle page fault */ else if (error == -1) @@ -1243,13 +1253,10 @@ if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) return (EFAULT); - /* - * AMD-V doesn't provide instruction length which is nRIP - RIP - * for some of the exit including Nested Page Fault. 
Use instruction - length calculated by software instruction emulation to update - RIP of vcpu. + /* + * If the instruction length is not specified then update it now. */ - if (vme->inst_length == VIE_INST_SIZE) + if (vme->inst_length == 0) vme->inst_length = vie->num_processed; /* return to userland unless this is an in-kernel emulated device */ Index: sys/amd64/vmm/vmm_instruction_emul.c =================================================================== --- sys/amd64/vmm/vmm_instruction_emul.c (revision 271417) +++ sys/amd64/vmm/vmm_instruction_emul.c (working copy) @@ -1025,13 +1025,20 @@ #ifdef _KERNEL void -vie_init(struct vie *vie) +vie_init(struct vie *vie, const char *inst_bytes, int inst_length) { + KASSERT(inst_length >= 0 && inst_length <= VIE_INST_SIZE, + ("%s: invalid instruction length (%d)", __func__, inst_length)); bzero(vie, sizeof(struct vie)); vie->base_register = VM_REG_LAST; vie->index_register = VM_REG_LAST; + + if (inst_length) { + bcopy(inst_bytes, vie->inst, inst_length); + vie->num_valid = inst_length; + } } static int