Index: sys/amd64/vmm/amd/svm.c =================================================================== --- sys/amd64/vmm/amd/svm.c (revision 271570) +++ sys/amd64/vmm/amd/svm.c (working copy) @@ -1004,25 +1004,71 @@ } static int -nmi_blocked(struct svm_softc *sc, int vcpu) +svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, int running, + uint64_t val) { - /* XXX need to track NMI blocking */ + struct vmcb_ctrl *ctrl; + int oldval, newval; + + ctrl = svm_get_vmcb_ctrl(sc, vcpu); + oldval = ctrl->intr_shadow; + newval = val ? 1 : 0; + if (newval != oldval) { + ctrl->intr_shadow = newval; + VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); + } return (0); } +/* + * Once an NMI is injected it blocks delivery of further NMIs until the handler + * executes an IRET. The IRET intercept is enabled when an NMI is injected + * to track when the vcpu is done handling the NMI. + */ +static int +nmi_blocked(struct svm_softc *sc, int vcpu) +{ + int blocked; + + blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, + VMCB_INTCPT_IRET); + return (blocked); +} + static void enable_nmi_blocking(struct svm_softc *sc, int vcpu) { - /* XXX enable iret intercept */ + + VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); + svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET, 1); } -#ifdef notyet static void -clear_nmi_blocking(struct svm_softc *sc, int vcpu) +clear_nmi_blocking(struct svm_softc *sc, int vcpu, int running) { - /* XXX disable iret intercept */ + int error; + + VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); + /* + * When the IRET intercept is cleared the vcpu will attempt to execute + * the "iret" when it runs next. However, it is possible to inject + * another NMI into the vcpu before the "iret" has actually executed. + * + * For example, if the "iret" encounters a #NPF when accessing the stack + * it will trap back into the hypervisor. If an NMI is pending for + * the vcpu it will be injected into the guest. 
+ * + * XXX this needs to be fixed + */ + svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET, 0); + + /* + * Set 'intr_shadow' to prevent an NMI from being injected on the + * immediate VMRUN. + */ + error = svm_modify_intr_shadow(sc, vcpu, running, 1); + KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); } -#endif #ifdef KTR static const char * @@ -1055,6 +1101,8 @@ return ("vintr"); case VMCB_EXIT_MSR: return ("msr"); + case VMCB_EXIT_IRET: + return ("iret"); default: snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); return (reasonbuf); @@ -1149,6 +1197,14 @@ svm_save_intinfo(svm_sc, vcpu); switch (code) { + case VMCB_EXIT_IRET: + /* + * Restart execution at "iret" but with the intercept cleared. + */ + vmexit->inst_length = 0; + clear_nmi_blocking(svm_sc, vcpu, 1); + handled = 1; + break; case VMCB_EXIT_VINTR: /* interrupt window exiting */ vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); handled = 1; @@ -1345,6 +1401,14 @@ */ VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " "to NMI-blocking"); + } else if (ctrl->intr_shadow) { + /* + * Can't inject an NMI if the vcpu is in an intr_shadow. + */ + VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " + "interrupt shadow"); + need_intr_window = 1; + goto done; } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { /* * If there is already an exception/interrupt pending