Index: sys/amd64/include/vmm.h
===================================================================
--- sys/amd64/include/vmm.h	(revision 271445)
+++ sys/amd64/include/vmm.h	(working copy)
@@ -82,6 +82,7 @@
 	VM_REG_GUEST_PDPTE1,
 	VM_REG_GUEST_PDPTE2,
 	VM_REG_GUEST_PDPTE3,
+	VM_REG_GUEST_INTR_SHADOW,
 	VM_REG_LAST
 };
 
Index: sys/amd64/vmm/intel/vmx.c
===================================================================
--- sys/amd64/vmm/intel/vmx.c	(revision 271445)
+++ sys/amd64/vmm/intel/vmx.c	(working copy)
@@ -2712,6 +2712,46 @@
 }
 
 static int
+vmx_get_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t *retval)
+{
+	uint64_t gi;
+	int error;
+
+	error = vmcs_getreg(&vmx->vmcs[vcpu], running,
+	    VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY), &gi);
+	*retval = (gi & HWINTR_BLOCKING) ? 1 : 0;
+	return (error);
+}
+
+static int
+vmx_modify_intr_shadow(struct vmx *vmx, int vcpu, int running, uint64_t val)
+{
+	struct vmcs *vmcs;
+	uint64_t gi;
+	int error, ident;
+
+	/*
+	 * Forcing the vcpu into an interrupt shadow is not supported.
+	 */
+	if (val) {
+		error = EINVAL;
+		goto done;
+	}
+
+	vmcs = &vmx->vmcs[vcpu];
+	ident = VMCS_IDENT(VMCS_GUEST_INTERRUPTIBILITY);
+	error = vmcs_getreg(vmcs, running, ident, &gi);
+	if (error == 0) {
+		gi &= ~HWINTR_BLOCKING;
+		error = vmcs_setreg(vmcs, running, ident, gi);
+	}
+done:
+	VCPU_CTR2(vmx->vm, vcpu, "Setting intr_shadow to %#lx %s", val,
+	    error ? "failed" : "succeeded");
+	return (error);
+}
+
+static int
 vmx_shadow_reg(int reg)
 {
 	int shreg;
@@ -2742,6 +2782,9 @@
 	if (running && hostcpu != curcpu)
 		panic("vmx_getreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_get_intr_shadow(vmx, vcpu, running, retval));
+
 	if (vmxctx_getreg(&vmx->ctx[vcpu], reg, retval) == 0)
 		return (0);
 
@@ -2760,6 +2803,9 @@
 	if (running && hostcpu != curcpu)
 		panic("vmx_setreg: %s%d is running", vm_name(vmx->vm), vcpu);
 
+	if (reg == VM_REG_GUEST_INTR_SHADOW)
+		return (vmx_modify_intr_shadow(vmx, vcpu, running, val));
+
 	if (vmxctx_setreg(&vmx->ctx[vcpu], reg, val) == 0)
 		return (0);
 
Index: sys/amd64/vmm/vmm.c
===================================================================
--- sys/amd64/vmm/vmm.c	(revision 271445)
+++ sys/amd64/vmm/vmm.c	(working copy)
@@ -1090,7 +1090,7 @@
 {
 	struct vcpu *vcpu;
 	const char *wmesg;
-	int t, vcpu_halted, vm_halted;
+	int error, t, vcpu_halted, vm_halted;
 
 	KASSERT(!CPU_ISSET(vcpuid, &vm->halted_cpus), ("vcpu already halted"));
 
@@ -1098,6 +1098,22 @@
 	vcpu_halted = 0;
 	vm_halted = 0;
 
+	/*
+	 * The typical way to halt a cpu is to execute: "sti; hlt"
+	 *
+	 * STI sets RFLAGS.IF to enable interrupts. However, the processor
+	 * remains in an "interrupt shadow" for an additional instruction
+	 * following the STI. This guarantees that the "sti; hlt" sequence is
+	 * atomic and a pending interrupt will be recognized after the HLT.
+	 *
+	 * After the HLT emulation is done, the vcpu is no longer in an
+	 * interrupt shadow and a pending interrupt can be injected on
+	 * the next entry into the guest.
+	 */
+	error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+	KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
+	    __func__, error));
+
 	vcpu_lock(vcpu);
 	while (1) {
 		/*
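
For anyone who wants to poke at the new register from userland while reviewing
this, below is a minimal sketch of a consumer. It assumes a VM named "testvm"
already exists (the name and the bare-bones error handling are illustrative
only) and uses the existing libvmmapi wrappers vm_open(), vm_get_register()
and vm_set_register():

/*
 * Hypothetical userland consumer of VM_REG_GUEST_INTR_SHADOW (sketch only).
 * Reading the register yields 1 while vcpu0 is in an interrupt shadow and
 * 0 otherwise; writing 0 clears the shadow.  Writing a non-zero value is
 * rejected with EINVAL by vmx_modify_intr_shadow() in the patch above.
 */
#include <sys/types.h>

#include <machine/vmm.h>

#include <stdio.h>
#include <stdint.h>

#include <vmmapi.h>

int
main(void)
{
	struct vmctx *ctx;
	uint64_t shadow;
	int error, vcpu = 0;

	ctx = vm_open("testvm");	/* hypothetical VM name */
	if (ctx == NULL) {
		fprintf(stderr, "vm_open failed\n");
		return (1);
	}

	error = vm_get_register(ctx, vcpu, VM_REG_GUEST_INTR_SHADOW, &shadow);
	if (error == 0)
		printf("vcpu%d interrupt shadow: %s\n", vcpu,
		    shadow ? "active" : "inactive");

	/* Clearing the shadow is allowed; forcing one (val != 0) is not. */
	error = vm_set_register(ctx, vcpu, VM_REG_GUEST_INTR_SHADOW, 0);

	return (error);
}

The asymmetry (get reports the shadow state, set only accepts 0) mirrors the
kernel side of the patch: an interrupt shadow is a transient side effect of
STI or MOV SS, so a caller such as the HLT handler may dissolve one, but
fabricating a shadow from outside the guest is deliberately unsupported.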