Index: sys/amd64/vmm/amd/svm.c
===================================================================
--- sys/amd64/vmm/amd/svm.c	(revision 271939)
+++ sys/amd64/vmm/amd/svm.c	(working copy)
@@ -126,11 +126,6 @@
  */
 static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE);
 
-/*
- * S/w saved host context.
- */
-static struct svm_regctx host_ctx[MAXCPU];
-
 static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery");
 static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry");
 static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window");
@@ -679,7 +674,7 @@
 {
 	uint64_t val;
 
-	val = in ? regs->e.g.sctx_rdi : regs->e.g.sctx_rsi;
+	val = in ? regs->sctx_rdi : regs->sctx_rsi;
 
 	return (val);
 }
@@ -1156,7 +1151,7 @@
 		state = svm_get_vmcb_state(sc, vcpu);
 		ctx = svm_get_guest_regctx(sc, vcpu);
 		state->rax = result & 0xffffffff;
-		ctx->e.g.sctx_rdx = result >> 32;
+		ctx->sctx_rdx = result >> 32;
 	}
 
 	return (error);
@@ -1315,7 +1310,7 @@
 	case VMCB_EXIT_MSR:	/* MSR access. */
 		eax = state->rax;
 		ecx = ctx->sctx_rcx;
-		edx = ctx->e.g.sctx_rdx;
+		edx = ctx->sctx_rdx;
 		retu = false;
 
 		if (info1) {
@@ -1357,7 +1352,7 @@
 		    (uint32_t *)&state->rax,
 		    (uint32_t *)&ctx->sctx_rbx,
 		    (uint32_t *)&ctx->sctx_rcx,
-		    (uint32_t *)&ctx->e.g.sctx_rdx);
+		    (uint32_t *)&ctx->sctx_rdx);
 		break;
 	case VMCB_EXIT_HLT:
 		vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1);
@@ -1775,7 +1770,7 @@
 svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap,
 	void *rend_cookie, void *suspended_cookie)
 {
-	struct svm_regctx *hctx, *gctx;
+	struct svm_regctx *gctx;
 	struct svm_softc *svm_sc;
 	struct svm_vcpu *vcpustate;
 	struct vmcb_state *state;
@@ -1806,7 +1801,6 @@
 	thiscpu = curcpu;
 
 	gctx = svm_get_guest_regctx(svm_sc, vcpu);
-	hctx = &host_ctx[thiscpu];
 	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;
 
 	if (vcpustate->lastcpu != thiscpu) {
@@ -1885,7 +1879,7 @@
 
 		/* Launch Virtual Machine. */
 		VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip);
-		svm_launch(vmcb_pa, gctx, hctx);
+		svm_launch(vmcb_pa, gctx);
 
 		CPU_CLR_ATOMIC(thiscpu, &pmap->pm_active);
 
@@ -1950,11 +1944,11 @@
 	case VM_REG_GUEST_RCX:
 		return (&regctx->sctx_rcx);
 	case VM_REG_GUEST_RDX:
-		return (&regctx->e.g.sctx_rdx);
+		return (&regctx->sctx_rdx);
 	case VM_REG_GUEST_RDI:
-		return (&regctx->e.g.sctx_rdi);
+		return (&regctx->sctx_rdi);
 	case VM_REG_GUEST_RSI:
-		return (&regctx->e.g.sctx_rsi);
+		return (&regctx->sctx_rsi);
 	case VM_REG_GUEST_RBP:
		return (&regctx->sctx_rbp);
 	case VM_REG_GUEST_R8:
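For reference, the guest GPR round trip after this change, as seen from C: registers that SVM does not keep in the VMCB are loaded from the svm_regctx before VMRUN and written back to it on #VMEXIT. A minimal sketch using only names that appear in this patch (VMCB setup and error handling omitted):

	struct svm_regctx *gctx;
	uint64_t vmcb_pa;

	gctx = svm_get_guest_regctx(svm_sc, vcpu);
	vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa;

	gctx->sctx_rdi = 0;		/* becomes the guest %rdi at VMRUN */
	svm_launch(vmcb_pa, gctx);	/* VMLOAD + VMRUN + VMSAVE */
	/* gctx->sctx_* now holds the guest GPRs captured at #VMEXIT */
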
Index: sys/amd64/vmm/amd/svm.h
===================================================================
--- sys/amd64/vmm/amd/svm.h	(revision 271939)
+++ sys/amd64/vmm/amd/svm.h	(working copy)
@@ -34,33 +34,15 @@
 	printf("SVM ERROR:%s " fmt "\n", __func__, ##args);
 
 /*
- * Software saved machine state for guest and host.
+ * Guest register state that is saved outside the VMCB.
  */
-
-/* Additional guest register state */
-struct svm_gctx {
+struct svm_regctx {
+	register_t	sctx_rbp;
+	register_t	sctx_rbx;
+	register_t	sctx_rcx;
 	register_t	sctx_rdx;
 	register_t	sctx_rdi;
 	register_t	sctx_rsi;
-	/* Points to host context area. */
-	register_t	sctx_hostctx_base;
-};
-
-/* Additional host register state */
-struct svm_hctx {
-	uint16_t	sctx_fs;
-	uint16_t	sctx_gs;
-
-	register_t	sctx_rsp;
-};
-
-/* Common register context area for guest and host.
- */
-struct svm_regctx {
-	register_t	sctx_rbp;
-
-	register_t	sctx_rbx;
-	register_t	sctx_rcx;
-
 	register_t	sctx_r8;
 	register_t	sctx_r9;
 	register_t	sctx_r10;
@@ -69,14 +51,9 @@
 	register_t	sctx_r13;
 	register_t	sctx_r14;
 	register_t	sctx_r15;
-
-	union {
-		struct svm_hctx h;	/* host-specific register state */
-		struct svm_gctx g;	/* guest-specific register state */
-	} e;
 };
 
-void svm_launch(uint64_t pa, struct svm_regctx *, struct svm_regctx *);
+void svm_launch(uint64_t pa, struct svm_regctx *);
 
 static __inline void
 disable_gintr(void)
Index: sys/amd64/vmm/amd/svm_genassym.c
===================================================================
--- sys/amd64/vmm/amd/svm_genassym.c	(revision 271922)
+++ sys/amd64/vmm/amd/svm_genassym.c	(working copy)
@@ -35,7 +35,9 @@
 ASSYM(SCTX_RBX, offsetof(struct svm_regctx, sctx_rbx));
 ASSYM(SCTX_RCX, offsetof(struct svm_regctx, sctx_rcx));
 ASSYM(SCTX_RBP, offsetof(struct svm_regctx, sctx_rbp));
-
+ASSYM(SCTX_RDX, offsetof(struct svm_regctx, sctx_rdx));
+ASSYM(SCTX_RDI, offsetof(struct svm_regctx, sctx_rdi));
+ASSYM(SCTX_RSI, offsetof(struct svm_regctx, sctx_rsi));
 ASSYM(SCTX_R8, offsetof(struct svm_regctx, sctx_r8));
 ASSYM(SCTX_R9, offsetof(struct svm_regctx, sctx_r9));
 ASSYM(SCTX_R10, offsetof(struct svm_regctx, sctx_r10));
@@ -44,14 +46,3 @@
 ASSYM(SCTX_R13, offsetof(struct svm_regctx, sctx_r13));
 ASSYM(SCTX_R14, offsetof(struct svm_regctx, sctx_r14));
 ASSYM(SCTX_R15, offsetof(struct svm_regctx, sctx_r15));
-
-/* Guest only registers. */
-ASSYM(SCTX_GUEST_RDX, offsetof(struct svm_regctx, e.g.sctx_rdx));
-ASSYM(SCTX_GUEST_RDI, offsetof(struct svm_regctx, e.g.sctx_rdi));
-ASSYM(SCTX_GUEST_RSI, offsetof(struct svm_regctx, e.g.sctx_rsi));
-ASSYM(SCTX_GUEST_HCTX_BASE, offsetof(struct svm_regctx, e.g.sctx_hostctx_base));
-
-/* Host only registers. */
-ASSYM(SCTX_HOST_GS, offsetof(struct svm_regctx, e.h.sctx_gs));
-ASSYM(SCTX_HOST_FS, offsetof(struct svm_regctx, e.h.sctx_fs));
-ASSYM(SCTX_HOST_RSP, offsetof(struct svm_regctx, e.h.sctx_rsp));
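The ASSYM() entries above are turned into assembler-visible constants (svm_assym.s, included by svm_support.S below) so the assembly can index struct svm_regctx without hard-coding offsets. Assuming an 8-byte register_t and no padding, the generated constants would look roughly like this (illustrative values, not actual build output):

	#define	SCTX_RBP	0x00
	#define	SCTX_RBX	0x08
	#define	SCTX_RCX	0x10
	#define	SCTX_RDX	0x18
	#define	SCTX_RDI	0x20
	#define	SCTX_RSI	0x28
	#define	SCTX_R8		0x30
	/* ... through SCTX_R15 at 0x68 */
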
Index: sys/amd64/vmm/amd/svm_support.S
===================================================================
--- sys/amd64/vmm/amd/svm_support.S	(revision 271922)
+++ sys/amd64/vmm/amd/svm_support.S	(working copy)
@@ -28,110 +28,88 @@
 #include "svm_assym.s"
 
 /*
- * Macros to save and restore GPRs.
+ * Be friendly to DTrace FBT's prologue/epilogue pattern matching.
+ *
+ * They are also responsible for saving/restoring the host %rbp across VMRUN.
  */
-#define SAVE_GPR_STATE(reg);			\
-	movq %rbp, SCTX_RBP(reg);		\
-	movq %rbx, SCTX_RBX(reg);		\
-	movq %rcx, SCTX_RCX(reg);		\
-	movq %r8, SCTX_R8(reg);			\
-	movq %r9, SCTX_R9(reg);			\
-	movq %r10, SCTX_R10(reg);		\
-	movq %r11, SCTX_R11(reg);		\
-	movq %r12, SCTX_R12(reg);		\
-	movq %r13, SCTX_R13(reg);		\
-	movq %r14, SCTX_R14(reg);		\
-	movq %r15, SCTX_R15(reg);		\
+#define	VENTER	push %rbp ; mov %rsp,%rbp
+#define	VLEAVE	pop %rbp
 
-#define LOAD_GPR_STATE(reg)			\
-	movq SCTX_RBP(reg), %rbp;		\
-	movq SCTX_RBX(reg), %rbx;		\
-	movq SCTX_RCX(reg), %rcx;		\
-	movq SCTX_R8(reg), %r8;			\
-	movq SCTX_R9(reg), %r9;			\
-	movq SCTX_R10(reg), %r10;		\
-	movq SCTX_R11(reg), %r11;		\
-	movq SCTX_R12(reg), %r12;		\
-	movq SCTX_R13(reg), %r13;		\
-	movq SCTX_R14(reg), %r14;		\
-	movq SCTX_R15(reg), %r15;		\
-
 /*
- * Macros to save and restore vcpu registers which are not
- * done by SVM.
+ * svm_launch(uint64_t vmcb, struct svm_regctx *gctx)
+ * %rdi: physical address of VMCB
+ * %rsi: pointer to guest context
  */
-#define SAVE_GUEST_STATE(reg)			\
-	movq %rdi, SCTX_GUEST_RDI(reg);		\
-	movq %rsi, SCTX_GUEST_RSI(reg);		\
-	movq %rdx, SCTX_GUEST_RDX(reg);		\
-	SAVE_GPR_STATE(reg)
+ENTRY(svm_launch)
+	VENTER
 
-#define LOAD_GUEST_STATE(reg)			\
-	movq SCTX_GUEST_RDI(reg), %rdi;		\
-	movq SCTX_GUEST_RSI(reg), %rsi;		\
-	movq SCTX_GUEST_RDX(reg), %rdx;		\
-	LOAD_GPR_STATE(reg)
+	/*
+	 * Host register state saved across a VMRUN.
+	 *
+	 * All "callee saved registers" except:
+	 * %rsp: because it is preserved by the processor across VMRUN.
+	 * %rbp: because it is saved/restored by the function prologue/epilogue.
+	 */
+	push %rbx
+	push %r12
+	push %r13
+	push %r14
+	push %r15
 
-/*
- * Macros to save and restore host registers which are not
- * saved by SVM.
- */
-#define SAVE_HOST_STATE(reg)			\
-	mov %fs, SCTX_HOST_FS(reg);		\
-	mov %gs, SCTX_HOST_GS(reg);		\
-	movq %rsp, SCTX_HOST_RSP(reg);		\
-	SAVE_GPR_STATE(reg)
+	/* Save the physical address of the VMCB in %rax */
+	movq %rdi, %rax
 
-#define LOAD_HOST_STATE(reg)			\
-	mov SCTX_HOST_FS(reg), %fs;		\
-	mov SCTX_HOST_GS(reg), %gs;		\
-	movq SCTX_HOST_RSP(reg), %rsp;		\
-	LOAD_GPR_STATE(reg)
+	push %rsi		/* push guest context pointer on the stack */
 
-/*
- * This is where virtual machine vcpu start execution.
- * int svm_launch(vmcb_pa, gswctx, hswctx)
- *	vmcb_pa - VMCB physical address is in %rdi.
- *	gswctx - Guest context is in %rsi.
- *	hswctx - Host context is in %rdx.
- *
- * Note: SVM guarantees host RSP and RAX will be restored
- * back after guest exit. RAX is where VMCB Phy addr is so
- * we are left with only RSP. RSP will hold base for guest
- * software context which will have base for host software
- * context.
- */
-ENTRY(svm_launch)
-
-	/* Save host GPRs. */
-	SAVE_HOST_STATE(%rdx)
-
 	/*
-	 * Move the parameters to final destinations.
-	 * RAX - VMCB phy addr.
-	 * RSP - Guest software context.
-	 * SCTX_GUEST_HOST(guest) - Host software context.
+	 * Restore guest state.
 	 */
-	movq %rdi, %rax
-	movq %rsi, %rsp
-	movq %rdx, SCTX_GUEST_HCTX_BASE(%rsp)
+	movq SCTX_R8(%rsi), %r8
+	movq SCTX_R9(%rsi), %r9
+	movq SCTX_R10(%rsi), %r10
+	movq SCTX_R11(%rsi), %r11
+	movq SCTX_R12(%rsi), %r12
+	movq SCTX_R13(%rsi), %r13
+	movq SCTX_R14(%rsi), %r14
+	movq SCTX_R15(%rsi), %r15
+	movq SCTX_RBP(%rsi), %rbp
+	movq SCTX_RBX(%rsi), %rbx
+	movq SCTX_RCX(%rsi), %rcx
+	movq SCTX_RDX(%rsi), %rdx
+	movq SCTX_RDI(%rsi), %rdi
+	movq SCTX_RSI(%rsi), %rsi	/* %rsi must be restored last */
 
-	/* Load guest context. */
-	LOAD_GUEST_STATE(%rsp)
-
 	vmload %rax
 	vmrun %rax
 	vmsave %rax
 
-	/* Save guest state. */
-	SAVE_GUEST_STATE(%rsp)
+	pop %rax		/* pop guest context pointer from the stack */
 
-	/* Restore host context base in RDX. */
-	movq SCTX_GUEST_HCTX_BASE(%rsp), %rdx
-	/* Restore host GPRs. */
-	LOAD_HOST_STATE(%rdx)
+	/*
+	 * Save guest state.
+	 */
+	movq %r8, SCTX_R8(%rax)
+	movq %r9, SCTX_R9(%rax)
+	movq %r10, SCTX_R10(%rax)
+	movq %r11, SCTX_R11(%rax)
+	movq %r12, SCTX_R12(%rax)
+	movq %r13, SCTX_R13(%rax)
+	movq %r14, SCTX_R14(%rax)
+	movq %r15, SCTX_R15(%rax)
+	movq %rbp, SCTX_RBP(%rax)
+	movq %rbx, SCTX_RBX(%rax)
+	movq %rcx, SCTX_RCX(%rax)
+	movq %rdx, SCTX_RDX(%rax)
+	movq %rdi, SCTX_RDI(%rax)
+	movq %rsi, SCTX_RSI(%rax)
+
+	/* Restore host state */
+	pop %r15
+	pop %r14
+	pop %r13
+	pop %r12
+	pop %rbx
+
+	VLEAVE
 	ret
 END(svm_launch)
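For reference, the host stack during guest execution implied by the entry sequence above; the processor preserves the host %rsp across VMRUN, so this frame is intact when execution resumes after the vmsave (a sketch, higher addresses first):

	/*
	 * return address		<- pushed by the call to svm_launch
	 * saved host %rbp		<- pushed by VENTER
	 * saved host %rbx
	 * saved host %r12
	 * saved host %r13
	 * saved host %r14
	 * saved host %r15
	 * guest context pointer	<- top of stack at VMRUN; popped into
	 *				   %rax after #VMEXIT
	 */

Only %rbx and %r12-%r15 need explicit saving: they are the callee-saved registers of the amd64 C ABI, and the compiled caller already assumes every caller-saved register is clobbered by the call.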