/*
 * We assume that a unique ASID is needed for each vcpu. This may not be
 * necessary if the TLB entries mapping guest_linear -> host_physical are
 * also tagged as belonging to a particular vcpu (e.g. with the VMCB
 * physical address).
 */

uint32_t max_asid;

struct asid {
        uint32_t num;           /* 0 is reserved for the host */
        uint64_t generation;    /* 0 is invalid */
};

static struct asid host_asid[MAXCPU];

struct svm_vcpu {
        /* Cached value of 'pmap->nptgen'. Must be initialized to 0. */
        long nptgen[MAXCPU];

        /* ASID value of this vcpu. Must be initialized to 0. */
        struct asid asid[MAXCPU];

        /* Host cpu on which this vcpu last ran (used in vm_loop below). */
        int lastcpu;
};

void
asid_init(void)
{
        foreach host cpu {
                host_asid[cpu].num = 1;
                host_asid[cpu].generation = 1;
        }
}

int
check_asid(struct asid *guest_asid)
{
        int flushall = 0;

        KASSERT(inside_critical_section);

        if (guest_asid->generation != host_asid[curcpu].generation) {
                if (host_asid[curcpu].num >= max_asid) {
                        flushall = 1;
                        host_asid[curcpu].num = 1;
                        host_asid[curcpu].generation++;
                }
                guest_asid->generation = host_asid[curcpu].generation;
                guest_asid->num = host_asid[curcpu].num++;
                assert(guest_asid->num > 0 && guest_asid->num < max_asid);
        }
        return (flushall);
}

void
invalidate_asid(struct asid *guest_asid)
{
        guest_asid->generation = 0;     /* invalid generation */
}

void
vm_loop(void)
{
        disable_gintr();

        pmap->pm_active |= (1 << curcpu);

        flush_guest = 0;
        nptgen = pmap->nptgen;

        /* Invalidate stale mappings from when we last ran on this cpu */
        if (vcpustate->lastcpu != curcpu) {
                flush_guest = 1;
                vcpustate->lastcpu = curcpu;
        }

        /* Invalidate stale mappings because the nested mappings have changed */
        if (vcpustate->nptgen[curcpu] != nptgen) {
                flush_guest = 1;
                vcpustate->nptgen[curcpu] = nptgen;
        }

        if (flush_guest)
                invalidate_asid(&vcpustate->asid[curcpu]);

        flushall = check_asid(&vcpustate->asid[curcpu]);
        if (flushall)
                ctrl->tlb_ctrl = TLB_FLUSH_ALL;
        else
                ctrl->tlb_ctrl = TLB_FLUSH_NOTHING;

        /* ... run the vcpu ... */

        pmap->pm_active &= ~(1 << curcpu);
        enable_gintr();
}
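
/*
 * A minimal, self-contained userspace sketch of the generation-based ASID
 * allocator above, for illustration only (this is not the kernel code).
 * NCPU and MAX_ASID are made-up constants, 'cpu' is passed explicitly
 * instead of reading curcpu, and the critical-section requirement is
 * elided. MAX_ASID is kept artificially small so that the rollover path,
 * which forces a full TLB flush, is easy to observe.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NCPU            4
#define MAX_ASID        8       /* tiny on purpose, to force rollover */

struct asid {
        uint32_t num;           /* 0 is reserved for the host */
        uint64_t generation;    /* 0 is invalid */
};

static struct asid host_asid[NCPU];

static void
asid_init(void)
{
        for (int cpu = 0; cpu < NCPU; cpu++) {
                host_asid[cpu].num = 1;         /* 0 belongs to the host */
                host_asid[cpu].generation = 1;  /* 0 marks "invalid" */
        }
}

/*
 * Ensure 'guest_asid' holds an ASID valid on 'cpu'. Returns 1 if the ASID
 * space rolled over, meaning previously handed-out numbers are about to be
 * reused and the caller must flush the entire TLB.
 */
static int
check_asid(int cpu, struct asid *guest_asid)
{
        int flushall = 0;

        if (guest_asid->generation != host_asid[cpu].generation) {
                if (host_asid[cpu].num >= MAX_ASID) {
                        flushall = 1;
                        host_asid[cpu].num = 1;
                        host_asid[cpu].generation++;
                }
                guest_asid->generation = host_asid[cpu].generation;
                guest_asid->num = host_asid[cpu].num++;
                assert(guest_asid->num > 0 && guest_asid->num < MAX_ASID);
        }
        return (flushall);
}

static void
invalidate_asid(struct asid *guest_asid)
{
        guest_asid->generation = 0;     /* never matches a host generation */
}

int
main(void)
{
        struct asid vcpu = { 0, 0 };    /* "must be initialized to 0" */

        asid_init();

        /* Invalidate before every "run" to walk through the ASID space. */
        for (int i = 0; i < 10; i++) {
                invalidate_asid(&vcpu);
                int flushall = check_asid(0, &vcpu);
                printf("run %2d: asid=%u generation=%llu flushall=%d\n",
                    i, vcpu.num, (unsigned long long)vcpu.generation,
                    flushall);
        }
        return (0);
}

/*
 * Note how cheap invalidation is: it just zeroes the generation and never
 * touches the TLB. Stale translations become unreachable because the vcpu
 * next runs under a freshly allocated ASID; an actual full flush is needed
 * only when the ASID space rolls over and numbers start being reused.
 */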