Index: sys/amd64/include/apicvar.h
===================================================================
--- sys/amd64/include/apicvar.h	(revision 243836)
+++ sys/amd64/include/apicvar.h	(working copy)
@@ -209,6 +209,7 @@
 void	lapic_eoi(void);
 int	lapic_id(void);
 void	lapic_init(vm_paddr_t addr);
+void	lapic_init_ap(void);
 int	lapic_intr_pending(u_int vector);
 void	lapic_ipi_raw(register_t icrlo, u_int dest);
 void	lapic_ipi_vectored(u_int vector, int dest);
Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c	(revision 243836)
+++ sys/amd64/amd64/mp_machdep.c	(working copy)
@@ -708,6 +708,8 @@
 	wrmsr(MSR_STAR, msr);
 	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);
 
+	lapic_init_ap();
+
 	/* Disable local APIC just to be sure. */
 	lapic_disable();
 
Index: sys/x86/x86/local_apic.c
===================================================================
--- sys/x86/x86/local_apic.c	(revision 243836)
+++ sys/x86/x86/local_apic.c	(working copy)
@@ -50,12 +50,14 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -158,8 +160,16 @@
 vm_paddr_t lapic_paddr;
 static u_long lapic_timer_divisor;
 static struct eventtimer lapic_et;
+
 static int x2apic;
+SYSCTL_INT(_machdep, OID_AUTO, x2apic, CTLFLAG_RD, &x2apic, 0, "x2apic mode");
+static int x2apic_desired = -1;	/* enable only if running in a VM */
+TUNABLE_INT("machdep.x2apic_desired", &x2apic_desired);
+SYSCTL_INT(_machdep, OID_AUTO, x2apic_desired, CTLFLAG_RDTUN,
+    &x2apic_desired, 0,
+    "0 (disable), 1 (enable), -1 (leave it up to the kernel)");
+
 static void	lapic_enable(void);
 static void	lapic_resume(struct pic *pic);
 static void	lapic_timer_oneshot(struct lapic *,
@@ -247,6 +257,17 @@
 	return (value);
 }
 
+static void
+x2apic_init(void)
+{
+	uint64_t apic_base;
+
+	apic_base = rdmsr(MSR_APICBASE);
+
+	if ((apic_base & APICBASE_X2APIC) == 0)
+		wrmsr(MSR_APICBASE, apic_base | APICBASE_X2APIC);
+}
+
 /*
  * Map the local APIC and setup necessary interrupt vectors.
  */
@@ -256,9 +277,21 @@
 	u_int regs[4];
 	int i, arat;
 
-	if ((cpu_feature2 & CPUID2_X2APIC) != 0 &&
-	    (rdmsr(MSR_APICBASE) & APICBASE_X2APIC) != 0) {
-		x2apic = 1;
+	if ((cpu_feature2 & CPUID2_X2APIC) != 0) {
+		if (rdmsr(MSR_APICBASE) & APICBASE_X2APIC)
+			x2apic = 1;
+		else if (x2apic_desired != 0) {
+			/*
+			 * The default behavior is to enable x2apic only if
+			 * the kernel is executing inside a virtual machine.
+			 */
+			if (vm_guest != VM_GUEST_NO || x2apic_desired == 1)
+				x2apic = 1;
+		}
+	}
+
+	if (x2apic) {
+		x2apic_init();
 		if (bootverbose)
 			printf("Local APIC access using x2APIC MSRs\n");
 	} else {
@@ -317,6 +350,14 @@
 	}
 }
 
+void
+lapic_init_ap(void)
+{
+
+	if (x2apic)
+		x2apic_init();
+}
+
 /*
  * Create a local APIC instance.
  */
@@ -934,9 +975,26 @@
 lapic_set_icr(uint64_t value)
 {
 
-	if (x2apic)
+	/*
+	 * Access to x2apic MSR registers is not a serializing condition.
+	 *
+	 * A number of IPI handlers (e.g. rendezvous, tlb shootdown)
+	 * depend on shared state in memory between the cpu that
+	 * originated the IPI and the cpus that are the target.
+	 *
+	 * Insert a memory barrier to ensure that changes to memory
+	 * are globally visible to the other cpus.
+	 */
+	if (x2apic) {
+		/*
+		 * XXX
+		 * Intel's architecture spec seems to suggest that an
+		 * "sfence" should be sufficient here but empirically
+		 * an "mfence" is required to do the job.
+		 */
+		mb();
 		wrmsr(MSR_APIC_ICR, value);
-	else {
+	} else {
 		lapic->icr_hi = value >> 32;
 		lapic->icr_lo = value;
 	}
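
With the patch applied, the new knob can be exercised at boot time: machdep.x2apic_desired is
a loader tunable (declared with TUNABLE_INT/CTLFLAG_RDTUN), and the read-only machdep.x2apic
sysctl reports the mode the kernel actually selected. With the default of -1, x2APIC is only
enabled when vm_guest indicates the kernel is running under a hypervisor. One possible way to
force it on a bare-metal test box (the sysctl value shown is only illustrative):

	# /boot/loader.conf
	machdep.x2apic_desired=1

	$ sysctl machdep.x2apic
	machdep.x2apic: 1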