From: Paolo Bonzini <pbonzini@redhat.com>
Subject: [FYI PATCH 04/20] KVM: x86: introduce lapic_in_kernel
Date: 2015-09-28
    Avoid pointer chasing and memory barriers, and simplify the code
    when split irqchip (LAPIC in kernel, IOAPIC/PIC in userspace)
    is introduced.

    Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
    ---
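    For reference, here is a sketch of the old check next to the new one
    (reconstructed from the kernel sources of this period; the exact body
    of irqchip_in_kernel() may differ slightly between versions):

    /* Old: chases the kvm->arch.vpic pointer via pic_irqchip() and pays
     * a read barrier on every call, even when the caller already holds
     * the vcpu.
     */
    static inline int irqchip_in_kernel(struct kvm *kvm)
    {
            struct kvm_pic *vpic = pic_irqchip(kvm);

            /* Read vpic before kvm->irq_routing.  */
            smp_rmb();
            return vpic != NULL;
    }

    /* New: a plain NULL test on a per-vcpu field.  vcpu->arch.apic is
     * set once, at vcpu creation, when the in-kernel LAPIC is enabled,
     * so no barrier is needed and the caller already has the vcpu
     * pointer in hand.
     */
    static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
    {
            return vcpu->arch.apic != NULL;
    }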
    arch/x86/kvm/irq.c | 6 +++---
    arch/x86/kvm/irq.h | 8 ++++++++
    arch/x86/kvm/lapic.c | 4 ++--
    arch/x86/kvm/mmu.c | 2 +-
    arch/x86/kvm/svm.c | 4 ++--
    arch/x86/kvm/vmx.c | 46 ++++++++++++++++++++--------------------------
    arch/x86/kvm/x86.c | 18 +++++++++---------
    7 files changed, 45 insertions(+), 43 deletions(-)

    diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
    index c0dad893dc59..b653ae202c8e 100644
    --- a/arch/x86/kvm/irq.c
    +++ b/arch/x86/kvm/irq.c
    @@ -57,7 +57,7 @@ static int kvm_cpu_has_extint(struct kvm_vcpu *v)
    */
    int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
    {
    - if (!irqchip_in_kernel(v->kvm))
    + if (!lapic_in_kernel(v))
    return v->arch.interrupt.pending;

    if (kvm_cpu_has_extint(v))
    @@ -75,7 +75,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
    */
    int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
    {
    - if (!irqchip_in_kernel(v->kvm))
    + if (!lapic_in_kernel(v))
    return v->arch.interrupt.pending;

    if (kvm_cpu_has_extint(v))
    @@ -103,7 +103,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v)
    {
    int vector;

    - if (!irqchip_in_kernel(v->kvm))
    + if (!lapic_in_kernel(v))
    return v->arch.interrupt.nr;

    vector = kvm_cpu_get_extint(v);
    diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
    index 3d782a2c336a..9e6e7e04de98 100644
    --- a/arch/x86/kvm/irq.h
    +++ b/arch/x86/kvm/irq.h
    @@ -92,6 +92,14 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
    return vpic != NULL;
    }

    +static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
    +{
    + /* Same as irqchip_in_kernel(vcpu->kvm), but with less
    + * pointer chasing and no unnecessary memory barriers.
    + */
    + return vcpu->arch.apic != NULL;
    +}
    +
    void kvm_pic_reset(struct kvm_kpic_state *s);

    void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
    diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
    index c568d69c7060..c4bcc86d6dc4 100644
    --- a/arch/x86/kvm/lapic.c
    +++ b/arch/x86/kvm/lapic.c
    @@ -1985,7 +1985,7 @@ int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
    struct kvm_lapic *apic = vcpu->arch.apic;
    u32 reg = (msr - APIC_BASE_MSR) << 4;

    - if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
    + if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
    return 1;

    if (reg == APIC_ICR2)
    @@ -2002,7 +2002,7 @@ int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
    struct kvm_lapic *apic = vcpu->arch.apic;
    u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

    - if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
    + if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
    return 1;

    if (reg == APIC_DFR || reg == APIC_ICR2) {
    diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
    index ff606f507913..c3f39aa9b9cb 100644
    --- a/arch/x86/kvm/mmu.c
    +++ b/arch/x86/kvm/mmu.c
    @@ -3427,7 +3427,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)

    static bool can_do_async_pf(struct kvm_vcpu *vcpu)
    {
    - if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
    + if (unlikely(!lapic_in_kernel(vcpu) ||
    kvm_event_needs_reinjection(vcpu)))
    return false;

    diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
    index 98889c882ced..89d278a0ad37 100644
    --- a/arch/x86/kvm/svm.c
    +++ b/arch/x86/kvm/svm.c
    @@ -3154,7 +3154,7 @@ static int cr8_write_interception(struct vcpu_svm *svm)
    u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
    /* instruction emulation calls kvm_set_cr8() */
    r = cr_interception(svm);
    - if (irqchip_in_kernel(svm->vcpu.kvm))
    + if (lapic_in_kernel(&svm->vcpu))
    return r;
    if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
    return r;
    @@ -3409,7 +3409,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
    * If the user space waits to inject interrupts, exit as soon as
    * possible
    */
    - if (!irqchip_in_kernel(svm->vcpu.kvm) &&
    + if (!lapic_in_kernel(&svm->vcpu) &&
    kvm_run->request_interrupt_window &&
    !kvm_cpu_has_interrupt(&svm->vcpu)) {
    kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
    diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
    index 32a38494dd6f..d5b87be89631 100644
    --- a/arch/x86/kvm/vmx.c
    +++ b/arch/x86/kvm/vmx.c
    @@ -809,7 +809,6 @@ static void kvm_cpu_vmxon(u64 addr);
    static void kvm_cpu_vmxoff(void);
    static bool vmx_mpx_supported(void);
    static bool vmx_xsaves_supported(void);
    -static int vmx_vm_has_apicv(struct kvm *kvm);
    static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
    static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
    static void vmx_set_segment(struct kvm_vcpu *vcpu,
    @@ -947,9 +946,9 @@ static inline bool cpu_has_vmx_tpr_shadow(void)
    return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
    }

    -static inline bool vm_need_tpr_shadow(struct kvm *kvm)
    +static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
    {
    - return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
    + return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
    }

    static inline bool cpu_has_secondary_exec_ctrls(void)
    @@ -1063,9 +1062,9 @@ static inline bool cpu_has_vmx_ple(void)
    SECONDARY_EXEC_PAUSE_LOOP_EXITING;
    }

    -static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
    +static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
    {
    - return flexpriority_enabled && irqchip_in_kernel(kvm);
    + return flexpriority_enabled && lapic_in_kernel(vcpu);
    }

    static inline bool cpu_has_vmx_vpid(void)
    @@ -2378,7 +2377,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
    vmx->nested.nested_vmx_pinbased_ctls_high |=
    PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
    PIN_BASED_VMX_PREEMPTION_TIMER;
    - if (vmx_vm_has_apicv(vmx->vcpu.kvm))
    + if (vmx_cpu_uses_apicv(&vmx->vcpu))
    vmx->nested.nested_vmx_pinbased_ctls_high |=
    PIN_BASED_POSTED_INTR;

    @@ -4333,14 +4332,9 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
    msr, MSR_TYPE_W);
    }

    -static int vmx_vm_has_apicv(struct kvm *kvm)
    -{
    - return enable_apicv && irqchip_in_kernel(kvm);
    -}
    -
    static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
    {
    - return vmx_vm_has_apicv(vcpu->kvm);
    + return enable_apicv && lapic_in_kernel(vcpu);
    }

    static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
    @@ -4520,7 +4514,7 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
    {
    u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;

    - if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
    + if (!vmx_cpu_uses_apicv(&vmx->vcpu))
    pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
    return pin_based_exec_ctrl;
    }
    @@ -4532,7 +4526,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
    if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
    exec_control &= ~CPU_BASED_MOV_DR_EXITING;

    - if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
    + if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
    exec_control &= ~CPU_BASED_TPR_SHADOW;
    #ifdef CONFIG_X86_64
    exec_control |= CPU_BASED_CR8_STORE_EXITING |
    @@ -4549,7 +4543,7 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
    static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
    {
    u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
    - if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
    + if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu))
    exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
    if (vmx->vpid == 0)
    exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
    @@ -4563,7 +4557,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
    exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
    if (!ple_gap)
    exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
    - if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
    + if (!vmx_cpu_uses_apicv(&vmx->vcpu))
    exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
    SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
    exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
    @@ -4624,7 +4618,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
    vmx_secondary_exec_control(vmx));
    }

    - if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
    + if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
    vmcs_write64(EOI_EXIT_BITMAP0, 0);
    vmcs_write64(EOI_EXIT_BITMAP1, 0);
    vmcs_write64(EOI_EXIT_BITMAP2, 0);
    @@ -4768,7 +4762,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)

    if (cpu_has_vmx_tpr_shadow() && !init_event) {
    vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
    - if (vm_need_tpr_shadow(vcpu->kvm))
    + if (cpu_need_tpr_shadow(vcpu))
    vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
    __pa(vcpu->arch.apic->regs));
    vmcs_write32(TPR_THRESHOLD, 0);
    @@ -4776,7 +4770,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)

    kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

    - if (vmx_vm_has_apicv(vcpu->kvm))
    + if (vmx_cpu_uses_apicv(vcpu))
    memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));

    if (vmx->vpid != 0)
    @@ -5316,7 +5310,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
    u8 cr8 = (u8)val;
    err = kvm_set_cr8(vcpu, cr8);
    kvm_complete_insn_gp(vcpu, err);
    - if (irqchip_in_kernel(vcpu->kvm))
    + if (lapic_in_kernel(vcpu))
    return 1;
    if (cr8_prev <= cr8)
    return 1;
    @@ -5535,7 +5529,7 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
    * If the user space waits to inject interrupts, exit as soon as
    * possible
    */
    - if (!irqchip_in_kernel(vcpu->kvm) &&
    + if (!lapic_in_kernel(vcpu) &&
    vcpu->run->request_interrupt_window &&
    !kvm_cpu_has_interrupt(vcpu)) {
    vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
    @@ -7944,10 +7938,10 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
    * apicv
    */
    if (!cpu_has_vmx_virtualize_x2apic_mode() ||
    - !vmx_vm_has_apicv(vcpu->kvm))
    + !vmx_cpu_uses_apicv(vcpu))
    return;

    - if (!vm_need_tpr_shadow(vcpu->kvm))
    + if (!cpu_need_tpr_shadow(vcpu))
    return;

    sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
    @@ -8052,7 +8046,7 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
    static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu)
    {
    u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap;
    - if (!vmx_vm_has_apicv(vcpu->kvm))
    + if (!vmx_cpu_uses_apicv(vcpu))
    return;

    vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
    @@ -8551,7 +8545,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
    put_cpu();
    if (err)
    goto free_vmcs;
    - if (vm_need_virtualize_apic_accesses(kvm)) {
    + if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
    err = alloc_apic_access_page(kvm);
    if (err)
    goto free_vmcs;
    @@ -9339,7 +9333,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
    vmcs_write64(APIC_ACCESS_ADDR,
    page_to_phys(vmx->nested.apic_access_page));
    } else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
    - (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))) {
    + cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
    exec_control |=
    SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
    kvm_vcpu_reload_apic_access_page(vcpu);
    diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
    index 7a9af61dad07..e25bc4e2b7a9 100644
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
    @@ -788,7 +788,7 @@ int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
    {
    if (cr8 & CR8_RESERVED_BITS)
    return 1;
    - if (irqchip_in_kernel(vcpu->kvm))
    + if (lapic_in_kernel(vcpu))
    kvm_lapic_set_tpr(vcpu, cr8);
    else
    vcpu->arch.cr8 = cr8;
    @@ -798,7 +798,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr8);

    unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
    {
    - if (irqchip_in_kernel(vcpu->kvm))
    + if (lapic_in_kernel(vcpu))
    return kvm_lapic_get_cr8(vcpu);
    else
    return vcpu->arch.cr8;
    @@ -3179,7 +3179,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
    struct kvm_vapic_addr va;

    r = -EINVAL;
    - if (!irqchip_in_kernel(vcpu->kvm))
    + if (!lapic_in_kernel(vcpu))
    goto out;
    r = -EFAULT;
    if (copy_from_user(&va, argp, sizeof va))
    @@ -5670,7 +5670,7 @@ void kvm_arch_exit(void)
    int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
    {
    ++vcpu->stat.halt_exits;
    - if (irqchip_in_kernel(vcpu->kvm)) {
    + if (lapic_in_kernel(vcpu)) {
    vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
    return 1;
    } else {
    @@ -6166,7 +6166,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
    {
    struct page *page = NULL;

    - if (!irqchip_in_kernel(vcpu->kvm))
    + if (!lapic_in_kernel(vcpu))
    return;

    if (!kvm_x86_ops->set_apic_access_page_addr)
    @@ -6204,7 +6204,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
    static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
    {
    int r;
    - bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
    + bool req_int_win = !lapic_in_kernel(vcpu) &&
    vcpu->run->request_interrupt_window;
    bool req_immediate_exit = false;

    @@ -6601,7 +6601,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
    }

    /* re-sync apic's tpr */
    - if (!irqchip_in_kernel(vcpu->kvm)) {
    + if (!lapic_in_kernel(vcpu)) {
    if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
    r = -EINVAL;
    goto out;
    @@ -7301,7 +7301,7 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)

    bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
    {
    - return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
    + return irqchip_in_kernel(vcpu->kvm) == lapic_in_kernel(vcpu);
    }

    struct static_key kvm_no_apic_vcpu __read_mostly;
    @@ -7395,7 +7395,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
    kvm_mmu_destroy(vcpu);
    srcu_read_unlock(&vcpu->kvm->srcu, idx);
    free_page((unsigned long)vcpu->arch.pio_data);
    - if (!irqchip_in_kernel(vcpu->kvm))
    + if (!lapic_in_kernel(vcpu))
    static_key_slow_dec(&kvm_no_apic_vcpu);
    }

    --
    1.8.3.1


