    From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
    Subject: [PATCH v3 07/16] svm: Add support for setup/destroy virtual APIC backing page for AVIC
    Date: 13 Sep 2019
    Activating/deactivating AVIC requires setting/unsetting the memory region
    used for the virtual APIC backing page (APIC_ACCESS_PAGE_PRIVATE_MEMSLOT).
    Therefore, re-factor avic_init_access_page() into avic_setup_access_page()
    and add the srcu_read_lock()/srcu_read_unlock() calls needed to allow this
    function to be called at run-time (see the sketch below).

    Also, introduce avic_destroy_access_page() to unset the page when
    deactivating AVIC.
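
    For reference, the srcu_read_unlock()/srcu_read_lock() dance is needed
    because __x86_set_memory_region() synchronizes kvm->srcu when it updates
    the memslots, which would deadlock if the calling vCPU were still holding
    its kvm->srcu read lock (as it does on the KVM_RUN path, but not during
    vCPU creation, hence the "init" parameter). Below is a minimal sketch of
    how a run-time toggle might use the two helpers; svm_refresh_avic_access_page()
    is a made-up name for illustration, while apicv_lock/apicv_state come from
    earlier patches in this series:

    /* Illustrative only: not part of this patch. */
    static void svm_refresh_avic_access_page(struct kvm_vcpu *vcpu)
    {
            struct kvm *kvm = vcpu->kvm;

            mutex_lock(&kvm->arch.apicv_lock);
            if (kvm->arch.apicv_state == APICV_ACTIVATED) {
                    /* init == false: drop/re-take vcpu->srcu_idx around the memslot update */
                    avic_setup_access_page(vcpu, false);
            } else {
                    avic_destroy_access_page(vcpu);
            }
            mutex_unlock(&kvm->arch.apicv_lock);
    }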

    Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
    ---
    arch/x86/kvm/svm.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++------
    1 file changed, 47 insertions(+), 6 deletions(-)

    diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
    index 8673617..2e06ee2 100644
    --- a/arch/x86/kvm/svm.c
    +++ b/arch/x86/kvm/svm.c
    @@ -1476,7 +1476,9 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
    static void avic_init_vmcb(struct vcpu_svm *svm)
    {
    struct vmcb *vmcb = svm->vmcb;
    - struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
    + struct kvm *kvm = svm->vcpu.kvm;
    + struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
    +
    phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
    phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
    phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
    @@ -1485,7 +1487,13 @@ static void avic_init_vmcb(struct vcpu_svm *svm)
    vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
    vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
    vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
    - vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
    +
    + mutex_lock(&kvm->arch.apicv_lock);
    + if (kvm->arch.apicv_state == APICV_ACTIVATED)
    + vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
    + else
    + vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
    + mutex_unlock(&kvm->arch.apicv_lock);
    }

    static void init_vmcb(struct vcpu_svm *svm)
    @@ -1668,19 +1676,24 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
    * field of the VMCB. Therefore, we set up the
    * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
    */
    -static int avic_init_access_page(struct kvm_vcpu *vcpu)
    +static int avic_setup_access_page(struct kvm_vcpu *vcpu, bool init)
    {
    struct kvm *kvm = vcpu->kvm;
    int ret = 0;

    mutex_lock(&kvm->slots_lock);
    +
    if (kvm->arch.apic_access_page_done)
    goto out;

    + if (!init)
    + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
    ret = __x86_set_memory_region(kvm,
    APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
    APIC_DEFAULT_PHYS_BASE,
    PAGE_SIZE);
    + if (!init)
    + vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
    if (ret)
    goto out;

    @@ -1690,14 +1703,39 @@ static int avic_init_access_page(struct kvm_vcpu *vcpu)
    return ret;
    }

    +static void avic_destroy_access_page(struct kvm_vcpu *vcpu)
    +{
    + struct kvm *kvm = vcpu->kvm;
    +
    + mutex_lock(&kvm->slots_lock);
    +
    + if (!kvm->arch.apic_access_page_done)
    + goto out;
    +
    + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
    + __x86_set_memory_region(kvm,
    + APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
    + APIC_DEFAULT_PHYS_BASE,
    + 0);
    + vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
    + kvm->arch.apic_access_page_done = false;
    +out:
    + mutex_unlock(&kvm->slots_lock);
    +}
    +
    static int avic_init_backing_page(struct kvm_vcpu *vcpu)
    {
    - int ret;
    + int ret = 0;
    u64 *entry, new_entry;
    int id = vcpu->vcpu_id;
    + struct kvm *kvm = vcpu->kvm;
    struct vcpu_svm *svm = to_svm(vcpu);

    - ret = avic_init_access_page(vcpu);
    + mutex_lock(&kvm->arch.apicv_lock);
    + if (kvm->arch.apicv_state == APICV_ACTIVATED)
    + ret = avic_setup_access_page(vcpu, true);
    + mutex_unlock(&kvm->arch.apicv_lock);
    +
    if (ret)
    return ret;

    @@ -2187,7 +2225,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
    /* We initialize this flag to true to make sure that the is_running
    * bit would be set the first time the vcpu is loaded.
    */
    - svm->avic_is_running = true;
    + mutex_lock(&kvm->arch.apicv_lock);
    + if (irqchip_in_kernel(kvm) && kvm->arch.apicv_state == APICV_ACTIVATED)
    + svm->avic_is_running = true;
    + mutex_unlock(&kvm->arch.apicv_lock);

    svm->nested.hsave = page_address(hsave_page);

    --
    1.8.3.1