From: Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH 03/12] KVM: MMU: introduce kvm_mmu_flush_or_zap
Date: Wed, 24 Feb 2016

This is a generalization of mmu_pte_write_flush_tlb that also
takes care of calling kvm_mmu_commit_zap_page.  The next patches
will introduce more uses.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
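
(For review convenience, here is a sketch of how the helper reads once the
patch is applied.  Only the lines marked in the hunks below are part of the
change; the TLB-flush tail is the unchanged remainder of the old
mmu_pte_write_flush_tlb, reconstructed from context, and the
kvm_make_request() call in particular is an assumption, not shown in the
diff.  The early return is correct because kvm_mmu_commit_zap_page itself
flushes remote TLBs before freeing the zapped pages, so no separate flush
is needed on that path.)

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
{
	/*
	 * Committing the zap flushes remote TLBs itself before the
	 * pages are freed, so no additional flush is needed here.
	 */
	if (!list_empty(invalid_list)) {
		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
		return;
	}

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		/* assumed unchanged tail: request a local TLB flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}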

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3060873d8cab..6bf74d8d4989 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4125,11 +4125,14 @@ static bool need_remote_flush(u64 old, u64 new)
 	return (old & ~new & PT64_PERM_MASK) != 0;
 }
 
-static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
-				    bool remote_flush, bool local_flush)
+static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
+				 struct list_head *invalid_list,
+				 bool remote_flush, bool local_flush)
 {
-	if (zap_page)
+	if (!list_empty(invalid_list)) {
+		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
 		return;
+	}
 
 	if (remote_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
@@ -4256,7 +4259,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
-	bool remote_flush, local_flush, zap_page;
+	bool remote_flush, local_flush;
 	union kvm_mmu_page_role mask = { };
 
 	mask.cr0_wp = 1;
@@ -4273,7 +4276,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
 		return;
 
-	zap_page = remote_flush = local_flush = false;
+	remote_flush = local_flush = false;
 
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
@@ -4293,8 +4296,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		      detect_write_flooding(sp)) {
-			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-							       &invalid_list);
+			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
@@ -4316,8 +4318,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			++spte;
 		}
 	}
-	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
 	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
-- 
1.8.3.1
