Subject: Re: [PATCH v8 7/8] KVM: Handle page fault for private memory
    On Thu, Sep 15, 2022, Chao Peng wrote:
    > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
    > index a0f198cede3d..81ab20003824 100644
    > --- a/arch/x86/kvm/mmu/mmu.c
    > +++ b/arch/x86/kvm/mmu/mmu.c
    > @@ -3028,6 +3028,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
>  			break;
>  	}
>
> +	if (kvm_mem_is_private(kvm, gfn))
> +		return max_level;
> +

    Rather than reload the Xarray info, which is unnecessary overhead, pass in
    @is_private. The caller must hold mmu_lock, i.e. invalidations from
    private<->shared conversions will be stalled and will zap the new SPTE if the
    state is changed.

    E.g.

    diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
    index d68944f07b4b..44eea47697d8 100644
    --- a/arch/x86/kvm/mmu/mmu.c
    +++ b/arch/x86/kvm/mmu/mmu.c
    @@ -3072,8 +3072,8 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 	 * Enforce the iTLB multihit workaround after capturing the requested
 	 * level, which will be used to do precise, accurate accounting.
 	 */
-	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot,
-						     fault->gfn, fault->max_level);
+	fault->req_level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, fault->gfn,
+						     fault->max_level, fault->is_private);
 	if (fault->req_level == PG_LEVEL_4K || fault->huge_page_disallowed)
 		return;

    @@ -6460,7 +6460,7 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
 		 */
 		if (sp->role.direct &&
 		    sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
-							       PG_LEVEL_NUM)) {
+							       PG_LEVEL_NUM, false)) {
 			kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
 
 			if (kvm_available_flush_tlb_with_range())
    diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
    index 7670c13ce251..9acdf72537ce 100644
    --- a/arch/x86/kvm/mmu/spte.h
    +++ b/arch/x86/kvm/mmu/spte.h
    @@ -315,6 +315,12 @@ static inline bool is_dirty_spte(u64 spte)
 	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
 }
 
+static inline bool is_private_spte(u64 spte)
+{
+	/* FIXME: Query C-bit/S-bit for SEV/TDX. */
+	return false;
+}
+
 static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
 				int level)
 {
    diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
    index 672f0432d777..69ba00157e90 100644
    --- a/arch/x86/kvm/mmu/tdp_mmu.c
    +++ b/arch/x86/kvm/mmu/tdp_mmu.c
    @@ -1767,8 +1767,9 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 		if (iter.gfn < start || iter.gfn >= end)
 			continue;
 
-		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
-							      iter.gfn, PG_LEVEL_NUM);
+		max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
+							      PG_LEVEL_NUM,
+							      is_private_spte(iter.old_spte));
 		if (max_mapping_level < iter.level)
 			continue;

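For reference, the diff above only updates the callers. A minimal sketch of how
kvm_mmu_max_mapping_level() itself might consume the new parameter, assuming the
rest of the body stays as it is in current mmu.c and that private memory simply
skips the host pfn mapping level walk (there is no host userspace mapping to
inspect):

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot,
			      gfn_t gfn, int max_level, bool is_private)
{
	struct kvm_lpage_info *linfo;
	int host_level;

	max_level = min(max_level, max_huge_page_level);
	for ( ; max_level > PG_LEVEL_4K; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
			break;
	}

	/*
	 * Private memory is not mapped into host userspace, so there is
	 * no host mapping level to consult; honor the requested level.
	 */
	if (is_private)
		return max_level;

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	host_level = host_pfn_mapping_level(kvm, gfn, slot);
	return min(host_level, max_level);
}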
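And a purely hypothetical sketch of what the FIXME in is_private_spte() is
gesturing at; shadow_private_mask below is an assumed per-platform mask (e.g.
the SEV C-bit position) that does not exist in this series, and a real TDX
implementation would instead need to test the shared bit, whose polarity is
inverted (bit set == shared):

static inline bool is_private_spte(u64 spte)
{
	/*
	 * Assumed mask, configured at hardware setup time; zero would mean
	 * the platform has no notion of private SPTEs at all.
	 */
	return shadow_private_mask && (spte & shadow_private_mask);
}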