Date: 21 Dec 2022
Subject: [Patch v3 2/9] KVM: x86/mmu: Remove zapped_obsolete_pages from struct kvm_arch{}
From: Vipin Sharma <vipinsh@google.com>
The zapped_obsolete_pages list in struct kvm_arch{} was used to provide
pages to the KVM MMU shrinker. It is no longer needed now that the KVM
MMU shrinker has been repurposed to free shadow page caches instead of
zapped_obsolete_pages.

Remove zapped_obsolete_pages from struct kvm_arch{} and use a local
list in kvm_zap_obsolete_pages().

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
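Not part of the commit itself: below is a minimal, self-contained
userspace sketch of the on-stack LIST_HEAD idiom this patch switches
to (queue entries on a function-local list, then commit the batch in
one pass). The list helpers are simplified stand-ins for the kernel's
<linux/list.h>, and struct obsolete_page plus the commit loop are
hypothetical, for illustration only.

/*
 * Sketch of the pattern: collect work on a stack-local list, then
 * drain it once. The list helpers below are simplified stand-ins
 * for <linux/list.h>; struct obsolete_page is an illustrative type,
 * not the kernel's struct page.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Declare and initialize a list head on the stack, as the kernel macro does. */
#define LIST_HEAD(name) struct list_head name = { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct obsolete_page {
	int id;
	struct list_head link;
};

int main(void)
{
	/* Local list, like zapped_pages in kvm_zap_obsolete_pages(). */
	LIST_HEAD(zapped_pages);
	struct obsolete_page pages[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

	/* "Prepare" phase: queue pages on the local list. */
	for (int i = 0; i < 3; i++)
		list_add_tail(&pages[i].link, &zapped_pages);

	/* "Commit" phase: walk the batch once and drain it. */
	for (struct list_head *pos = zapped_pages.next; pos != &zapped_pages;
	     pos = pos->next) {
		struct obsolete_page *p = (struct obsolete_page *)
			((char *)pos - offsetof(struct obsolete_page, link));
		printf("committing zap of page %d\n", p->id);
	}
	return 0;
}

A function-local list is viable here because, as the commit message
notes, the shrinker no longer consumes zapped_obsolete_pages, so
nothing outside kvm_zap_obsolete_pages() needs to observe the list
between the prepare and commit phases.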
 arch/x86/include/asm/kvm_host.h | 1 -
 arch/x86/kvm/mmu/mmu.c          | 8 ++++----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 89cc809e4a00..f89f02e18080 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1215,7 +1215,6 @@ struct kvm_arch {
 	u8 mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	struct list_head active_mmu_pages;
-	struct list_head zapped_obsolete_pages;
 	/*
 	 * A list of kvm_mmu_page structs that, if zapped, could possibly be
 	 * replaced by an NX huge page. A shadow page is on this list if its
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 157417e1cb6e..3364760a1695 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5987,6 +5987,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	int nr_zapped, batch = 0;
+	LIST_HEAD(zapped_pages);
 	bool unstable;
 
 restart:
@@ -6019,8 +6020,8 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 			goto restart;
 		}
 
-		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
-				&kvm->arch.zapped_obsolete_pages, &nr_zapped);
+		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &zapped_pages,
+						      &nr_zapped);
 		batch += nr_zapped;
 
 		if (unstable)
@@ -6036,7 +6037,7 @@ static void kvm_zap_obsolete_pages(struct kvm *kvm)
 	 * kvm_mmu_load()), and the reload in the caller ensure no vCPUs are
 	 * running with an obsolete MMU.
 	 */
-	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
+	kvm_mmu_commit_zap_page(kvm, &zapped_pages);
 }
 
 /*
@@ -6112,7 +6113,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
 	int r;
 
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-	INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
 	INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
 	spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
 
-- 
2.39.0.314.g84b9a713c41-goog