Subject: [PATCH 4.19 257/306] KVM: MMU: Do not treat ZONE_DEVICE pages as being reserved

From: Sean Christopherson <sean.j.christopherson@intel.com>

commit a78986aae9b2988f8493f9f65a587ee433e83bc3 upstream.

Explicitly exempt ZONE_DEVICE pages from kvm_is_reserved_pfn() and
instead manually handle ZONE_DEVICE on a case-by-case basis. For things
like page refcounts, KVM needs to treat ZONE_DEVICE pages like normal
pages, e.g. put pages grabbed via gup(). But for flows such as setting
A/D bits or shifting refcounts for transparent huge pages, KVM needs
to avoid processing ZONE_DEVICE pages as the flows in question lack
the underlying machinery for proper handling of ZONE_DEVICE pages.
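
To make the case-by-case split concrete, here is a condensed sketch of
the two kinds of flows as they look with this patch applied (not part
of the diff below; kvm_release_pfn_clean() is shown as it exists in
4.19):

	/* Refcount flow: a ZONE_DEVICE page is put like any other page. */
	void kvm_release_pfn_clean(kvm_pfn_t pfn)
	{
		if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
			put_page(pfn_to_page(pfn));
	}

	/* A/D flow: a ZONE_DEVICE page must be skipped explicitly. */
	void kvm_set_pfn_accessed(kvm_pfn_t pfn)
	{
		if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
			mark_page_accessed(pfn_to_page(pfn));
	}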

This fixes a hang reported by Adam Borowski[*] in dev_pagemap_cleanup()
when running a KVM guest backed with /dev/dax memory, as KVM never puts
the references to ZONE_DEVICE pages that it acquires via gup().

Note, Dan Williams proposed an alternative solution of doing put_page()
on ZONE_DEVICE pages immediately after gup() in order to simplify the
auditing needed to ensure is_zone_device_page() is called if and only if
the backing device is pinned (via gup()). But that approach would break
kvm_vcpu_{un}map() as KVM requires the page to be pinned from map() 'til
unmap() when accessing guest memory, unlike KVM's secondary MMU, which
coordinates with mmu_notifier invalidations to avoid creating stale
page references, i.e. doesn't rely on pages being pinned.

[*] http://lkml.kernel.org/r/20190919115547.GA17963@angband.pl
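
For reference, the kvm_vcpu_{un}map() pattern in question looks roughly
like the following (upstream API, not present in 4.19; gpa, data and
len are illustrative):

	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map))
		return -EFAULT;

	/*
	 * The host may dereference map.hva anywhere in this window, so
	 * the underlying page must stay pinned until unmap(); putting
	 * the gup() reference early would leave map.hva pointing at a
	 * page its device is free to reclaim.
	 */
	memcpy(map.hva + offset_in_page(gpa), data, len);

	kvm_vcpu_unmap(vcpu, &map, true);	/* true => mark page dirty */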

Reported-by: Adam Borowski <kilobyte@angband.pl>
Analyzed-by: David Hildenbrand <david@redhat.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Cc: stable@vger.kernel.org
Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[sean: backport to 4.x; resolve conflict in mmu.c]
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kvm/mmu.c       |  8 ++++----
 include/linux/kvm_host.h |  1 +
 virt/kvm/kvm_main.c      | 26 +++++++++++++++++++++++---
 3 files changed, 28 insertions(+), 7 deletions(-)

--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3261,7 +3261,7 @@ static void transparent_hugepage_adjust(
 	 * here.
 	 */
 	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-	    level == PT_PAGE_TABLE_LEVEL &&
+	    !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
 	    PageTransCompoundMap(pfn_to_page(pfn)) &&
 	    !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
 		unsigned long mask;
@@ -5709,9 +5709,9 @@ restart:
 		 * the guest, and the guest page table is using 4K page size
 		 * mapping if the indirect sp has level = 1.
 		 */
-		if (sp->role.direct &&
-		    !kvm_is_reserved_pfn(pfn) &&
-		    PageTransCompoundMap(pfn_to_page(pfn))) {
+		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
+		    !kvm_is_zone_device_pfn(pfn) &&
+		    PageTransCompoundMap(pfn_to_page(pfn))) {
 			drop_spte(kvm, sptep);
 			need_tlb_flush = 1;
 			goto restart;
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -911,6 +911,7 @@ int kvm_cpu_has_pending_timer(struct kvm
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
 
 struct kvm_irq_ack_notifier {
 	struct hlist_node link;
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -147,10 +147,30 @@ __weak int kvm_arch_mmu_notifier_invalid
 	return 0;
 }
 
+bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
+{
+	/*
+	 * The metadata used by is_zone_device_page() to determine whether or
+	 * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
+	 * the device has been pinned, e.g. by get_user_pages().  WARN if the
+	 * page_count() is zero to help detect bad usage of this helper.
+	 */
+	if (!pfn_valid(pfn) || WARN_ON_ONCE(!page_count(pfn_to_page(pfn))))
+		return false;
+
+	return is_zone_device_page(pfn_to_page(pfn));
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
+	/*
+	 * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
+	 * perspective they are "normal" pages, albeit with slightly different
+	 * usage rules.
+	 */
 	if (pfn_valid(pfn))
-		return PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn)) &&
+		       !kvm_is_zone_device_pfn(pfn);
 
 	return true;
 }
@@ -1727,7 +1747,7 @@ EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty)
 
 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
 {
-	if (!kvm_is_reserved_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 
 		if (!PageReserved(page))
@@ -1738,7 +1758,7 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
 {
-	if (!kvm_is_reserved_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn) && !kvm_is_zone_device_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
