    Subject: [PATCH 5.15 073/207] KVM: Ensure local memslot copies operate on up-to-date arch-specific data
    From: Sean Christopherson <seanjc@google.com>

    commit bda44d844758c70c8dc1478e6fc9c25efa90c5a7 upstream.

    When modifying memslots, snapshot the "old" memslot and copy it to the
    "new" memslot's arch data after (re)acquiring slots_arch_lock. x86 can
    change a memslot's arch data while memslot updates are in-progress so
    long as it holds slots_arch_lock, thus snapshotting a memslot without
    holding the lock can result in the consumption of stale data.
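    As a purely illustrative aside (not part of the patch), the stand-alone
    user-space sketch below mimics the ordering the fix enforces. All names in
    it (fake_slot, arch_side_update, live_slot) are invented for the example: a
    pthread mutex stands in for slots_arch_lock, a string stands in for the
    slot's arch data, and the point is only that a local snapshot of a slot is
    trustworthy only if it is taken after that lock has been (re)acquired.

    /*
     * Illustrative user-space analogue only; not kernel code. A mutex stands
     * in for slots_arch_lock and a string stands in for kvm_memory_slot.arch.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct fake_slot {
            int id;
            char arch[32];          /* stand-in for the arch-specific data */
    };

    static pthread_mutex_t slots_arch_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct fake_slot live_slot = { .id = 0, .arch = "arch data v1" };

    /* Models x86 rewriting arch data while holding only slots_arch_lock. */
    static void *arch_side_update(void *unused)
    {
            (void)unused;
            pthread_mutex_lock(&slots_arch_lock);
            strcpy(live_slot.arch, "arch data v2");
            pthread_mutex_unlock(&slots_arch_lock);
            return NULL;
    }

    int main(void)
    {
            struct fake_slot early, locked;
            pthread_t t;

            /* Buggy ordering: snapshot taken before (re)acquiring the lock. */
            early = live_slot;

            pthread_create(&t, NULL, arch_side_update, NULL);
            pthread_join(&t, NULL);

            /* Fixed ordering: snapshot and arch copy done under the lock. */
            pthread_mutex_lock(&slots_arch_lock);
            locked = live_slot;
            pthread_mutex_unlock(&slots_arch_lock);

            printf("early snapshot:  %s\n", early.arch);   /* stale "v1" */
            printf("locked snapshot: %s\n", locked.arch);  /* fresh "v2" */
            return 0;
    }

    Built with "cc -pthread", the early snapshot still reports the v1 data
    while the locked snapshot sees v2; this is the same staleness the patch
    closes by copying the arch data only after (re)acquiring slots_arch_lock.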

    Fixes: b10a038e84d1 ("KVM: mmu: Add slots_arch_lock for memslot arch fields")
    Cc: stable@vger.kernel.org
    Cc: Ben Gardon <bgardon@google.com>
    Signed-off-by: Sean Christopherson <seanjc@google.com>
    Message-Id: <20211104002531.1176691-2-seanjc@google.com>
    Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
    virt/kvm/kvm_main.c | 47 +++++++++++++++++++++++++++++++----------------
    1 file changed, 31 insertions(+), 16 deletions(-)

    --- a/virt/kvm/kvm_main.c
    +++ b/virt/kvm/kvm_main.c
    @@ -1523,11 +1523,10 @@ static struct kvm_memslots *kvm_dup_memslots(
     
     static int kvm_set_memslot(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
    -                           struct kvm_memory_slot *old,
                                struct kvm_memory_slot *new, int as_id,
                                enum kvm_mr_change change)
     {
    -        struct kvm_memory_slot *slot;
    +        struct kvm_memory_slot *slot, old;
             struct kvm_memslots *slots;
             int r;
     
    @@ -1558,7 +1557,7 @@ static int kvm_set_memslot(struct kvm *kvm,
                      * Note, the INVALID flag needs to be in the appropriate entry
                      * in the freshly allocated memslots, not in @old or @new.
                      */
    -                slot = id_to_memslot(slots, old->id);
    +                slot = id_to_memslot(slots, new->id);
                     slot->flags |= KVM_MEMSLOT_INVALID;
     
                     /*
    @@ -1589,6 +1588,26 @@ static int kvm_set_memslot(struct kvm *kvm,
                     kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
             }
     
    +        /*
    +         * Make a full copy of the old memslot, the pointer will become stale
    +         * when the memslots are re-sorted by update_memslots(), and the old
    +         * memslot needs to be referenced after calling update_memslots(), e.g.
    +         * to free its resources and for arch specific behavior. This needs to
    +         * happen *after* (re)acquiring slots_arch_lock.
    +         */
    +        slot = id_to_memslot(slots, new->id);
    +        if (slot) {
    +                old = *slot;
    +        } else {
    +                WARN_ON_ONCE(change != KVM_MR_CREATE);
    +                memset(&old, 0, sizeof(old));
    +                old.id = new->id;
    +                old.as_id = as_id;
    +        }
    +
    +        /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
    +        memcpy(&new->arch, &old.arch, sizeof(old.arch));
    +
             r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
             if (r)
                     goto out_slots;
    @@ -1596,14 +1615,18 @@ static int kvm_set_memslot(struct kvm *kvm,
             update_memslots(slots, new, change);
             slots = install_new_memslots(kvm, as_id, slots);
     
    -        kvm_arch_commit_memory_region(kvm, mem, old, new, change);
    +        kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
    +
    +        /* Free the old memslot's metadata. Note, this is the full copy!!! */
    +        if (change == KVM_MR_DELETE)
    +                kvm_free_memslot(kvm, &old);
     
             kvfree(slots);
             return 0;
     
     out_slots:
             if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
    -                slot = id_to_memslot(slots, old->id);
    +                slot = id_to_memslot(slots, new->id);
                     slot->flags &= ~KVM_MEMSLOT_INVALID;
                     slots = install_new_memslots(kvm, as_id, slots);
             } else {
    @@ -1618,7 +1641,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *old, int as_id)
     {
             struct kvm_memory_slot new;
    -        int r;
     
             if (!old->npages)
                     return -EINVAL;
    @@ -1631,12 +1653,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
              */
             new.as_id = as_id;
     
    -        r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
    -        if (r)
    -                return r;
    -
    -        kvm_free_memslot(kvm, old);
    -        return 0;
    +        return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
     }
     
     /*
    @@ -1711,7 +1728,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
             if (!old.npages) {
                     change = KVM_MR_CREATE;
                     new.dirty_bitmap = NULL;
    -                memset(&new.arch, 0, sizeof(new.arch));
             } else { /* Modify an existing slot. */
                     if ((new.userspace_addr != old.userspace_addr) ||
                         (new.npages != old.npages) ||
    @@ -1725,9 +1741,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
                     else /* Nothing to change. */
                             return 0;
     
    -                /* Copy dirty_bitmap and arch from the current memslot. */
    +                /* Copy dirty_bitmap from the current memslot. */
                     new.dirty_bitmap = old.dirty_bitmap;
    -                memcpy(&new.arch, &old.arch, sizeof(new.arch));
             }
     
             if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
    @@ -1753,7 +1768,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                     bitmap_set(new.dirty_bitmap, 0, new.npages);
             }
     
    -        r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
    +        r = kvm_set_memslot(kvm, mem, &new, as_id, change);
             if (r)
                     goto out_bitmap;
     
