From: Peter Xu <peterx@redhat.com>
Subject: [PATCH v9 04/14] KVM: Pass in kvm pointer into mark_page_dirty_in_slot()
    The context will be needed to implement the kvm dirty ring.

    Signed-off-by: Peter Xu <peterx@redhat.com>
    ---
    virt/kvm/kvm_main.c | 33 +++++++++++++++++++--------------
    1 file changed, 19 insertions(+), 14 deletions(-)
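
Not part of the patch itself, but a rough illustration of where the extra argument is headed: once the dirty ring from the later patches in this series is in place, mark_page_dirty_in_slot() can use the struct kvm pointer to decide between the existing dirty bitmap and the new ring. The kvm->dirty_ring_size field and the kvm_dirty_ring_push() helper in the sketch below are assumed names based on the rest of the series, not something this patch introduces:

/*
 * Sketch only -- not introduced by this patch.  dirty_ring_size and
 * kvm_dirty_ring_push() stand in for what the later dirty-ring
 * patches are expected to add.
 */
#include <linux/kvm_host.h>

static void mark_page_dirty_in_slot(struct kvm *kvm,
				    struct kvm_memory_slot *memslot,
				    gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		if (kvm->dirty_ring_size)
			/* Assumed helper from the later dirty-ring patches. */
			kvm_dirty_ring_push(kvm, memslot, rel_gfn);
		else
			set_bit_le(rel_gfn, memslot->dirty_bitmap);
	}
}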

    diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
    index ebdd98a30e82..a7460e93d457 100644
    --- a/virt/kvm/kvm_main.c
    +++ b/virt/kvm/kvm_main.c
@@ -144,7 +144,9 @@ static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
-static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot,
+				    gfn_t gfn);
 
 __visible bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -2120,7 +2122,8 @@ int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
 
-static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+static void __kvm_unmap_gfn(struct kvm *kvm,
+			    struct kvm_memory_slot *memslot,
 			    struct kvm_host_map *map,
 			    struct gfn_to_pfn_cache *cache,
 			    bool dirty, bool atomic)
@@ -2145,7 +2148,7 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
 #endif
 
 	if (dirty)
-		mark_page_dirty_in_slot(memslot, map->gfn);
+		mark_page_dirty_in_slot(kvm, memslot, map->gfn);
 
 	if (cache)
 		cache->dirty |= dirty;
@@ -2159,7 +2162,7 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
 int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
 		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
 {
-	__kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
+	__kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map,
 			cache, dirty, atomic);
 	return 0;
 }
@@ -2167,8 +2170,8 @@ EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
 
 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
 {
-	__kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
-			dirty, false);
+	__kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn),
+			map, NULL, dirty, false);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
 
@@ -2342,7 +2345,8 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
 
-static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
+static int __kvm_write_guest_page(struct kvm *kvm,
+				  struct kvm_memory_slot *memslot, gfn_t gfn,
 				  const void *data, int offset, int len)
 {
 	int r;
@@ -2354,7 +2358,7 @@ static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
 	r = __copy_to_user((void __user *)addr + offset, data, len);
 	if (r)
 		return -EFAULT;
-	mark_page_dirty_in_slot(memslot, gfn);
+	mark_page_dirty_in_slot(kvm, memslot, gfn);
 	return 0;
 }
 
@@ -2363,7 +2367,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
 {
 	struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
 
-	return __kvm_write_guest_page(slot, gfn, data, offset, len);
+	return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
 
@@ -2372,7 +2376,7 @@ int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
 {
 	struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
-	return __kvm_write_guest_page(slot, gfn, data, offset, len);
+	return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
 
@@ -2491,7 +2495,7 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 	r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
 	if (r)
 		return -EFAULT;
-	mark_page_dirty_in_slot(ghc->memslot, gpa >> PAGE_SHIFT);
+	mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
 
 	return 0;
 }
@@ -2558,7 +2562,8 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot,
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot,
 				    gfn_t gfn)
 {
 	if (memslot && memslot->dirty_bitmap) {
@@ -2573,7 +2578,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memory_slot *memslot;
 
 	memslot = gfn_to_memslot(kvm, gfn);
-	mark_page_dirty_in_slot(memslot, gfn);
+	mark_page_dirty_in_slot(kvm, memslot, gfn);
 }
 EXPORT_SYMBOL_GPL(mark_page_dirty);
 
@@ -2582,7 +2587,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 	struct kvm_memory_slot *memslot;
 
 	memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-	mark_page_dirty_in_slot(memslot, gfn);
+	mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
    --
    2.26.2