 
Date: 2010-07-01
From: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Subject: [PATCH v4 2/6] KVM: MMU: introduce gfn_to_page_many_atomic() function
This function not only returns the gfn's page but also the number of pages
that remain after @gfn in the slot.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 include/linux/kvm_host.h |    2 ++
 virt/kvm/kvm_main.c      |   29 ++++++++++++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletions(-)

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e0fb543..53f663c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -288,6 +288,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn,
+			    struct page **pages, int nr_pages, bool *enough);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3f976b0..cc360d7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -923,15 +923,25 @@ static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
 	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
 }
 
-unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn, int *entry)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = gfn_to_memslot(kvm, gfn);
+
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
+
+	if (entry)
+		*entry = slot->npages - (gfn - slot->base_gfn);
+
 	return gfn_to_hva_memslot(slot, gfn);
 }
+
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+{
+	return gfn_to_hva_many(kvm, gfn, NULL);
+}
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
@@ -1011,6 +1021,23 @@ pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 	return hva_to_pfn(kvm, addr, false);
 }
 
+int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn,
+			    struct page **pages, int nr_pages, bool *enough)
+{
+	unsigned long addr;
+	int entry, ret;
+
+	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	if (kvm_is_error_hva(addr))
+		return -1;
+
+	entry = min(entry, nr_pages);
+	*enough = (entry == nr_pages) ? true : false;
+	ret = __get_user_pages_fast(addr, entry, 1, pages);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
 	pfn_t pfn;
    --
    1.6.1.2
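
Not part of the patch: a minimal caller sketch showing how the new helper
might be used to pin a small batch of guest pages without sleeping. The
function example_map_batch() and the constant EXAMPLE_BATCH are hypothetical
names used only for illustration; only gfn_to_page_many_atomic() and
kvm_release_page_clean() come from the interfaces shown above.

#include <linux/kvm_host.h>

#define EXAMPLE_BATCH	16	/* hypothetical batch size */

static void example_map_batch(struct kvm *kvm, gfn_t gfn)
{
	struct page *pages[EXAMPLE_BATCH];
	bool enough;
	int i, nr;

	/*
	 * Pin up to EXAMPLE_BATCH pages starting at @gfn without sleeping.
	 * On return, @enough says whether the memslot really had
	 * EXAMPLE_BATCH pages left after @gfn; @nr is how many pages
	 * __get_user_pages_fast() actually pinned, or -1 if @gfn has no
	 * valid hva.
	 */
	nr = gfn_to_page_many_atomic(kvm, gfn, pages, EXAMPLE_BATCH, &enough);
	if (nr <= 0)
		return;

	for (i = 0; i < nr; i++) {
		/* ... install a mapping for gfn + i using pages[i] ... */
		kvm_release_page_clean(pages[i]);
	}
}

The caller is expected to release each pinned page once its mapping is
installed, which is what the kvm_release_page_clean() call stands in for
here.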


