From: Joerg Roedel <joerg.roedel@amd.com>
Subject: [PATCH 12/22] KVM: MMU: Implement nested gva_to_gpa functions
Date: 27 Apr 2010
This patch adds the functions that perform a nested page
table walk, translating an l2_gva into an l1_gpa.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
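Note (illustration, not part of the patch): conceptually the nested walk
composes two translations. The L2 guest's own page tables take an l2_gva
to an l2_gpa, and the nested_mmu's translate_gpa callback takes l2_gpas
(the final result, as well as every page table the walker touches along
the way) to l1_gpas. Below is a minimal stand-alone sketch of that
composition; the stub mappings and all names in it are made up for the
example and collapse the per-level translation into one final step:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

#define SKETCH_PAGE_MASK (~(gpa_t)0xfff)

/* Stage 1 stub: walk the L2 guest's page tables (made-up mapping). */
static gpa_t l2_walk(gva_t l2_gva)
{
	return (l2_gva & SKETCH_PAGE_MASK) + 0x1000;
}

/* Stage 2 stub: translate an l2_gpa through L1's tables (made-up offset). */
static gpa_t translate_gpa(gpa_t l2_gpa)
{
	return l2_gpa + 0x100000;
}

/*
 * Composition mirroring FNAME(gva_to_gpa_nested): walk the L2 tables,
 * keep the page offset of the original gva, then translate the
 * resulting gpa once more through L1's tables.
 */
static gpa_t l2_gva_to_l1_gpa(gva_t l2_gva)
{
	gpa_t l2_gpa = l2_walk(l2_gva) | (l2_gva & ~SKETCH_PAGE_MASK);

	return translate_gpa(l2_gpa);
}

int main(void)
{
	/* 0x2345 -> l2_gpa 0x3345 -> l1_gpa 0x103345 with the stubs above */
	printf("l1_gpa = 0x%llx\n",
	       (unsigned long long)l2_gva_to_l1_gpa(0x2345));
	return 0;
}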
 arch/x86/include/asm/kvm_host.h |    4 ++++
 arch/x86/kvm/mmu.c              |    8 ++++++++
 arch/x86/kvm/paging_tmpl.h      |   31 +++++++++++++++++++++++++++++++
 3 files changed, 43 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ccdbd2f..3cbfb51 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -287,6 +287,10 @@ struct kvm_vcpu_arch {
 	bool tpr_access_reporting;
 
 	struct kvm_mmu mmu;
+
+	/* Used for two dimensional paging emulation */
+	struct kvm_mmu nested_mmu;
+
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4e0bfdb..8bd40b5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2144,6 +2144,14 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
 	return vaddr;
 }
 
+static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
+					 u32 access, u32 *error)
+{
+	if (error)
+		*error = 0;
+	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, error);
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				u32 error_code)
 {
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 101849a..7819a6f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -264,6 +264,16 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 					write_fault, user_fault, fetch_fault);
 }
 
+static int FNAME(walk_addr_nested)(struct guest_walker *walker,
+				   struct kvm_vcpu *vcpu, gva_t addr,
+				   int write_fault, int user_fault,
+				   int fetch_fault)
+{
+	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
+					addr, write_fault, user_fault,
+					fetch_fault);
+}
+
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 			      u64 *spte, const void *pte)
 {
@@ -544,6 +554,27 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
 	return gpa;
 }
 
+static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
+				      u32 access, u32 *error)
+{
+	struct guest_walker walker;
+	gpa_t gpa = UNMAPPED_GVA;
+	int r;
+
+	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr,
+				    !!(access & PFERR_WRITE_MASK),
+				    !!(access & PFERR_USER_MASK),
+				    !!(access & PFERR_FETCH_MASK));
+
+	if (r) {
+		gpa = gfn_to_gpa(walker.gfn);
+		gpa |= vaddr & ~PAGE_MASK;
+	} else if (error)
+		*error = walker.error_code;
+
+	return gpa;
+}
+
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp)
 {
--
1.7.0.4


