From: Avi Kivity <avi@qumranet.com>
Subject: [PATCH 27/39] KVM: MMU: Convert direct maps to use the generic shadow walker
Date: 25 Sep 2008

    Signed-off-by: Avi Kivity <avi@qumranet.com>
    ---
 arch/x86/kvm/mmu.c |   93 ++++++++++++++++++++++++++++++---------------------
 1 files changed, 55 insertions(+), 38 deletions(-)

    diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
    index 8b95cf7..a1ca4ff 100644
    --- a/arch/x86/kvm/mmu.c
    +++ b/arch/x86/kvm/mmu.c
@@ -1260,49 +1260,66 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int largepage, gfn_t gfn, pfn_t pfn)
-{
-	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
-	int pt_write = 0;
-	int level = vcpu->arch.mmu.shadow_root_level;
-
-	for (; ; level--) {
-		u32 index = PT64_INDEX(v, level);
-		u64 *table;
+struct direct_shadow_walk {
+	struct kvm_shadow_walk walker;
+	pfn_t pfn;
+	int write;
+	int largepage;
+	int pt_write;
+};
 
-		ASSERT(VALID_PAGE(table_addr));
-		table = __va(table_addr);
+static int direct_map_entry(struct kvm_shadow_walk *_walk,
+			    struct kvm_vcpu *vcpu,
+			    gva_t addr, u64 *sptep, int level)
+{
+	struct direct_shadow_walk *walk =
+		container_of(_walk, struct direct_shadow_walk, walker);
+	struct kvm_mmu_page *sp;
+	gfn_t pseudo_gfn;
+	gfn_t gfn = addr >> PAGE_SHIFT;
+
+	if (level == PT_PAGE_TABLE_LEVEL
+	    || (walk->largepage && level == PT_DIRECTORY_LEVEL)) {
+		mmu_set_spte(vcpu, sptep, ACC_ALL, ACC_ALL,
+			     0, walk->write, 1, &walk->pt_write,
+			     walk->largepage, gfn, walk->pfn, false);
+		return 1;
+	}
 
-		if (level == 1 || (largepage && level == 2)) {
-			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-				     0, write, 1, &pt_write, largepage,
-				     gfn, pfn, false);
-			return pt_write;
+	if (*sptep == shadow_trap_nonpresent_pte) {
+		pseudo_gfn = (addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
+		sp = kvm_mmu_get_page(vcpu, pseudo_gfn, addr, level - 1,
+				      1, ACC_ALL, sptep);
+		if (!sp) {
+			pgprintk("nonpaging_map: ENOMEM\n");
+			kvm_release_pfn_clean(walk->pfn);
+			return -ENOMEM;
 		}
 
-		if (table[index] == shadow_trap_nonpresent_pte) {
-			struct kvm_mmu_page *new_table;
-			gfn_t pseudo_gfn;
-
-			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
-				>> PAGE_SHIFT;
-			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
-						     v, level - 1,
-						     1, ACC_ALL, &table[index]);
-			if (!new_table) {
-				pgprintk("nonpaging_map: ENOMEM\n");
-				kvm_release_pfn_clean(pfn);
-				return -ENOMEM;
-			}
-
-			set_shadow_pte(&table[index],
-				       __pa(new_table->spt)
-				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-				       | shadow_user_mask | shadow_x_mask);
-		}
-		table_addr = table[index] & PT64_BASE_ADDR_MASK;
+		set_shadow_pte(sptep,
+			       __pa(sp->spt)
+			       | PT_PRESENT_MASK | PT_WRITABLE_MASK
+			       | shadow_user_mask | shadow_x_mask);
 	}
+	return 0;
+}
+
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+			int largepage, gfn_t gfn, pfn_t pfn)
+{
+	int r;
+	struct direct_shadow_walk walker = {
+		.walker = { .entry = direct_map_entry, },
+		.pfn = pfn,
+		.largepage = largepage,
+		.write = write,
+		.pt_write = 0,
+	};
+
+	r = walk_shadow(&walker.walker, vcpu, (gva_t)gfn << PAGE_SHIFT);
+	if (r < 0)
+		return r;
+	return walker.pt_write;
 }
 
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
    --
    1.6.0.1
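
A note for readers outside this series: the generic walker used above was added earlier in the patch set. The caller embeds a struct kvm_shadow_walk (which carries only an ->entry callback) inside a private structure, walk_shadow() invokes ->entry once per page-table level, and the callback recovers its private state with container_of(). The following is a minimal, standalone sketch of that embed-and-recover pattern in plain userspace C; the names (walk, my_walk, walk_levels, my_entry) are invented for the sketch and are not kernel code:

/*
 * Standalone sketch (not kernel code) of the embed-and-recover pattern
 * that direct_shadow_walk uses above: the generic walker sees only the
 * embedded base struct; the per-level callback gets its private state
 * back with container_of().  Builds with any C99 compiler.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct walk {				/* generic part, like kvm_shadow_walk */
	int (*entry)(struct walk *w, int level);
};

struct my_walk {			/* private part, like direct_shadow_walk */
	struct walk walker;
	int visited;
};

static int my_entry(struct walk *_w, int level)
{
	struct my_walk *w = container_of(_w, struct my_walk, walker);

	w->visited++;			/* private state, recovered via container_of */
	return level == 1;		/* nonzero stops the walk, as in the patch */
}

static void walk_levels(struct walk *w, int top_level)
{
	int level;

	/* generic loop: one callback per level, stop on nonzero return */
	for (level = top_level; level >= 1; level--)
		if (w->entry(w, level))
			return;
}

int main(void)
{
	struct my_walk w = {
		.walker = { .entry = my_entry, },
		.visited = 0,
	};

	walk_levels(&w.walker, 4);
	printf("visited %d levels\n", w.visited);	/* prints "visited 4 levels" */
	return 0;
}

As in the patch, the callback's return value drives the traversal: direct_map_entry returns 1 once it has installed the final spte, returns a negative error (-ENOMEM) to abort, and returns 0 to let walk_shadow() descend another level; __direct_map then reports walker.pt_write to its caller.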

