    Date: 2010-04-22
    Subject: [053/197] KVM: x86 emulator: fix memory access during x86 emulation
    2.6.32-stable review patch.  If anyone has any objections, please let us know.

    ------------------


    From: Gleb Natapov <gleb@redhat.com>

    commit 1871c6020d7308afb99127bba51f04548e7ca84e upstream

    Currently, when the x86 emulator needs to access memory, the page walk is
    done with the broadest permissions possible, so an emulated instruction
    executed by a userspace process can still access kernel memory. Fix that by
    passing the correct memory access mode to the page walker during emulation.

    Signed-off-by: Gleb Natapov <gleb@redhat.com>
    Signed-off-by: Avi Kivity <avi@redhat.com>
    Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
    ---
    arch/x86/include/asm/kvm_emulate.h |   14 +++
    arch/x86/include/asm/kvm_host.h    |    7 +
    arch/x86/kvm/emulate.c             |    6 -
    arch/x86/kvm/mmu.c                 |   17 +---
    arch/x86/kvm/mmu.h                 |    6 +
    arch/x86/kvm/paging_tmpl.h         |   11 ++-
    arch/x86/kvm/x86.c                 |  131 ++++++++++++++++++++++++++++---------
    7 files changed, 142 insertions(+), 50 deletions(-)

    --- a/arch/x86/include/asm/kvm_emulate.h
    +++ b/arch/x86/include/asm/kvm_emulate.h
    @@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
    struct x86_emulate_ops {
    /*
    * read_std: Read bytes of standard (non-emulated/special) memory.
    - * Used for instruction fetch, stack operations, and others.
    + * Used for descriptor reading.
    * @addr: [IN ] Linear address from which to read.
    * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
    * @bytes: [IN ] Number of bytes to read from memory.
    */
    int (*read_std)(unsigned long addr, void *val,
    - unsigned int bytes, struct kvm_vcpu *vcpu);
    + unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
    +
    + /*
    + * fetch: Read bytes of standard (non-emulated/special) memory.
    + * Used for instruction fetch.
    + * @addr: [IN ] Linear address from which to read.
    + * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
    + * @bytes: [IN ] Number of bytes to read from memory.
    + */
    + int (*fetch)(unsigned long addr, void *val,
    + unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);

    /*
    * read_emulated: Read bytes from emulated/special memory area.
    --- a/arch/x86/include/asm/kvm_host.h
    +++ b/arch/x86/include/asm/kvm_host.h
    @@ -256,7 +256,8 @@ struct kvm_mmu {
    void (*new_cr3)(struct kvm_vcpu *vcpu);
    int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
    void (*free)(struct kvm_vcpu *vcpu);
    - gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
    + gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
    + u32 *error);
    void (*prefetch_page)(struct kvm_vcpu *vcpu,
    struct kvm_mmu_page *page);
    int (*sync_page)(struct kvm_vcpu *vcpu,
    @@ -645,6 +646,10 @@ void __kvm_mmu_free_some_pages(struct kv
    int kvm_mmu_load(struct kvm_vcpu *vcpu);
    void kvm_mmu_unload(struct kvm_vcpu *vcpu);
    void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
    +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
    +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
    +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
    +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);

    int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

    --- a/arch/x86/kvm/emulate.c
    +++ b/arch/x86/kvm/emulate.c
    @@ -612,7 +612,7 @@ static int do_fetch_insn_byte(struct x86

    if (linear < fc->start || linear >= fc->end) {
    size = min(15UL, PAGE_SIZE - offset_in_page(linear));
    - rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
    + rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
    if (rc)
    return rc;
    fc->start = linear;
    @@ -667,11 +667,11 @@ static int read_descriptor(struct x86_em
    op_bytes = 3;
    *address = 0;
    rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
    - ctxt->vcpu);
    + ctxt->vcpu, NULL);
    if (rc)
    return rc;
    rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
    - ctxt->vcpu);
    + ctxt->vcpu, NULL);
    return rc;
    }

    --- a/arch/x86/kvm/mmu.c
    +++ b/arch/x86/kvm/mmu.c
    @@ -136,12 +136,6 @@ module_param(oos_shadow, bool, 0644);
    #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
    | PT64_NX_MASK)

    -#define PFERR_PRESENT_MASK (1U << 0)
    -#define PFERR_WRITE_MASK (1U << 1)
    -#define PFERR_USER_MASK (1U << 2)
    -#define PFERR_RSVD_MASK (1U << 3)
    -#define PFERR_FETCH_MASK (1U << 4)
    -
    #define PT_PDPE_LEVEL 3
    #define PT_DIRECTORY_LEVEL 2
    #define PT_PAGE_TABLE_LEVEL 1
    @@ -1639,7 +1633,7 @@ struct page *gva_to_page(struct kvm_vcpu
    {
    struct page *page;

    - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
    + gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

    if (gpa == UNMAPPED_GVA)
    return NULL;
    @@ -2162,8 +2156,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu
    spin_unlock(&vcpu->kvm->mmu_lock);
    }

    -static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
    +static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
    + u32 access, u32 *error)
    {
    + if (error)
    + *error = 0;
    return vaddr;
    }

    @@ -2747,7 +2744,7 @@ int kvm_mmu_unprotect_page_virt(struct k
    if (tdp_enabled)
    return 0;

    - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
    + gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

    spin_lock(&vcpu->kvm->mmu_lock);
    r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
    @@ -3245,7 +3242,7 @@ static void audit_mappings_page(struct k
    if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
    audit_mappings_page(vcpu, ent, va, level - 1);
    else {
    - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
    + gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
    gfn_t gfn = gpa >> PAGE_SHIFT;
    pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
    hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
    --- a/arch/x86/kvm/mmu.h
    +++ b/arch/x86/kvm/mmu.h
    @@ -37,6 +37,12 @@
    #define PT32_ROOT_LEVEL 2
    #define PT32E_ROOT_LEVEL 3

    +#define PFERR_PRESENT_MASK (1U << 0)
    +#define PFERR_WRITE_MASK (1U << 1)
    +#define PFERR_USER_MASK (1U << 2)
    +#define PFERR_RSVD_MASK (1U << 3)
    +#define PFERR_FETCH_MASK (1U << 4)
    +
    int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);

    static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
    --- a/arch/x86/kvm/paging_tmpl.h
    +++ b/arch/x86/kvm/paging_tmpl.h
    @@ -491,18 +491,23 @@ static void FNAME(invlpg)(struct kvm_vcp
    spin_unlock(&vcpu->kvm->mmu_lock);
    }

    -static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
    +static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
    + u32 *error)
    {
    struct guest_walker walker;
    gpa_t gpa = UNMAPPED_GVA;
    int r;

    - r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
    + r = FNAME(walk_addr)(&walker, vcpu, vaddr,
    + !!(access & PFERR_WRITE_MASK),
    + !!(access & PFERR_USER_MASK),
    + !!(access & PFERR_FETCH_MASK));

    if (r) {
    gpa = gfn_to_gpa(walker.gfn);
    gpa |= vaddr & ~PAGE_MASK;
    - }
    + } else if (error)
    + *error = walker.error_code;

    return gpa;
    }
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
    @@ -2505,14 +2505,41 @@ static int vcpu_mmio_read(struct kvm_vcp
    return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
    }

    -static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
    - struct kvm_vcpu *vcpu)
    +gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
    +{
    + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
    + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
    +}
    +
    +gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
    +{
    + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
    + access |= PFERR_FETCH_MASK;
    + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
    +}
    +
    +gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
    +{
    + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
    + access |= PFERR_WRITE_MASK;
    + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
    +}
    +
    +/* uses this to access any guest's mapped memory without checking CPL */
    +gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
    +{
    + return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
    +}
    +
    +static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
    + struct kvm_vcpu *vcpu, u32 access,
    + u32 *error)
    {
    void *data = val;
    int r = X86EMUL_CONTINUE;

    while (bytes) {
    - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
    + gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
    unsigned offset = addr & (PAGE_SIZE-1);
    unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
    int ret;
    @@ -2535,14 +2562,37 @@ out:
    return r;
    }

    +/* used for instruction fetching */
    +static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
    + struct kvm_vcpu *vcpu, u32 *error)
    +{
    + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
    + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
    + access | PFERR_FETCH_MASK, error);
    +}
    +
    +static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
    + struct kvm_vcpu *vcpu, u32 *error)
    +{
    + u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
    + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
    + error);
    +}
    +
    +static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
    + struct kvm_vcpu *vcpu, u32 *error)
    +{
    + return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
    +}
    +
    static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
    - struct kvm_vcpu *vcpu)
    + struct kvm_vcpu *vcpu, u32 *error)
    {
    void *data = val;
    int r = X86EMUL_CONTINUE;

    while (bytes) {
    - gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
    + gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
    unsigned offset = addr & (PAGE_SIZE-1);
    unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
    int ret;
    @@ -2572,6 +2622,7 @@ static int emulator_read_emulated(unsign
    struct kvm_vcpu *vcpu)
    {
    gpa_t gpa;
    + u32 error_code;

    if (vcpu->mmio_read_completed) {
    memcpy(val, vcpu->mmio_data, bytes);
    @@ -2581,17 +2632,20 @@ static int emulator_read_emulated(unsign
    return X86EMUL_CONTINUE;
    }

    - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
    + gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
    +
    + if (gpa == UNMAPPED_GVA) {
    + kvm_inject_page_fault(vcpu, addr, error_code);
    + return X86EMUL_PROPAGATE_FAULT;
    + }

    /* For APIC access vmexit */
    if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
    goto mmio;

    - if (kvm_read_guest_virt(addr, val, bytes, vcpu)
    + if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
    == X86EMUL_CONTINUE)
    return X86EMUL_CONTINUE;
    - if (gpa == UNMAPPED_GVA)
    - return X86EMUL_PROPAGATE_FAULT;

    mmio:
    /*
    @@ -2630,11 +2684,12 @@ static int emulator_write_emulated_onepa
    struct kvm_vcpu *vcpu)
    {
    gpa_t gpa;
    + u32 error_code;

    - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
    + gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);

    if (gpa == UNMAPPED_GVA) {
    - kvm_inject_page_fault(vcpu, addr, 2);
    + kvm_inject_page_fault(vcpu, addr, error_code);
    return X86EMUL_PROPAGATE_FAULT;
    }

    @@ -2698,7 +2753,7 @@ static int emulator_cmpxchg_emulated(uns
    char *kaddr;
    u64 val;

    - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
    + gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);

    if (gpa == UNMAPPED_GVA ||
    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
    @@ -2777,7 +2832,7 @@ void kvm_report_emulation_failure(struct

    rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

    - kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
    + kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);

    printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
    context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
    @@ -2785,7 +2840,8 @@ void kvm_report_emulation_failure(struct
    EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

    static struct x86_emulate_ops emulate_ops = {
    - .read_std = kvm_read_guest_virt,
    + .read_std = kvm_read_guest_virt_system,
    + .fetch = kvm_fetch_guest_virt,
    .read_emulated = emulator_read_emulated,
    .write_emulated = emulator_write_emulated,
    .cmpxchg_emulated = emulator_cmpxchg_emulated,
    @@ -2922,12 +2978,17 @@ static int pio_copy_data(struct kvm_vcpu
    gva_t q = vcpu->arch.pio.guest_gva;
    unsigned bytes;
    int ret;
    + u32 error_code;

    bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
    if (vcpu->arch.pio.in)
    - ret = kvm_write_guest_virt(q, p, bytes, vcpu);
    + ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
    else
    - ret = kvm_read_guest_virt(q, p, bytes, vcpu);
    + ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
    +
    + if (ret == X86EMUL_PROPAGATE_FAULT)
    + kvm_inject_page_fault(vcpu, q, error_code);
    +
    return ret;
    }

    @@ -2948,7 +3009,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
    if (io->in) {
    r = pio_copy_data(vcpu);
    if (r)
    - return r;
    + goto out;
    }

    delta = 1;
    @@ -2975,7 +3036,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
    kvm_register_write(vcpu, VCPU_REGS_RSI, val);
    }
    }
    -
    +out:
    io->count -= io->cur_count;
    io->cur_count = 0;

    @@ -3095,10 +3156,8 @@ int kvm_emulate_pio_string(struct kvm_vc
    if (!vcpu->arch.pio.in) {
    /* string PIO write */
    ret = pio_copy_data(vcpu);
    - if (ret == X86EMUL_PROPAGATE_FAULT) {
    - kvm_inject_gp(vcpu, 0);
    + if (ret == X86EMUL_PROPAGATE_FAULT)
    return 1;
    - }
    if (ret == 0 && !pio_string_write(vcpu)) {
    complete_pio(vcpu);
    if (vcpu->arch.pio.count == 0)
    @@ -4078,7 +4137,9 @@ static int load_guest_segment_descriptor
    kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
    return 1;
    }
    - return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
    + return kvm_read_guest_virt_system(dtable.base + index*8,
    + seg_desc, sizeof(*seg_desc),
    + vcpu, NULL);
    }

    /* allowed just for 8 bytes segments */
    @@ -4092,15 +4153,23 @@ static int save_guest_segment_descriptor

    if (dtable.limit < index * 8 + 7)
    return 1;
    - return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
    + return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
    +}
    +
    +static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
    + struct desc_struct *seg_desc)
    +{
    + u32 base_addr = get_desc_base(seg_desc);
    +
    + return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
    }

    -static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
    +static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
    struct desc_struct *seg_desc)
    {
    u32 base_addr = get_desc_base(seg_desc);

    - return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
    + return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
    }

    static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
    @@ -4303,7 +4372,7 @@ static int kvm_task_switch_16(struct kvm
    sizeof tss_segment_16))
    goto out;

    - if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
    + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
    &tss_segment_16, sizeof tss_segment_16))
    goto out;

    @@ -4311,7 +4380,7 @@ static int kvm_task_switch_16(struct kvm
    tss_segment_16.prev_task_link = old_tss_sel;

    if (kvm_write_guest(vcpu->kvm,
    - get_tss_base_addr(vcpu, nseg_desc),
    + get_tss_base_addr_write(vcpu, nseg_desc),
    &tss_segment_16.prev_task_link,
    sizeof tss_segment_16.prev_task_link))
    goto out;
    @@ -4342,7 +4411,7 @@ static int kvm_task_switch_32(struct kvm
    sizeof tss_segment_32))
    goto out;

    - if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
    + if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
    &tss_segment_32, sizeof tss_segment_32))
    goto out;

    @@ -4350,7 +4419,7 @@ static int kvm_task_switch_32(struct kvm
    tss_segment_32.prev_task_link = old_tss_sel;

    if (kvm_write_guest(vcpu->kvm,
    - get_tss_base_addr(vcpu, nseg_desc),
    + get_tss_base_addr_write(vcpu, nseg_desc),
    &tss_segment_32.prev_task_link,
    sizeof tss_segment_32.prev_task_link))
    goto out;
    @@ -4373,7 +4442,7 @@ int kvm_task_switch(struct kvm_vcpu *vcp
    u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
    u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);

    - old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
    + old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);

    /* FIXME: Handle errors. Failure to read either TSS or their
    * descriptors should generate a pagefault.
    @@ -4582,7 +4651,7 @@ int kvm_arch_vcpu_ioctl_translate(struct

    vcpu_load(vcpu);
    down_read(&vcpu->kvm->slots_lock);
    - gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
    + gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
    up_read(&vcpu->kvm->slots_lock);
    tr->physical_address = gpa;
    tr->valid = gpa != UNMAPPED_GVA;


