Subject: [PATCH 23/24] kvm: plumb return of hva when resolving page fault.
From: Andres Lagar-Cavilla <andreslc@google.com>

Return the hva along with the pfn so we don't have to redo this work
later. Note the hva is not racy: it is simple arithmetic based on the
memslot.
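
The gfn-to-hva translation is plain offset arithmetic on fields of the
memslot, which is why the returned hva is stable. A minimal userspace
sketch of that arithmetic follows; the struct and helper names here
(memslot_sketch, gfn_to_hva_sketch) are illustrative stand-ins for this
commit message, not the kernel's own definitions.

/*
 * Illustration only: the same arithmetic as the memslot translation,
 * with field names mirroring struct kvm_memory_slot.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

typedef unsigned long long gfn_t;

struct memslot_sketch {
	gfn_t base_gfn;			/* first guest frame covered by the slot */
	unsigned long userspace_addr;	/* hva where the slot is mapped */
	unsigned long npages;
};

/* hva of a gfn: slot start address plus the page offset into the slot */
static unsigned long gfn_to_hva_sketch(const struct memslot_sketch *slot,
				       gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

int main(void)
{
	struct memslot_sketch slot = {
		.base_gfn = 0x100,
		.userspace_addr = 0x7f0000000000UL,
		.npages = 512,
	};

	/* gfn 0x105 is 5 pages into the slot: hva = start + 5 * PAGE_SIZE */
	printf("hva = 0x%lx\n", gfn_to_hva_sketch(&slot, 0x105));
	return 0;
}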

This will be used in the huge tmpfs commits.

Signed-off-by: Andres Lagar-Cavilla <andreslc@google.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
---
arch/x86/kvm/mmu.c         | 16 +++++++++++-----
arch/x86/kvm/paging_tmpl.h |  3 ++-
include/linux/kvm_host.h   |  2 +-
virt/kvm/kvm_main.c        | 24 ++++++++++++++----------
4 files changed, 28 insertions(+), 17 deletions(-)

--- thpfs.orig/arch/x86/kvm/mmu.c 2015-02-08 18:54:22.000000000 -0800
+++ thpfs/arch/x86/kvm/mmu.c 2015-02-20 19:35:20.095835839 -0800
@@ -2907,7 +2907,8 @@ exit:
}

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
- gva_t gva, pfn_t *pfn, bool write, bool *writable);
+ gva_t gva, pfn_t *pfn, bool write, bool *writable,
+ unsigned long *hva);
static void make_mmu_pages_available(struct kvm_vcpu *vcpu);

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
@@ -2918,6 +2919,7 @@ static int nonpaging_map(struct kvm_vcpu
int force_pt_level;
pfn_t pfn;
unsigned long mmu_seq;
+ unsigned long hva;
bool map_writable, write = error_code & PFERR_WRITE_MASK;

force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
@@ -2941,7 +2943,8 @@ static int nonpaging_map(struct kvm_vcpu
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();

- if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
+ if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write,
+ &map_writable, &hva))
return 0;

if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
@@ -3360,11 +3363,12 @@ static bool can_do_async_pf(struct kvm_v
}

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
- gva_t gva, pfn_t *pfn, bool write, bool *writable)
+ gva_t gva, pfn_t *pfn, bool write, bool *writable,
+ unsigned long *hva)
{
bool async;

- *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
+ *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable, hva);

if (!async)
return false; /* *pfn has correct page already */
@@ -3393,6 +3397,7 @@ static int tdp_page_fault(struct kvm_vcp
int force_pt_level;
gfn_t gfn = gpa >> PAGE_SHIFT;
unsigned long mmu_seq;
+ unsigned long hva;
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;

@@ -3423,7 +3428,8 @@ static int tdp_page_fault(struct kvm_vcp
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();

- if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+ if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write,
+ &map_writable, &hva))
return 0;

if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
--- thpfs.orig/arch/x86/kvm/paging_tmpl.h 2014-12-07 14:21:05.000000000 -0800
+++ thpfs/arch/x86/kvm/paging_tmpl.h 2015-02-20 19:35:20.095835839 -0800
@@ -709,6 +709,7 @@ static int FNAME(page_fault)(struct kvm_
int level = PT_PAGE_TABLE_LEVEL;
int force_pt_level;
unsigned long mmu_seq;
+ unsigned long hva;
bool map_writable, is_self_change_mapping;

pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
@@ -759,7 +760,7 @@ static int FNAME(page_fault)(struct kvm_
smp_rmb();

if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
- &map_writable))
+ &map_writable, &hva))
return 0;

if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
--- thpfs.orig/include/linux/kvm_host.h 2015-02-08 18:54:22.000000000 -0800
+++ thpfs/include/linux/kvm_host.h 2015-02-20 19:35:20.095835839 -0800
@@ -554,7 +554,7 @@ void kvm_set_page_accessed(struct page *

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
- bool write_fault, bool *writable);
+ bool write_fault, bool *writable, unsigned long *hva);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
--- thpfs.orig/virt/kvm/kvm_main.c 2015-02-08 18:54:22.000000000 -0800
+++ thpfs/virt/kvm/kvm_main.c 2015-02-20 19:35:20.095835839 -0800
@@ -1328,7 +1328,8 @@ exit:

static pfn_t
__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
- bool *async, bool write_fault, bool *writable)
+ bool *async, bool write_fault, bool *writable,
+ unsigned long *hva)
{
unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);

@@ -1344,12 +1345,15 @@ __gfn_to_pfn_memslot(struct kvm_memory_s
writable = NULL;
}

+ if (hva)
+ *hva = addr;
+
return hva_to_pfn(addr, atomic, async, write_fault,
writable);
}

static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
- bool write_fault, bool *writable)
+ bool write_fault, bool *writable, unsigned long *hva)
{
struct kvm_memory_slot *slot;

@@ -1359,43 +1363,43 @@ static pfn_t __gfn_to_pfn(struct kvm *kv
slot = gfn_to_memslot(kvm, gfn);

return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
- writable);
+ writable, hva);
}

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
+ return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);

pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
- bool write_fault, bool *writable)
+ bool write_fault, bool *writable, unsigned long *hva)
{
- return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
+ return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable, hva);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);

pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
- return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
+ return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable)
{
- return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+ return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);

pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
+ return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
}

pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
- return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
+ return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);

