 
Subject: Re: [PATCH v3 6/6] KVM: MMU: delay flush all tlbs on sync_page path
On 11/20/2010 12:11 AM, Marcelo Tosatti wrote:

>> void kvm_flush_remote_tlbs(struct kvm *kvm)
>> {
>> + int dirty_count = atomic_read(&kvm->tlbs_dirty);
>> +
>> + smp_mb();
>> if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
>> ++kvm->stat.remote_tlb_flush;
>> + atomic_sub(dirty_count, &kvm->tlbs_dirty);
>> }
>
> This is racy because kvm_flush_remote_tlbs might be called without
> mmu_lock protection.

Sorry for my carelessness; it should be 'cmpxchg' here.
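
Here is a standalone sketch of the race (userspace C11 atomics, not
the kernel code; the names just mirror the patch below):

    #include <stdatomic.h>

    static atomic_long tlbs_dirty;

    /* sync_page side: note a stale spte instead of flushing at once. */
    static void mark_tlbs_dirty(void)
    {
            atomic_fetch_add(&tlbs_dirty, 1);
    }

    /*
     * Flusher side.  With atomic_sub, two flushers racing here could
     * both snapshot the same dirty_count and subtract it twice,
     * driving the counter negative.  cmpxchg clears the counter only
     * if no new dirty entries appeared after the snapshot; if a
     * sync_page raced with us, the compare fails and the next flush
     * still covers those entries.
     */
    static void flush_remote_tlbs(void)
    {
            long dirty_count = atomic_load(&tlbs_dirty);

            /* ... send IPIs and flush all remote TLBs here ... */

            atomic_compare_exchange_strong(&tlbs_dirty, &dirty_count, 0);
    }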

> You could decrease the counter on
> invalidate_page/invalidate_range_start only,

I want to avoid an unnecessary TLB flush: if the TLBs have already
been flushed after sync_page, then we don't need to flush them again
on the invalidate_page/invalidate_range_start path.
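
Concretely, the invalidate paths just fold the deferred count into
their own flush decision, in the same shape as the hunks below:

    /*
     * Under mmu_lock: nonzero tlbs_dirty means the flush deferred by
     * sync_page has not happened yet, so force one here.  If anyone
     * flushed in between, tlbs_dirty was reset to zero and no extra
     * flush is issued.
     */
    need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
    ...
    if (need_tlb_flush)
            kvm_flush_remote_tlbs(kvm);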

> these are not fast paths
> anyway.
>

How about the patch below? It needs just one atomic operation.

---
arch/x86/kvm/paging_tmpl.h | 4 ++--
include/linux/kvm_host.h | 2 ++
virt/kvm/kvm_main.c | 7 ++++++-
3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index dfb906f..e64192f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -781,14 +781,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gfn = gpte_to_gfn(gpte);

if (FNAME(map_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
- kvm_flush_remote_tlbs(vcpu->kvm);
+ vcpu->kvm->tlbs_dirty++;
continue;
}

if (gfn != sp->gfns[i]) {
drop_spte(vcpu->kvm, &sp->spt[i],
shadow_trap_nonpresent_pte);
- kvm_flush_remote_tlbs(vcpu->kvm);
+ vcpu->kvm->tlbs_dirty++;
continue;
}

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4bd663d..dafd90e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -249,6 +249,7 @@ struct kvm {
struct mmu_notifier mmu_notifier;
unsigned long mmu_notifier_seq;
long mmu_notifier_count;
+ long tlbs_dirty;
#endif
};

@@ -377,6 +378,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index fb93ff9..fe0a1a7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -168,8 +168,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
+ long dirty_count = kvm->tlbs_dirty;
+
+ smp_mb();
if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.remote_tlb_flush;
+ cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

void kvm_reload_remote_mmus(struct kvm *kvm)
@@ -249,7 +253,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
- need_tlb_flush = kvm_unmap_hva(kvm, address);
+ need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);

@@ -293,6 +297,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
kvm->mmu_notifier_count++;
for (; start < end; start += PAGE_SIZE)
need_tlb_flush |= kvm_unmap_hva(kvm, start);
+ need_tlb_flush |= kvm->tlbs_dirty;
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);

--
1.7.0.4
