From: Paolo Bonzini
Date: 2015-05-07
Subject: Re: [PATCH 2/9] KVM: MMU: introduce slot_handle_level() and its helper


On 30/04/2015 12:24, guangrong.xiao@linux.intel.com wrote:
> From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
>
> There are several places that walk all rmaps for the memslot, so
> introduce common functions to clean up the code
>
> Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
> ---
> arch/x86/kvm/mmu.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 63 insertions(+)
>
> diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
> index ea3e3e4..75a3459 100644
> --- a/arch/x86/kvm/mmu.c
> +++ b/arch/x86/kvm/mmu.c
> @@ -4410,6 +4410,69 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
> 	init_kvm_mmu(vcpu);
> }
>
> +/* The return value indicates if tlb flush on all vcpus is needed. */
> +typedef bool (*slot_level_handler) (struct kvm *kvm, unsigned long *rmap);
> +
> +/* The caller should hold mmu-lock before calling this function. */
> +static bool
> +slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
> +		  slot_level_handler fn, int min_level, int max_level,
> +		  bool lock_flush_tlb)

Why not introduce for_each_slot_rmap first, instead of introducing one
implementation first and then switching to another? It's a small
change to reorder the patches like that. I think we should have three
iterator macros:

#define for_each_rmap_spte(rmap, iter, spte)

#define for_each_slot_rmap(slot, min_level, max_level, iter, rmapp)

#define for_each_slot_rmap_range(slot, min_level, max_level, \
				 start_gfn, end_gfn, iter, rmapp)

where the last two take care of initializing the walker/iterator in the
first part of the "for".
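For concreteness, here is one possible shape for the walker and the
"for" (just a sketch: the struct layout and the
slot_rmap_walk_init/okay/next helper names are made up for
illustration, not something already in the tree):

	/*
	 * Sketch only: visit every rmap of "slot" from min_level up to
	 * max_level, making the current entry available as "rmapp".
	 */
	struct slot_rmap_iterator {
		struct kvm_memory_slot *slot;
		int level;			/* level being walked */
		int max_level;			/* last level to visit */
		unsigned long *rmapp;		/* current rmap entry */
		unsigned long *end_rmapp;	/* one past the last entry */
	};

	#define for_each_slot_rmap(slot, min_level, max_level, iter, rmapp) \
		for (slot_rmap_walk_init(iter, slot, min_level, max_level);  \
		     slot_rmap_walk_okay(iter) &&			     \
			((rmapp) = (iter)->rmapp, true);		     \
		     slot_rmap_walk_next(iter))

Here slot_rmap_walk_init would point rmapp at the first rmap of
min_level, slot_rmap_walk_okay would return false once the walk has
gone past max_level, and slot_rmap_walk_next would advance rmapp and
move to the next level when it reaches end_rmapp.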

This way, this function would be introduced immediately as this very
readable code:

	struct slot_rmap_iterator iter;
	unsigned long *rmapp;
	bool flush = false;

	for_each_slot_rmap(memslot, min_level, max_level, &iter, rmapp) {
		if (*rmapp)
			flush |= fn(kvm, rmapp);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
				kvm_flush_remote_tlbs(kvm);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	/*
	 * What about adding this here: then callers that pass
	 * lock_flush_tlb == true need not care about the return
	 * value!
	 */
	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs(kvm);
		flush = false;
	}

	return flush;

In addition, I think some of these functions need to be marked
always_inline: either slot_handle_level/slot_handle_*_level, or the
iterators/walkers. Can you collect the kvm.ko size for both cases?
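To be clear about what I mean, something along these lines (sketch
only, body elided):

	/*
	 * Forcing the common walker inline gives each
	 * slot_handle_*_level() wrapper its own specialized copy; if
	 * the wrappers are in turn inlined into callers that pass a
	 * fixed handler, the indirect call through "fn" can become a
	 * direct call.  The cost is duplicated code, hence the
	 * request for kvm.ko sizes with and without the attribute.
	 */
	static __always_inline bool
	slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			  slot_level_handler fn, int min_level, int max_level,
			  bool lock_flush_tlb)
	{
		/* body unchanged from the patch */
	}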

Thanks,

Paolo

> +{
> +	unsigned long last_gfn;
> +	bool flush = false;
> +	int level;
> +
> +	last_gfn = memslot->base_gfn + memslot->npages - 1;
> +
> +	for (level = min_level; level <= max_level; ++level) {
> +		unsigned long *rmapp;
> +		unsigned long last_index, index;
> +
> +		rmapp = memslot->arch.rmap[level - PT_PAGE_TABLE_LEVEL];
> +		last_index = gfn_to_index(last_gfn, memslot->base_gfn, level);
> +
> +		for (index = 0; index <= last_index; ++index, ++rmapp) {
> +			if (*rmapp)
> +				flush |= fn(kvm, rmapp);
> +
> +			if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
> +				if (flush && lock_flush_tlb) {
> +					kvm_flush_remote_tlbs(kvm);
> +					flush = false;
> +				}
> +				cond_resched_lock(&kvm->mmu_lock);
> +			}
> +		}
> +	}
> +
> +	return flush;
> +}
> +
> +static bool
> +slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
> +		      slot_level_handler fn, bool lock_flush_tlb)
> +{
> +	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
> +			PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1, lock_flush_tlb);
> +}
> +
> +static bool
> +slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
> +			slot_level_handler fn, bool lock_flush_tlb)
> +{
> +	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
> +			PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1, lock_flush_tlb);
> +}
> +
> +static bool
> +slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
> +		 slot_level_handler fn, bool lock_flush_tlb)
> +{
> +	return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
> +			PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
> +}
> +
>  void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
> 				      struct kvm_memory_slot *memslot)
>  {
>

