    Re: [PATCH v3 5/6] KVM: x86: hyperv: simplistic HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}_EX implementation
    2018-04-16 13:08+0200, Vitaly Kuznetsov:
    > Implement HvFlushVirtualAddress{List,Space}Ex hypercalls in a simplistic
    > way: do full TLB flush with KVM_REQ_TLB_FLUSH and kick vCPUs which are
    > currently IN_GUEST_MODE.
    >
    > Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
    > ---
    > diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
    > @@ -1301,6 +1301,108 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
    > ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
    > }
    >
    > +static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
    > +{
    > + int i = 0, j;
    > +
    > + if (!(valid_bank_mask & BIT_ULL(bank_no)))
    > + return -1;
    > +
    > + for (j = 0; j < bank_no; j++)
    > + if (valid_bank_mask & BIT_ULL(j))
    > + i++;
    > +
    > + return i;
    > +}
    > +
    > +static __always_inline int load_bank_guest(struct kvm *kvm, u64 ingpa,
    > + int sparse_bank, u64 *bank_contents)
    > +{
    > + int offset;
    > +
    > + offset = offsetof(struct hv_tlb_flush_ex, hv_vp_set.bank_contents) +
    > + sizeof(u64) * sparse_bank;
    > +
    > + if (unlikely(kvm_read_guest(kvm, ingpa + offset,
    > + bank_contents, sizeof(u64))))
    > + return 1;
    > +
    > + return 0;
    > +}
    > +
    > +static int kvm_hv_flush_tlb_ex(struct kvm_vcpu *current_vcpu, u64 ingpa,
    > + u16 rep_cnt)
    > +{
    > + struct kvm *kvm = current_vcpu->kvm;
    > + struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
    > + struct hv_tlb_flush_ex flush;
    > + struct kvm_vcpu *vcpu;
    > + u64 bank_contents, valid_bank_mask;
    > + int i, cpu, me, current_sparse_bank = -1;
    > + u64 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
    > +
    > + if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
    > + return ret;
    > +
    > + valid_bank_mask = flush.hv_vp_set.valid_bank_mask;
    > +
    > + trace_kvm_hv_flush_tlb_ex(valid_bank_mask, flush.hv_vp_set.format,
    > + flush.address_space, flush.flags);
    > +
    > + cpumask_clear(&hv_current->tlb_lush);
    > +
    > + me = get_cpu();
    > +
    > + kvm_for_each_vcpu(i, vcpu, kvm) {
    > + struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
    > + int bank = hv->vp_index / 64, sparse_bank;
    > +
    > + if (flush.hv_vp_set.format == HV_GENERIC_SET_SPARCE_4K) {
    ^
    typo in the define

    > + /* Check is the bank of this vCPU is in sparse set */
    > + sparse_bank = get_sparse_bank_no(valid_bank_mask, bank);
    > + if (sparse_bank < 0)
    > + continue;
    > +
    > + /*
    > + * Assume hv->vp_index is in ascending order and we can
    > + * optimize by not reloading bank contents for every
    > + * vCPU.
    > + */

    Since sparse_bank is packed, we could compute how many bank_contents we
    need to load and do it with one kvm_read_guest() into a local array; it
    would be faster even if hv->vp_index were in ascending order and
    wouldn't take that much memory (up to 512 B).
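
    A minimal sketch of that idea, reusing the kvm, ingpa, valid_bank_mask
    and ret variables from the quoted function; the 64-entry local array and
    the hweight64() count are illustrative, not part of the patch:

	/* Illustrative only: read every valid bank with one guest access. */
	u64 sparse_banks[64];	/* one u64 per possible bank, 512 B max */
	int num_banks = hweight64(valid_bank_mask);
	int off = offsetof(struct hv_tlb_flush_ex, hv_vp_set.bank_contents);

	if (unlikely(kvm_read_guest(kvm, ingpa + off, sparse_banks,
				    num_banks * sizeof(u64))))
		return ret;

	/* The contents for sparse bank n are then just sparse_banks[n]. */

    This works because the guest packs the valid banks contiguously at the
    start of bank_contents[], so a single contiguous read covers them all.
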

    > + if (sparse_bank != current_sparse_bank) {
    > + if (load_bank_guest(kvm, ingpa, sparse_bank,
    > + &bank_contents))
    > + return ret;
    > + current_sparse_bank = sparse_bank;
    > + }
