Subject: [PATCH 5.10 505/545] KVM: x86/pmu: Use binary search to check filtered events
From: Jim Mattson <jmattson@google.com>

[ Upstream commit 7ff775aca48adc854436b92c060e5eebfffb6a4a ]

The PMU event filter may contain up to 300 events. Replace the linear
search in reprogram_gp_counter() with a binary search.

Signed-off-by: Jim Mattson <jmattson@google.com>
Message-Id: <20220115052431.447232-2-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
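[Editor's illustration, not part of the patch: the standalone C program below
mirrors the sort-once, binary-search-per-lookup pattern this change adopts.
The helper name filter_contains and the sample event values are made up for
the example; KVM's real lookup key is eventsel & AMD64_RAW_EVENT_MASK_NB.
Unlike the patch's cmp_u64, the comparator here returns the sign explicitly,
since narrowing the difference of two unsigned 64-bit values to int can
misreport the ordering when the difference does not fit in 32 bits.]

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Truncation-safe comparator for u64 keys: returns -1, 0, or 1. */
static int cmp_u64(const void *a, const void *b)
{
	uint64_t x = *(const uint64_t *)a;
	uint64_t y = *(const uint64_t *)b;

	return (x > y) - (x < y);
}

/* Hypothetical helper: does the sorted filter contain this event key? */
static int filter_contains(const uint64_t *events, size_t nevents,
			   uint64_t key)
{
	return bsearch(&key, events, nevents, sizeof(*events), cmp_u64) != NULL;
}

int main(void)
{
	/* Stand-in for a filter's raw event list (up to 300 entries in KVM). */
	uint64_t events[] = { 0xc0, 0x3c, 0x2e, 0xc4, 0x10c5 };
	size_t n = sizeof(events) / sizeof(events[0]);

	/* Sort once when the filter is installed... */
	qsort(events, n, sizeof(events[0]), cmp_u64);

	/* ...then each counter reprogram is an O(log n) lookup. */
	printf("0x2e in filter? %d\n", filter_contains(events, n, 0x2e));
	printf("0xff in filter? %d\n", filter_contains(events, n, 0xff));
	return 0;
}

[At the 300-entry maximum this turns each lookup from up to 300 comparisons
into roughly log2(300), i.e. about nine.]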
 arch/x86/kvm/pmu.c | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 2f83b5d948b3..350e7cdaad02 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -13,6 +13,8 @@
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
 #include <asm/perf_event.h>
 #include "x86.h"
 #include "cpuid.h"
@@ -168,13 +170,17 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
 	return true;
 }
 
+static int cmp_u64(const void *a, const void *b)
+{
+	return *(__u64 *)a - *(__u64 *)b;
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
 	u64 config;
 	u32 type = PERF_TYPE_RAW;
 	struct kvm *kvm = pmc->vcpu->kvm;
 	struct kvm_pmu_event_filter *filter;
-	int i;
 	bool allow_event = true;
 
 	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -189,16 +195,13 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 
 	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
 	if (filter) {
-		for (i = 0; i < filter->nevents; i++)
-			if (filter->events[i] ==
-			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
-				break;
-		if (filter->action == KVM_PMU_EVENT_ALLOW &&
-		    i == filter->nevents)
-			allow_event = false;
-		if (filter->action == KVM_PMU_EVENT_DENY &&
-		    i < filter->nevents)
-			allow_event = false;
+		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
+
+		if (bsearch(&key, filter->events, filter->nevents,
+			    sizeof(__u64), cmp_u64))
+			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+		else
+			allow_event = filter->action == KVM_PMU_EVENT_DENY;
 	}
 	if (!allow_event)
 		return;
@@ -507,6 +510,11 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
 	/* Ensure nevents can't be changed between the user copies. */
 	*filter = tmp;
 
+	/*
+	 * Sort the in-kernel list so that we can search it with bsearch.
+	 */
+	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+
 	mutex_lock(&kvm->lock);
 	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
 				     mutex_is_locked(&kvm->lock));
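
[For context on the kvm_vm_ioctl_set_pmu_event_filter() hunk above: userspace
installs the filter with the KVM_SET_PMU_EVENT_FILTER vm ioctl. KVM copies the
list and, with this patch, sorts the in-kernel copy itself, so the VMM need
not submit it in any particular order. A minimal sketch follows; vm_fd is
assumed to come from KVM_CREATE_VM, the event selectors are illustrative, and
error handling is elided.]

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical helper: allow only the listed events to count on this VM. */
static int set_pmu_filter(int vm_fd)
{
	const __u64 events[] = { 0xc0, 0x3c };	/* illustrative raw selectors */
	struct kvm_pmu_event_filter *f;
	int ret;

	f = calloc(1, sizeof(*f) + sizeof(events));
	f->action = KVM_PMU_EVENT_ALLOW;	/* unlisted events are denied */
	f->nevents = sizeof(events) / sizeof(events[0]);
	memcpy(f->events, events, sizeof(events));

	/* KVM copies the filter and (after this patch) sorts its in-kernel
	 * copy, so the list may be passed in any order. */
	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
	free(f);
	return ret;
}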
--
2.35.1

