    Subject: [RFC KVM 08/27] KVM: x86: Optimize branches which check if address space isolation is enabled
    From: Liran Alon <liran.alon@oracle.com>

    As every entry to the guest checks whether it should switch from host_mm
    to kvm_mm, these branches are on a very hot path. Optimize them by using
    a static branch.
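
    For context, a minimal sketch of the jump-label static-key pattern this
    relies on (foo_enabled, foo_active, foo_init and foo_uninit are
    illustrative names, not part of this patch): with the key declared
    false, static_branch_likely() is resolved by runtime code patching when
    the key is flipped, so the disabled case does not pay a load-and-compare
    on the hot path.

    #include <linux/jump_label.h>

    DEFINE_STATIC_KEY_FALSE(foo_enabled);

    static inline bool foo_active(void)
    {
            /* Resolved by code patching, not a memory read plus branch. */
            return static_branch_likely(&foo_enabled);
    }

    static int __init foo_init(void)
    {
            /* Flipping the key is the slow path; do it once at init. */
            static_branch_enable(&foo_enabled);
            return 0;
    }

    static void foo_uninit(void)
    {
            static_branch_disable(&foo_enabled);
    }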

    Signed-off-by: Liran Alon <liran.alon@oracle.com>
    Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
    ---
    arch/x86/kvm/isolation.c | 11 ++++++++---
    arch/x86/kvm/isolation.h | 7 +++++++
    2 files changed, 15 insertions(+), 3 deletions(-)

    diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
    index eeb60c4..43fd924 100644
    --- a/arch/x86/kvm/isolation.c
    +++ b/arch/x86/kvm/isolation.c
    @@ -23,6 +23,9 @@ struct mm_struct kvm_mm = {
    .mmlist = LIST_HEAD_INIT(kvm_mm.mmlist),
    };

    +DEFINE_STATIC_KEY_FALSE(kvm_isolation_enabled);
    +EXPORT_SYMBOL(kvm_isolation_enabled);
    +
    /*
    * When set to true, KVM #VMExit handlers run in isolated address space
    * which maps only KVM required code and per-VM information instead of
    @@ -118,15 +121,17 @@ int kvm_isolation_init(void)

    kvm_isolation_set_handlers();
    pr_info("KVM: x86: Running with isolated address space\n");
    + static_branch_enable(&kvm_isolation_enabled);

    return 0;
    }

    void kvm_isolation_uninit(void)
    {
    - if (!address_space_isolation)
    + if (!kvm_isolation())
    return;

    + static_branch_disable(&kvm_isolation_enabled);
    kvm_isolation_clear_handlers();
    kvm_isolation_uninit_mm();
    pr_info("KVM: x86: End of isolated address space\n");
    @@ -140,7 +145,7 @@ void kvm_may_access_sensitive_data(struct kvm_vcpu *vcpu)

    void kvm_isolation_enter(void)
    {
    - if (address_space_isolation) {
    + if (kvm_isolation()) {
    /*
    * Switches to kvm_mm should happen from vCPU thread,
    * which should not be a kernel thread with no mm
    @@ -152,7 +157,7 @@ void kvm_isolation_enter(void)

    void kvm_isolation_exit(void)
    {
    - if (address_space_isolation) {
    + if (kvm_isolation()) {
    /* TODO: Kick sibling hyperthread before switch to host mm */
    /* TODO: switch back to original mm */
    }
    diff --git a/arch/x86/kvm/isolation.h b/arch/x86/kvm/isolation.h
    index 1290d32..aa5e979 100644
    --- a/arch/x86/kvm/isolation.h
    +++ b/arch/x86/kvm/isolation.h
    @@ -4,6 +4,13 @@

    #include <linux/kvm_host.h>

    +DECLARE_STATIC_KEY_FALSE(kvm_isolation_enabled);
    +
    +static inline bool kvm_isolation(void)
    +{
    + return static_branch_likely(&kvm_isolation_enabled);
    +}
    +
    extern int kvm_isolation_init(void);
    extern void kvm_isolation_uninit(void);
    extern void kvm_isolation_enter(void);
    --
    1.7.1