    From: Juergen Gross <jgross@suse.com>
    Subject: [PATCH v7 08/14] x86: add new features for paravirt patching
    Date: 2021-03-11
    In order to be able to switch paravirt patching from special-cased custom
    code sequences to ALTERNATIVE handling, some new X86_FEATURE_* flags are
    needed. This makes it possible to use the standard indirect pv call as the
    default code and to patch in the non-Xen custom code sequence via
    ALTERNATIVE patching later.

    Make sure paravirt patching is performed before alternative patching.
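
    To make the required ordering concrete, here is a small userspace toy
    model (plain C, not kernel code; all names such as set_caps() and
    patch_paravirt() are invented for illustration) of the three steps this
    patch establishes in alternative_instructions(): force the synthetic
    caps, apply paravirt patching (indirect call -> direct call), then apply
    alternative patching (direct call -> short inline sequence). Running the
    last two steps in the opposite order would overwrite the inlined
    sequence with a function call again.

    /* Toy model of the patching order -- userspace C, all names invented. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* "Synthetic CPU cap", forced when the pv op is not the native one. */
    static bool feat_vcpupreempt;

    /* Symbolic stand-in for the kernel text at one pv call site. */
    static char site[64] = "call *pv_ops.lock.vcpu_is_preempted";

    /* Step 1: paravirt_set_cap() -- force caps for non-native pv ops. */
    static void set_caps(bool native_vcpu_is_preempted)
    {
            feat_vcpupreempt = !native_vcpu_is_preempted;
    }

    /* Step 2: apply_paravirt() -- the indirect call becomes a direct call. */
    static void patch_paravirt(void)
    {
            strcpy(site, "call native_vcpu_is_preempted");
    }

    /* Step 3: apply_alternatives() -- when the cap is NOT set (native case),
     * the direct call is replaced by the trivial inline sequence. */
    static void patch_alternatives(void)
    {
            if (!feat_vcpupreempt)
                    strcpy(site, "xor %eax, %eax        /* return false */");
    }

    int main(void)
    {
            set_caps(true);                 /* bare metal: native op in use */
            patch_paravirt();
            patch_alternatives();
            printf("patched call site: %s\n", site);

            /* Swapping steps 2 and 3 would end with "call native_..." here,
             * i.e. the inline sequence would be clobbered by a call again. */
            return 0;
    }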

    Signed-off-by: Juergen Gross <jgross@suse.com>
    Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    ---
    V3:
    - add comment (Boris Petkov)
    - no negative features (Boris Petkov)
    V4:
    - move paravirt_set_cap() to paravirt-spinlocks.c
    ---
    arch/x86/include/asm/cpufeatures.h   |  2 ++
    arch/x86/include/asm/paravirt.h      | 10 ++++++++++
    arch/x86/kernel/alternative.c        | 30 ++++++++++++++++++++++++++--
    arch/x86/kernel/paravirt-spinlocks.c |  9 +++++++++
    4 files changed, 49 insertions(+), 2 deletions(-)

    diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
    index cc96e26d69f7..b440c950246d 100644
    --- a/arch/x86/include/asm/cpufeatures.h
    +++ b/arch/x86/include/asm/cpufeatures.h
    @@ -236,6 +236,8 @@
    #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
    #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
    #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
    +#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */
    +#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */

    /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
    #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
    diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
    index 6408fd0f55ab..def450f46097 100644
    --- a/arch/x86/include/asm/paravirt.h
    +++ b/arch/x86/include/asm/paravirt.h
    @@ -45,6 +45,10 @@ static inline u64 paravirt_steal_clock(int cpu)
    return static_call(pv_steal_clock)(cpu);
    }

    +#ifdef CONFIG_PARAVIRT_SPINLOCKS
    +void __init paravirt_set_cap(void);
    +#endif
    +
    /* The paravirtualized I/O functions */
    static inline void slow_down_io(void)
    {
    @@ -809,5 +813,11 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
    {
    }
    #endif
    +
    +#ifndef CONFIG_PARAVIRT_SPINLOCKS
    +static inline void paravirt_set_cap(void)
    +{
    +}
    +#endif
    #endif /* __ASSEMBLY__ */
    #endif /* _ASM_X86_PARAVIRT_H */
    diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
    index 133b549dc091..76ad4ce454c0 100644
    --- a/arch/x86/kernel/alternative.c
    +++ b/arch/x86/kernel/alternative.c
    @@ -28,6 +28,7 @@
    #include <asm/insn.h>
    #include <asm/io.h>
    #include <asm/fixmap.h>
    +#include <asm/paravirt.h>

    int __read_mostly alternatives_patched;

    @@ -733,6 +734,33 @@ void __init alternative_instructions(void)
    * patching.
    */

    + /*
    + * Paravirt patching and alternative patching can be combined to
    + * replace a function call with a short direct code sequence (e.g.
    + * by setting a constant return value instead of doing that in an
    + * external function).
    + * In order to make this work the following sequence is required:
    + * 1. set (artificial) features depending on used paravirt
    + * functions which can later influence alternative patching
    + * 2. apply paravirt patching (generally replacing an indirect
    + * function call with a direct one)
    + * 3. apply alternative patching (e.g. replacing a direct function
    + * call with a custom code sequence)
    + * Doing paravirt patching after alternative patching would clobber
    + * the optimization of the custom code with a function call again.
    + */
    + paravirt_set_cap();
    +
    + /*
    + * First patch paravirt functions, such that we overwrite the indirect
    + * call with the direct call.
    + */
    + apply_paravirt(__parainstructions, __parainstructions_end);
    +
    + /*
    + * Then patch alternatives, such that those paravirt calls that are in
    + * alternatives can be overwritten by their immediate fragments.
    + */
    apply_alternatives(__alt_instructions, __alt_instructions_end);

    #ifdef CONFIG_SMP
    @@ -751,8 +779,6 @@ void __init alternative_instructions(void)
    }
    #endif

    - apply_paravirt(__parainstructions, __parainstructions_end);
    -
    restart_nmi();
    alternatives_patched = 1;
    }
    diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
    index 4f75d0cf6305..9e1ea99ad9df 100644
    --- a/arch/x86/kernel/paravirt-spinlocks.c
    +++ b/arch/x86/kernel/paravirt-spinlocks.c
    @@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void)
    return pv_ops.lock.vcpu_is_preempted.func ==
    __raw_callee_save___native_vcpu_is_preempted;
    }
    +
    +void __init paravirt_set_cap(void)
    +{
    + if (!pv_is_native_spin_unlock())
    + setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
    +
    + if (!pv_is_native_vcpu_is_preempted())
    + setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
    +}
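
    The caps forced by the new paravirt_set_cap() above behave like any
    other CPU feature bit, so they can later influence alternative patching
    as described in the commit message. As a purely hypothetical
    illustration (not part of this patch), such a forced synthetic cap is
    visible to the ordinary feature-test helpers:

    /* Hypothetical example only: PVUNLOCK is forced by paravirt_set_cap()
     * when a non-native queued-spinlock unlock function is installed. */
    if (boot_cpu_has(X86_FEATURE_PVUNLOCK))
            pr_info("paravirt spinlock unlock in use\n");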
    --
    2.26.2