Subject: Re: [PATCH 08/11] arm64: KVM: Use per-CPU vector when BP hardening is enabled
On 4 January 2018 at 15:08, Will Deacon <will.deacon@arm.com> wrote:
> From: Marc Zyngier <marc.zyngier@arm.com>
>
> Now that we have per-CPU vectors, let's plug them into the KVM/arm64 code.
>

Why does bp hardening require per-cpu vectors?

> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> Signed-off-by: Will Deacon <will.deacon@arm.com>
> ---
>  arch/arm/include/asm/kvm_mmu.h   | 10 ++++++++++
>  arch/arm64/include/asm/kvm_mmu.h | 38 ++++++++++++++++++++++++++++++++++++++
>  arch/arm64/kvm/hyp/switch.c      |  2 +-
>  virt/kvm/arm/arm.c               |  8 +++++++-
>  4 files changed, 56 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
> index fa6f2174276b..eb46fc81a440 100644
> --- a/arch/arm/include/asm/kvm_mmu.h
> +++ b/arch/arm/include/asm/kvm_mmu.h
> @@ -221,6 +221,16 @@ static inline unsigned int kvm_get_vmid_bits(void)
>  	return 8;
>  }
>
> +static inline void *kvm_get_hyp_vector(void)
> +{
> +	return kvm_ksym_ref(__kvm_hyp_vector);
> +}
> +
> +static inline int kvm_map_vectors(void)
> +{
> +	return 0;
> +}
> +
>  #endif /* !__ASSEMBLY__ */
>
>  #endif /* __ARM_KVM_MMU_H__ */
> diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
> index 672c8684d5c2..2d6d4bd9de52 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -309,5 +309,43 @@ static inline unsigned int kvm_get_vmid_bits(void)
>  	return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
>  }
>
> +#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
> +#include <asm/mmu.h>
> +
> +static inline void *kvm_get_hyp_vector(void)
> +{
> +	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> +	void *vect = kvm_ksym_ref(__kvm_hyp_vector);
> +
> +	if (data->fn) {
> +		vect = __bp_harden_hyp_vecs_start +
> +		       data->hyp_vectors_slot * SZ_2K;
> +
> +		if (!has_vhe())
> +			vect = lm_alias(vect);
> +	}
> +
> +	return vect;
> +}
> +
> +static inline int kvm_map_vectors(void)
> +{
> +	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
> +				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
> +				   PAGE_HYP_EXEC);
> +}
> +
> +#else
> +static inline void *kvm_get_hyp_vector(void)
> +{
> +	return kvm_ksym_ref(__kvm_hyp_vector);
> +}
> +
> +static inline int kvm_map_vectors(void)
> +{
> +	return 0;
> +}
> +#endif
> +
>  #endif /* __ASSEMBLY__ */
>  #endif /* __ARM64_KVM_MMU_H__ */
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index f7c651f3a8c0..8d4f3c9d6dc4 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -52,7 +52,7 @@ static void __hyp_text __activate_traps_vhe(void)
>  	val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
>  	write_sysreg(val, cpacr_el1);
>
> -	write_sysreg(__kvm_hyp_vector, vbar_el1);
> +	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
>  }
>
>  static void __hyp_text __activate_traps_nvhe(void)
> diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
> index 6b60c98a6e22..1c9fdb6db124 100644
> --- a/virt/kvm/arm/arm.c
> +++ b/virt/kvm/arm/arm.c
> @@ -1158,7 +1158,7 @@ static void cpu_init_hyp_mode(void *dummy)
>  	pgd_ptr = kvm_mmu_get_httbr();
>  	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
>  	hyp_stack_ptr = stack_page + PAGE_SIZE;
> -	vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
> +	vector_ptr = (unsigned long)kvm_get_hyp_vector();
>
>  	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
>  	__cpu_init_stage2();
> @@ -1403,6 +1403,12 @@ static int init_hyp_mode(void)
>  		goto out_err;
>  	}
>
> +	err = kvm_map_vectors();
> +	if (err) {
> +		kvm_err("Cannot map vectors\n");
> +		goto out_err;
> +	}
> +
>  	/*
>  	 * Map the Hyp stack pages
>  	 */
> --
> 2.1.4
>
