    Subject: [RFC PATCH 6/6] kvm: arm64: Remove hyp_symbol_addr
    The helper was used to force PC-relative addressing in hyp code because
    absolute addressing via constant pools used to generate kernel VAs. This
    was cumbersome and required programmers to remember to use the helper
    whenever they wanted to take the address of a symbol.
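
    Concretely, the helper (removed from kvm_asm.h below) expanded to an
    adrp/add pair, roughly:

        /* hyp_symbol_addr(sym) emitted, with x0 standing for whichever
         * register the compiler picked: */
        adrp  x0, sym
        add   x0, x0, :lo12:sym

    so the address was always computed relative to the PC rather than
    loaded as an absolute VA from a literal pool.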

    Now that hyp relocations are fixed up, there is no need for the helper
    any longer. Remove it.
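
    In caller terms (see the switch.h hunk below), code that used to write

        entry = hyp_symbol_addr(__start___kvm_ex_table);

    can now simply take the address of the symbol:

        entry = &__start___kvm_ex_table;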

    Signed-off-by: David Brazdil <dbrazdil@google.com>
    ---
    arch/arm64/include/asm/kvm_asm.h         | 20 --------------------
    arch/arm64/kvm/hyp/include/hyp/switch.h  |  4 ++--
    arch/arm64/kvm/hyp/nvhe/hyp-smp.c        |  4 ++--
    arch/arm64/kvm/hyp/nvhe/psci-relay.c     |  4 ++--
    arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c |  2 +-
    5 files changed, 7 insertions(+), 27 deletions(-)

    diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
    index 1a86581e581e..1961d23c0c40 100644
    --- a/arch/arm64/include/asm/kvm_asm.h
    +++ b/arch/arm64/include/asm/kvm_asm.h
    @@ -203,26 +203,6 @@ extern void __vgic_v3_init_lrs(void);

    extern u32 __kvm_get_mdcr_el2(void);

    -/*
    - * Obtain the PC-relative address of a kernel symbol
    - * s: symbol
    - *
    - * The goal of this macro is to return a symbol's address based on a
    - * PC-relative computation, as opposed to a loading the VA from a
    - * constant pool or something similar. This works well for HYP, as an
    - * absolute VA is guaranteed to be wrong. Only use this if trying to
    - * obtain the address of a symbol (i.e. not something you obtained by
    - * following a pointer).
    - */
    -#define hyp_symbol_addr(s) \
    - ({ \
    - typeof(s) *addr; \
    - asm("adrp %0, %1\n" \
    - "add %0, %0, :lo12:%1\n" \
    - : "=r" (addr) : "S" (&s)); \
    - addr; \
    - })
    -
    #define __KVM_EXTABLE(from, to) \
    " .pushsection __kvm_ex_table, \"a\"\n" \
    " .align 3\n" \
    diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
    index 84473574c2e7..54f4860cd87c 100644
    --- a/arch/arm64/kvm/hyp/include/hyp/switch.h
    +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
    @@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
    struct exception_table_entry *entry, *end;
    unsigned long elr_el2 = read_sysreg(elr_el2);

    - entry = hyp_symbol_addr(__start___kvm_ex_table);
    - end = hyp_symbol_addr(__stop___kvm_ex_table);
    + entry = &__start___kvm_ex_table;
    + end = &__stop___kvm_ex_table;

    while (entry < end) {
    addr = (unsigned long)&entry->insn + entry->insn;
    diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
    index ceb427aabb91..6870d9f3d4b7 100644
    --- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
    +++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
    @@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
    if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
    hyp_panic();

    - cpu_base_array = (unsigned long*)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
    + cpu_base_array = (unsigned long*)(&kvm_arm_hyp_percpu_base[0]);
    this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
    - elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
    + elf_base = (unsigned long)&__per_cpu_start;
    return this_cpu_base - elf_base;
    }
    diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
    index 313ef42f0eab..f64380a49a72 100644
    --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
    +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
    @@ -147,7 +147,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
    * point if it is a deep sleep state.
    */
    ret = psci_call(func_id, power_state,
    - __hyp_pa(hyp_symbol_addr(__kvm_hyp_cpu_entry)),
    + __hyp_pa(__kvm_hyp_cpu_entry),
    __hyp_pa(cpu_params));

    release_reset_state(cpu_state);
    @@ -182,7 +182,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
    return PSCI_RET_ALREADY_ON;

    ret = psci_call(func_id, mpidr,
    - __hyp_pa(hyp_symbol_addr(__kvm_hyp_cpu_entry)),
    + __hyp_pa(__kvm_hyp_cpu_entry),
    __hyp_pa(cpu_params));

    /*
    diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
    index 8f0585640241..87a54375bd6e 100644
    --- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
    +++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
    @@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
    }

    rd = kvm_vcpu_dabt_get_rd(vcpu);
    - addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
    + addr = kvm_vgic_global_state.vcpu_hyp_va;
    addr += fault_ipa - vgic->vgic_cpu_base;

    if (kvm_vcpu_dabt_iswrite(vcpu)) {
    --
    2.29.2.299.gdc1121823c-goog