    Subject: [PATCH v12 5/7] arm64: mte: Enable async tag check fault
    From: Vincenzo Frascino <vincenzo.frascino@arm.com>
    Date: 8 Feb 2021

    MTE provides a mode that asynchronously updates the TFSR_EL1 register
    when a tag check fault is detected.

    To take advantage of this mode the kernel has to verify the status of
    the register at:
    1. Context switch
    2. Return to user/EL0 (a check on entry from EL0 is not required,
    since the kernel has not run in between)
    3. Kernel entry from EL1
    4. Kernel exit to EL1

    If the register is non-zero, the fault is reported.
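
    In sketch form, every one of these check points reduces to the same
    read-test-clear sequence (all names are the ones used in the diff
    below; the real helper, mte_check_tfsr_el1(), also has to get the
    barriers right, see the note further down):

        u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);

        if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
                /* TF1 is sticky: clear it before reporting */
                write_sysreg_s(0, SYS_TFSR_EL1);
                kasan_report_async();
        }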

    Add the detection and reporting support required for tag check
    faults taken at EL1.

    Note: the ITFSB bit is set in the SCTLR_EL1 register, which guarantees
    that the indirect writes to TFSR_EL1 are synchronized at exception
    entry to EL1. On the context switch path the synchronization is
    guaranteed by the dsb() in __switch_to().
    The dsb(nsh) in mte_check_tfsr_exit() is provisional, pending
    confirmation by the architects.
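
    For reference, the synchronization obligation on each path (all of it
    is visible in the hunks below; the dsb() in __switch_to() predates
    this patch):

        kernel entry (EL1):  implicit, SCTLR_EL1.ITFSB synchronizes the
                             indirect writes at exception entry
        kernel exit (EL1):   explicit dsb(nsh) + isb() in
                             mte_check_tfsr_exit()
        context switch:      the dsb() already in __switch_to(), plus an
                             isb() in mte_thread_switch() before the check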

    Cc: Catalin Marinas <catalin.marinas@arm.com>
    Cc: Will Deacon <will@kernel.org>
    Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
    Acked-by: Andrey Konovalov <andreyknvl@google.com>
    Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
    ---
    arch/arm64/include/asm/mte.h     | 32 ++++++++++++++++++++++++++++
    arch/arm64/kernel/entry-common.c |  6 ++++++
    arch/arm64/kernel/mte.c          | 36 ++++++++++++++++++++++++++++++++
    3 files changed, 74 insertions(+)

    diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
    index d02aff9f493d..237bb2f7309d 100644
    --- a/arch/arm64/include/asm/mte.h
    +++ b/arch/arm64/include/asm/mte.h
    @@ -92,5 +92,37 @@ static inline void mte_assign_mem_tag_range(void *addr, size_t size)

     #endif /* CONFIG_ARM64_MTE */

    +#ifdef CONFIG_KASAN_HW_TAGS
    +void mte_check_tfsr_el1(void);
    +
    +static inline void mte_check_tfsr_entry(void)
    +{
    +        mte_check_tfsr_el1();
    +}
    +
    +static inline void mte_check_tfsr_exit(void)
    +{
    +        /*
    +         * The asynchronous faults are sync'ed automatically with
    +         * TFSR_EL1 on kernel entry but for exit an explicit dsb()
    +         * is required.
    +         */
    +        dsb(nsh);
    +        isb();
    +
    +        mte_check_tfsr_el1();
    +}
    +#else
    +static inline void mte_check_tfsr_el1(void)
    +{
    +}
    +static inline void mte_check_tfsr_entry(void)
    +{
    +}
    +static inline void mte_check_tfsr_exit(void)
    +{
    +}
    +#endif /* CONFIG_KASAN_HW_TAGS */
    +
     #endif /* __ASSEMBLY__ */
     #endif /* __ASM_MTE_H */
    diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
    index 5346953e4382..31666511ba67 100644
    --- a/arch/arm64/kernel/entry-common.c
    +++ b/arch/arm64/kernel/entry-common.c
    @@ -37,6 +37,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
             lockdep_hardirqs_off(CALLER_ADDR0);
             rcu_irq_enter_check_tick();
             trace_hardirqs_off_finish();
    +
    +        mte_check_tfsr_entry();
     }

    /*
    @@ -47,6 +49,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
     {
             lockdep_assert_irqs_disabled();

    +        mte_check_tfsr_exit();
    +
             if (interrupts_enabled(regs)) {
                     if (regs->exit_rcu) {
                             trace_hardirqs_on_prepare();
    @@ -243,6 +247,8 @@ asmlinkage void noinstr enter_from_user_mode(void)

     asmlinkage void noinstr exit_to_user_mode(void)
     {
    +        mte_check_tfsr_exit();
    +
             trace_hardirqs_on_prepare();
             lockdep_hardirqs_on_prepare(CALLER_ADDR0);
             user_enter_irqoff();
    diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
    index 60531afc706e..3332aabda466 100644
    --- a/arch/arm64/kernel/mte.c
    +++ b/arch/arm64/kernel/mte.c
    @@ -192,6 +192,29 @@ bool mte_report_once(void)
             return READ_ONCE(report_fault_once);
     }

    +#ifdef CONFIG_KASAN_HW_TAGS
    +void mte_check_tfsr_el1(void)
    +{
    +        u64 tfsr_el1;
    +
    +        if (!system_supports_mte())
    +                return;
    +
    +        tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
    +
    +        if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
    +                /*
    +                 * Note: isb() is not required after this direct write
    +                 * because there is no indirect read subsequent to it
    +                 * (per ARM DDI 0487F.c table D13-1).
    +                 */
    +                write_sysreg_s(0, SYS_TFSR_EL1);
    +
    +                kasan_report_async();
    +        }
    +}
    +#endif
    +
     static void update_sctlr_el1_tcf0(u64 tcf0)
     {
             /* ISB required for the kernel uaccess routines */
    @@ -257,6 +280,18 @@ void mte_thread_switch(struct task_struct *next)
             /* avoid expensive SCTLR_EL1 accesses if no change */
             if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
                     update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
    +        else
    +                isb();
    +
    +        /*
    +         * Check if an async tag exception occurred at EL1.
    +         *
    +         * Note: On the context switch path we rely on the dsb() present
    +         * in __switch_to() to guarantee that the indirect writes to
    +         * TFSR_EL1 are synchronized before this point. The isb() above
    +         * is required for the same reason.
    +         */
    +        mte_check_tfsr_el1();
     }

    void mte_suspend_exit(void)
    --
    2.30.0