    Subject: [PATCH v7 08/25] arm64: Make PMR part of task context

    In order to replace PSR.I interrupt disabling/enabling with ICC_PMR_EL1
    interrupt masking, ICC_PMR_EL1 needs to be saved/restored when
    taking/returning from an exception. This mimics the way the hardware
    saves and restores the PSR.I bit in spsr_el1 for exceptions and ERET.
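
    For illustration, a minimal C sketch of the difference between the two
    masking schemes (a sketch only, assuming the GIC_PRIO_* values and the
    sysreg accessors used by this series):

        /* PSR.I masking: the CPU ignores all IRQs until the bit is cleared. */
        static inline void irqs_off_psr(void)
        {
                asm volatile("msr daifset, #2" : : : "memory");
        }

        /* PMR masking: lower the GIC priority ceiling so normal interrupts
         * are withheld, while PSR.I stays clear and higher-priority
         * (pseudo-NMI) interrupts can still be taken. */
        static inline void irqs_off_pmr(void)
        {
                write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
        }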

    Add PMR to the set of registers saved in the pt_regs struct upon kernel
    entry, and restore it before ERET. Also, initialize it to a sane value
    when creating new tasks.
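
    For reference, the save/restore pairing in C-like form (a sketch of
    what the kernel_entry/kernel_exit changes below do, not the actual
    assembly):

        /* Exception entry: snapshot the interrupted context's PMR. */
        regs->pmr_save = read_sysreg_s(SYS_ICC_PMR_EL1);

        /* Before ERET: restore it, and make sure the redistributor sees
         * the new priority mask before returning. */
        write_sysreg_s(regs->pmr_save, SYS_ICC_PMR_EL1);
        dsb(sy);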

    Signed-off-by: Julien Thierry <julien.thierry@arm.com>
    Cc: Catalin Marinas <catalin.marinas@arm.com>
    Cc: Will Deacon <will.deacon@arm.com>
    Cc: Oleg Nesterov <oleg@redhat.com>
    Cc: Dave Martin <Dave.Martin@arm.com>
    ---
    arch/arm64/include/asm/processor.h |  3 +++
    arch/arm64/include/asm/ptrace.h    | 14 +++++++++++---
    arch/arm64/kernel/asm-offsets.c    |  1 +
    arch/arm64/kernel/entry.S          | 14 ++++++++++++++
    arch/arm64/kernel/process.c        |  6 ++++++
    5 files changed, 35 insertions(+), 3 deletions(-)

    diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
    index 6b0d4df..27c5618 100644
    --- a/arch/arm64/include/asm/processor.h
    +++ b/arch/arm64/include/asm/processor.h
    @@ -168,6 +168,9 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
    memset(regs, 0, sizeof(*regs));
    forget_syscall(regs);
    regs->pc = pc;
    +
    + if (system_uses_irq_prio_masking())
    + regs->pmr_save = GIC_PRIO_IRQON;
    }

    static inline void start_thread(struct pt_regs *regs, unsigned long pc,
    diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
    index 05cf913..43e5df5 100644
    --- a/arch/arm64/include/asm/ptrace.h
    +++ b/arch/arm64/include/asm/ptrace.h
    @@ -19,6 +19,8 @@
    #ifndef __ASM_PTRACE_H
    #define __ASM_PTRACE_H

    +#include <asm/cpufeature.h>
    +
    #include <uapi/asm/ptrace.h>

    /* Current Exception Level values, as contained in CurrentEL */
    @@ -179,7 +181,8 @@ struct pt_regs {
    #endif

    u64 orig_addr_limit;
    - u64 unused; // maintain 16 byte alignment
    + /* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
    + u64 pmr_save;
    u64 stackframe[2];
    };

    @@ -214,8 +217,13 @@ static inline void forget_syscall(struct pt_regs *regs)
    #define processor_mode(regs) \
    ((regs)->pstate & PSR_MODE_MASK)

    -#define interrupts_enabled(regs) \
    - (!((regs)->pstate & PSR_I_BIT))
    +#define irqs_priority_unmasked(regs) \
    + (system_uses_irq_prio_masking() ? \
    + (regs)->pmr_save == GIC_PRIO_IRQON : \
    + true)
    +
    +#define interrupts_enabled(regs) \
    + (!((regs)->pstate & PSR_I_BIT) && irqs_priority_unmasked(regs))

    #define fast_interrupts_enabled(regs) \
    (!((regs)->pstate & PSR_F_BIT))
    diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
    index 323aeb5..bab4122 100644
    --- a/arch/arm64/kernel/asm-offsets.c
    +++ b/arch/arm64/kernel/asm-offsets.c
    @@ -78,6 +78,7 @@ int main(void)
    DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
    DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
    DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
    + DEFINE(S_PMR_SAVE, offsetof(struct pt_regs, pmr_save));
    DEFINE(S_STACKFRAME, offsetof(struct pt_regs, stackframe));
    DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
    BLANK();
    diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
    index 039144e..2804c81 100644
    --- a/arch/arm64/kernel/entry.S
    +++ b/arch/arm64/kernel/entry.S
    @@ -249,6 +249,12 @@ alternative_else_nop_endif
    msr sp_el0, tsk
    .endif

    + /* Save pmr */
    +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
    + mrs_s x20, SYS_ICC_PMR_EL1
    + str x20, [sp, #S_PMR_SAVE]
    +alternative_else_nop_endif
    +
    /*
    * Registers that may be useful after this macro is invoked:
    *
    @@ -269,6 +275,14 @@ alternative_else_nop_endif
    /* No need to restore UAO, it will be restored from SPSR_EL1 */
    .endif

    + /* Restore pmr */
    +alternative_if ARM64_HAS_IRQ_PRIO_MASKING
    + ldr x20, [sp, #S_PMR_SAVE]
    + msr_s SYS_ICC_PMR_EL1, x20
    + /* Ensure priority change is seen by redistributor */
    + dsb sy
    +alternative_else_nop_endif
    +
    ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
    .if \el == 0
    ct_user_enter
    diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
    index d9a4c2d..34f495b 100644
    --- a/arch/arm64/kernel/process.c
    +++ b/arch/arm64/kernel/process.c
    @@ -231,6 +231,9 @@ void __show_regs(struct pt_regs *regs)

    printk("sp : %016llx\n", sp);

    + if (system_uses_irq_prio_masking())
    + printk("pmr_save: %08llx\n", regs->pmr_save);
    +
    i = top_reg;

    while (i >= 0) {
    @@ -362,6 +365,9 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
    if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
    childregs->pstate |= PSR_SSBS_BIT;

    + if (system_uses_irq_prio_masking())
    + childregs->pmr_save = GIC_PRIO_IRQON;
    +
    p->thread.cpu_context.x19 = stack_start;
    p->thread.cpu_context.x20 = stk_sz;
    }
    --
    1.9.1