From:    Chris Metcalf <cmetcalf@mellanox.com>
Subject: [PATCH v12 07/13] task_isolation: add debug boot flag
Date:    2016-04-05
    The new "task_isolation_debug" flag simplifies debugging
    of TASK_ISOLATION kernels when processes are running in
    PR_TASK_ISOLATION_ENABLE mode. Such processes should get no
    interrupts from the kernel, and if they do, we notify either the
    process (if STRICT mode is set and the interrupt is not an NMI)
    or with a kernel stack dump on the console (otherwise).
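
As background, a process opts in to isolation via the prctl()
interface added earlier in this series.  The sketch below is
illustrative only: PR_SET_TASK_ISOLATION and the PR_TASK_ISOLATION_*
flags come from this (unmerged) patch series rather than mainline,
and the caller is assumed to already be affinitized to one of the
task_isolation= cores:

    /*
     * Minimal sketch, not part of this patch: the prctl and flags
     * below are defined by this patch series, not mainline kernels.
     */
    #include <stdio.h>
    #include <sys/prctl.h>

    static int request_isolation(void)
    {
            /* Request isolation plus STRICT-mode notification. */
            if (prctl(PR_SET_TASK_ISOLATION,
                      PR_TASK_ISOLATION_ENABLE | PR_TASK_ISOLATION_STRICT,
                      0, 0, 0) != 0) {
                    perror("prctl(PR_SET_TASK_ISOLATION)");
                    return -1;
            }
            return 0;
    }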

It's possible to use ftrace to simply detect whether a task_isolation
core has unexpectedly entered the kernel.  But what this boot flag
does is allow the kernel to provide better diagnostics, e.g. by
reporting in the IPI-generating code what remote core and context
is preparing to deliver an interrupt to a task_isolation core.
Additionally, delivering a signal to the process in STRICT mode
allows applications to report task isolation failures up into their
own application logging framework.
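
In application code that could look like the following sketch.  It
assumes the application has arranged for a catchable signal on
isolation violations (this series lets the application choose the
signal; SIGUSR1 below is purely an assumption for illustration):

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Hypothetical handler: which signal actually arrives depends on
     * how the application configured this series' prctl(); SIGUSR1
     * is only an assumption for this sketch.
     */
    static void isolation_lost(int sig)
    {
            static const char msg[] = "task isolation lost\n";

            /* write() is async-signal-safe; stdio is not. */
            if (write(STDERR_FILENO, msg, sizeof(msg) - 1) < 0) {
                    /* Nothing more we can safely do in a handler. */
            }
    }

    static void install_isolation_handler(void)
    {
            struct sigaction sa;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = isolation_lost;
            sigaction(SIGUSR1, &sa, NULL);
    }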

Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
---
 Documentation/kernel-parameters.txt    |  8 ++++
 include/linux/context_tracking_state.h |  6 +++
 include/linux/isolation.h              |  5 +++
 kernel/irq_work.c                      |  5 ++-
 kernel/isolation.c                     | 77 ++++++++++++++++++++++++++++++++++
 kernel/sched/core.c                    | 18 ++++++++
 kernel/signal.c                        |  4 ++
 kernel/smp.c                           |  6 ++-
 kernel/softirq.c                       | 33 +++++++++++++++
 9 files changed, 160 insertions(+), 2 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9bd5e91357b1..7884e69d08fa 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3816,6 +3816,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                         also sets up nohz_full and isolcpus mode for the
                         listed set of cpus.
 
+        task_isolation_debug    [KNL]
+                        In kernels built with CONFIG_TASK_ISOLATION
+                        and booted in task_isolation= mode, this
+                        setting will generate console backtraces when
+                        the kernel is about to interrupt a task that
+                        has requested PR_TASK_ISOLATION_ENABLE and is
+                        running on a task_isolation core.
+
         tcpmhash_entries=       [KNL,NET]
                         Set the number of tcp_metrics_hash slots.
                         Default value is 8192 or 16384 depending on total
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index 1d34fe68f48a..4e2c4b900b82 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -39,8 +39,14 @@ static inline bool context_tracking_in_user(void)
 {
         return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
 }
+
+static inline bool context_tracking_cpu_in_user(int cpu)
+{
+        return per_cpu(context_tracking.state, cpu) == CONTEXT_USER;
+}
 #else
 static inline bool context_tracking_in_user(void) { return false; }
+static inline bool context_tracking_cpu_in_user(int cpu) { return false; }
 static inline bool context_tracking_active(void) { return false; }
 static inline bool context_tracking_is_enabled(void) { return false; }
 static inline bool context_tracking_cpu_is_enabled(void) { return false; }
diff --git a/include/linux/isolation.h b/include/linux/isolation.h
index eb78175ed811..f04252c51cf1 100644
--- a/include/linux/isolation.h
+++ b/include/linux/isolation.h
@@ -44,6 +44,9 @@ extern void _task_isolation_exception(const char *fmt, ...);
                 _task_isolation_exception(fmt, ## __VA_ARGS__); \
         } while (0)
 
+extern void task_isolation_debug(int cpu);
+extern void task_isolation_debug_cpumask(const struct cpumask *);
+extern void task_isolation_debug_task(int cpu, struct task_struct *p);
 #else
 static inline void task_isolation_init(void) { }
 static inline bool task_isolation_possible(int cpu) { return false; }
@@ -53,6 +56,8 @@ extern inline void task_isolation_set_flags(struct task_struct *p,
                                             unsigned int flags) { }
 static inline int task_isolation_syscall(int nr) { return 0; }
 static inline void task_isolation_exception(const char *fmt, ...) { }
+static inline void task_isolation_debug(int cpu) { }
+#define task_isolation_debug_cpumask(mask) do {} while (0)
 #endif
 
 #endif
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index bcf107ce0854..a9b95ce00667 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/isolation.h>
 #include <asm/processor.h>
 
 
@@ -75,8 +76,10 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
         if (!irq_work_claim(work))
                 return false;
 
-        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) {
+                task_isolation_debug(cpu);
                 arch_send_call_function_single_ipi(cpu);
+        }
 
         return true;
 }
diff --git a/kernel/isolation.c b/kernel/isolation.c
index f44e90109472..1c4f320a24a0 100644
--- a/kernel/isolation.c
+++ b/kernel/isolation.c
@@ -11,6 +11,7 @@
 #include <linux/vmstat.h>
 #include <linux/isolation.h>
 #include <linux/syscalls.h>
+#include <linux/ratelimit.h>
 #include <asm/unistd.h>
 #include <asm/syscall.h>
 #include "time/tick-sched.h"
@@ -230,3 +231,79 @@ int task_isolation_syscall(int syscall)
 
         return 0;
 }
+
+/* Enable debugging of any interrupts of task_isolation cores. */
+static int task_isolation_debug_flag;
+static int __init task_isolation_debug_func(char *str)
+{
+        task_isolation_debug_flag = true;
+        return 1;
+}
+__setup("task_isolation_debug", task_isolation_debug_func);
+
+void task_isolation_debug_task(int cpu, struct task_struct *p)
+{
+        static DEFINE_RATELIMIT_STATE(console_output, HZ, 1);
+        bool force_debug = false;
+
+        /*
+         * Our caller made sure the task was running on a task isolation
+         * core, but make sure the task has enabled isolation.
+         */
+        if (!(p->task_isolation_flags & PR_TASK_ISOLATION_ENABLE))
+                return;
+
+        /*
+         * Ensure the task is actually in userspace; if it is in kernel
+         * mode, it is expected that it may receive interrupts, and in
+         * any case they don't affect the isolation.  Note that there
+         * is a race condition here as a task may have committed
+         * to returning to user space but not yet set the context
+         * tracking state to reflect it, and the check here is before
+         * we trigger the interrupt, so we might fail to warn about a
+         * legitimate interrupt.  However, the race window is narrow
+         * and hitting it does not cause any incorrect behavior other
+         * than failing to send the warning.
+         */
+        if (!context_tracking_cpu_in_user(cpu))
+                return;
+
+        /*
+         * If the task was in strict mode, deliver a signal to it.
+         * We disable task isolation mode when we deliver a signal
+         * so we won't end up recursing back here again.
+         * If we are in an NMI, we don't try delivering the signal
+         * and instead just treat it as if "debug" mode was enabled,
+         * since that's pretty much all we can do.
+         */
+        if (p->task_isolation_flags & PR_TASK_ISOLATION_STRICT) {
+                if (in_nmi())
+                        force_debug = true;
+                else
+                        task_isolation_interrupt(p, "interrupt");
+        }
+
+        /*
+         * If (for example) the timer interrupt starts ticking
+         * unexpectedly, we will get an unmanageable flow of output,
+         * so limit to one backtrace per second.
+         */
+        if (force_debug ||
+            (task_isolation_debug_flag && __ratelimit(&console_output))) {
+                pr_err("Interrupt detected for task_isolation cpu %d, %s/%d\n",
+                       cpu, p->comm, p->pid);
+                dump_stack();
+        }
+}
+
+void task_isolation_debug_cpumask(const struct cpumask *mask)
+{
+        int cpu, thiscpu = get_cpu();
+
+        /* No need to report on this cpu since we're already in the kernel. */
+        for_each_cpu(cpu, mask)
+                if (cpu != thiscpu)
+                        task_isolation_debug(cpu);
+
+        put_cpu();
+}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465eeab8b3..00649f7ad567 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
 #include <linux/frame.h>
+#include <linux/isolation.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -605,6 +606,23 @@ bool sched_can_stop_tick(struct rq *rq)
 }
 #endif /* CONFIG_NO_HZ_FULL */
 
+#ifdef CONFIG_TASK_ISOLATION
+void task_isolation_debug(int cpu)
+{
+        struct task_struct *p;
+
+        if (!task_isolation_possible(cpu))
+                return;
+
+        rcu_read_lock();
+        p = cpu_curr(cpu);
+        get_task_struct(p);
+        rcu_read_unlock();
+        task_isolation_debug_task(cpu, p);
+        put_task_struct(p);
+}
+#endif
+
 void sched_avg_update(struct rq *rq)
 {
         s64 period = sched_avg_period();
diff --git a/kernel/signal.c b/kernel/signal.c
index 53e4e62f2778..9c0be099fcd9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -639,6 +639,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  */
 void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
+        /* If the task is being killed, don't complain about task_isolation. */
+        if (state & TASK_WAKEKILL)
+                task_isolation_set_flags(t, 0);
+
         set_tsk_thread_flag(t, TIF_SIGPENDING);
         /*
          * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
diff --git a/kernel/smp.c b/kernel/smp.c
index 74165443c240..586a1309053b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/isolation.h>
 
 #include "smpboot.h"
 
@@ -177,8 +178,10 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
          * locking and barrier primitives. Generic code isn't really
          * equipped to do the right thing...
          */
-        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
+        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
+                task_isolation_debug(cpu);
                 arch_send_call_function_single_ipi(cpu);
+        }
 
         return 0;
 }
@@ -456,6 +459,7 @@ void smp_call_function_many(const struct cpumask *mask,
         }
 
         /* Send a message to all CPUs in the map */
+        task_isolation_debug_cpumask(cfd->cpumask);
         arch_send_call_function_ipi_mask(cfd->cpumask);
 
         if (wait) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 17caf4b63342..a96da9825582 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,7 @@
 #include <linux/smpboot.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
+#include <linux/isolation.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
@@ -319,6 +320,37 @@ asmlinkage __visible void do_softirq(void)
         local_irq_restore(flags);
 }
 
+/* Determine whether this IRQ is something task isolation cares about. */
+static void task_isolation_irq(void)
+{
+#ifdef CONFIG_TASK_ISOLATION
+        struct pt_regs *regs;
+
+        if (!context_tracking_cpu_is_enabled())
+                return;
+
+        /*
+         * We have not yet called __irq_enter() and so we haven't
+         * adjusted the hardirq count.  This test will allow us to
+         * avoid false positives for nested IRQs.
+         */
+        if (in_interrupt())
+                return;
+
+        /*
+         * If we were already in the kernel, not from an irq but from
+         * a syscall or synchronous exception/fault, this test should
+         * avoid a false positive as well.  Note that this requires
+         * architecture support for calling set_irq_regs() prior to
+         * calling irq_enter(), and if it's not done consistently, we
+         * will not consistently avoid false positives here.
+         */
+        regs = get_irq_regs();
+        if (regs && user_mode(regs))
+                task_isolation_debug(smp_processor_id());
+#endif
+}
+
 /*
  * Enter an interrupt context.
  */
@@ -335,6 +367,7 @@ void irq_enter(void)
                 _local_bh_enable();
         }
 
+        task_isolation_irq();
         __irq_enter();
 }

--
2.7.2