From: Chris Metcalf <cmetcalf@ezchip.com>
Subject: [PATCH v10 07/12] task_isolation: add debug boot flag
Date: Wed, 2 Mar 2016
    The new "task_isolation_debug" boot flag simplifies debugging of
    TASK_ISOLATION kernels when processes are running in
    PR_TASK_ISOLATION_ENABLE mode.  Such processes should receive no
    interrupts from the kernel; if they do, and this boot flag is
    specified, a kernel stack dump is generated on the console.
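
    As an illustration (the cpu list here is arbitrary), booting with

        task_isolation=1-3 task_isolation_debug

    makes any interrupt of an isolated task emit a ratelimited console
    message of the form

        Interrupt detected for task_isolation cpu 2, myapp/1234

    followed by a dump_stack() backtrace; "myapp" and "1234" stand in
    for the interrupted task's comm and pid.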

    It's possible to use ftrace simply to detect that a task_isolation
    core has unexpectedly entered the kernel.  What this boot flag adds
    is better diagnostics from the kernel itself, e.g. reporting in the
    IPI-generating code which remote core and context is preparing to
    deliver an interrupt to a task_isolation core.
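
    (As a rough sketch of the ftrace approach, assuming debugfs is
    mounted at /sys/kernel/debug and, purely for illustration, that
    cpu 1 is the isolated core:

        echo 2 > /sys/kernel/debug/tracing/tracing_cpumask   # mask 0x2 = cpu 1
        echo function > /sys/kernel/debug/tracing/current_tracer
        cat /sys/kernel/debug/tracing/trace_pipe

    Any output at all shows that the core entered the kernel, but not
    which remote core or context was responsible.)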

    It may be worth considering other ways to generate useful debugging
    output rather than console spew, but for now that is simple and direct.
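
    For context, here is a minimal userspace sketch of the mode this
    flag debugs, assuming the PR_SET_TASK_ISOLATION prctl and the
    PR_TASK_ISOLATION_* flags introduced earlier in this series, and a
    hypothetical isolated cpu 2:

        #define _GNU_SOURCE
        #include <sched.h>
        #include <stdio.h>
        #include <sys/prctl.h>  /* PR_* values come from the patched uapi headers */

        int main(void)
        {
                cpu_set_t set;

                /* Bind to a single task_isolation core first. */
                CPU_ZERO(&set);
                CPU_SET(2, &set);
                if (sched_setaffinity(0, sizeof(set), &set) != 0) {
                        perror("sched_setaffinity");
                        return 1;
                }

                /*
                 * Request isolation; STRICT means any later kernel
                 * entry delivers a signal instead of going unnoticed.
                 */
                if (prctl(PR_SET_TASK_ISOLATION,
                          PR_TASK_ISOLATION_ENABLE | PR_TASK_ISOLATION_STRICT,
                          0, 0, 0) != 0) {
                        perror("prctl");
                        return 1;
                }

                for (;;)
                        ;       /* isolated spin; no further kernel entries */
        }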

    Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
    ---
     Documentation/kernel-parameters.txt     |  8 ++++
     include/linux/context_tracking_state.h  |  6 +++
     include/linux/isolation.h               |  5 +++
     kernel/irq_work.c                       |  5 ++-
     kernel/isolation.c                      | 77 ++++++++++++++++++++++++++++++++++
     kernel/sched/core.c                     | 18 ++++++++
     kernel/signal.c                         |  5 +++
     kernel/smp.c                            |  6 ++-
     kernel/softirq.c                        | 33 +++++++++++++++
     9 files changed, 161 insertions(+), 2 deletions(-)

    diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
    index c8d0b42d984a..ea0434fa906e 100644
    --- a/Documentation/kernel-parameters.txt
    +++ b/Documentation/kernel-parameters.txt
    @@ -3755,6 +3755,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                             also sets up nohz_full and isolcpus mode for the
                             listed set of cpus.
     
    +        task_isolation_debug    [KNL]
    +                        In kernels built with CONFIG_TASK_ISOLATION
    +                        and booted in task_isolation= mode, this
    +                        setting will generate console backtraces when
    +                        the kernel is about to interrupt a task that
    +                        has requested PR_TASK_ISOLATION_ENABLE and is
    +                        running on a task_isolation core.
    +
             tcpmhash_entries=       [KNL,NET]
                             Set the number of tcp_metrics_hash slots.
                             Default value is 8192 or 16384 depending on total
    diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
    index 1d34fe68f48a..4e2c4b900b82 100644
    --- a/include/linux/context_tracking_state.h
    +++ b/include/linux/context_tracking_state.h
    @@ -39,8 +39,14 @@ static inline bool context_tracking_in_user(void)
     {
             return __this_cpu_read(context_tracking.state) == CONTEXT_USER;
     }
    +
    +static inline bool context_tracking_cpu_in_user(int cpu)
    +{
    +        return per_cpu(context_tracking.state, cpu) == CONTEXT_USER;
    +}
     #else
     static inline bool context_tracking_in_user(void) { return false; }
    +static inline bool context_tracking_cpu_in_user(int cpu) { return false; }
     static inline bool context_tracking_active(void) { return false; }
     static inline bool context_tracking_is_enabled(void) { return false; }
     static inline bool context_tracking_cpu_is_enabled(void) { return false; }
    diff --git a/include/linux/isolation.h b/include/linux/isolation.h
    index ba6c4d510db8..f1ae7b663746 100644
    --- a/include/linux/isolation.h
    +++ b/include/linux/isolation.h
    @@ -45,6 +45,9 @@ static inline void task_isolation_enter(void)
     extern bool task_isolation_syscall(int nr);
     extern void task_isolation_exception(const char *fmt, ...);
     extern void task_isolation_interrupt(struct task_struct *, const char *buf);
    +extern void task_isolation_debug(int cpu);
    +extern void task_isolation_debug_cpumask(const struct cpumask *);
    +extern void task_isolation_debug_task(int cpu, struct task_struct *p);
     
     static inline bool task_isolation_strict(void)
     {
    @@ -73,6 +76,8 @@ static inline bool task_isolation_ready(void) { return true; }
     static inline void task_isolation_enter(void) { }
     static inline bool task_isolation_check_syscall(int nr) { return false; }
     static inline void task_isolation_check_exception(const char *fmt, ...) { }
    +static inline void task_isolation_debug(int cpu) { }
    +#define task_isolation_debug_cpumask(mask) do {} while (0)
     #endif
     
     #endif
    diff --git a/kernel/irq_work.c b/kernel/irq_work.c
    index bcf107ce0854..a9b95ce00667 100644
    --- a/kernel/irq_work.c
    +++ b/kernel/irq_work.c
    @@ -17,6 +17,7 @@
     #include <linux/cpu.h>
     #include <linux/notifier.h>
     #include <linux/smp.h>
    +#include <linux/isolation.h>
     #include <asm/processor.h>
     
     
    @@ -75,8 +76,10 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
             if (!irq_work_claim(work))
                     return false;
     
    -        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
    +        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) {
    +                task_isolation_debug(cpu);
                     arch_send_call_function_single_ipi(cpu);
    +        }
     
             return true;
     }
    diff --git a/kernel/isolation.c b/kernel/isolation.c
    index 5621fdf15b17..6ac35988b49a 100644
    --- a/kernel/isolation.c
    +++ b/kernel/isolation.c
    @@ -11,6 +11,7 @@
     #include <linux/vmstat.h>
     #include <linux/isolation.h>
     #include <linux/syscalls.h>
    +#include <linux/ratelimit.h>
     #include <asm/unistd.h>
     #include "time/tick-sched.h"
     
    @@ -182,3 +183,79 @@ bool task_isolation_syscall(int syscall)
             task_isolation_exception("syscall %d", syscall);
             return true;
     }
    +
    +/* Enable debugging of any interrupts of task_isolation cores. */
    +static int task_isolation_debug_flag;
    +static int __init task_isolation_debug_func(char *str)
    +{
    +        task_isolation_debug_flag = true;
    +        return 1;
    +}
    +__setup("task_isolation_debug", task_isolation_debug_func);
    +
    +void task_isolation_debug_task(int cpu, struct task_struct *p)
    +{
    +        static DEFINE_RATELIMIT_STATE(console_output, HZ, 1);
    +        bool force_debug = false;
    +
    +        /*
    +         * Our caller made sure the task was running on a task isolation
    +         * core, but make sure the task has enabled isolation.
    +         */
    +        if (!(p->task_isolation_flags & PR_TASK_ISOLATION_ENABLE))
    +                return;
    +
    +        /*
    +         * Ensure the task is actually in userspace; if it is in kernel
    +         * mode, it is expected that it may receive interrupts, and in
    +         * any case they don't affect the isolation.  Note that there
    +         * is a race condition here as a task may have committed
    +         * to returning to user space but not yet set the context
    +         * tracking state to reflect it, and the check here is before
    +         * we trigger the interrupt, so we might fail to warn about a
    +         * legitimate interrupt.  However, the race window is narrow
    +         * and hitting it does not cause any incorrect behavior other
    +         * than failing to send the warning.
    +         */
    +        if (!context_tracking_cpu_in_user(cpu))
    +                return;
    +
    +        /*
    +         * If the task was in strict mode, deliver a signal to it.
    +         * We disable task isolation mode when we deliver a signal
    +         * so we won't end up recursing back here again.
    +         * If we are in an NMI, we don't try delivering the signal
    +         * and instead just treat it as if "debug" mode was enabled,
    +         * since that's pretty much all we can do.
    +         */
    +        if (p->task_isolation_flags & PR_TASK_ISOLATION_STRICT) {
    +                if (in_nmi())
    +                        force_debug = true;
    +                else
    +                        task_isolation_interrupt(p, "interrupt");
    +        }
    +
    +        /*
    +         * If (for example) the timer interrupt starts ticking
    +         * unexpectedly, we will get an unmanageable flow of output,
    +         * so limit to one backtrace per second.
    +         */
    +        if (force_debug ||
    +            (task_isolation_debug_flag && __ratelimit(&console_output))) {
    +                pr_err("Interrupt detected for task_isolation cpu %d, %s/%d\n",
    +                       cpu, p->comm, p->pid);
    +                dump_stack();
    +        }
    +}
    +
    +void task_isolation_debug_cpumask(const struct cpumask *mask)
    +{
    +        int cpu, thiscpu = get_cpu();
    +
    +        /* No need to report on this cpu since we're already in the kernel. */
    +        for_each_cpu(cpu, mask)
    +                if (cpu != thiscpu)
    +                        task_isolation_debug(cpu);
    +
    +        put_cpu();
    +}
    diff --git a/kernel/sched/core.c b/kernel/sched/core.c
    index 9503d590e5ef..f53e1417aa75 100644
    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -74,6 +74,7 @@
     #include <linux/binfmts.h>
     #include <linux/context_tracking.h>
     #include <linux/compiler.h>
    +#include <linux/isolation.h>
     
     #include <asm/switch_to.h>
     #include <asm/tlb.h>
    @@ -746,6 +747,23 @@ bool sched_can_stop_tick(void)
     }
     #endif /* CONFIG_NO_HZ_FULL */
     
    +#ifdef CONFIG_TASK_ISOLATION
    +void task_isolation_debug(int cpu)
    +{
    +        struct task_struct *p;
    +
    +        if (!task_isolation_possible(cpu))
    +                return;
    +
    +        rcu_read_lock();
    +        p = cpu_curr(cpu);
    +        get_task_struct(p);
    +        rcu_read_unlock();
    +        task_isolation_debug_task(cpu, p);
    +        put_task_struct(p);
    +}
    +#endif
    +
     void sched_avg_update(struct rq *rq)
     {
             s64 period = sched_avg_period();
    diff --git a/kernel/signal.c b/kernel/signal.c
    index 0508544c8ced..b35ce19c01c4 100644
    --- a/kernel/signal.c
    +++ b/kernel/signal.c
    @@ -638,6 +638,11 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
      */
     void signal_wake_up_state(struct task_struct *t, unsigned int state)
     {
    +#ifdef CONFIG_TASK_ISOLATION
    +        /* If the task is being killed, don't complain about task_isolation. */
    +        if (state & TASK_WAKEKILL)
    +                t->task_isolation_flags = 0;
    +#endif
             set_tsk_thread_flag(t, TIF_SIGPENDING);
             /*
              * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
    diff --git a/kernel/smp.c b/kernel/smp.c
    index d903c02223af..a61894409645 100644
    --- a/kernel/smp.c
    +++ b/kernel/smp.c
    @@ -14,6 +14,7 @@
     #include <linux/smp.h>
     #include <linux/cpu.h>
     #include <linux/sched.h>
    +#include <linux/isolation.h>
     
     #include "smpboot.h"
     
    @@ -178,8 +179,10 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
              * locking and barrier primitives. Generic code isn't really
              * equipped to do the right thing...
              */
    -        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
    +        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) {
    +                task_isolation_debug(cpu);
                     arch_send_call_function_single_ipi(cpu);
    +        }
     
             return 0;
     }
    @@ -457,6 +460,7 @@ void smp_call_function_many(const struct cpumask *mask,
             }
     
             /* Send a message to all CPUs in the map */
    +        task_isolation_debug_cpumask(cfd->cpumask);
             arch_send_call_function_ipi_mask(cfd->cpumask);
     
             if (wait) {
    diff --git a/kernel/softirq.c b/kernel/softirq.c
    index 479e4436f787..f249b71cddf4 100644
    --- a/kernel/softirq.c
    +++ b/kernel/softirq.c
    @@ -26,6 +26,7 @@
     #include <linux/smpboot.h>
     #include <linux/tick.h>
     #include <linux/irq.h>
    +#include <linux/isolation.h>
     
     #define CREATE_TRACE_POINTS
     #include <trace/events/irq.h>
    @@ -319,6 +320,37 @@ asmlinkage __visible void do_softirq(void)
             local_irq_restore(flags);
     }
     
    +/* Determine whether this IRQ is something task isolation cares about. */
    +static void task_isolation_irq(void)
    +{
    +#ifdef CONFIG_TASK_ISOLATION
    +        struct pt_regs *regs;
    +
    +        if (!context_tracking_cpu_is_enabled())
    +                return;
    +
    +        /*
    +         * We have not yet called __irq_enter() and so we haven't
    +         * adjusted the hardirq count.  This test will allow us to
    +         * avoid false positives for nested IRQs.
    +         */
    +        if (in_interrupt())
    +                return;
    +
    +        /*
    +         * If we were already in the kernel, not from an irq but from
    +         * a syscall or synchronous exception/fault, this test should
    +         * avoid a false positive as well.  Note that this requires
    +         * architecture support for calling set_irq_regs() prior to
    +         * calling irq_enter(), and if it's not done consistently, we
    +         * will not consistently avoid false positives here.
    +         */
    +        regs = get_irq_regs();
    +        if (regs && user_mode(regs))
    +                task_isolation_debug(smp_processor_id());
    +#endif
    +}
    +
     /*
      * Enter an interrupt context.
      */
    @@ -335,6 +367,7 @@ void irq_enter(void)
                     _local_bh_enable();
             }
     
    +        task_isolation_irq();
             __irq_enter();
     }

    --
    2.1.2