Subject: [PATCH 6/7] printk: Avoid scheduling printing threads on the same CPU
Currently nothing prevents the scheduler from scheduling a printing
kthread on the same CPU that is currently doing the printing. In fact,
in some KVM configurations this seems to happen rather frequently, and
it defeats the printing offload: the current CPU keeps printing while
waiting for a printing kthread to come and take over, but that never
happens because the kthread has been scheduled on the very same CPU.

Fix the problem by allowing each printing kthread to run only on a
subset of CPUs. The subsets are disjoint, so the CPU currently doing
the printing can belong to at most one of them, and at least one
kthread is therefore guaranteed to be runnable on some other CPU and
able to take over. CPU hotplug makes this more difficult than it
should be, but we cope by redistributing the kthreads among CPUs
whenever some kthread is left with no CPU it can run on.
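
To make the partitioning concrete: with 5 online CPUs and
PRINTING_TASKS == 2, cpus_per_thread is DIV_ROUND_UP(5, 2) == 3, so
print/0 is allowed on CPUs {0,1,2} and print/1 on CPUs {3,4}. A minimal
standalone userspace sketch of the same arithmetic (not part of the
patch; it assumes dense CPU numbering 0..n-1, whereas
distribute_printing_kthreads() below walks the possibly sparse online
mask with for_each_online_cpu()):

#include <stdio.h>

#define PRINTING_TASKS 2
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int num_online = 5;	/* example: 5 online CPUs */
	unsigned int cpus_per_thread =
			DIV_ROUND_UP(num_online, PRINTING_TASKS);
	unsigned int cpu;

	/* cpu / cpus_per_thread picks the kthread allowed on this CPU */
	for (cpu = 0; cpu < num_online; cpu++)
		printf("cpu %u -> print/%u\n", cpu, cpu / cpus_per_thread);
	return 0;
}

Because the resulting subsets are disjoint, whichever CPU gets stuck
printing, at least one kthread remains runnable elsewhere and can take
over.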

Signed-off-by: Jan Kara <jack@suse.com>
---
kernel/printk/printk.c | 105 ++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 96 insertions(+), 9 deletions(-)

diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5153c6518b9d..72334ed42942 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -101,8 +101,10 @@ static atomic_t printing_tasks_spinning = ATOMIC_INIT(0);
#define PRINTING_TASKS 2
/* Pointers to printing kthreads */
static struct task_struct *printing_kthread[PRINTING_TASKS];
+/* Masks of cpus allowed for printing kthreads */
+static struct cpumask *printing_kthread_mask[PRINTING_TASKS];
/* Serialization of changes to printk_offload_chars and kthread creation */
-static DEFINE_MUTEX(printk_kthread_mutex);
+static DEFINE_MUTEX(printing_kthread_mutex);

/* Wait queue printing kthreads sleep on when idle */
static DECLARE_WAIT_QUEUE_HEAD(print_queue);
@@ -2840,28 +2842,113 @@ static int printing_task(void *arg)
return 0;
}

+/* Divide online cpus among printing kthreads */
+static void distribute_printing_kthreads(void)
+{
+ int i;
+ unsigned int cpus_per_thread;
+ unsigned int cpu, seen_cpu;
+
+ for (i = 0; i < PRINTING_TASKS; i++)
+ cpumask_clear(printing_kthread_mask[i]);
+
+ cpus_per_thread = DIV_ROUND_UP(num_online_cpus(), PRINTING_TASKS);
+ seen_cpu = 0;
+ for_each_online_cpu(cpu) {
+ cpumask_set_cpu(cpu,
+ printing_kthread_mask[seen_cpu / cpus_per_thread]);
+ seen_cpu++;
+ }
+
+ for (i = 0; i < PRINTING_TASKS; i++)
+ if (!cpumask_empty(printing_kthread_mask[i]))
+ set_cpus_allowed_ptr(printing_kthread[i],
+ printing_kthread_mask[i]);
+}
+
+static int printing_kthread_cpu_notify(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ int i;
+
+ if (printk_offload_chars == 0)
+ goto out;
+
+ /* Get exclusion against printk offload being turned off... */
+ mutex_lock(&printing_kthread_mutex);
+ /* Now a reliable check if printk offload is enabled */
+ if (printk_offload_chars == 0) {
+ mutex_unlock(&printing_kthread_mutex);
+ goto out;
+ }
+
+ if (action == CPU_ONLINE) {
+ /*
+ * Allow one of the kthreads to use the new CPU. We don't want
+ * to spend too much time on a fair distribution here, so just
+ * guess; a full redistribution happens if some kthread is left
+ * with no CPU to run on.
+ */
+ i = cpu % PRINTING_TASKS;
+ cpumask_set_cpu(cpu, printing_kthread_mask[i]);
+ set_cpus_allowed_ptr(printing_kthread[i],
+ printing_kthread_mask[i]);
+ }
+ if (action == CPU_DEAD) {
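+ /*
+ * Remove the dead CPU from the mask of the kthread that owns it.
+ * If that leaves the kthread with an empty mask, redistribute
+ * the online CPUs so it again has somewhere to run.
+ */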
+ for (i = 0; i < PRINTING_TASKS; i++) {
+ if (cpumask_test_cpu(cpu, printing_kthread_mask[i])) {
+ cpumask_clear_cpu(cpu,
+ printing_kthread_mask[i]);
+ if (cpumask_empty(printing_kthread_mask[i]))
+ distribute_printing_kthreads();
+ break;
+ }
+ }
+ }
+ mutex_unlock(&printing_kthread_mutex);
+out:
+ return NOTIFY_OK;
+}
+
static int printk_start_offload_kthreads(void)
{
int i;
struct task_struct *task;
+ int ret;

/* Does handover of printing make any sense? */
if (printk_offload_chars == 0 || num_possible_cpus() <= 1)
return 0;
+
for (i = 0; i < PRINTING_TASKS; i++) {
if (printing_kthread[i])
continue;
+ printing_kthread_mask[i] = kmalloc(cpumask_size(), GFP_KERNEL);
+ if (!printing_kthread_mask[i]) {
+ pr_err("printk: Cannot allocate cpumask for printing thread.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
task = kthread_run(printing_task, NULL, "print/%d", i);
- if (IS_ERR(task))
+ if (IS_ERR(task)) {
+ kfree(printing_kthread_mask[i]);
+ pr_err("printk: Cannot create printing thread: %ld\n",
+ PTR_ERR(task));
+ ret = PTR_ERR(task);
goto out_err;
+ }
printing_kthread[i] = task;
}
+
+ hotcpu_notifier(printing_kthread_cpu_notify, 0);
+ distribute_printing_kthreads();
return 0;
out_err:
- pr_err("printk: Cannot create printing thread: %ld\n", PTR_ERR(task));
/* Disable offloading if creating kthreads failed */
printk_offload_chars = 0;
- return PTR_ERR(task);
+ return ret;
}

static int offload_chars_set(const char *val, const struct kernel_param *kp)
@@ -2869,26 +2956,26 @@ static int offload_chars_set(const char *val, const struct kernel_param *kp)
int ret;

/* Protect against parallel change of printk_offload_chars */
- mutex_lock(&printk_kthread_mutex);
+ mutex_lock(&printing_kthread_mutex);
ret = param_set_uint(val, kp);
if (ret) {
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
return ret;
}
ret = printk_start_offload_kthreads();
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
return ret;
}

static void printk_offload_init(void)
{
- mutex_lock(&printk_kthread_mutex);
+ mutex_lock(&printing_kthread_mutex);
if (num_possible_cpus() <= 1) {
/* Offloading doesn't make sense. Disable print offloading. */
printk_offload_chars = 0;
} else
printk_start_offload_kthreads();
- mutex_unlock(&printk_kthread_mutex);
+ mutex_unlock(&printing_kthread_mutex);
}

#else /* CONFIG_PRINTK_OFFLOAD */
--
2.1.4

