From: Lucas De Marchi <>
Subject: [PATCH 3/4] sched: fix common misspellings
Date: Fri, 18 Mar 2011 10:40:29 -0300
They were generated by 'codespell' and then manually reviewed.
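For reference, a rough sketch of such a pass (illustrative only; it
assumes the standalone codespell.py script of that era, and the exact
name and options may differ between versions):

	# scan the scheduler sources and print suspected misspellings
	./codespell.py include/linux/sched.h kernel/sched.c \
		kernel/sched_autogroup.c kernel/sched_fair.c kernel/sched_rt.c

	# review each suggestion by hand; the dictionary has false
	# positives (identifiers, acronyms), so nothing is applied blindly

Each accepted hit can then be fixed in the source and the result
checked with 'git diff' before committing.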
Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
---
 include/linux/sched.h    |    2 +-
 kernel/sched.c           |    6 +++---
 kernel/sched_autogroup.c |    2 +-
 kernel/sched_fair.c      |    2 +-
 kernel/sched_rt.c        |    4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c15936f..9bd74f5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -853,7 +853,7 @@ extern int __weak arch_sd_sibiling_asym_packing(void);
 
 /*
  * Optimise SD flags for power savings:
- * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings.
+ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
  * Keep default SD flags if sched_{smt,mc}_power_saving=0
  */
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 58d66ea..e53f7bd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2310,7 +2310,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  * Cause a process which is running on another CPU to enter
  * kernel-mode, without any delay. (to get signals handled.)
  *
- * NOTE: this function doesnt have to take the runqueue lock,
+ * NOTE: this function doesn't have to take the runqueue lock,
  * because all it wants to ensure is that the remote task enters
  * the kernel. If the IPI races and the task has been migrated
  * to another CPU then no harm is done and the purpose has been
@@ -4991,7 +4991,7 @@ recheck:
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/*
-	 * To be able to change p->policy safely, the apropriate
+	 * To be able to change p->policy safely, the appropriate
 	 * runqueue lock must be held.
 	 */
 	rq = __task_rq_lock(p);
@@ -5695,7 +5695,7 @@ void show_state_filter(unsigned long state_filter)
 	do_each_thread(g, p) {
 		/*
 		 * reset the NMI-timeout, listing all files on a slow
-		 * console might take alot of time:
+		 * console might take a lot of time:
 		 */
 		touch_nmi_watchdog();
 		if (!state_filter || (p->state & state_filter))
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 5946ac5..429242f 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -179,7 +179,7 @@ void sched_autogroup_create_attach(struct task_struct *p)
 	struct autogroup *ag = autogroup_create();
 
 	autogroup_move_group(p, ag);
-	/* drop extra refrence added by autogroup_create() */
+	/* drop extra reference added by autogroup_create() */
 	autogroup_kref_put(ag);
 }
 EXPORT_SYMBOL(sched_autogroup_create_attach);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3f7ec9e..3cb7f07 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3061,7 +3061,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 	/*
 	 * if *imbalance is less than the average load per runnable task
-	 * there is no gaurantee that any tasks will be moved so we'll have
+	 * there is no guarantee that any tasks will be moved so we'll have
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index db308cb..e7cebdc 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1378,7 +1378,7 @@ retry:
 	task = pick_next_pushable_task(rq);
 	if (task_cpu(next_task) == rq->cpu && task == next_task) {
 		/*
-		 * If we get here, the task hasnt moved at all, but
+		 * If we get here, the task hasn't moved at all, but
 		 * it has failed to push. We will not try again,
 		 * since the other cpus will pull from us when they
 		 * are ready.
@@ -1488,7 +1488,7 @@ static int pull_rt_task(struct rq *this_rq)
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
-			 * in another runqueue. (low likelyhood
+			 * in another runqueue. (low likelihood
 			 * but possible)
 			 */
 		}
-- 
1.7.4.1
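Since every hunk above touches only comments, one common sanity check
for a patch like this is to confirm the generated object code is
unchanged (assuming an otherwise identical config and toolchain; in
this tree sched_fair.c, sched_rt.c and sched_autogroup.c are #included
into sched.c, so one object covers them all):

	make kernel/sched.o
	md5sum kernel/sched.o   # compare before and after applying the patch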