From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Subject: [RFC PATCH 07/23] sched/fair: Collect load-balancing stats for task classes
Date: Fri, 9 Sep 2022
When selecting the busiest scheduling group, the class of the current task
can be used to break ties between two scheduling groups of equal
asym_packing priority and number of running tasks.

Compute a new task-class performance score for a scheduling group. It
is the sum of the performance scores of the current tasks (i.e., rq->curr)
of all the runqueues in the group.

Also, keep track of the task with the lowest performance score in the
scheduling group.

These two metrics will be used during idle load balancing to compute the
current and the prospective task-class performance of a scheduling
group.
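
For illustration only (this example is not part of the patch): the score
values below are made up, and the actual arithmetic that consumes these
metrics is introduced later in this series. A user-space sketch of the
accumulation, and of one plausible "prospective" score assuming the
lowest-scored current task is migrated away:

  #include <limits.h>
  #include <stdio.h>

  /*
   * User-space sketch, NOT kernel code: accumulate the task-class score
   * of one scheduling group the way update_rq_task_classes_stats() does,
   * then derive a hypothetical prospective score with the lowest-scored
   * current task removed.
   */
  int main(void)
  {
  	int curr_scores[] = { 4, 1, 3 };	/* scores of each rq->curr */
  	int nr = sizeof(curr_scores) / sizeof(curr_scores[0]);
  	long sum_score = 0;
  	int min_score = INT_MAX;

  	for (int i = 0; i < nr; i++) {
  		sum_score += curr_scores[i];
  		if (curr_scores[i] < min_score)
  			min_score = curr_scores[i];
  	}

  	printf("current group score: %ld\n", sum_score);
  	printf("prospective score without the weakest task: %ld\n",
  	       sum_score - min_score);
  	return 0;
  }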

Cc: Ben Segall <bsegall@google.com>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim C. Chen <tim.c.chen@intel.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Cc: x86@kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
---
kernel/sched/fair.c | 61 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 61 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f2a6bb5990d..58a435a04c1c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8686,6 +8686,63 @@ group_type group_classify(unsigned int imbalance_pct,
 	return group_has_spare;
 }
 
+struct sg_lb_task_class_stats {
+	/*
+	 * Score of the task with lowest score among the current tasks (i.e.,
+	 * runqueue::curr) of all runqueues in the scheduling group.
+	 */
+	int min_score;
+	/*
+	 * Sum of the scores of the current tasks of all runqueues in the
+	 * scheduling group.
+	 */
+	long sum_score;
+	/* The task with score equal to @min_score */
+	struct task_struct *p_min_score;
+};
+
+#ifdef CONFIG_SCHED_TASK_CLASSES
+static void init_rq_task_classes_stats(struct sg_lb_task_class_stats *class_sgs)
+{
+	class_sgs->min_score = INT_MAX;
+	class_sgs->sum_score = 0;
+	class_sgs->p_min_score = NULL;
+}
+
+/* Called only if cpu_of(@rq) is not idle and has tasks running. */
+static void update_rq_task_classes_stats(struct sg_lb_task_class_stats *class_sgs,
+					 struct rq *rq)
+{
+	int score;
+
+	if (!sched_task_classes_enabled())
+		return;
+
+	/*
+	 * TODO: if nr_running > 1 we may want to go through all the tasks
+	 * behind rq->curr.
+	 */
+	score = arch_get_task_class_score(rq->curr->class, cpu_of(rq));
+
+	class_sgs->sum_score += score;
+
+	if (score >= class_sgs->min_score)
+		return;
+
+	class_sgs->min_score = score;
+	class_sgs->p_min_score = rq->curr;
+}
+#else /* CONFIG_SCHED_TASK_CLASSES */
+static void update_rq_task_classes_stats(struct sg_lb_task_class_stats *class_sgs,
+					 struct rq *rq)
+{
+}
+
+static void init_rq_task_classes_stats(struct sg_lb_task_class_stats *class_sgs)
+{
+}
+#endif /* CONFIG_SCHED_TASK_CLASSES */
+
 /**
  * asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
  * @dst_cpu: Destination CPU of the load balancing
@@ -8797,9 +8854,11 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 				      struct sg_lb_stats *sgs,
 				      int *sg_status)
 {
+	struct sg_lb_task_class_stats class_stats;
 	int i, nr_running, local_group;
 
 	memset(sgs, 0, sizeof(*sgs));
+	init_rq_task_classes_stats(&class_stats);
 
 	local_group = group == sds->local;
 
@@ -8849,6 +8908,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			if (sgs->group_misfit_task_load < load)
 				sgs->group_misfit_task_load = load;
 		}
+
+		update_rq_task_classes_stats(&class_stats, rq);
 	}
 
 	sgs->group_capacity = group->sgc->capacity;
--
2.25.1