From: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Subject: [RFC PATCH 1/3] sched: Introduce structures necessary for WALT

Add the per-task and per-runqueue data structures that
will later be used by Window Assisted Load Tracking (WALT)
to estimate task demand and CPU utilization.
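
As a rough orientation (not part of this patch), window-based
bookkeeping of this kind typically works as sketched below. The names,
the 20ms window size and the single-boundary simplification are
illustrative assumptions only:

/*
 * Toy sketch: roll the "current" window into the "previous" one when a
 * window boundary passes, then account the new runtime delta.  The two
 * structs mirror the per-task and per-runqueue fields added by this
 * patch.
 */
#include <stdint.h>

#define TOY_WINDOW_NS	(20ULL * 1000 * 1000)	/* assumed 20ms window */

struct toy_ravg {			/* per-task, cf. struct ravg */
	uint64_t mark_start;		/* timestamp of the last accounted event */
	uint32_t curr_window;		/* usage accrued in the current window */
	uint32_t prev_window;		/* usage accrued in the previous window */
};

struct toy_rq_stats {			/* per-runqueue, cf. the new rq fields */
	uint64_t window_start;
	uint64_t curr_runnable_sum;
	uint64_t prev_runnable_sum;
};

/*
 * Account 'delta' ns of runtime for task 'p' on runqueue 'rq'.  If 'now'
 * has crossed into a new window, shift "current" into "previous" first.
 * Simplified: assumes at most one boundary was crossed and does not
 * split 'delta' across the boundary.
 */
static void toy_account(struct toy_rq_stats *rq, struct toy_ravg *p,
			uint64_t now, uint64_t delta)
{
	if (now - rq->window_start >= TOY_WINDOW_NS) {
		rq->window_start += TOY_WINDOW_NS;
		rq->prev_runnable_sum = rq->curr_runnable_sum;
		rq->curr_runnable_sum = 0;

		p->prev_window = p->curr_window;
		p->curr_window = 0;
	}

	p->curr_window += delta;
	rq->curr_runnable_sum += delta;
	p->mark_start = now;
}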

Move cap_scale() into sched.h, since WALT will also need it to
implement frequency and capacity invariance.
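
To make the above concrete (this is an illustration, not code from this
series): "frequency and capacity invariance" means scaling a raw runtime
delta by the CPU's current frequency relative to its maximum and by its
capacity relative to the biggest CPU, both expressed on the kernel's
1024-based capacity scale. The helper name and parameters below are
hypothetical; only cap_scale() and SCHED_CAPACITY_SHIFT match the
kernel's definitions:

#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)
#define cap_scale(v, s)		((v)*(s) >> SCHED_CAPACITY_SHIFT)

static uint64_t scale_exec_time_example(uint64_t delta,
					unsigned long freq_scale,
					unsigned long cpu_scale)
{
	/* Scale by current frequency / max frequency ... */
	delta = cap_scale(delta, freq_scale);
	/* ... then by this CPU's capacity / capacity of the biggest CPU. */
	return cap_scale(delta, cpu_scale);
}

/*
 * Example: 10ms of runtime at half the maximum frequency (freq_scale =
 * 512) on a CPU of capacity 512 counts as 10ms * 512/1024 * 512/1024 =
 * 2.5ms of work on a full-speed, biggest CPU.
 */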

Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
---
 include/linux/sched.h | 39 +++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c   |  2 --
 kernel/sched/sched.h  |  8 ++++++++
 3 files changed, 47 insertions(+), 2 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62c68e5..64f8bec 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -315,6 +315,21 @@ extern char ___assert_task_state[1 - 2*!!(
/* Task command name length */
#define TASK_COMM_LEN 16

+/*
+ * These events may be replaced with a combination of existing scheduler flags
+ * provided that doing so does not make the implementation too fragile.
+ */
+enum task_event {
+ PUT_PREV_TASK = 0,
+ PICK_NEXT_TASK = 1,
+ TASK_WAKE = 2,
+ TASK_MIGRATE = 3,
+ TASK_UPDATE = 4,
+ IRQ_UPDATE = 5,
+};
+
+extern char *task_event_names[];
+
#include <linux/spinlock.h>

/*
@@ -1320,6 +1335,25 @@ struct sched_statistics {
};
#endif

+#ifdef CONFIG_SCHED_WALT
+
+/* ravg represents the capacity-scaled CPU usage of a task */
+struct ravg {
+ /*
+ * 'mark_start' marks the most recent event seen for the task
+ *
+ * 'curr_window' represents the task's CPU usage in its most recent
+ * window
+ *
+ * 'prev_window' represents the task's CPU usage in the window prior
+ * to the one represented by 'curr_window'
+ */
+ u64 mark_start;
+ u32 curr_window, prev_window;
+};
+#endif
+
+
struct sched_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node;
@@ -1480,6 +1514,11 @@ struct task_struct {
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
+
+#ifdef CONFIG_SCHED_WALT
+ struct ravg ravg;
+#endif
+
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ce8b244..39c826d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2674,8 +2674,6 @@ static u32 __compute_runnable_contrib(u64 n)
return contrib + runnable_avg_yN_sum[n];
}

-#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
-
/*
* We can represent the historical contribution to runnable average as the
* coefficients of a geometric series. To do this we sub-divide our runnable
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c64fc51..9bf6925 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -65,6 +65,8 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
# define scale_load_down(w) (w)
#endif

+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
/*
* Task weight (visible to users) and its load (invisible to users) have
* independent resolution, but they should be well calibrated. We use
@@ -664,6 +666,12 @@ struct rq {
u64 max_idle_balance_cost;
#endif

+#ifdef CONFIG_SCHED_WALT
+ u64 window_start; /* start of the current window */
+ u64 curr_runnable_sum; /* CPU usage accrued on this rq in the current window */
+ u64 prev_runnable_sum; /* CPU usage accrued on this rq in the previous window */
+#endif /* CONFIG_SCHED_WALT */
+
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
u64 prev_irq_time;
#endif
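
For readers new to WALT, one plausible (purely hypothetical) shape for a
routine consuming the task_event values above is sketched below; the
update_window_stats() helper is a stub standing in for the real
accounting, and the enum is repeated only so the snippet stands alone:

#include <stdint.h>

enum task_event {
	PUT_PREV_TASK	= 0,
	PICK_NEXT_TASK	= 1,
	TASK_WAKE	= 2,
	TASK_MIGRATE	= 3,
	TASK_UPDATE	= 4,
	IRQ_UPDATE	= 5,
};

static void update_window_stats(uint64_t delta) { (void)delta; /* stub */ }

/*
 * Every event records a fresh mark_start; events that close a running
 * interval account the time elapsed since the previous mark.  Whether a
 * given event contributes to task demand is policy for later patches to
 * decide; this only illustrates the dispatch.
 */
static void toy_note_event(enum task_event event, uint64_t *mark_start,
			   uint64_t wallclock)
{
	switch (event) {
	case TASK_WAKE:
		/* The task was blocked; nothing ran on its behalf. */
		break;
	case PUT_PREV_TASK:
	case PICK_NEXT_TASK:
	case TASK_MIGRATE:
	case TASK_UPDATE:
	case IRQ_UPDATE:
		update_window_stats(wallclock - *mark_start);
		break;
	}
	*mark_start = wallclock;
}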