Subject: [PATCH v2] sched/fair: Do not set skip buddy up the sched hierarchy
From: Venkatesh Pallipadi <venki@google.com>

Setting the skip buddy all the way up the hierarchy does not play well
with intra-cgroup yield. One typical use case of yield is a thread in a
cgroup yielding the CPU to another thread within the same cgroup. For
such a case, setting the skip buddy all the way up the hierarchy is
counter-productive, as it can result in the CPU being yielded to a task
in some other cgroup.

So, limit the skip effect to the task requesting it.
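
To make the use case concrete, here is a minimal, hypothetical
userspace sketch (not part of the patch): two threads of one process,
which would normally live in the same cgroup, are pinned to a single
CPU, and one of them yields hoping that its sibling runs next. The
pinning and thread structure are assumptions made purely for
illustration.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>

/* CPU-bound sibling; shares the yielder's cgroup. */
static void *sibling(void *arg)
{
	for (;;)
		;
	return NULL;
}

int main(void)
{
	pthread_t t;
	cpu_set_t set;

	/* Pin the whole process to CPU 0 so the two threads compete. */
	CPU_ZERO(&set);
	CPU_SET(0, &set);
	sched_setaffinity(0, sizeof(set), &set);

	pthread_create(&t, NULL, sibling, NULL);

	/*
	 * Intent: hand the CPU to the sibling. With the skip buddy set
	 * all the way up the hierarchy, the CPU can instead go to a
	 * task in an unrelated cgroup.
	 */
	for (;;)
		sched_yield();
}

Build with something like "cc -pthread yield-sketch.c" (file name
hypothetical).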

Signed-off-by: Josh Don <joshdon@google.com>
---
Changelog since v1:
- As an optimization, skip clearing the skip buddy up the hierarchy
- Due to the above, it makes sense to inline __clear_buddies_skip; while
we're at it, inline the other __clear_buddies* functions as well.

kernel/sched/fair.c | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
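
For reviewers, a simplified sketch of why a single-level skip still
works: pick_next_task_fair() walks the hierarchy one cfs_rq at a time,
consulting each level's own buddies, so a skip set only on the task's
cfs_rq still deflects the pick among that cgroup's siblings. This is
not the kernel's actual pick_next_entity(), which also weighs the
last/next buddies and the vruntime spread; the function name below is
made up for illustration.

/* Simplified sketch only; not the real pick_next_entity(). */
static struct sched_entity *pick_entity_sketch(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_first_entity(cfs_rq);

	/* Honor the skip hint only if another entity is runnable. */
	if (se && cfs_rq->skip == se) {
		struct sched_entity *second = __pick_next_entity(se);

		if (second)
			se = second;
	}

	return se;
}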

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 682a754ea3e1..dbac30e3cc08 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4010,7 +4010,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	}
 }
 
-static void __clear_buddies_last(struct sched_entity *se)
+static inline void __clear_buddies_last(struct sched_entity *se)
 {
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -4021,7 +4021,7 @@ static void __clear_buddies_last(struct sched_entity *se)
 	}
 }
 
-static void __clear_buddies_next(struct sched_entity *se)
+static inline void __clear_buddies_next(struct sched_entity *se)
 {
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -4032,15 +4032,12 @@ static void __clear_buddies_next(struct sched_entity *se)
 	}
 }
 
-static void __clear_buddies_skip(struct sched_entity *se)
+static inline void __clear_buddies_skip(struct sched_entity *se)
 {
-	for_each_sched_entity(se) {
-		struct cfs_rq *cfs_rq = cfs_rq_of(se);
-		if (cfs_rq->skip != se)
-			break;
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
+	if (cfs_rq->skip == se)
 		cfs_rq->skip = NULL;
-	}
 }
 
 static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -4051,8 +4048,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	if (cfs_rq->next == se)
 		__clear_buddies_next(se);
 
-	if (cfs_rq->skip == se)
-		__clear_buddies_skip(se);
+	__clear_buddies_skip(se);
 }
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
@@ -6647,8 +6643,15 @@ static void set_next_buddy(struct sched_entity *se)
 
 static void set_skip_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->skip = se;
+	/*
+	 * One typical usecase of yield is when a thread in a cgroup
+	 * wants to yield CPU to another thread within the same cgroup.
+	 * For such a case, setting the skip buddy all the way up the
+	 * hierarchy is counter-productive, as that results in CPU being
+	 * yielded to a task in some other cgroup. So, only set skip
+	 * for the task requesting it.
+	 */
+	cfs_rq_of(se)->skip = se;
 }
 
 /*
--
2.24.0.rc1.363.gb1bccd3e3d-goog