    Date: Fri, 14 Sep 2007
    From: Ingo Molnar <mingo@elte.hu>
    Subject: Re: CFS: some bad numbers with Java/database threading [FIXED]

    * Antoine Martin <antoine@nagafix.co.uk> wrote:

    > >> have an impact) Keep CONFIG_SCHED_DEBUG=y to be able to twiddle the
    > >> sysctl.
    > It looks good now! Updated results here:
    > http://devloop.org.uk/documentation/database-performance/Linux-Kernels/Kernels-ManyThreads-CombinedTests5-10msYield-noload.png
    > http://devloop.org.uk/documentation/database-performance/Linux-Kernels/Kernels-ManyThreads-CombinedTests5-10msYield.png
    > Compared with more kernels here - a bit more cluttered:
    > http://devloop.org.uk/documentation/database-performance/Linux-Kernels/Kernels-ManyThreads-CombinedTests4-10msYield-noload.png
    >
    > Thanks Ingo!
    > Does this mean that I'll have to keep doing:
    > echo 1 > /proc/sys/kernel/sched_yield_bug_workaround
    > Or are you planning on finding a more elegant solution?

    just to make sure - can you get it to work fast with the
    -rc6+yield-patch solution too (i.e. not CFS-devel)? We need a (tested)
    solution for 2.6.23, and the CFS-devel patches are not for 2.6.23. I've
    attached the latest version of the -rc6 yield patch below - the switch
    no longer depends on SCHED_DEBUG but is always available.

    Ingo

    ------------------->
    Subject: sched: yield workaround
    From: Ingo Molnar <mingo@elte.hu>

    sched_yield() is fundamentally broken, and CFS has changed
    its behavior. Some apps that mistakenly rely on sched_yield()
    want a "weak" sched_yield() (such as desktop apps), while other
    apps want a "strong" sched_yield() (for example, some JDKs).

    There's no way for the scheduler to figure out which of the
    two variants the app really wants - because sched_yield() is
    all about hiding from the kernel the true structure of the
    user-space locking code.
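    As an illustration (not part of this patch), the pattern in question
    is typically a user-space spin lock that calls sched_yield() while
    contended - a hypothetical sketch:

        #include <sched.h>
        #include <stdatomic.h>

        /* hypothetical example: a lock that hides its wait behind yield */
        static atomic_flag guard = ATOMIC_FLAG_INIT;

        static void yield_lock(void)
        {
                /* spin until the flag is clear, yielding the CPU each time
                 * in the hope that the current lock holder runs next */
                while (atomic_flag_test_and_set(&guard))
                        sched_yield();
        }

        static void yield_unlock(void)
        {
                atomic_flag_clear(&guard);
        }

    How well such code performs depends entirely on how far sched_yield()
    pushes the caller back in the runqueue - exactly the property that
    differs between the "weak" and "strong" variants.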

    As a solution, provide a workaround that introduces a more
    aggressive sched_yield() implementation:

    # default CFS behavior:
    echo 0 > /proc/sys/kernel/sched_yield_bug_workaround

    # aggressive: queue the current task right behind the next task:
    echo 1 > /proc/sys/kernel/sched_yield_bug_workaround

    # NOP (just reschedule):
    echo 2 > /proc/sys/kernel/sched_yield_bug_workaround

    In the future, the use of this sysctl might generate
    a deprecation warning, so that apps start moving away
    from their reliance on sched_yield().
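
    For benchmarking, the workaround can be toggled at runtime once the
    patch is applied. A hypothetical test loop (./run-benchmark stands in
    for whatever workload is being measured, e.g. the Java/database test
    above):

        for mode in 0 1 2; do
                # select the yield behavior, then re-run the workload
                echo $mode > /proc/sys/kernel/sched_yield_bug_workaround
                ./run-benchmark        # hypothetical workload command
        done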

    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    ---
     include/linux/sched.h |    2 +
     kernel/sched_fair.c   |   73 +++++++++++++++++++++++++++++++++++++++++++++-----
     kernel/sysctl.c       |   19 +++++++++++++
     3 files changed, 88 insertions(+), 6 deletions(-)

    Index: linux/include/linux/sched.h
    ===================================================================
    --- linux.orig/include/linux/sched.h
    +++ linux/include/linux/sched.h
    @@ -1402,10 +1402,12 @@ extern void sched_idle_next(void);
     
     extern unsigned int sysctl_sched_latency;
     extern unsigned int sysctl_sched_min_granularity;
    +extern unsigned int sysctl_sched_yield_granularity;
     extern unsigned int sysctl_sched_wakeup_granularity;
     extern unsigned int sysctl_sched_batch_wakeup_granularity;
     extern unsigned int sysctl_sched_stat_granularity;
     extern unsigned int sysctl_sched_runtime_limit;
    +extern unsigned int sysctl_sched_yield_bug_workaround;
     extern unsigned int sysctl_sched_child_runs_first;
     extern unsigned int sysctl_sched_features;
     
    Index: linux/kernel/sched_fair.c
    ===================================================================
    --- linux.orig/kernel/sched_fair.c
    +++ linux/kernel/sched_fair.c
    @@ -42,6 +42,16 @@ unsigned int sysctl_sched_latency __read
      */
     unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;
     
    +unsigned int sysctl_sched_yield_granularity __read_mostly = 10000000ULL;
    +
    +/*
    + * sys_sched_yield workaround switch.
    + *
    + * This option switches the yield implementation of the
    + * old scheduler back on.
    + */
    +unsigned int __read_mostly sysctl_sched_yield_bug_workaround;
    +
     /*
      * SCHED_BATCH wake-up granularity.
      * (default: 25 msec, units: nanoseconds)
    @@ -901,15 +911,66 @@ static void dequeue_task_fair(struct rq
      */
     static void yield_task_fair(struct rq *rq, struct task_struct *p)
     {
    -        struct cfs_rq *cfs_rq = task_cfs_rq(p);
    +        if (!sysctl_sched_yield_bug_workaround) {
    +                struct cfs_rq *cfs_rq = task_cfs_rq(p);
    +                __update_rq_clock(rq);
    +
    +                /*
    +                 * Dequeue and enqueue the task to update its
    +                 * position within the tree:
    +                 */
    +                dequeue_entity(cfs_rq, &p->se, 0);
    +                enqueue_entity(cfs_rq, &p->se, 0);
    +                return;
    +        }
    +
    +        if (sysctl_sched_yield_bug_workaround == 1) {
    +                struct cfs_rq *cfs_rq = task_cfs_rq(p);
    +                struct rb_node *curr, *next, *first;
    +                struct task_struct *p_next;
    +                s64 yield_key;
    +
    +                __update_rq_clock(rq);
    +                curr = &p->se.run_node;
    +                first = first_fair(cfs_rq);
    +                /*
    +                 * Move this task to the second place in the tree:
    +                 */
    +                if (curr != first)
    +                        next = rb_next(curr);
    +                else
    +                        next = first;
    +                /*
    +                 * We were the last one already - nothing to do, return
    +                 * and reschedule:
    +                 */
    +                if (unlikely(!next))
    +                        return;
    +
    +                p_next = rb_entry(next, struct task_struct, se.run_node);
    +                /*
    +                 * Minimally necessary key value to be the second in the tree:
    +                 */
    +                yield_key = p_next->se.fair_key +
    +                                (int)sysctl_sched_yield_granularity;
    +
    +                dequeue_entity(cfs_rq, &p->se, 0);
    +
    +                /*
    +                 * Only update the key if we need to move more backwards
    +                 * than the minimally necessary position to be the second:
    +                 */
    +                if (p->se.fair_key < yield_key)
    +                        p->se.fair_key = yield_key;
    +
    +                __enqueue_entity(cfs_rq, &p->se);
    +                return;
    +        }
     
    -        __update_rq_clock(rq);
             /*
    -         * Dequeue and enqueue the task to update its
    -         * position within the tree:
    +         * Just reschedule, do nothing else:
             */
    -        dequeue_entity(cfs_rq, &p->se, 0);
    -        enqueue_entity(cfs_rq, &p->se, 0);
    +        resched_task(p);
     }
     
     /*
    Index: linux/kernel/sysctl.c
    ===================================================================
    --- linux.orig/kernel/sysctl.c
    +++ linux/kernel/sysctl.c
    @@ -244,6 +244,17 @@ static ctl_table kern_table[] = {
             },
             {
                     .ctl_name       = CTL_UNNUMBERED,
    +                .procname       = "sched_yield_granularity_ns",
    +                .data           = &sysctl_sched_yield_granularity,
    +                .maxlen         = sizeof(unsigned int),
    +                .mode           = 0644,
    +                .proc_handler   = &proc_dointvec_minmax,
    +                .strategy       = &sysctl_intvec,
    +                .extra1         = &min_sched_granularity_ns,
    +                .extra2         = &max_sched_granularity_ns,
    +        },
    +        {
    +                .ctl_name       = CTL_UNNUMBERED,
                     .procname       = "sched_wakeup_granularity_ns",
                     .data           = &sysctl_sched_wakeup_granularity,
                     .maxlen         = sizeof(unsigned int),
    @@ -288,6 +299,14 @@ static ctl_table kern_table[] = {
             },
             {
                     .ctl_name       = CTL_UNNUMBERED,
    +                .procname       = "sched_yield_bug_workaround",
    +                .data           = &sysctl_sched_yield_bug_workaround,
    +                .maxlen         = sizeof(unsigned int),
    +                .mode           = 0644,
    +                .proc_handler   = &proc_dointvec,
    +        },
    +        {
    +                .ctl_name       = CTL_UNNUMBERED,
                     .procname       = "sched_child_runs_first",
                     .data           = &sysctl_sched_child_runs_first,
                     .maxlen         = sizeof(unsigned int),