Subject: [tip:perf/core] perf: Remove task_ctx_sched_in()
Commit-ID:  04dc2dbbfe1c6f81b996d4dab255da75f9efbb4a
Gitweb:     http://git.kernel.org/tip/04dc2dbbfe1c6f81b996d4dab255da75f9efbb4a
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Sat, 9 Apr 2011 21:17:43 +0200
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 28 May 2011 18:01:14 +0200

perf: Remove task_ctx_sched_in()

Make task_ctx_sched_*() imply EVENT_ALL, since anything less will not
actually have scheduled the task in/out at all.
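
For reference, the event_type_t flags referred to here are defined in
kernel/events/core.c as follows (as of this series; later kernels may
carry additional members):

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

Scheduling with anything less than EVENT_ALL touches only the pinned or
only the flexible half of a context, which is why it does not amount to
scheduling the task in or out.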

Since there's no site that schedules all of a task in (due to the
interleave with flexible cpuctx), we can remove this function.
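
The interleave in question lives in perf_event_context_sched_in(): task
pinned events go in before cpu flexible events, and task flexible events
only after those. A trimmed sketch of that function (locking, PMU
disable/enable and cgroup handling omitted):

static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (cpuctx->task_ctx == ctx)
		return;

	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);

	cpuctx->task_ctx = ctx;
}

Because the task context always goes in as two separate ctx_sched_in()
calls with the cpu flexible events in between, no site ever performs a
single task_ctx_sched_in(ctx, EVENT_ALL), so the function has no user.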

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.817893268@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 kernel/events/core.c | 26 ++++++--------------------
 1 files changed, 6 insertions(+), 20 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index d243af9..66b3dd8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1979,8 +1979,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 		perf_cgroup_sched_out(task);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx,
-			       enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
@@ -1990,7 +1989,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	ctx_sched_out(ctx, cpuctx, event_type);
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 	cpuctx->task_ctx = NULL;
 }
 
@@ -2098,19 +2097,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 	ctx_sched_in(ctx, cpuctx, event_type, task);
 }
 
-static void task_ctx_sched_in(struct perf_event_context *ctx,
-			      enum event_type_t event_type)
-{
-	struct perf_cpu_context *cpuctx;
-
-	cpuctx = __get_cpu_context(ctx);
-	if (cpuctx->task_ctx == ctx)
-		return;
-
-	ctx_sched_in(ctx, cpuctx, event_type, NULL);
-	cpuctx->task_ctx = ctx;
-}
-
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
 					struct task_struct *task)
 {
@@ -2363,7 +2349,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
@@ -2371,7 +2357,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
 	if (ctx)
-		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
 
 done:
 	if (remove)
@@ -2435,7 +2421,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	perf_cgroup_sched_out(current);
 
 	raw_spin_lock(&ctx->lock);
-	task_ctx_sched_out(ctx, EVENT_ALL);
+	task_ctx_sched_out(ctx);
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		ret = event_enable_on_exec(event, ctx);
@@ -6794,7 +6780,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	raw_spin_lock(&child_ctx->lock);
-	task_ctx_sched_out(child_ctx, EVENT_ALL);
+	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
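
For completeness, the surviving helper after this patch reduces to the
following (reconstructed from the two hunks above; the early
!cpuctx->task_ctx bail-out is unchanged context that the diff does not
show, as implied by the gap between the hunks' line numbers):

static void task_ctx_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}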
