    From: "Yan, Zheng" <zheng.z.yan@intel.com>
    Subject: [PATCH V5 04/13] perf: Introduce perf_pmu_migrate_context
    Date: 2012-06-12

    Originally from Peter Zijlstra. The helper migrates all perf events in a
    PMU's per-CPU context from one CPU to another.
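
    As an illustration only (not part of this patch), a driver for a shared,
    package-wide PMU could call the new helper from its CPU hotplug path to
    move its events off a CPU that is going offline. The names below
    (my_uncore_pmu, a driver-owned struct pmu, and pick_migration_target())
    are hypothetical:

        static void my_uncore_cpu_offline(int dying_cpu)
        {
                /* Hypothetical helper: pick another online CPU to take over. */
                int target = pick_migration_target(dying_cpu);

                if (target >= 0)
                        /* Re-home every event from dying_cpu's context onto target. */
                        perf_pmu_migrate_context(&my_uncore_pmu, dying_cpu, target);
        }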

    Signed-off-by: Zheng Yan <zheng.z.yan@intel.com>
    ---
     include/linux/perf_event.h |  2 ++
     kernel/events/core.c       | 36 ++++++++++++++++++++++++++++++++++++
     2 files changed, 38 insertions(+)

    diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
    index 1ce887a..76c5c8b 100644
    --- a/include/linux/perf_event.h
    +++ b/include/linux/perf_event.h
    @@ -1107,6 +1107,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                     struct task_struct *task,
                                     perf_overflow_handler_t callback,
                                     void *context);
    +extern void perf_pmu_migrate_context(struct pmu *pmu,
    +                                int src_cpu, int dst_cpu);
     extern u64 perf_event_read_value(struct perf_event *event,
                                      u64 *enabled, u64 *running);

    diff --git a/kernel/events/core.c b/kernel/events/core.c
    index 2c05027..2e54e74 100644
    --- a/kernel/events/core.c
    +++ b/kernel/events/core.c
    @@ -1641,6 +1641,8 @@ perf_install_in_context(struct perf_event_context *ctx,
             lockdep_assert_held(&ctx->mutex);

             event->ctx = ctx;
    +        if (event->cpu != -1)
    +                event->cpu = cpu;

             if (!task) {
                     /*
    @@ -6375,6 +6377,7 @@ SYSCALL_DEFINE5(perf_event_open,
             mutex_lock(&ctx->mutex);

             if (move_group) {
    +                synchronize_rcu();
                     perf_install_in_context(ctx, group_leader, event->cpu);
                     get_ctx(ctx);
                     list_for_each_entry(sibling, &group_leader->sibling_list,
    @@ -6480,6 +6483,39 @@ err:
     }
     EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);

    +void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
    +{
    +        struct perf_event_context *src_ctx;
    +        struct perf_event_context *dst_ctx;
    +        struct perf_event *event, *tmp;
    +        LIST_HEAD(events);
    +
    +        src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
    +        dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
    +
    +        mutex_lock(&src_ctx->mutex);
    +        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
    +                                 event_entry) {
    +                perf_remove_from_context(event);
    +                put_ctx(src_ctx);
    +                list_add(&event->event_entry, &events);
    +        }
    +        mutex_unlock(&src_ctx->mutex);
    +
    +        synchronize_rcu();
    +
    +        mutex_lock(&dst_ctx->mutex);
    +        list_for_each_entry_safe(event, tmp, &events, event_entry) {
    +                list_del(&event->event_entry);
    +                if (event->state >= PERF_EVENT_STATE_OFF)
    +                        event->state = PERF_EVENT_STATE_INACTIVE;
    +                perf_install_in_context(dst_ctx, event, dst_cpu);
    +                get_ctx(dst_ctx);
    +        }
    +        mutex_unlock(&dst_ctx->mutex);
    +}
    +EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
    +
     static void sync_child_event(struct perf_event *child_event,
                                  struct task_struct *child)
     {
    --
    1.7.10.2

