From: Ian Rogers <irogers@google.com>
Date: Tue, 2 Jul 2019
Subject: [PATCH 1/7] perf: propagate perf_install_in_context errors up
The current __perf_install_in_context can fail, but the error is ignored.
Subsequent changes to __perf_install_in_context can add new failure modes
whose errors need to propagate up to the callers. This change prepares for
that by making perf_install_in_context return an error code and warning
(via WARN_ON_ONCE) at each call site if installation fails.
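
To illustrate the calling-convention change, here is a minimal userspace
sketch (not kernel code; all names below are made up for illustration).
The helper that used to return void now returns the error from the
cross-call, e.g. -ENXIO from cpu_function_call() when the target CPU is
offline or -ESRCH from task_function_call() when the task is no longer
running, and callers that cannot recover at least warn instead of silently
dropping the error:

#include <errno.h>
#include <stdio.h>

/* Stand-in for cpu_function_call()/task_function_call(); fails with
 * -ENXIO here the way the real helper does when the CPU is offline. */
static int remote_call(int target_online)
{
	return target_online ? 0 : -ENXIO;
}

/* Before the patch the equivalent function returned void and the
 * remote_call() result was discarded; now the error reaches the caller. */
static int install_in_context(int target_online)
{
	int err = remote_call(target_online);

	if (err)
		return err;
	/* ... install the event ... */
	return 0;
}

int main(void)
{
	int err = install_in_context(0);

	/* Mirrors the WARN_ON_ONCE(err) added at each call site. */
	if (err)
		fprintf(stderr, "install failed: %d\n", err);
	return 0;
}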

Signed-off-by: Ian Rogers <irogers@google.com>
---
kernel/events/core.c | 38 +++++++++++++++++++++++++-------------
1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 785d708f8553..4faa90f5a934 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2558,11 +2558,12 @@ static int __perf_install_in_context(void *info)
  *
  * Very similar to event_function_call, see comment there.
  */
-static void
+static int
 perf_install_in_context(struct perf_event_context *ctx,
 			struct perf_event *event,
 			int cpu)
 {
+	int err;
 	struct task_struct *task = READ_ONCE(ctx->task);
 
 	lockdep_assert_held(&ctx->mutex);
@@ -2577,15 +2578,15 @@ perf_install_in_context(struct perf_event_context *ctx,
 	smp_store_release(&event->ctx, ctx);
 
 	if (!task) {
-		cpu_function_call(cpu, __perf_install_in_context, event);
-		return;
+		err = cpu_function_call(cpu, __perf_install_in_context, event);
+		return err;
 	}
 
 	/*
 	 * Should not happen, we validate the ctx is still alive before calling.
 	 */
 	if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
-		return;
+		return 0;
 
 	/*
 	 * Installing events is tricky because we cannot rely on ctx->is_active
@@ -2619,8 +2620,9 @@ perf_install_in_context(struct perf_event_context *ctx,
 	 */
 	smp_mb();
 again:
-	if (!task_function_call(task, __perf_install_in_context, event))
-		return;
+	err = task_function_call(task, __perf_install_in_context, event);
+	if (err)
+		return err;
 
 	raw_spin_lock_irq(&ctx->lock);
 	task = ctx->task;
@@ -2631,7 +2633,7 @@ perf_install_in_context(struct perf_event_context *ctx,
 		 * against perf_event_exit_task_context().
 		 */
 		raw_spin_unlock_irq(&ctx->lock);
-		return;
+		return 0;
 	}
 	/*
 	 * If the task is not running, ctx->lock will avoid it becoming so,
@@ -2643,6 +2645,7 @@ perf_install_in_context(struct perf_event_context *ctx,
 	}
 	add_event_to_ctx(event, ctx);
 	raw_spin_unlock_irq(&ctx->lock);
+	return 0;
 }
 
 /*
@@ -11103,7 +11106,9 @@ SYSCALL_DEFINE5(perf_event_open,
 		 */
 		for_each_sibling_event(sibling, group_leader) {
 			perf_event__state_init(sibling);
-			perf_install_in_context(ctx, sibling, sibling->cpu);
+			err = perf_install_in_context(ctx, sibling,
+						      sibling->cpu);
+			WARN_ON_ONCE(err);
 			get_ctx(ctx);
 		}
 
@@ -11113,7 +11118,9 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * startup state, ready to be add into new context.
 		 */
 		perf_event__state_init(group_leader);
-		perf_install_in_context(ctx, group_leader, group_leader->cpu);
+		err = perf_install_in_context(ctx, group_leader,
+					      group_leader->cpu);
+		WARN_ON_ONCE(err);
 		get_ctx(ctx);
 	}
 
@@ -11128,7 +11135,8 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	event->owner = current;
 
-	perf_install_in_context(ctx, event, event->cpu);
+	err = perf_install_in_context(ctx, event, event->cpu);
+	WARN_ON_ONCE(err);
 	perf_unpin_context(ctx);
 
 	if (move_group)
@@ -11247,7 +11255,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_unlock;
 	}
 
-	perf_install_in_context(ctx, event, cpu);
+	err = perf_install_in_context(ctx, event, cpu);
+	WARN_ON_ONCE(err);
 	perf_unpin_context(ctx);
 	mutex_unlock(&ctx->mutex);
 
@@ -11270,6 +11279,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 	struct perf_event_context *dst_ctx;
 	struct perf_event *event, *tmp;
 	LIST_HEAD(events);
+	int err;
 
 	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
 	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
@@ -11308,7 +11318,8 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 		if (event->state >= PERF_EVENT_STATE_OFF)
 			event->state = PERF_EVENT_STATE_INACTIVE;
 		account_event_cpu(event, dst_cpu);
-		perf_install_in_context(dst_ctx, event, dst_cpu);
+		err = perf_install_in_context(dst_ctx, event, dst_cpu);
+		WARN_ON_ONCE(err);
 		get_ctx(dst_ctx);
 	}
 
@@ -11321,7 +11332,8 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 		if (event->state >= PERF_EVENT_STATE_OFF)
 			event->state = PERF_EVENT_STATE_INACTIVE;
 		account_event_cpu(event, dst_cpu);
-		perf_install_in_context(dst_ctx, event, dst_cpu);
+		err = perf_install_in_context(dst_ctx, event, dst_cpu);
+		WARN_ON_ONCE(err);
 		get_ctx(dst_ctx);
 	}
 	mutex_unlock(&dst_ctx->mutex);
--
2.22.0.410.gd8fdbe21b5-goog