From: David Carrillo-Cisneros <davidcc@google.com>
Subject: [PATCH v3 40/46] perf/x86/intel/cmt: add rotation scheduled work
Schedule the rotation work every pmu->hrtimer_interval_ms milliseconds.
The interval defaults to CMT_DEFAULT_ROTATION_PERIOD.

Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
---
arch/x86/events/intel/cmt.c | 98 ++++++++++++++++++++++++++++++++++++++++++++-
arch/x86/events/intel/cmt.h | 2 +
2 files changed, 98 insertions(+), 2 deletions(-)
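
Note for reviewers unfamiliar with the mechanism: below is a minimal,
self-contained sketch of the CPU-bound, self-rescheduling delayed-work
pattern this patch applies to rmid rotation. The demo_* names and
DEMO_PERIOD_MS are illustrative only and not part of the patch; the
workqueue calls themselves (INIT_DELAYED_WORK, schedule_delayed_work_on,
msecs_to_jiffies, cancel_delayed_work_sync) are the ones the patch uses.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_PERIOD_MS	1200	/* mirrors CMT_DEFAULT_ROTATION_PERIOD */

static struct delayed_work demo_work;
static unsigned int demo_cpu;	/* CPU the work is pinned to */

static void demo_work_fn(struct work_struct *work)
{
	/* ... the periodic job would run here ... */

	/* Re-arm on the same CPU while the job is still needed. */
	schedule_delayed_work_on(demo_cpu, &demo_work,
				 msecs_to_jiffies(DEMO_PERIOD_MS));
}

static void demo_start(unsigned int cpu)
{
	demo_cpu = cpu;
	INIT_DELAYED_WORK(&demo_work, demo_work_fn);
	schedule_delayed_work_on(cpu, &demo_work,
				 msecs_to_jiffies(DEMO_PERIOD_MS));
}

static void demo_stop(void)
{
	/*
	 * Safe against self-requeueing work: cancels a pending instance
	 * and waits for a running one to finish.
	 */
	cancel_delayed_work_sync(&demo_work);
}

The patch's version differs only in that it re-arms conditionally
(intel_cmt_need_rmid_rotation()) and refuses to schedule once work_cpu
has been moved past nr_cpu_ids during package teardown.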

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 05803a8..8bf6aa5 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -51,6 +51,13 @@ static size_t pkg_uflags_size;
static struct pkg_data **cmt_pkgs_data;

/*
+ * Time between executions of the rotation logic. The execution frequency
+ * does not affect the rate at which RMIDs are recycled.
+ * The rotation period is stored in pmu->hrtimer_interval_ms.
+ */
+#define CMT_DEFAULT_ROTATION_PERIOD 1200 /* ms */
+
+/*
* Rotation Service Level Objectives (SLO) for monrs with llc_occupancy
* monitoring. Note that these are monr level SLOs, therefore all pmonrs in
* the monr meet or exceed them.
@@ -1306,6 +1313,79 @@ static void smp_call_rmid_read(void *data)

static struct pmu intel_cmt_pmu;

+/* Schedule rotation in one package. */
+static bool __intel_cmt_schedule_rotation_for_pkg(struct pkg_data *pkgd)
+{
+	unsigned long delay;
+
+	if (pkgd->work_cpu >= nr_cpu_ids)
+		return false;
+	delay = msecs_to_jiffies(intel_cmt_pmu.hrtimer_interval_ms);
+
+	return schedule_delayed_work_on(pkgd->work_cpu,
+					&pkgd->rotation_work, delay);
+}
+
+static void intel_cmt_schedule_rotation(void)
+{
+	struct pkg_data *pkgd = NULL;
+
+	rcu_read_lock();
+	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd)))
+		__intel_cmt_schedule_rotation_for_pkg(pkgd);
+	rcu_read_unlock();
+}
+
+/*
+ * Rotation for @pkgd is needed if its package has at least one online CPU and:
+ *   - there is a non-root monr (such a monr could request rmids in @pkgd at
+ *     any time), or
+ *   - there are dirty rmids in @pkgd.
+ */
+static bool intel_cmt_need_rmid_rotation(struct pkg_data *pkgd)
+{
+	unsigned long flags;
+	bool do_rot;
+
+	/* Protected by cmt_mutex. */
+	if (!list_empty(&monr_hrchy_root->children))
+		return true;
+
+	raw_spin_lock_irqsave(&pkgd->lock, flags);
+	do_rot = pkgd->nr_dep_pmonrs || pkgd->nr_dirty_rmids;
+	raw_spin_unlock_irqrestore(&pkgd->lock, flags);
+
+	return do_rot;
+}
+
+/*
+ * Rotation function, runs per package.
+ */
+static void intel_cmt_rmid_rotation_work(struct work_struct *work)
+{
+	struct pkg_data *pkgd;
+
+	pkgd = container_of(to_delayed_work(work),
+			    struct pkg_data, rotation_work);
+
+	/* Bail out if this pkg_data is on its way to being destroyed. */
+	if (pkgd->work_cpu >= nr_cpu_ids)
+		return;
+
+	mutex_lock(&pkgd->mutex);
+
+	if (!intel_cmt_need_rmid_rotation(pkgd))
+		goto exit;
+
+	/* The call to the rotation function is added in the next patch. */
+
+	if (intel_cmt_need_rmid_rotation(pkgd))
+		__intel_cmt_schedule_rotation_for_pkg(pkgd);
+
+exit:
+	mutex_unlock(&pkgd->mutex);
+}
+
/* Try to find a monr with same target, otherwise create new one. */
static int mon_group_setup_event(struct perf_event *event)
{
@@ -1796,6 +1876,11 @@ static int intel_cmt_event_init(struct perf_event *event)
mutex_lock(&cmt_mutex);

err = mon_group_setup_event(event);
+	/*
+	 * Schedule rotation even on error, in case the error was caused by
+	 * insufficient rmids.
+	 */
+	intel_cmt_schedule_rotation();

mutex_unlock(&cmt_mutex);

@@ -1885,6 +1970,7 @@ static const struct attribute_group *intel_cmt_attr_groups[] = {
};

static struct pmu intel_cmt_pmu = {
+	.hrtimer_interval_ms = CMT_DEFAULT_ROTATION_PERIOD,
.attr_groups = intel_cmt_attr_groups,
.task_ctx_nr = perf_sw_context,
.event_init = intel_cmt_event_init,
@@ -2009,6 +2095,7 @@ static struct pkg_data *alloc_pkg_data(int cpu)
mutex_init(&pkgd->mutex);
raw_spin_lock_init(&pkgd->lock);

+	INIT_DELAYED_WORK(&pkgd->rotation_work, intel_cmt_rmid_rotation_work);
pkgd->work_cpu = cpu;
pkgd->pkgid = pkgid;

@@ -2131,9 +2218,10 @@ static int intel_cmt_hp_online_enter(unsigned int cpu)

rcu_read_lock();
pkgd = rcu_dereference(cmt_pkgs_data[pkgid]);
-	if (pkgd->work_cpu >= nr_cpu_ids)
+	if (pkgd->work_cpu >= nr_cpu_ids) {
 		pkgd->work_cpu = cpu;
-
+		__intel_cmt_schedule_rotation_for_pkg(pkgd);
+	}
rcu_read_unlock();

return 0;
@@ -2184,6 +2272,7 @@ static int intel_cmt_prep_down(unsigned int cpu)
pkgd = rcu_dereference_protected(cmt_pkgs_data[pkgid],
lockdep_is_held(&cmt_mutex));
if (pkgd->work_cpu >= nr_cpu_ids) {
+		cancel_delayed_work_sync(&pkgd->rotation_work);
/* will destroy pkgd */
__terminate_pkg_data(pkgd);
RCU_INIT_POINTER(cmt_pkgs_data[pkgid], NULL);
@@ -2569,6 +2658,11 @@ static ssize_t cmt_monitoring_write(struct kernfs_open_file *of,
monr_destroy(monr);

exit_free:
+	/*
+	 * Schedule rotation even on error, in case the error was caused by
+	 * insufficient rmids.
+	 */
+	intel_cmt_schedule_rotation();
kfree(uflags);
exit_unlock:
monr_hrchy_release_mutexes();
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index 8756666..872cce0 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -248,6 +248,7 @@ struct cmt_csd {
* @mutex: Hold when modifying this pkg_data.
* @mutex_key: lockdep class for pkg_data's mutex.
* @lock: Hold to protect pmonrs in this pkg_data.
+ * @rotation_work:	Delayed work that performs rmid rotation.
* @work_cpu: CPU to run rotation and other batch jobs.
* It must be in the package associated to its
* instance of pkg_data.
@@ -268,6 +269,7 @@ struct pkg_data {
struct mutex mutex;
raw_spinlock_t lock;

+	struct delayed_work	rotation_work;
unsigned int work_cpu;
u32 max_rmid;
u16 pkgid;
--
2.8.0.rc3.226.g39d4020