Date:    2009-08-20
From:    Andreas Herrmann <andreas.herrmann3@amd.com>
Subject: [PATCH 4/15] sched: Define per CPU variables and cpu_to_group function for MN domain

Additionally, fix up cpu_to_phys_group() for the CONFIG_SCHED_MN=y case.
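
For illustration, the group lookup added here follows the same pattern as the
existing cpu_to_*_group() helpers: intersect a topology mask with cpu_map and
take the first CPU in the result as the group's representative. Below is a
minimal user-space sketch of that pattern; a plain unsigned long stands in for
the kernel's struct cpumask, and mask_and()/mask_first() are illustrative
stand-ins, not kernel API:

#include <stdio.h>

/* One bit per CPU, up to BITS_PER_LONG CPUs. */
typedef unsigned long mask_t;

/* Stand-in for cpumask_and(): intersection of two CPU sets. */
static mask_t mask_and(mask_t a, mask_t b)
{
	return a & b;
}

/* Stand-in for cpumask_first(): lowest set bit, -1 if the mask is empty. */
static int mask_first(mask_t m)
{
	return m ? __builtin_ctzl(m) : -1;
}

int main(void)
{
	mask_t coregroup = 0x0f;	/* CPUs 0-3 share one core group */
	mask_t cpu_map   = 0x0e;	/* only CPUs 1-3 are in the build map */
	mask_t mask = mask_and(coregroup, cpu_map);

	/* CPU 1 represents the group, analogous to what
	 * cpu_to_cpu_node_group() below computes. */
	printf("group representative: CPU %d\n", mask_first(mask));
	return 0;
}

Taking the first CPU of the intersection gives every CPU in the same group a
stable, identical representative, which is what lets the per-CPU sched_group
structures be shared across the group's members.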

Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
---
kernel/sched.c | 32 +++++++++++++++++++++++++++-----
1 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 9990c3a..d85985d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8255,9 +8255,27 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
-#endif /* CONFIG_SCHED_MC */
 
-#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
+/*
+ * multi-node sched-domains:
+ */
+#ifdef CONFIG_SCHED_MN
+static DEFINE_PER_CPU(struct static_sched_domain, cpu_node_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpu_node);
+
+static int cpu_to_cpu_node_group(int cpu, const struct cpumask *cpu_map,
+				 struct sched_group **sg, struct cpumask *mask)
+{
+	int group;
+	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
+	group = cpumask_first(mask);
+	if (sg)
+		*sg = &per_cpu(sched_group_cpu_node, group).sg;
+	return group;
+}
+#endif /* CONFIG_SCHED_MN */
+
+#ifdef CONFIG_SCHED_SMT
 static int
 cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 		  struct sched_group **sg, struct cpumask *mask)
@@ -8270,7 +8288,7 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 		*sg = &per_cpu(sched_group_core, group).sg;
 	return group;
 }
-#elif defined(CONFIG_SCHED_MC)
+#else
 static int
 cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 		  struct sched_group **sg, struct cpumask *unused)
@@ -8279,7 +8297,8 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 		*sg = &per_cpu(sched_group_core, cpu).sg;
 	return cpu;
 }
-#endif
+#endif /* CONFIG_SCHED_SMT */
+#endif /* CONFIG_SCHED_MC */
 
 static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
@@ -8289,7 +8308,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 		  struct sched_group **sg, struct cpumask *mask)
 {
 	int group;
-#ifdef CONFIG_SCHED_MC
+#ifdef CONFIG_SCHED_MN
+	cpumask_and(mask, topology_cpu_node_cpumask(cpu), cpu_map);
+	group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_MC)
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
--
1.6.0.4