Date: Mon, 18 May 2009
From: Ingo Molnar
Subject: [GIT PULL] scheduler fixes
Linus,

Please pull the latest sched-fixes-for-linus git tree from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git sched-fixes-for-linus

Thanks,

Ingo

------------------>
Ron (1):
      sched: Fix fallback sched_clock()'s offset when using jiffies

Rusty Russell (1):
      sched: avoid flexible array member inside struct (gcc extension)


 kernel/sched.c       |   28 +++++++++++++++-------------
 kernel/sched_clock.c |    3 ++-
 2 files changed, 17 insertions(+), 14 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index 26efa47..d1ef62c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7756,22 +7756,24 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
  * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
  * for nr_cpu_ids < CONFIG_NR_CPUS.
  */
-struct static_sched_group {
+union static_sched_group {
 	struct sched_group sg;
-	DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
+	char _sg_and_cpus[sizeof(struct sched_group) +
+		BITS_TO_LONGS(CONFIG_NR_CPUS) * sizeof(long)];
 };
 
-struct static_sched_domain {
+union static_sched_domain {
 	struct sched_domain sd;
-	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
+	char _sd_and_cpus[sizeof(struct sched_domain) +
+		BITS_TO_LONGS(CONFIG_NR_CPUS) * sizeof(long)];
 };
 
 /*
  * SMT sched-domains:
  */
 #ifdef CONFIG_SCHED_SMT
-static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(union static_sched_domain, cpu_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_cpus);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
@@ -7787,8 +7789,8 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
  * multi-core sched-domains:
  */
 #ifdef CONFIG_SCHED_MC
-static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
+static DEFINE_PER_CPU(union static_sched_domain, core_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_core);
 #endif /* CONFIG_SCHED_MC */
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -7815,8 +7817,8 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 }
 #endif
 
-static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
+static DEFINE_PER_CPU(union static_sched_domain, phys_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_phys);
 
 static int
 cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
@@ -7843,11 +7845,11 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
+static DEFINE_PER_CPU(union static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
+static DEFINE_PER_CPU(union static_sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(union static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
 				 struct sched_group **sg,
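
For readers who have not hit this before: struct sched_group and struct
sched_domain both end in a flexible array member holding their cpumask,
and embedding a struct that ends in a flexible array member inside
another aggregate, with a further member after it, is a GCC extension
rather than standard C. The union above instead overlays the struct
with a char array sized for the struct plus the bitmap words, so the
trailing cpumask still has backing storage without the extension. A
minimal standalone sketch of the same trick (illustrative names, not
taken from sched.c):

#include <stdio.h>

#define NR_CPUS			64	/* stand-in for CONFIG_NR_CPUS */
#define BITS_PER_LONG		(8 * sizeof(long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct group {				/* stand-in for struct sched_group */
	int weight;
	unsigned long cpumask[];	/* flexible array member at the end */
};

/*
 * The old pattern was effectively "struct { struct group g; bitmap; }",
 * which GCC accepts but ISO C does not.  The union reserves enough raw
 * bytes for the struct and its trailing bitmap in a single object.
 */
union static_group {
	struct group g;
	char _g_and_cpus[sizeof(struct group) +
			 BITS_TO_LONGS(NR_CPUS) * sizeof(long)];
};

int main(void)
{
	static union static_group sg;

	sg.g.cpumask[0] = 1UL;		/* storage backed by the char array */
	printf("union size: %zu bytes\n", sizeof(union static_group));
	return 0;
}

Since a union is as large as its largest member, each per-cpu slot still
reserves the full struct-plus-bitmap footprint, exactly as before.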
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 819f17a..e1d16c9 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -38,7 +38,8 @@
  */
 unsigned long long __attribute__((weak)) sched_clock(void)
 {
-	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
+	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
+					* (NSEC_PER_SEC / HZ);
 }
 
 static __read_mostly int sched_clock_running;
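
The subtraction matters because jiffies does not start at zero: it is
initialized to INITIAL_JIFFIES, defined in include/linux/jiffies.h as
((unsigned long)(unsigned int)(-300*HZ)), i.e. five minutes before the
32-bit wrap so that wrap bugs surface soon after boot. The unpatched
fallback therefore reported roughly 49.7 days' worth of nanoseconds at
boot with HZ=1000. A quick userspace sketch of the before/after
arithmetic (HZ hardcoded here purely for illustration):

#include <stdio.h>

#define HZ		1000		/* assumed for this example */
#define NSEC_PER_SEC	1000000000ULL
/* same definition as include/linux/jiffies.h */
#define INITIAL_JIFFIES	((unsigned long)(unsigned int)(-300 * HZ))

int main(void)
{
	unsigned long jiffies = INITIAL_JIFFIES;	/* value at boot */

	/* old fallback: huge value right after boot */
	unsigned long long before = (unsigned long long)jiffies
					* (NSEC_PER_SEC / HZ);
	/* patched fallback: starts cleanly at zero */
	unsigned long long after =
		(unsigned long long)(jiffies - INITIAL_JIFFIES)
					* (NSEC_PER_SEC / HZ);

	printf("before: %llu ns (~%.1f days)\n",
	       before, before / 1e9 / 86400);
	printf("after:  %llu ns\n", after);
	return 0;
}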
