From: Mike Travis <travis@sgi.com>
Date: 25 Apr 2008
Subject: [PATCH 05/11] sched: replace MAX_NUMNODES with nr_node_ids in kernel/sched.c
  * Replace uses of MAX_NUMNODES with nr_node_ids in kernel/sched.c,
    where appropriate.  This saves some allocated space as well as many
    cycles otherwise wasted iterating over non-existent node entries.
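
As background (an illustrative sketch, not code from this patch):
MAX_NUMNODES is a compile-time constant sized for the largest
configuration the kernel supports (1 << CONFIG_NODES_SHIFT), while
nr_node_ids is computed at boot from the set of possible nodes
(highest possible node id + 1).  The standalone toy program below
shows the effect on loop bounds; the node counts are made up for the
example:

/*
 * Standalone illustration (not kernel code): MAX_NUMNODES and
 * nr_node_ids here stand in for the kernel's compile-time and
 * boot-time values; the node count is invented for the example.
 */
#include <stdio.h>

#define MAX_NUMNODES	(1 << 9)	/* e.g. CONFIG_NODES_SHIFT=9 */

static int nr_node_ids = 4;		/* this boot: nodes 0..3 possible */

int main(void)
{
	int i, before = 0, after = 0;

	for (i = 0; i < MAX_NUMNODES; i++)	/* old loop bound */
		before++;
	for (i = 0; i < nr_node_ids; i++)	/* new loop bound */
		after++;

	printf("MAX_NUMNODES: %d iterations, nr_node_ids: %d iterations\n",
	       before, after);
	return 0;
}

The same reasoning applies to the kcalloc() of the per-node
sched_group array below: sizing it by nr_node_ids allocates only what
this machine can ever use.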

For inclusion into the sched-devel/latest tree.

Based on:
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
+ sched-devel/latest .../mingo/linux-2.6-sched-devel.git


Signed-off-by: Mike Travis <travis@sgi.com>
---
 kernel/sched.c |   18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

--- linux-2.6.sched.orig/kernel/sched.c
+++ linux-2.6.sched/kernel/sched.c
@@ -7056,9 +7056,9 @@ static int find_next_best_node(int node,

	min_val = INT_MAX;

-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Start at @node */
-		n = (node + i) % MAX_NUMNODES;
+		n = (node + i) % nr_node_ids;

 		if (!nr_cpus_node(n))
 			continue;
@@ -7252,7 +7252,7 @@ static void free_sched_groups(const cpum
 		if (!sched_group_nodes)
 			continue;

-		for (i = 0; i < MAX_NUMNODES; i++) {
+		for (i = 0; i < nr_node_ids; i++) {
 			struct sched_group *oldsg, *sg = sched_group_nodes[i];

 			*nodemask = node_to_cpumask(i);
@@ -7440,7 +7440,7 @@ static int __build_sched_domains(const c
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
-	sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
+	sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
@@ -7584,7 +7584,7 @@ static int __build_sched_domains(const c
 #endif

 	/* Set up physical groups */
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);

@@ -7608,7 +7608,7 @@ static int __build_sched_domains(const c
 					send_covered, tmpmask);
 	}

-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
@@ -7647,9 +7647,9 @@ static int __build_sched_domains(const c
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;

-		for (j = 0; j < MAX_NUMNODES; j++) {
+		for (j = 0; j < nr_node_ids; j++) {
 			SCHED_CPUMASK_VAR(notcovered, allmasks);
-			int n = (i + j) % MAX_NUMNODES;
+			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);

 			cpus_complement(*notcovered, *covered);
@@ -7702,7 +7702,7 @@ static int __build_sched_domains(const c
 	}

 #ifdef CONFIG_NUMA
-	for (i = 0; i < MAX_NUMNODES; i++)
+	for (i = 0; i < nr_node_ids; i++)
 		init_numa_sched_groups_power(sched_group_nodes[i]);

 	if (sd_allnodes) {
    --

