Date: Wed, 12 Dec 2018
From: Michal Hocko <mhocko@kernel.org>
Subject: Re: [PATCH] mm/alloc: fallback to first node if the wanted node offline
On Wed 12-12-18 16:31:35, Pingfan Liu wrote:
> On Mon, Dec 10, 2018 at 8:37 PM Michal Hocko <mhocko@kernel.org> wrote:
> >
> [...]
> >
> > In other words. Does the following work? I am sorry to wildguess this
> > way but I am not able to recreate your setups to play with this myself.
> >
> > diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
> > index 1308f5408bf7..d51643e10d00 100644
> > --- a/arch/x86/mm/numa.c
> > +++ b/arch/x86/mm/numa.c
> > @@ -216,8 +216,6 @@ static void __init alloc_node_data(int nid)
> >
> >  	node_data[nid] = nd;
> >  	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
> > -
> > -	node_set_online(nid);
> >  }
> >
> >  /**
> > @@ -527,6 +525,19 @@ static void __init numa_clear_kernel_node_hotplug(void)
> >  	}
> >  }
> >
> > +static void __init init_memory_less_node(int nid)
> > +{
> > +	unsigned long zones_size[MAX_NR_ZONES] = {0};
> > +	unsigned long zholes_size[MAX_NR_ZONES] = {0};
> > +
> > +	free_area_init_node(nid, zones_size, 0, zholes_size);
> > +
> > +	/*
> > +	 * All zonelists will be built later in start_kernel() after per cpu
> > +	 * areas are initialized.
> > +	 */
> > +}
> > +
> >  static int __init numa_register_memblks(struct numa_meminfo *mi)
> >  {
> >  	unsigned long uninitialized_var(pfn_align);
> > @@ -570,7 +581,7 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
> >  		return -EINVAL;
> >
> >  	/* Finally register nodes. */
> > -	for_each_node_mask(nid, node_possible_map) {
> > +	for_each_node(nid) {
> >  		u64 start = PFN_PHYS(max_pfn);
> >  		u64 end = 0;
> >
> > @@ -592,6 +603,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
> >  			continue;
> >
> >  		alloc_node_data(nid);
> > +		if (!end)
>
> Here comes the bug: the !end branch can never be reached here.

You are right. I am dumb. I've just completely missed that. Sigh.
Anyway, I think the code is more complicated than necessary and we can
simply drop the check; I do not think we really have to worry about
start ever exceeding end. So the resulting patch should look as follows.
Btw. I believe it is better to pull alloc_node_data out of init_memory_less_node:
there is no need to duplicate the call, and moreover we want to pull
node_set_online out of alloc_node_data as well. The code also seems cleaner this way.
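
To make the issue concrete, here is an illustrative, condensed sketch of the
registration loop as the first version of the patch left it. This is not the
actual kernel code: the scan over the numa_meminfo blocks is abbreviated and
the comments are mine; only the control flow matters.

	/* Sketch only, not the real numa_register_memblks(). */
	for_each_node(nid) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		/* start/end are narrowed from the mi->blk[] entries of this
		 * nid; a node without memory keeps end == 0 */

		if (start >= end)
			continue;	/* memory-less nodes bail out here ... */

		if (end && (end - start) < NODE_MIN_SIZE)
			continue;

		alloc_node_data(nid);
		if (!end) {		/* ... so this test can never succeed */
			/* (what followed is snipped in the quote above) */
		}
	}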

Thanks for your testing and your patience with me here.

diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 1308f5408bf7..a5548fe668fb 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -216,8 +216,6 @@ static void __init alloc_node_data(int nid)

 	node_data[nid] = nd;
 	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
-
-	node_set_online(nid);
 }

 /**
@@ -527,6 +525,19 @@ static void __init numa_clear_kernel_node_hotplug(void)
 	}
 }

+static void __init init_memory_less_node(int nid)
+{
+	unsigned long zones_size[MAX_NR_ZONES] = {0};
+	unsigned long zholes_size[MAX_NR_ZONES] = {0};
+
+	free_area_init_node(nid, zones_size, 0, zholes_size);
+
+	/*
+	 * All zonelists will be built later in start_kernel() after per cpu
+	 * areas are initialized.
+	 */
+}
+
 static int __init numa_register_memblks(struct numa_meminfo *mi)
 {
 	unsigned long uninitialized_var(pfn_align);
@@ -570,7 +581,7 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 		return -EINVAL;

 	/* Finally register nodes. */
-	for_each_node_mask(nid, node_possible_map) {
+	for_each_node(nid) {
 		u64 start = PFN_PHYS(max_pfn);
 		u64 end = 0;

@@ -581,9 +592,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 			end = max(mi->blk[i].end, end);
 		}

-		if (start >= end)
-			continue;
-
 		/*
 		 * Don't confuse VM with a node that doesn't have the
 		 * minimum amount of memory:
@@ -592,6 +600,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 			continue;

 		alloc_node_data(nid);
+		if (!end)
+			init_memory_less_node(nid);
+		else
+			node_set_online(nid);
 	}

 	/* Dump memblock with node info and return. */
@@ -721,21 +733,6 @@ void __init x86_numa_init(void)
 	numa_init(dummy_numa_init);
 }

-static void __init init_memory_less_node(int nid)
-{
-	unsigned long zones_size[MAX_NR_ZONES] = {0};
-	unsigned long zholes_size[MAX_NR_ZONES] = {0};
-
-	/* Allocate and initialize node data. Memory-less node is now online.*/
-	alloc_node_data(nid);
-	free_area_init_node(nid, zones_size, 0, zholes_size);
-
-	/*
-	 * All zonelists will be built later in start_kernel() after per cpu
-	 * areas are initialized.
-	 */
-}
-
 /*
  * Setup early cpu_to_node.
  *
@@ -763,9 +760,6 @@ void __init init_cpu_to_node(void)
 		if (node == NUMA_NO_NODE)
 			continue;

-		if (!node_online(node))
-			init_memory_less_node(node);
-
 		numa_set_node(cpu, node);
 	}
 }
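
For completeness, the per-node logic that the reshuffled loop above ends up
with can be summarised by the following illustrative sketch; the meminfo scan
and the NODE_MIN_SIZE check are elided and the comments are mine, so treat it
as a reading aid rather than the literal code.

	/* Sketch only, condensed from the numa_register_memblks() hunk above. */
	for_each_node(nid) {
		u64 end = 0;

		/* end is raised while scanning the mi->blk[] entries of this
		 * nid; with the old "start >= end" bail-out gone, a node
		 * whose end stays 0 still reaches the code below */

		alloc_node_data(nid);			/* pg_data_t for every registered node */
		if (!end)
			init_memory_less_node(nid);	/* empty zones now, zonelists later */
		else
			node_set_online(nid);		/* only nodes with memory go online here */
	}

Since memory-less nodes now get their pg_data_t and zones set up in
numa_register_memblks(), the !node_online() fixup in init_cpu_to_node()
becomes unnecessary, which is what the last hunk removes.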
--
Michal Hocko
SUSE Labs