Subject: [RFC 3/3] powerpc/numa: Apply mapping between HW and kernel cpus
From: Michael Bringmann <mwb@linux.vnet.ibm.com>
Date: 2018-12-11

Apply the new interface to map external powerpc cpus, spread across
multiple nodes, to a range of kernel cpu values. The mapping is
intended to prevent confusion within the kernel about the cpu+node
assignment, and about the configuration changes that may occur due to
powerpc LPAR migration or other associativity changes during the
lifetime of a system. These interfaces exchange the thread_index
provided by the 'ibm,ppc-interrupt-server#s' properties for an
internal index to be used by kernel scheduling interfaces.

Signed-off-by: Michael Bringmann <mwb@linux.vnet.ibm.com>
---
arch/powerpc/mm/numa.c | 45 +++++++++++++++++---------
arch/powerpc/platforms/pseries/hotplug-cpu.c | 15 +++++++--
2 files changed, 41 insertions(+), 19 deletions(-)
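
Note: the cpuremap_map_cpu(), cpuremap_reserve_cpu() and
cpuremap_release_cpu() helpers used below are introduced earlier in
this series and are not part of this patch. As a rough, self-contained
illustration of the semantics this patch relies on, the following
userspace C sketch models a thread_index -> kernel cpu table; the table
layout, sizes and placement policy here are made up for illustration
and are not the actual implementation.

/*
 * Standalone model of the remapping semantics assumed by this patch.
 * The real cpuremap_*() helpers live earlier in this RFC series; the
 * table layout, sizes and placement policy below are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS			32	/* stand-in for nr_cpu_ids */
#define THREADS_PER_CORE	4	/* stand-in for threads_per_core */

static int  hwthread_of[NR_CPUS];	/* kernel cpu -> hardware thread index */
static bool reserved[NR_CPUS];		/* kernel cpu currently claimed */

/*
 * Map a hardware thread index (as read from "ibm,ppc-interrupt-server#s")
 * plus its position within the core onto a kernel cpu number.  The policy
 * here is simply "first free slot with a matching core offset"; the node
 * id is passed through because the real helper may place node-locally.
 */
static int cpuremap_map_cpu(int thread_index, int thread_in_core, int nid)
{
	int cpu;

	(void)nid;			/* node-aware placement not modelled */

	/* Reuse the slot if this hardware thread is already known. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (reserved[cpu] && hwthread_of[cpu] == thread_index)
			return cpu;

	/* Otherwise take the first free slot at the right core offset. */
	for (cpu = thread_in_core; cpu < NR_CPUS; cpu += THREADS_PER_CORE)
		if (!reserved[cpu]) {
			hwthread_of[cpu] = thread_index;
			return cpu;
		}

	return -1;
}

static void cpuremap_reserve_cpu(int cpu)
{
	if (cpu >= 0 && cpu < NR_CPUS)
		reserved[cpu] = true;
}

static void cpuremap_release_cpu(int cpu)
{
	if (cpu >= 0 && cpu < NR_CPUS)
		reserved[cpu] = false;
}

int main(void)
{
	/* Hot-add one core whose interrupt servers are 40..43, on node 1. */
	int servers[THREADS_PER_CORE] = { 40, 41, 42, 43 };
	int i;

	for (i = 0; i < THREADS_PER_CORE; i++) {
		int cpu = cpuremap_map_cpu(servers[i], i, 1);

		cpuremap_reserve_cpu(cpu);
		printf("hw thread %d -> kernel cpu %d\n", servers[i], cpu);
	}

	cpuremap_release_cpu(0);	/* mirror of the hot-remove path */
	return 0;
}

The (thread_index, i, nid) triple fed to the model above is the same
triple that pseries_add_processor() passes to the real helper in the
hunk below before marking the resulting cpu present.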

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 7d6bba264..59d7cd9 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1063,7 +1063,8 @@ u64 memory_hotplug_max(void)

struct topology_update_data {
struct topology_update_data *next;
- unsigned int cpu;
+ unsigned int old_cpu;
+ unsigned int new_cpu;
int old_nid;
int new_nid;
};
@@ -1253,13 +1254,13 @@ static int update_cpu_topology(void *data)

for (update = data; update; update = update->next) {
int new_nid = update->new_nid;
- if (cpu != update->cpu)
+ if (cpu != update->new_cpu)
continue;

- unmap_cpu_from_node(cpu);
- map_cpu_to_node(cpu, new_nid);
- set_cpu_numa_node(cpu, new_nid);
- set_cpu_numa_mem(cpu, local_memory_node(new_nid));
+ unmap_cpu_from_node(update->old_cpu);
+ map_cpu_to_node(update->new_cpu, new_nid);
+ set_cpu_numa_node(update->new_cpu, new_nid);
+ set_cpu_numa_mem(update->new_cpu, local_memory_node(new_nid));
vdso_getcpu_init();
}

@@ -1283,7 +1284,7 @@ static int update_lookup_table(void *data)
int nid, base, j;

nid = update->new_nid;
- base = cpu_first_thread_sibling(update->cpu);
+ base = cpu_first_thread_sibling(update->new_cpu);

for (j = 0; j < threads_per_core; j++) {
update_numa_cpu_lookup_table(base + j, nid);
@@ -1305,7 +1306,7 @@ int numa_update_cpu_topology(bool cpus_locked)
struct topology_update_data *updates, *ud;
cpumask_t updated_cpus;
struct device *dev;
- int weight, new_nid, i = 0;
+ int weight, new_nid, i = 0, ii;

if (!prrn_enabled && !vphn_enabled && topology_inited)
return 0;
@@ -1349,12 +1350,16 @@ int numa_update_cpu_topology(bool cpus_locked)
continue;
}

+ ii = 0;
for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
ud = &updates[i++];
ud->next = &updates[i];
- ud->cpu = sibling;
ud->new_nid = new_nid;
ud->old_nid = numa_cpu_lookup_table[sibling];
+ ud->old_cpu = sibling;
+ ud->new_cpu = cpuremap_map_cpu(
+ get_hard_smp_processor_id(sibling),
+ ii++, new_nid);
cpumask_set_cpu(sibling, &updated_cpus);
}
cpu = cpu_last_thread_sibling(cpu);
@@ -1370,9 +1375,10 @@ int numa_update_cpu_topology(bool cpus_locked)
pr_debug("Topology update for the following CPUs:\n");
if (cpumask_weight(&updated_cpus)) {
for (ud = &updates[0]; ud; ud = ud->next) {
- pr_debug("cpu %d moving from node %d "
- "to %d\n", ud->cpu,
- ud->old_nid, ud->new_nid);
+ pr_debug("cpu %d, node %d moving to"
+ " cpu %d, node %d\n",
+ ud->old_cpu, ud->old_nid,
+ ud->new_cpu, ud->new_nid);
}
}

@@ -1409,13 +1415,20 @@ int numa_update_cpu_topology(bool cpus_locked)
cpumask_of(raw_smp_processor_id()));

for (ud = &updates[0]; ud; ud = ud->next) {
- unregister_cpu_under_node(ud->cpu, ud->old_nid);
- register_cpu_under_node(ud->cpu, ud->new_nid);
+ unregister_cpu_under_node(ud->old_cpu, ud->old_nid);
+ register_cpu_under_node(ud->new_cpu, ud->new_nid);

- dev = get_cpu_device(ud->cpu);
+ dev = get_cpu_device(ud->old_cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
- cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
+ cpumask_clear_cpu(ud->old_cpu, &cpu_associativity_changes_mask);
+ if (ud->old_cpu != ud->new_cpu) {
+ dev = get_cpu_device(ud->new_cpu);
+ if (dev)
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ cpumask_clear_cpu(ud->new_cpu,
+ &cpu_associativity_changes_mask);
+ }
changed = 1;
}

diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 620cb57..3a11a31 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -259,8 +259,13 @@ static int pseries_add_processor(struct device_node *np)
zalloc_cpumask_var(&tmp, GFP_KERNEL);

nthreads = len / sizeof(u32);
- for (i = 0; i < nthreads; i++)
- cpumask_set_cpu(i, tmp);
+ for (i = 0; i < nthreads; i++) {
+ int thread_index = be32_to_cpu(intserv[i]);
+ int nid = find_and_online_cpu_nid(thread_index, false);
+ int cpu = cpuremap_map_cpu(thread_index, i, nid);
+ cpumask_set_cpu(cpu, tmp);
+ cpuremap_reserve_cpu(cpu);
+ }

cpu_maps_update_begin();

@@ -333,6 +338,7 @@ static void pseries_remove_processor(struct device_node *np)
set_cpu_present(cpu, false);
set_hard_smp_processor_id(cpu, -1);
update_numa_cpu_lookup_table(cpu, -1);
+ cpuremap_release_cpu(cpu);
break;
}
if (cpu >= nr_cpu_ids)
@@ -346,7 +352,7 @@ static int dlpar_online_cpu(struct device_node *dn)
{
int rc = 0;
unsigned int cpu;
- int len, nthreads, i;
+ int len, nthreads, i, nid;
const __be32 *intserv;
u32 thread;

@@ -367,9 +373,11 @@ static int dlpar_online_cpu(struct device_node *dn)
cpu_maps_update_done();
timed_topology_update(1);
- find_and_online_cpu_nid(cpu, true);
+ nid = find_and_online_cpu_nid(cpu, true);
+ cpuremap_map_cpu(thread, i, nid);
rc = device_online(get_cpu_device(cpu));
if (rc)
goto out;
+ cpuremap_reserve_cpu(cpu);
cpu_maps_update_begin();

break;
@@ -541,6 +549,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
rc = device_offline(get_cpu_device(cpu));
if (rc)
goto out;
+ cpuremap_release_cpu(cpu);
cpu_maps_update_begin();
break;
