Subject: Re: [PATCH v03] powerpc/numa: Perform full re-add of CPU for PRRN/VPHN topology update
Hi Michael,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on powerpc/next]
[also build test ERROR on v5.0-rc4 next-20190206]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Michael-Bringmann/powerpc-numa-Perform-full-re-add-of-CPU-for-PRRN-VPHN-topology-update/20190207-101545
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: powerpc-allyesconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 8.2.0-11) 8.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=8.2.0 make.cross ARCH=powerpc

All errors (new ones prefixed by >>):

arch/powerpc/mm/numa.c: In function 'numa_update_cpu_topology':
>> arch/powerpc/mm/numa.c:1361:4: error: implicit declaration of function 'dlpar_cpu_readd'; did you mean 'raw_cpu_read'? [-Werror=implicit-function-declaration]
     dlpar_cpu_readd(cpu);
     ^~~~~~~~~~~~~~~
     raw_cpu_read
cc1: some warnings being treated as errors

vim +1361 arch/powerpc/mm/numa.c

 1298
 1299  /*
 1300   * Update the node maps and sysfs entries for each cpu whose home node
 1301   * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 1302   *
 1303   * readd_cpus: Also readd any CPUs that have changed affinity
 1304   */
 1305  static int numa_update_cpu_topology(bool readd_cpus)
 1306  {
 1307          unsigned int cpu, sibling, changed = 0;
 1308          struct topology_update_data *updates, *ud;
 1309          cpumask_t updated_cpus;
 1310          struct device *dev;
 1311          int weight, new_nid, i = 0;
 1312
 1313          if ((!prrn_enabled && !vphn_enabled && topology_inited) ||
 1314              topology_update_in_progress)
 1315                  return 0;
 1316
 1317          weight = cpumask_weight(&cpu_associativity_changes_mask);
 1318          if (!weight)
 1319                  return 0;
 1320
 1321          updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
 1322          if (!updates)
 1323                  return 0;
 1324
 1325          topology_update_in_progress = 1;
 1326
 1327          cpumask_clear(&updated_cpus);
 1328
 1329          for_each_cpu(cpu, &cpu_associativity_changes_mask) {
 1330                  /*
 1331                   * If siblings aren't flagged for changes, updates list
 1332                   * will be too short. Skip on this update and set for next
 1333                   * update.
 1334                   */
 1335                  if (!cpumask_subset(cpu_sibling_mask(cpu),
 1336                                      &cpu_associativity_changes_mask)) {
 1337                          pr_info("Sibling bits not set for associativity "
 1338                                  "change, cpu%d\n", cpu);
 1339                          cpumask_or(&cpu_associativity_changes_mask,
 1340                                     &cpu_associativity_changes_mask,
 1341                                     cpu_sibling_mask(cpu));
 1342                          cpu = cpu_last_thread_sibling(cpu);
 1343                          continue;
 1344                  }
 1345
 1346                  new_nid = find_and_online_cpu_nid(cpu);
 1347
 1348                  if ((new_nid == numa_cpu_lookup_table[cpu]) ||
 1349                      !cpu_present(cpu)) {
 1350                          cpumask_andnot(&cpu_associativity_changes_mask,
 1351                                         &cpu_associativity_changes_mask,
 1352                                         cpu_sibling_mask(cpu));
 1353                          if (cpu_present(cpu))
 1354                                  dbg("Assoc chg gives same node %d for cpu%d\n",
 1355                                      new_nid, cpu);
 1356                          cpu = cpu_last_thread_sibling(cpu);
 1357                          continue;
 1358                  }
 1359
 1360                  if (readd_cpus)
> 1361                          dlpar_cpu_readd(cpu);
 1362
 1363                  for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
 1364                          ud = &updates[i++];
 1365                          ud->next = &updates[i];
 1366                          ud->cpu = sibling;
 1367                          ud->new_nid = new_nid;
 1368                          ud->old_nid = numa_cpu_lookup_table[sibling];
 1369                          cpumask_set_cpu(sibling, &updated_cpus);
 1370                  }
 1371                  cpu = cpu_last_thread_sibling(cpu);
 1372          }
 1373
 1374          /*
 1375           * Prevent processing of 'updates' from overflowing array
 1376           * where last entry filled in a 'next' pointer.
 1377           */
 1378          if (i)
 1379                  updates[i-1].next = NULL;
 1380
 1381          pr_debug("Topology update for the following CPUs:\n");
 1382          if (cpumask_weight(&updated_cpus)) {
 1383                  for (ud = &updates[0]; ud; ud = ud->next) {
 1384                          pr_debug("cpu %d moving from node %d "
 1385                                   "to %d\n", ud->cpu,
 1386                                   ud->old_nid, ud->new_nid);
 1387                  }
 1388          }
 1389
 1390          /*
 1391           * In cases where we have nothing to update (because the updates list
 1392           * is too short or because the new topology is same as the old one),
 1393           * skip invoking update_cpu_topology() via stop-machine(). This is
 1394           * necessary (and not just a fast-path optimization) since stop-machine
 1395           * can end up electing a random CPU to run update_cpu_topology(), and
 1396           * thus trick us into setting up incorrect cpu-node mappings (since
 1397           * 'updates' is kzalloc()'ed).
 1398           *
 1399           * And for the similar reason, we will skip all the following updating.
 1400           */
 1401          if (!cpumask_weight(&updated_cpus))
 1402                  goto out;
 1403
 1404          stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
 1405
 1406          /*
 1407           * Update the numa-cpu lookup table with the new mappings, even for
 1408           * offline CPUs. It is best to perform this update from the stop-
 1409           * machine context.
 1410           */
 1411          stop_machine(update_lookup_table, &updates[0],
 1412                       cpumask_of(raw_smp_processor_id()));
 1413
 1414          for (ud = &updates[0]; ud; ud = ud->next) {
 1415                  unregister_cpu_under_node(ud->cpu, ud->old_nid);
 1416                  register_cpu_under_node(ud->cpu, ud->new_nid);
 1417
 1418                  dev = get_cpu_device(ud->cpu);
 1419                  if (dev)
 1420                          kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 1421                  cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
 1422                  changed = 1;
 1423          }
 1424
 1425  out:
 1426          topology_changed = changed;
 1427          topology_update_in_progress = 0;
 1428          kfree(updates);
 1429          return changed;
 1430  }
 1431

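Note: the error above means that no prototype for dlpar_cpu_readd() is visible in arch/powerpc/mm/numa.c when the call at line 1361 is compiled, so the allyesconfig build fails under -Werror=implicit-function-declaration. Below is a minimal sketch of the kind of declaration that would make the call visible; the CONFIG_PPC_SPLPAR guard, the int return type, and the suggested header are assumptions for illustration only, not taken from the patch:

/*
 * Hypothetical sketch only (e.g. in a header such as asm/topology.h):
 * give numa.c a prototype, plus a stub for configurations that do not
 * build the pseries DLPAR CPU hotplug code.  Guard, return type and
 * location are assumptions, not the actual patch.
 */
#ifdef CONFIG_PPC_SPLPAR
int dlpar_cpu_readd(int cpu);
#else
static inline int dlpar_cpu_readd(int cpu)
{
        return -EOPNOTSUPP;     /* CPU re-add not supported in this config */
}
#endif

Whichever header the patch actually uses, the point is the same: a prototype has to be in scope at the call site in numa.c for the build to succeed with implicit declarations treated as errors.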
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation