    Date:    2016-02-22
    From:    Thomas Gleixner <tglx@linutronix.de>
    Subject: [patch V2 11/28] x86/topology: Create logical package id
    For per-package oriented services we must be able to rely on the number
    of CPU packages to be within bounds. Create a tracking facility, which

    - calculates the number of possible packages depending on nr_cpu_ids
      after boot (see the worked example after this list)

    - makes sure that the package id is within the number of possible
      packages. If the physical package id derived from the APIC id is
      outside that range, it is mapped to a logical package id, provided
      there is enough space available.
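
    As a worked example (numbers assumed here, not taken from this patch): on
    a machine with nr_cpu_ids = 64, 8 cores per package and 2 threads per
    core, the number of possible packages works out as
    DIV_ROUND_UP(64, 8 * 2) = 4, so every physical package id handed in by
    the APIC code is mapped into the range 0..3.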

    Provide interfaces for drivers to query the mapping and to translate
    physical to logical package ids, as sketched below.
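
    A minimal sketch (not part of this patch) of how a per-package driver
    could use these interfaces; struct pkg_state, pkg_states and
    pkg_driver_init() are made up for illustration, only the topology_*()
    helpers come from this series:

        #include <linux/cpumask.h>
        #include <linux/errno.h>
        #include <linux/init.h>
        #include <linux/slab.h>
        #include <linux/topology.h>

        struct pkg_state {
                int counter;
        };

        static struct pkg_state **pkg_states;

        static int __init pkg_driver_init(void)
        {
                unsigned int cpu;

                /* Bounded by the possible packages, not the APIC id space */
                pkg_states = kcalloc(topology_max_packages(),
                                     sizeof(*pkg_states), GFP_KERNEL);
                if (!pkg_states)
                        return -ENOMEM;

                for_each_online_cpu(cpu) {
                        int pkg = topology_logical_package_id(cpu);

                        if (pkg_states[pkg])
                                continue;
                        pkg_states[pkg] = kzalloc(sizeof(*pkg_states[pkg]),
                                                  GFP_KERNEL);
                        if (!pkg_states[pkg])
                                return -ENOMEM; /* unwinding omitted */
                }
                return 0;
        }

    With only the physical package id available, such an array would have to
    be sized for the worst-case APIC-derived package id rather than
    topology_max_packages().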

    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    ---
    arch/x86/include/asm/processor.h | 2
    arch/x86/include/asm/topology.h | 10 +++
    arch/x86/kernel/apic/apic.c | 14 +++++
    arch/x86/kernel/cpu/common.c | 2
    arch/x86/kernel/cpu/intel.c | 13 +++++
    arch/x86/kernel/cpu/proc.c | 1
    arch/x86/kernel/smpboot.c | 100 +++++++++++++++++++++++++++++++++++++++
    7 files changed, 142 insertions(+)

    Index: b/arch/x86/include/asm/processor.h
    ===================================================================
    --- a/arch/x86/include/asm/processor.h
    +++ b/arch/x86/include/asm/processor.h
    @@ -129,6 +129,8 @@ struct cpuinfo_x86 {
    u16 booted_cores;
    /* Physical processor id: */
    u16 phys_proc_id;
    + /* Logical processor id: */
    + u16 logical_proc_id;
    /* Core id: */
    u16 cpu_core_id;
    /* Compute unit id */
    Index: b/arch/x86/include/asm/topology.h
    ===================================================================
    --- a/arch/x86/include/asm/topology.h
    +++ b/arch/x86/include/asm/topology.h
    @@ -119,12 +119,22 @@ static inline void setup_node_to_cpumask

    extern const struct cpumask *cpu_coregroup_mask(int cpu);

    +#define topology_logical_package_id(cpu) (cpu_data(cpu).logical_proc_id)
    #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
    #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)

    #ifdef ENABLE_TOPO_DEFINES
    #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
    #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
    +
    +extern unsigned int __max_logical_packages;
    +#define topology_max_packages() (__max_logical_packages)
    +int topology_update_package_map(unsigned int apicid, unsigned int cpu);
    +extern int topology_phys_to_logical_pkg(unsigned int pkg);
    +#else
    +#define topology_max_packages() (1)
    +static inline int
    +topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
    #endif

    static inline void arch_fix_phys_package_id(int num, u32 slot)
    Index: b/arch/x86/kernel/apic/apic.c
    ===================================================================
    --- a/arch/x86/kernel/apic/apic.c
    +++ b/arch/x86/kernel/apic/apic.c
    @@ -2078,6 +2078,20 @@ int generic_processor_info(int apicid, i
    cpu = cpumask_next_zero(-1, cpu_present_mask);

    /*
    + * This can happen on physical hotplug. The sanity check at boot time
    + * is done from native_smp_prepare_cpus() after num_possible_cpus() is
    + * established.
    + */
    + if (topology_update_package_map(apicid, cpu) < 0) {
    + int thiscpu = max + disabled_cpus;
    +
    + pr_warning("ACPI: Package limit reached. Processor %d/0x%x ignored.\n",
    + thiscpu, apicid);
    + disabled_cpus++;
    + return -ENOSPC;
    + }
    +
    + /*
    * Validate version
    */
    if (version == 0x0) {
    Index: b/arch/x86/kernel/cpu/common.c
    ===================================================================
    --- a/arch/x86/kernel/cpu/common.c
    +++ b/arch/x86/kernel/cpu/common.c
    @@ -977,6 +977,8 @@ static void identify_cpu(struct cpuinfo_
    #ifdef CONFIG_NUMA
    numa_add_cpu(smp_processor_id());
    #endif
    + /* The boot/hotplug time assignment got cleared, restore it */
    + c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
    }

    /*
    Index: b/arch/x86/kernel/cpu/intel.c
    ===================================================================
    --- a/arch/x86/kernel/cpu/intel.c
    +++ b/arch/x86/kernel/cpu/intel.c
    @@ -160,6 +160,19 @@ static void early_init_intel(struct cpui
    pr_info("Disabling PGE capability bit\n");
    setup_clear_cpu_cap(X86_FEATURE_PGE);
    }
    +
    + if (c->cpuid_level >= 0x00000001) {
    + u32 eax, ebx, ecx, edx;
    +
    + cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
    + /*
    + * If HTT (EDX[28]) is set EBX[16:23] contain the number of
    + * apicids which are reserved per package. Store the resulting
    + * shift value for the package management code.
    + */
    + if (edx & (1U << 28))
    + c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
    + }
    }

    #ifdef CONFIG_X86_32
    Index: b/arch/x86/kernel/cpu/proc.c
    ===================================================================
    --- a/arch/x86/kernel/cpu/proc.c
    +++ b/arch/x86/kernel/cpu/proc.c
    @@ -12,6 +12,7 @@ static void show_cpuinfo_core(struct seq
    {
    #ifdef CONFIG_SMP
    seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
    + seq_printf(m, "logical id\t: %d\n", c->logical_proc_id);
    seq_printf(m, "siblings\t: %d\n",
    cpumask_weight(topology_core_cpumask(cpu)));
    seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
    Index: b/arch/x86/kernel/smpboot.c
    ===================================================================
    --- a/arch/x86/kernel/smpboot.c
    +++ b/arch/x86/kernel/smpboot.c
    @@ -97,6 +97,14 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t
    DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
    EXPORT_PER_CPU_SYMBOL(cpu_info);

    +/* Logical package management. We might want to allocate that dynamically */
    +static int *physical_to_logical_pkg __read_mostly;
    +static unsigned long *physical_package_map __read_mostly;
    +static unsigned long *logical_package_map __read_mostly;
    +static unsigned int max_physical_pkg_id __read_mostly;
    +unsigned int __max_logical_packages __read_mostly;
    +EXPORT_SYMBOL(__max_logical_packages);
    +
    static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
    {
    unsigned long flags;
    @@ -251,6 +259,97 @@ static void notrace start_secondary(void
    cpu_startup_entry(CPUHP_ONLINE);
    }

    +int topology_update_package_map(unsigned int apicid, unsigned int cpu)
    +{
    + unsigned int new, pkg = apicid >> boot_cpu_data.x86_coreid_bits;
    +
    + /* Called from early boot ? */
    + if (!physical_package_map)
    + return 0;
    +
    + if (pkg >= max_physical_pkg_id)
    + return -EINVAL;
    +
    + /* Set the logical package id */
    + if (test_and_set_bit(pkg, physical_package_map))
    + goto found;
    +
    + if (pkg < __max_logical_packages) {
    + set_bit(pkg, logical_package_map);
    + physical_to_logical_pkg[pkg] = pkg;
    + goto found;
    + }
    + new = find_first_zero_bit(logical_package_map, __max_logical_packages);
    + if (new >= __max_logical_packages) {
    + physical_to_logical_pkg[pkg] = -1;
    + pr_warn("APIC(%x) Package %u exceeds logical package map\n",
    + apicid, pkg);
    + return -ENOSPC;
    + }
    + set_bit(new, logical_package_map);
    + pr_info("APIC(%x) Converting physical %u to logical package %u\n",
    + apicid, pkg, new);
    + physical_to_logical_pkg[pkg] = new;
    +
    +found:
    + cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
    + return 0;
    +}
    +
    +/**
    + * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
    + *
    + * Returns logical package id or -1 if not found
    + */
    +int topology_phys_to_logical_pkg(unsigned int phys_pkg)
    +{
    + if (phys_pkg >= max_physical_pkg_id)
    + return -1;
    + return physical_to_logical_pkg[phys_pkg];
    +}
    +EXPORT_SYMBOL(topology_phys_to_logical_pkg);
    +
    +static void __init smp_init_package_map(void)
    +{
    + unsigned int ncpus, cpu;
    + size_t size;
    +
    + /*
    + * Today neither Intel nor AMD support heterogeneous systems. That
    + * might change in the future....
    + */
    + ncpus = boot_cpu_data.x86_max_cores * smp_num_siblings;
    + __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
    +
    + /*
    + * Possibly larger than what we need as the number of apic ids per
    + * package can be smaller than the actual used apic ids.
    + */
    + max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
    + size = max_physical_pkg_id * sizeof(unsigned int);
    + physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
    + memset(physical_to_logical_pkg, 0xff, size);
    + size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
    + physical_package_map = kzalloc(size, GFP_KERNEL);
    + size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
    + logical_package_map = kzalloc(size, GFP_KERNEL);
    +
    + pr_info("Max logical packages: %u\n", __max_logical_packages);
    +
    + for_each_present_cpu(cpu) {
    + unsigned int apicid = apic->cpu_present_to_apicid(cpu);
    +
    + if (apicid == BAD_APICID || !apic->apic_id_valid(apicid))
    + continue;
    + if (!topology_update_package_map(apicid, cpu))
    + continue;
    + pr_warn("CPU %u APICId %x disabled\n", cpu, apicid);
    + per_cpu(x86_bios_cpu_apicid, cpu) = BAD_APICID;
    + set_cpu_possible(cpu, false);
    + set_cpu_present(cpu, false);
    + }
    +}
    +
    void __init smp_store_boot_cpu_info(void)
    {
    int id = 0; /* CPU 0 */
    @@ -258,6 +357,7 @@ void __init smp_store_boot_cpu_info(void

    *c = boot_cpu_data;
    c->cpu_index = id;
    + smp_init_package_map();
    }

    /*
