Subject: Re: [PATCH 8/9] x86/intel_rdt: Hot cpu support for Cache Allocation
On Wed, Jul 01, 2015 at 03:21:09PM -0700, Vikas Shivappa wrote:
> +/*
> + * cbm_update_msrs() - Updates all the existing IA32_L3_MASK_n MSRs
> + * which are one per CLOSid, except IA32_L3_MASK_0, on the current package.
> + */
> +static void cbm_update_msrs(void *info)
> +{
> +	int maxid = boot_cpu_data.x86_cache_max_closid;
> +	unsigned int i;
> +
> +	/*
> +	 * At cpu reset, all bits of IA32_L3_MASK_n are set.
> +	 * The index starts from one as there is no need
> +	 * to update IA32_L3_MASK_0 as it belongs to the root cgroup,
> +	 * whose cache mask is always all 1s.
> +	 */
> +	for (i = 1; i < maxid; i++) {
> +		if (ccmap[i].clos_refcnt)
> +			cbm_cpu_update((void *)i);
> +	}
> +}
> +
> +static inline void intel_rdt_cpu_start(int cpu)
> +{
> +	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
> +
> +	state->closid = 0;
> +	mutex_lock(&rdt_group_mutex);
> +	if (rdt_cpumask_update(cpu))
> +		smp_call_function_single(cpu, cbm_update_msrs, NULL, 1);
> +	mutex_unlock(&rdt_group_mutex);
> +}

If you were to guard your array with both a mutex and a raw_spinlock,
then you could avoid the IPI and use CPU_STARTING.
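
Roughly, such a scheme could look like the sketch below (ccmap_lock,
cbm_update_msrs_local() and intel_rdt_cpu_starting() are invented names
for illustration; the other identifiers come from the patch). Writers
that modify ccmap would take rdt_group_mutex plus the raw_spinlock,
while the CPU_STARTING callback, which runs on the incoming CPU with
interrupts disabled and cannot sleep or receive IPIs, takes only the
raw_spinlock and programs the MSRs locally:

static DEFINE_RAW_SPINLOCK(ccmap_lock);

/* Program IA32_L3_MASK_n on the local CPU; caller holds ccmap_lock. */
static void cbm_update_msrs_local(void)
{
	int maxid = boot_cpu_data.x86_cache_max_closid;
	unsigned int i;

	/* IA32_L3_MASK_0 belongs to the root cgroup and stays all 1s. */
	for (i = 1; i < maxid; i++) {
		if (ccmap[i].clos_refcnt)
			cbm_cpu_update((void *)i);
	}
}

/* Runs on the incoming CPU, irqs off; must not sleep, so no mutex. */
static void intel_rdt_cpu_starting(unsigned int cpu)
{
	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);

	state->closid = 0;

	raw_spin_lock(&ccmap_lock);
	if (rdt_cpumask_update(cpu))
		cbm_update_msrs_local();
	raw_spin_unlock(&ccmap_lock);
}

The notifier would then handle CPU_STARTING for the bring-up side
instead of doing the smp_call_function_single() from CPU_ONLINE, while
the CPU_DOWN_PREPARE path can keep taking rdt_group_mutex since it runs
in process context.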

> +static int intel_rdt_cpu_notifier(struct notifier_block *nb,
> +				  unsigned long action, void *hcpu)
> +{
> +	unsigned int cpu = (unsigned long)hcpu;
> +
> +	switch (action) {
> +	case CPU_DOWN_FAILED:
> +	case CPU_ONLINE:
> +		intel_rdt_cpu_start(cpu);
> +		break;
> +	case CPU_DOWN_PREPARE:
> +		intel_rdt_cpu_exit(cpu);
> +		break;
> +	default:
> +		break;
> +	}
> +
> +	return NOTIFY_OK;
> }

