Date: Mon, 17 Oct 2016 21:35:34 +0200 (CEST)
From: Thomas Gleixner <>
Subject: Re: [PATCH v4 11/18] x86/intel_rdt: Add basic resctrl filesystem support
On Fri, 14 Oct 2016, Fenghua Yu wrote:
> +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
> + *
> + * More information about RDT be found in the Intel (R) x86 Architecture
> + * Software Developer Manual.
Yes, that's how it should look.
> +static void l3_qos_cfg_update(void *arg)
> +{
> +	struct rdt_resource *r = arg;
> +
> +	wrmsrl(IA32_L3_QOS_CFG, r->cdp_enabled);
> +}
> +
> +static void set_l3_qos_cfg(struct rdt_resource *r)
> +{
> +	struct list_head *l;
> +	struct rdt_domain *d;
> +	struct cpumask cpu_mask;
You cannot have cpumasks on the stack; with a large NR_CPUS configuration they are simply too big for that.
cpumask_var_t mask;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
	return -ENOMEM;
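Folded into the function, that gives roughly the sketch below. It is only an illustration of the suggested shape: it assumes the function now returns int so the allocation failure can propagate to the caller, and it already uses the list_for_each_entry() conversion asked for further down.

static int set_l3_qos_cfg(struct rdt_resource *r)
{
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	/* zalloc_cpumask_var() hands back an already zeroed mask. */
	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	/* One CPU per L3 domain is enough to write that domain's MSR. */
	list_for_each_entry(d, &r->domains, list)
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		l3_qos_cfg_update(r);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, l3_qos_cfg_update, r, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);
	return 0;
}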
> +	int cpu;
> +
> +	cpumask_clear(&cpu_mask);
That can go away then, since zalloc_cpumask_var() already returns a zeroed mask.
> +	list_for_each(l, &r->domains) {
list_for_each_entry() again
> +		d = list_entry(l, struct rdt_domain, list);
> +		cpumask_set_cpu(cpumask_any(&d->cpu_mask), &cpu_mask);
A comment to explain what this does would be helpful.
> +	}
> +	cpu = get_cpu();
> +	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
> +	if (cpumask_test_cpu(cpu, &cpu_mask))
> +		l3_qos_cfg_update(r);
> +	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
> +	smp_call_function_many(&cpu_mask, l3_qos_cfg_update, r, 1);
> +	put_cpu();
> +}
> +
> +static int parse_rdtgroupfs_options(char *data)
> +{
> +	char *token, *o = data;
> +	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
> +
> +	r->cdp_enabled = false;
> +	while ((token = strsep(&o, ",")) != NULL) {
> +		if (!*token)
> +			return -EINVAL;
> +
> +		if (!strcmp(token, "cdp"))
> +			if (r->enabled && r->cdp_capable)
> +				r->cdp_enabled = true;
> +	}
> +
> +	return 0;
> +}
> +
> +static struct dentry *rdt_mount(struct file_system_type *fs_type,
> +				int flags, const char *unused_dev_name,
> +				void *data)
> +{
> +	struct dentry *dentry;
> +	int ret;
> +	bool new_sb;
> +
> +	mutex_lock(&rdtgroup_mutex);
> +	/*
> +	 * resctrl file system can only be mounted once.
> +	 */
> +	if (static_branch_unlikely(&rdt_enable_key)) {
> +		dentry = ERR_PTR(-EBUSY);
> +		goto out;
> +	}
> +
> +	ret = parse_rdtgroupfs_options(data);
> +	if (ret) {
> +		dentry = ERR_PTR(ret);
> +		goto out;
> +	}
> +
> +	dentry = kernfs_mount(fs_type, flags, rdt_root,
> +			      RDTGROUP_SUPER_MAGIC, &new_sb);
&new_sb is pointless here. It just tells the caller that a new superblock has been created. So in case of a valid dentry, new_sb will always be true, and if anything failed in kernfs_mount(), including the allocation of a new superblock, then new_sb is completely irrelevant as IS_ERR(dentry) will be true. So you can just hand in NULL because you do not allow multiple mounts.
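With that, the call reduces to the following (a sketch only; kernfs_mount() accepts a NULL new_sb_created pointer, and the now unused new_sb local and its check below would go away):

	dentry = kernfs_mount(fs_type, flags, rdt_root,
			      RDTGROUP_SUPER_MAGIC, NULL);
	if (IS_ERR(dentry))
		goto out;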
> +	if (IS_ERR(dentry))
> +		goto out;
> +	if (!new_sb) {
> +		dentry = ERR_PTR(-EINVAL);
> +		goto out;
> +	}
> +	if (rdt_resources_all[RDT_RESOURCE_L3].cdp_capable)
> +		set_l3_qos_cfg(&rdt_resources_all[RDT_RESOURCE_L3]);
> +	static_branch_enable(&rdt_enable_key);
> +
> +out:
> +	mutex_unlock(&rdtgroup_mutex);
> +
> +	return dentry;
> +}
> +
> +static void reset_all_cbms(struct rdt_resource *r)
> +{
> +	struct list_head *l;
> +	struct rdt_domain *d;
> +	struct msr_param msr_param;
> +	struct cpumask cpu_mask;
> +	int i, cpu;
> +
> +	cpumask_clear(&cpu_mask);
> +	msr_param.res = r;
> +	msr_param.low = 0;
> +	msr_param.high = r->max_closid;
> +
> +	list_for_each(l, &r->domains) {
> +		d = list_entry(l, struct rdt_domain, list);
list_for_each_entry()
> +		cpumask_set_cpu(cpumask_any(&d->cpu_mask), &cpu_mask);
> +
> +		for (i = 0; i < r->max_closid; i++)
> +			d->cbm[i] = r->max_cbm;
> +	}
> +	cpu = get_cpu();
> +	/* Update CBM on this cpu if it's in cpu_mask. */
> +	if (cpumask_test_cpu(cpu, &cpu_mask))
> +		rdt_cbm_update(&msr_param);
> +	/* Updte CBM on all other cpus in cpu_mask. */
Update
> +	smp_call_function_many(&cpu_mask, rdt_cbm_update, &msr_param, 1);
> +	put_cpu();
> +}
> +
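For completeness, a sketch of reset_all_cbms() with the same remarks applied: list_for_each_entry(), the corrected comment, and, assuming the cpumask-on-stack point above applies here as well, a heap allocated mask with an int return for the allocation failure.

static int reset_all_cbms(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->max_closid;

	/* Reset every CBM and pick one CPU per domain for the MSR writes. */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->max_closid; i++)
			d->cbm[i] = r->max_cbm;
	}

	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_cbm_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_cbm_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);
	return 0;
}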
Thanks,
tglx