    Subject: Re: [PATCH 11/24] x86/resctrl: Group staged configuration into a separate struct

    Hi James,

    On 10/30/2020 9:11 AM, James Morse wrote:
    > Arm's MPAM may have surprisingly large bitmaps for its cache
    > portions as the architecture allows up to 4K portions. The size
    > exposed via resctrl may not be the same, some scaling may
    > occur.
    >
    > The values written to hardware may be unlike the values received
    > from resctrl, e.g. MBA percentages may be backed by a bitmap,
    > or a maximum value that isn't a percentage.
    >
    > Today resctrl's ctrlval arrays are written to directly by the

    When using a cryptic term like "ctrlval" it would be easier to understand
    what is meant if it matched the actual variable name in the code, "ctrl_val".

    > resctrl filesystem code. e.g. apply_config(). This is a problem

    This sentence starts with "Today", implying it describes what the code does
    before this change, but the example function, apply_config(), is only
    introduced in this patch.

    > if scaling or conversion is needed by the architecture.
    >
    > The arch code should own the ctrlval array (to allow scaling and
    > conversion), and should only need a single copy of the array for the
    > values currently applied in hardware.

    ok, but that is the case, no?
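
    (Purely as a hypothetical sketch of the scaling/conversion being described
    above, with a made-up helper name that is not from this series: owning the
    array would let the arch code do something like

        /*
         * Hypothetical sketch only: scale a resctrl percentage to whatever
         * maximum value the hardware control actually uses.
         */
        static u32 percent_to_hw_ctrl(u32 resctrl_percent, u32 hw_max)
        {
                /* e.g. a hardware maximum that is not 0-100 */
                return (resctrl_percent * hw_max) / 100;
        }

    before writing the result into its copy of the array.)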

    >
    > Move the new_ctrl bitmap value and flag into a struct for staged
    > configuration changes. This is created as an array to allow one per type

    This is a bit cryptic: while reading this commit message the reader may not
    yet know what "new_ctrl" is or where it currently lives.

    > of configuration. Today there is only one element in the array, but
    > eventually resctrl will use the array slots for CODE/DATA/BOTH to detect
    > a duplicate schema being written.
    >
    > Signed-off-by: James Morse <james.morse@arm.com>
    > ---
    > arch/x86/kernel/cpu/resctrl/ctrlmondata.c | 49 ++++++++++++++++-------
    > arch/x86/kernel/cpu/resctrl/rdtgroup.c    | 22 +++++-----
    > include/linux/resctrl.h                   | 17 +++++---
    > 3 files changed, 60 insertions(+), 28 deletions(-)
    >
    > diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
    > index 28d69c78c29e..0c95ed83eb05 100644
    > --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
    > +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c

    ...

    > @@ -240,15 +244,30 @@ static int parse_line(char *line, struct resctrl_schema *s,
    > return -EINVAL;
    > }
    >
    > +static void apply_config(struct rdt_hw_domain *hw_dom,
    > +                         struct resctrl_staged_config *cfg, int closid,
    > +                         cpumask_var_t cpu_mask, bool mba_sc)
    > +{
    > +        struct rdt_domain *dom = &hw_dom->resctrl;
    > +        u32 *dc = mba_sc ? hw_dom->mbps_val : hw_dom->ctrl_val;
    > +
    > +        if (cfg->new_ctrl != dc[closid]) {
    > +                cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
    > +                dc[closid] = cfg->new_ctrl;
    > +        }
    > +
    > +        cfg->have_new_ctrl = false;

    Why is this necessary?

    > +}
    > +
    >  int update_domains(struct rdt_resource *r, int closid)
    >  {
    > +        struct resctrl_staged_config *cfg;
    >          struct rdt_hw_domain *hw_dom;
    >          struct msr_param msr_param;
    >          cpumask_var_t cpu_mask;
    >          struct rdt_domain *d;
    >          bool mba_sc;
    > -        u32 *dc;
    > -        int cpu;
    > +        int cpu, i;
    >
    >          if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
    >                  return -ENOMEM;
    > @@ -260,10 +279,12 @@ int update_domains(struct rdt_resource *r, int closid)
    >          mba_sc = is_mba_sc(r);
    >          list_for_each_entry(d, &r->domains, list) {
    >                  hw_dom = resctrl_to_arch_dom(d);
    > -                dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;
    > -                if (d->have_new_ctrl && d->new_ctrl != dc[closid]) {
    > -                        cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
    > -                        dc[closid] = d->new_ctrl;
    > +                for (i = 0; i < ARRAY_SIZE(d->staged_config); i++) {

    I understand it may make later patches easier, but it seems too early to
    introduce this loop because apply_config() does not seem to be ready for it
    yet (each staged entry would just overwrite the same closid's config).
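
    As an illustration only (a hypothetical situation once more than one entry
    can be staged; this is not code from the series), each iteration would land
    on the same slot:

        /* hypothetical: two entries staged for the same domain */
        cfg = &hw_dom->resctrl.staged_config[0];              /* e.g. a CODE config */
        apply_config(hw_dom, cfg, closid, cpu_mask, mba_sc);  /* dc[closid] = first value */

        cfg = &hw_dom->resctrl.staged_config[1];              /* e.g. a DATA config */
        apply_config(hw_dom, cfg, closid, cpu_mask, mba_sc);  /* dc[closid] overwritten */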

    > +                        cfg = &hw_dom->resctrl.staged_config[i];
    > +                        if (!cfg->have_new_ctrl)
    > +                                continue;
    > +
    > +                        apply_config(hw_dom, cfg, closid, cpu_mask, mba_sc);
    >                  }
    >          }
    >
    > @@ -338,7 +359,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
    >
    >          list_for_each_entry(s, &resctrl_all_schema, list) {
    >                  list_for_each_entry(dom, &s->res->domains, list)
    > -                        dom->have_new_ctrl = false;
    > +                        memset(dom->staged_config, 0, sizeof(dom->staged_config));
    >          }
    >
    >          while ((tok = strsep(&buf, "\n")) != NULL) {

    ...

    > diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
    > index 9f71f0238239..f1164bbb66c5 100644
    > --- a/include/linux/resctrl.h
    > +++ b/include/linux/resctrl.h
    > @@ -26,13 +26,21 @@ enum resctrl_conf_type {
    >          CDP_DATA,
    >  };
    >
    > +/**
    > + * struct resctrl_staged_config - parsed configuration to be applied
    > + * @new_ctrl: new ctrl value to be loaded
    > + * @have_new_ctrl: did user provide new_ctrl for this domain

    The "for this domain" in this description is no longer appropriate after
    the copy.

    > + */
    > +struct resctrl_staged_config {
    > +        u32             new_ctrl;
    > +        bool            have_new_ctrl;
    > +};
    > +
    > /**
    > * struct rdt_domain - group of cpus sharing an RDT resource
    > * @list: all instances of this resource
    > * @id: unique id for this instance
    > * @cpu_mask: which cpus share this resource
    > - * @new_ctrl: new ctrl value to be loaded
    > - * @have_new_ctrl: did user provide new_ctrl for this domain
    > * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold
    > * @mbm_total: saved state for MBM total bandwidth
    > * @mbm_local: saved state for MBM local bandwidth
    > @@ -41,15 +49,13 @@ enum resctrl_conf_type {
    > * @mbm_work_cpu: worker cpu for MBM h/w counters
    > * @cqm_work_cpu: worker cpu for CQM h/w counters
    > * @plr: pseudo-locked region (if any) associated with domain
    > + * @staged_config: parsed configuration to be applied
    > */
    >  struct rdt_domain {
    >          struct list_head                list;
    >          int                             id;
    >          struct cpumask                  cpu_mask;
    >
    > -        u32                             new_ctrl;
    > -        bool                            have_new_ctrl;
    > -
    >          unsigned long                   *rmid_busy_llc;
    >          struct mbm_state                *mbm_total;
    >          struct mbm_state                *mbm_local;
    > @@ -59,6 +65,7 @@ struct rdt_domain {
    >          int                             cqm_work_cpu;
    >
    >          struct pseudo_lock_region       *plr;
    > +        struct resctrl_staged_config    staged_config[1];
    >  };
    >
    > /**
    >

    Reinette
