Subject: [PATCH v5 17/18] x86/intel_rdt: Add scheduler hook
    From: Fenghua Yu <fenghua.yu@intel.com>

    Hook the x86 scheduler code to update closid based on whether the current
    task is assigned to a specific closid or running on a CPU assigned to a
    specific closid.

    Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
    ---
arch/x86/include/asm/intel_rdt.h         | 42 ++++++++++++++++++++++++++++++++
arch/x86/kernel/cpu/intel_rdt.c          |  1 -
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c |  3 +++
arch/x86/kernel/process_32.c             |  4 ++++
arch/x86/kernel/process_64.c             |  4 ++++
5 files changed, 53 insertions(+), 1 deletion(-)

    diff --git a/arch/x86/include/asm/intel_rdt.h b/arch/x86/include/asm/intel_rdt.h
    index f847189..100e53c 100644
    --- a/arch/x86/include/asm/intel_rdt.h
    +++ b/arch/x86/include/asm/intel_rdt.h
    @@ -1,8 +1,12 @@
    #ifndef _ASM_X86_INTEL_RDT_H
    #define _ASM_X86_INTEL_RDT_H

    +#ifdef CONFIG_INTEL_RDT_A
    +
    #include <linux/jump_label.h>

    +#include <asm/intel_rdt_common.h>
    +
    #define IA32_L3_QOS_CFG 0xc81
    #define IA32_L3_CBM_BASE 0xc90
    #define IA32_L2_CBM_BASE 0xd10
    @@ -174,4 +178,42 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
    char *buf, size_t nbytes, loff_t off);
    int rdtgroup_schemata_show(struct kernfs_open_file *of,
    struct seq_file *s, void *v);
    +
    +/*
    + * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
    + *
+ * The following considerations are made so that this has minimal impact
+ * on the scheduler hot path:
    + * - This will stay as no-op unless we are running on an Intel SKU
    + * which supports resource control and we enable by mounting the
    + * resctrl file system.
    + * - Caches the per cpu CLOSid values and does the MSR write only
    + * when a task with a different CLOSid is scheduled in.
    + */
+static inline void intel_rdt_sched_in(void)
+{
+        if (static_branch_likely(&rdt_enable_key)) {
+                struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+                int closid;
+
+                /*
+                 * If this task has a closid assigned, use it.
+                 * Else use the closid assigned to this cpu.
+                 */
+                closid = current->closid;
+                if (closid == 0)
+                        closid = this_cpu_read(cpu_closid);
+
+                if (closid != state->closid) {
+                        state->closid = closid;
+                        wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
+                }
+        }
+}
    +
    +#else
    +
    +static inline void intel_rdt_sched_in(void) {}
    +
    +#endif /* CONFIG_INTEL_RDT_A */
    #endif /* _ASM_X86_INTEL_RDT_H */
    diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
    index d2d77cb..fd8a75a 100644
    --- a/arch/x86/kernel/cpu/intel_rdt.c
    +++ b/arch/x86/kernel/cpu/intel_rdt.c
    @@ -29,7 +29,6 @@
    #include <linux/cacheinfo.h>
    #include <linux/cpuhotplug.h>

    -#include <asm/intel_rdt_common.h>
    #include <asm/intel-family.h>
    #include <asm/intel_rdt.h>

    diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
    index 4a6b3b8..fca56ab 100644
    --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
    +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
    @@ -308,6 +308,9 @@ static void move_myself(struct callback_head *head)
    kfree(rdtgrp);
    }

+        /* update PQR_ASSOC MSR to make resource group go into effect */
+        intel_rdt_sched_in();
+
    kfree(callback);
    }

    diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
    index bd7be8e..efe7f9f 100644
    --- a/arch/x86/kernel/process_32.c
    +++ b/arch/x86/kernel/process_32.c
    @@ -54,6 +54,7 @@
    #include <asm/debugreg.h>
    #include <asm/switch_to.h>
    #include <asm/vm86.h>
    +#include <asm/intel_rdt.h>

    void __show_regs(struct pt_regs *regs, int all)
    {
    @@ -299,5 +300,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)

    this_cpu_write(current_task, next_p);

+        /* Load the Intel cache allocation PQR MSR. */
+        intel_rdt_sched_in();
+
    return prev_p;
    }
    diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
    index b3760b3..acd7d6f 100644
    --- a/arch/x86/kernel/process_64.c
    +++ b/arch/x86/kernel/process_64.c
    @@ -50,6 +50,7 @@
    #include <asm/switch_to.h>
    #include <asm/xen/hypervisor.h>
    #include <asm/vdso.h>
    +#include <asm/intel_rdt.h>

    __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

    @@ -473,6 +474,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
    loadsegment(ss, __KERNEL_DS);
    }

+        /* Load the Intel cache allocation PQR MSR. */
+        intel_rdt_sched_in();
+
    return prev_p;
    }

    --
    2.5.0
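
For reference, a minimal userspace sketch (not part of the patch) of how this hook ends up taking effect. It assumes the resctrl filesystem is mounted at /sys/fs/resctrl and that a resource group named "p0" has already been created; writing a task's PID to that group's "tasks" file assigns the task a CLOSID, which intel_rdt_sched_in() then loads into IA32_PQR_ASSOC the next time the task is switched in.

    /*
     * Hypothetical example: move the calling task into the resctrl group
     * "p0". Assumes resctrl is mounted at /sys/fs/resctrl and "p0" exists.
     */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            FILE *f = fopen("/sys/fs/resctrl/p0/tasks", "w");

            if (!f) {
                    perror("fopen /sys/fs/resctrl/p0/tasks");
                    return 1;
            }
            /* The written PID is assigned the group's closid. */
            fprintf(f, "%d\n", (int)getpid());
            fclose(f);
            return 0;
    }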