From: Fenghua Yu <fenghua.yu@intel.com>
Date: 14 Oct 2016
Subject: [PATCH v4 15/18] x86/intel_rdt: Add tasks files

The root directory and all subdirectories are automatically populated
with a read/write (mode 0644) file named "tasks". When read it will
show all the task IDs assigned to the resource group. Tasks can be
added (one at a time) to a group by writing the task ID to the file.
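
E.g., a minimal user-space sketch of such a write (illustrative only,
not part of this patch; the group name "p0" and the /sys/fs/resctrl
mount point are assumptions here):

	/*
	 * Move the task with PID 1234 into resource group "p0" by
	 * writing its PID to that group's "tasks" file.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/fs/resctrl/p0/tasks", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "1234", 4) < 0)
			perror("write tasks");
		close(fd);
		return 0;
	}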

Membership in a resource group is indicated by a new task_struct
field, "int closid", which holds the CLOSID of each task. The default
resource group uses CLOSID=0, which means that all tasks that exist
when the resctrl file system is mounted belong to the default group.
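
For context (not part of this patch): the per-task CLOSID is consumed
at context switch time by writing it into the IA32_PQR_ASSOC MSR,
which a later patch in this series wires up. A simplified sketch of
that idea, with an illustrative function name and the RMID
(monitoring) half of the MSR ignored:

	#include <asm/msr.h>		/* wrmsr() */
	#include <linux/sched.h>

	#define IA32_PQR_ASSOC	0x0c8f	/* RMID in bits 9:0, CLOSID in bits 63:32 */

	static inline void rdt_sched_in_sketch(struct task_struct *next)
	{
		/* closid == 0 selects the default resource group */
		wrmsr(IA32_PQR_ASSOC, 0, next->closid);
	}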

A resource group cannot be removed while there are tasks assigned to
it; rmdir on the group's directory fails with -EBUSY in that case.
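
Seen from user space (illustrative only; assumes a group directory
"p1" that still has tasks assigned):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Fails while any task's closid still points at p1 */
		if (rmdir("/sys/fs/resctrl/p1") && errno == EBUSY)
			fprintf(stderr, "p1 still has tasks assigned\n");
		return 0;
	}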

Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
 arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 173 +++++++++++++++++++++++++++++++
 include/linux/sched.h                    |   3 +
 2 files changed, 176 insertions(+)

diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index f2d7a3a..bdbe2d1 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -28,6 +28,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
+#include <linux/task_work.h>

 #include <uapi/linux/magic.h>

@@ -278,6 +279,152 @@ unlock:
 	return ret ?: nbytes;
 }

+struct task_move_callback {
+	struct callback_head	work;
+	struct rdtgroup		*rdtgrp;
+};
+
+static void move_myself(struct callback_head *head)
+{
+	struct task_move_callback *callback;
+	struct rdtgroup *rdtgrp;
+
+	callback = container_of(head, struct task_move_callback, work);
+	rdtgrp = callback->rdtgrp;
+
+	/* Resource group might have been deleted before process runs */
+	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
+	    (rdtgrp->flags & RDT_DELETED)) {
+		current->closid = 0;
+		kfree(rdtgrp);
+	}
+
+	kfree(callback);
+}
+
+static int __rdtgroup_move_task(struct task_struct *tsk,
+				struct rdtgroup *rdtgrp)
+{
+	struct task_move_callback *callback;
+	int ret;
+
+	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
+	if (!callback)
+		return -ENOMEM;
+	callback->work.func = move_myself;
+	callback->rdtgrp = rdtgrp;
+	atomic_inc(&rdtgrp->waitcount);
+	ret = task_work_add(tsk, &callback->work, true);
+	if (ret) {
+		atomic_dec(&rdtgrp->waitcount);
+		kfree(callback);
+	} else {
+		tsk->closid = rdtgrp->closid;
+	}
+	return ret;
+}
+
+static int rdtgroup_task_write_permission(struct task_struct *task,
+					  struct kernfs_open_file *of)
+{
+	const struct cred *cred = current_cred();
+	const struct cred *tcred = get_task_cred(task);
+	int ret = 0;
+
+	/*
+	 * even if we're attaching all tasks in the thread group, we only
+	 * need to check permissions on one of them.
+	 */
+	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
+	    !uid_eq(cred->euid, tcred->uid) &&
+	    !uid_eq(cred->euid, tcred->suid))
+		ret = -EPERM;
+
+	put_cred(tcred);
+	return ret;
+}
+
+static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
+			      struct kernfs_open_file *of)
+{
+	struct task_struct *tsk;
+	int ret;
+
+	rcu_read_lock();
+	if (pid) {
+		tsk = find_task_by_vpid(pid);
+		if (!tsk) {
+			ret = -ESRCH;
+			goto out_unlock_rcu;
+		}
+	} else {
+		tsk = current;
+	}
+
+	get_task_struct(tsk);
+	rcu_read_unlock();
+
+	ret = rdtgroup_task_write_permission(tsk, of);
+	if (!ret)
+		ret = __rdtgroup_move_task(tsk, rdtgrp);
+
+	put_task_struct(tsk);
+	return ret;
+
+out_unlock_rcu:
+	rcu_read_unlock();
+	return ret;
+}
+
+static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
+				    char *buf, size_t nbytes, loff_t off)
+{
+	struct rdtgroup *rdtgrp;
+	pid_t pid;
+	int ret = 0;
+
+	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+		return -EINVAL;
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+
+	if (rdtgrp)
+		ret = rdtgroup_move_task(pid, rdtgrp, of);
+	else
+		ret = -ENOENT;
+
+	rdtgroup_kn_unlock(of->kn);
+
+	return ret ?: nbytes;
+}
+
+static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
+{
+	struct task_struct *p;
+
+	rcu_read_lock();
+	for_each_process(p) {
+		if (p->closid == r->closid)
+			seq_printf(s, "%d\n", p->pid);
+	}
+	rcu_read_unlock();
+}
+
+static int rdtgroup_tasks_show(struct kernfs_open_file *of,
+			       struct seq_file *s, void *v)
+{
+	struct rdtgroup *rdtgrp;
+	int ret = 0;
+
+	rdtgrp = rdtgroup_kn_lock_live(of->kn);
+	if (rdtgrp)
+		show_rdt_tasks(rdtgrp, s);
+	else
+		ret = -ENOENT;
+	rdtgroup_kn_unlock(of->kn);
+
+	return ret;
+}
+
 /* Files in each rdtgroup */
 static struct rftype rdtgroup_base_files[] = {
 	{
@@ -288,6 +435,13 @@ static struct rftype rdtgroup_base_files[] = {
 		.seq_show = rdtgroup_cpus_show,
 	},
 	{
+		.name = "tasks",
+		.mode = 0644,
+		.kf_ops = &rdtgroup_kf_single_ops,
+		.write = rdtgroup_tasks_write,
+		.seq_show = rdtgroup_tasks_show,
+	},
+	{
 		/* NULL terminated */
 	}
 };
@@ -559,6 +713,13 @@ static void rmdir_all_sub(void)
 {
 	struct rdtgroup *rdtgrp;
 	struct list_head *l, *next;
+	struct task_struct *p;
+
+	/* move all tasks to default resource group */
+	read_lock(&tasklist_lock);
+	for_each_process(p)
+		p->closid = 0;
+	read_unlock(&tasklist_lock);

 	get_cpu();
 	/* Reset PQR_ASSOC MSR on this cpu. */
@@ -681,6 +842,7 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 {
 	struct rdtgroup *rdtgrp;
 	int ret = 0;
+	struct task_struct *p;

 	rdtgrp = rdtgroup_kn_lock_live(kn);
 	if (!rdtgrp) {
@@ -698,6 +860,17 @@ static int rdtgroup_rmdir(struct kernfs_node *kn)
 		return -EPERM;
 	}

+	/* Don't allow if there are processes in this group */
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (p->closid == rdtgrp->closid) {
+			read_unlock(&tasklist_lock);
+			rdtgroup_kn_unlock(kn);
+			return -EBUSY;
+		}
+	}
+	read_unlock(&tasklist_lock);
+
 	/* Give any CPUs back to the default group */
 	cpumask_or(&rdtgroup_default.cpu_mask,
 		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62c68e5..8a05c46 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1766,6 +1766,9 @@ struct task_struct {
 	/* cg_list protected by css_set_lock and tsk->alloc_lock */
 	struct list_head cg_list;
 #endif
+#ifdef CONFIG_INTEL_RDT_A
+	int closid;
+#endif
 #ifdef CONFIG_FUTEX
 	struct robust_list_head __user *robust_list;
 #ifdef CONFIG_COMPAT
--
2.5.0