Subject: [PATCH 2/2] hugetlb_cgroup: Add post_attach interface for task migration
Add a post_attach interface that switches each charged page to the
destination hugetlb cgroup and uncharges the source hugetlb cgroup once
the task migration has finished.
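
For illustration, a minimal userspace sketch of the scenario this handles
follows (not part of the patch). The cgroup mount point
/sys/fs/cgroup/hugetlb, the "src"/"dst" group names and the 2MB huge page
size are assumptions only. The task faults in a hugetlb page while in one
hugetlb cgroup and is then migrated to another, which with this series
applied goes through can_attach -> post_attach and moves the charge:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */

static void move_self_to(const char *procs_file)
{
	char pid[32];
	int fd = open(procs_file, O_WRONLY);

	if (fd < 0) {
		perror("open cgroup.procs");
		exit(1);
	}
	snprintf(pid, sizeof(pid), "%d", (int)getpid());
	if (write(fd, pid, strlen(pid)) < 0)
		perror("write cgroup.procs");
	close(fd);
}

int main(void)
{
	char *buf;

	/* Start in the source hugetlb cgroup (path is an assumption). */
	move_self_to("/sys/fs/cgroup/hugetlb/src/cgroup.procs");

	/* Fault in one huge page; it is charged to the "src" group. */
	buf = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(buf, 0, HPAGE_SIZE);

	/*
	 * Migrate to the destination group.  With this series applied the
	 * page's hugetlb cgroup is switched to "dst" in post_attach and the
	 * charge is uncharged from "src".
	 */
	move_self_to("/sys/fs/cgroup/hugetlb/dst/cgroup.procs");

	pause();	/* keep the mapping alive for inspection */
	return 0;
}

The effect can be checked by reading hugetlb.2MB.usage_in_bytes (the file
name depends on the configured huge page size) in the source and
destination groups before and after the write to cgroup.procs.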

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
mm/hugetlb_cgroup.c | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 68 insertions(+)

diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 2568d0c..bd53d04 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -229,6 +229,7 @@ static void hugetlb_cgroup_clear(void)
 {
 	struct mm_struct *mm = hmc.mm;
 	struct hugetlb_cgroup *to = hmc.to;
+	struct hugetlb_cgroup *from = hmc.from;
 	int idx;
 
 	/* we must uncharge all the leftover precharges from hmc.to */
@@ -242,6 +243,17 @@ static void hugetlb_cgroup_clear(void)
 		hmc.precharge[idx] = 0;
 	}
 
+	for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) {
+		if (!hmc.moved_charge[idx])
+			continue;
+
+		page_counter_uncharge(
+			__hugetlb_cgroup_counter_from_cgroup(from, idx, false),
+			hmc.moved_charge[idx] * pages_per_huge_page(&hstates[idx]));
+
+		hmc.moved_charge[idx] = 0;
+	}
+
 	hmc.from = NULL;
 	hmc.to = NULL;
 	hmc.mm = NULL;
@@ -311,6 +323,61 @@ static void hugetlb_cgroup_cancel_attach(struct cgroup_taskset *tset)
 	hugetlb_cgroup_clear();
 }
 
+static int hugetlb_cgroup_move_charge_pte_range(pte_t *pte, unsigned long hmask,
+						unsigned long addr,
+						unsigned long end,
+						struct mm_walk *walk)
+{
+	struct page *page;
+	spinlock_t *ptl;
+	pte_t entry;
+	struct hstate *h = hstate_vma(walk->vma);
+
+	ptl = huge_pte_lock(h, walk->mm, pte);
+	entry = huge_ptep_get(pte);
+	/* TODO: only handle present hugetlb pages now. */
+	if (!pte_present(entry)) {
+		spin_unlock(ptl);
+		return 0;
+	}
+
+	page = pte_page(entry);
+	spin_unlock(ptl);
+
+	spin_lock_irq(&hugetlb_lock);
+	if (hugetlb_cgroup_from_page(page) == hmc.from) {
+		int idx = hstate_index(h);
+
+		set_hugetlb_cgroup(page, hmc.to);
+		hmc.precharge[idx]--;
+		hmc.moved_charge[idx]++;
+	}
+	spin_unlock_irq(&hugetlb_lock);
+
+	cond_resched();
+	return 0;
+}
+
+static const struct mm_walk_ops hugetlb_charge_walk_ops = {
+	.hugetlb_entry = hugetlb_cgroup_move_charge_pte_range,
+};
+
+static void hugetlb_cgroup_move_task(void)
+{
+	if (hugetlb_cgroup_disabled())
+		return;
+
+	if (!hmc.to)
+		return;
+
+	mmap_read_lock(hmc.mm);
+	walk_page_range(hmc.mm, 0, hmc.mm->highest_vm_end,
+			&hugetlb_charge_walk_ops, NULL);
+	mmap_read_unlock(hmc.mm);
+
+	hugetlb_cgroup_clear();
+}
+
 /*
  * Should be called with hugetlb_lock held.
  * Since we are holding hugetlb_lock, pages cannot get moved from
@@ -968,6 +1035,7 @@ struct cgroup_subsys hugetlb_cgrp_subsys = {
 	.css_free = hugetlb_cgroup_css_free,
 	.can_attach = hugetlb_cgroup_can_attach,
 	.cancel_attach = hugetlb_cgroup_cancel_attach,
+	.post_attach = hugetlb_cgroup_move_task,
 	.dfl_cftypes = hugetlb_files,
 	.legacy_cftypes = hugetlb_files,
 };
--
1.8.3.1