    From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
    Subject: [PATCH -V9 [updated] 10/15] hugetlb/cgroup: Add the cgroup pointer to page lru

    Add the hugetlb cgroup pointer to the 3rd page's lru.next. This limits
    hugetlb cgroup usage to hugepages made up of 3 or more normal pages. I
    guess that is an acceptable limitation.
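
    To see why 3 pages implies an order limit: an order-n hugepage is a
    compound page of 1 << n base pages, so page[2] exists only when
    1 << n >= 3, i.e. n >= 2; hence HUGETLB_CGROUP_MIN_ORDER is 2 below.
    A minimal standalone userspace sketch of the same pointer-stashing
    trick (the toy_* names are invented for illustration, not kernel API):

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for struct page and struct hugetlb_cgroup. */
    struct toy_list_head { void *next, *prev; };
    struct toy_page { struct toy_list_head lru; };
    struct toy_cgroup { int id; };

    #define TOY_MIN_ORDER 2	/* >= 3 pages requires order >= 2 */

    /* Stash the pointer in the unused lru.next of the 3rd page. */
    static int toy_set_cgroup(struct toy_page *page, unsigned int order,
			      struct toy_cgroup *cg)
    {
    	if (order < TOY_MIN_ORDER)
    		return -1;	/* page[2] does not exist */
    	page[2].lru.next = cg;
    	return 0;
    }

    static struct toy_cgroup *toy_get_cgroup(struct toy_page *page,
					     unsigned int order)
    {
    	if (order < TOY_MIN_ORDER)
    		return NULL;
    	return page[2].lru.next;
    }

    int main(void)
    {
    	/* An order-2 "hugepage": 4 contiguous toy pages. */
    	struct toy_page huge[4] = { { { NULL, NULL } } };
    	struct toy_cgroup cg = { 42 };

    	toy_set_cgroup(huge, 2, &cg);
    	printf("tracked cgroup id: %d\n", toy_get_cgroup(huge, 2)->id);
    	return 0;
    }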

    Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
    ---
    include/linux/hugetlb_cgroup.h | 37 +++++++++++++++++++++++++++++++++++++
    mm/hugetlb.c | 4 ++++
    2 files changed, 41 insertions(+)

    diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
    index e9944b4..2e4cb6b 100644
    --- a/include/linux/hugetlb_cgroup.h
    +++ b/include/linux/hugetlb_cgroup.h
    @@ -18,8 +18,34 @@
    #include <linux/res_counter.h>

    struct hugetlb_cgroup;
    +/*
    + * Minimum page order trackable by hugetlb cgroup.
    + * At least 3 pages are necessary for all the tracking information.
    + */
    +#define HUGETLB_CGROUP_MIN_ORDER 2

    #ifdef CONFIG_CGROUP_HUGETLB_RES_CTLR
    +
    +static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
    +{
    +	VM_BUG_ON(!PageHuge(page));
    +
    +	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
    +		return NULL;
    +	return (struct hugetlb_cgroup *)page[2].lru.next;
    +}
    +
    +static inline
    +int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
    +{
    +	VM_BUG_ON(!PageHuge(page));
    +
    +	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
    +		return -1;
    +	page[2].lru.next = (void *)h_cg;
    +	return 0;
    +}
    +
    static inline bool hugetlb_cgroup_disabled(void)
    {
    	if (hugetlb_subsys.disabled)
    @@ -28,6 +54,17 @@ static inline bool hugetlb_cgroup_disabled(void)
    }

    #else
    +static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
    +{
    +	return NULL;
    +}
    +
    +static inline
    +int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
    +{
    +	return 0;
    +}
    +
    static inline bool hugetlb_cgroup_disabled(void)
    {
    	return true;
    diff --git a/mm/hugetlb.c b/mm/hugetlb.c
    index e899a2d..6a449c5 100644
    --- a/mm/hugetlb.c
    +++ b/mm/hugetlb.c
    @@ -28,6 +28,7 @@

    #include <linux/io.h>
    #include <linux/hugetlb.h>
    +#include <linux/hugetlb_cgroup.h>
    #include <linux/node.h>
    #include "internal.h"

    @@ -591,6 +592,7 @@ static void update_and_free_page(struct hstate *h, struct page *page)
    			1 << PG_active | 1 << PG_reserved |
    			1 << PG_private | 1 << PG_writeback);
    	}
    +	VM_BUG_ON(hugetlb_cgroup_from_page(page));
    	set_compound_page_dtor(page, NULL);
    	set_page_refcounted(page);
    	arch_release_hugepage(page);
    @@ -643,6 +645,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
    	INIT_LIST_HEAD(&page->lru);
    	set_compound_page_dtor(page, free_huge_page);
    	spin_lock(&hugetlb_lock);
    +	set_hugetlb_cgroup(page, NULL);
    	h->nr_huge_pages++;
    	h->nr_huge_pages_node[nid]++;
    	spin_unlock(&hugetlb_lock);
    @@ -892,6 +895,7 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
    		INIT_LIST_HEAD(&page->lru);
    		r_nid = page_to_nid(page);
    		set_compound_page_dtor(page, free_huge_page);
    +		set_hugetlb_cgroup(page, NULL);
    		/*
    		 * We incremented the global counters already
    		 */
    --
    1.7.10
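
    As a usage note, a later consumer of the two accessors might pair them
    roughly as below; example_uncharge() is hypothetical and only sketches
    the intended protocol (look up, release, clear), it is not part of
    this patch:

    #include <linux/hugetlb_cgroup.h>

    /*
     * Hypothetical caller, invoked with hugetlb_lock held to mirror the
     * set_hugetlb_cgroup() call added in prep_new_huge_page() above.
     */
    static void example_uncharge(struct page *page)
    {
    	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_page(page);

    	if (!h_cg)
    		return;	/* untracked: cgroup disabled or order too small */
    	/* ... release the res_counter charge against h_cg here ... */
    	set_hugetlb_cgroup(page, NULL);
    }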

