From: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Date: 10 Jan 2023
Subject: [PATCH mm-unstable v2 5/8] mm/hugetlb: increase use of folios in alloc_huge_page()
Change hugetlb_cgroup_commit_charge{,_rsvd}(), dequeue_huge_page_vma()
and alloc_buddy_huge_page_with_mpol() to use folios, so that
alloc_huge_page() operates on folios throughout and converts back to a
page only at its return.

Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 include/linux/hugetlb_cgroup.h |  8 ++++----
 mm/hugetlb.c                   | 33 ++++++++++++++++-----------------
 mm/hugetlb_cgroup.c            |  8 ++------
 3 files changed, 22 insertions(+), 27 deletions(-)
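
A note on the conversion pattern, for readers following the series: the
internal helpers now pass struct folio end to end, and the conversion
back to struct page is confined to alloc_huge_page()'s return statement.
A minimal sketch of that boundary is below; the demo_* names are
hypothetical stand-ins, not functions added by this patch:

	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	/* Hypothetical stand-in for the folio-native internals, e.g.
	 * dequeue_hugetlb_folio_vma() or
	 * alloc_buddy_hugetlb_folio_with_mpol(). */
	static struct folio *demo_get_hugetlb_folio(struct hstate *h);

	/* Legacy page-returning entry point: every intermediate step
	 * works on the folio; the page conversion happens exactly once,
	 * on return. */
	struct page *demo_alloc_huge_page(struct hstate *h)
	{
		struct folio *folio = demo_get_hugetlb_folio(h);

		if (!folio)
			return NULL;
		return &folio->page;
	}

Going the other way, a caller holding only a struct page recovers the
folio with page_folio(), which is why the intermediate
folio = page_folio(page) step inside alloc_huge_page() can be dropped
below.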

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index f706626a8063..3d82d91f49ac 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -141,10 +141,10 @@ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
 					     struct hugetlb_cgroup **ptr);
 extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 					 struct hugetlb_cgroup *h_cg,
-					 struct page *page);
+					 struct folio *folio);
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 					      struct hugetlb_cgroup *h_cg,
-					      struct page *page);
+					      struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
 					  struct folio *folio);
 extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
@@ -230,14 +230,14 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,
 
 static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 						struct hugetlb_cgroup *h_cg,
-						struct page *page)
+						struct folio *folio)
 {
 }
 
 static inline void
 hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
-				  struct page *page)
+				  struct folio *folio)
 {
 }
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 62552172683a..f3e1d052b40c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1206,7 +1206,7 @@ static unsigned long available_huge_pages(struct hstate *h)
 	return h->free_huge_pages - h->resv_huge_pages;
 }
 
-static struct page *dequeue_huge_page_vma(struct hstate *h,
+static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
 				struct vm_area_struct *vma,
 				unsigned long address, int avoid_reserve,
 				long chg)
@@ -1250,7 +1250,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 	}
 
 	mpol_cond_put(mpol);
-	return &folio->page;
+	return folio;
 
 err:
 	return NULL;
@@ -2302,7 +2302,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
  * Use the VMA's mpolicy to allocate a huge page from the buddy.
  */
 static
-struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
 		struct vm_area_struct *vma, unsigned long addr)
 {
 	struct folio *folio = NULL;
@@ -2325,7 +2325,7 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
 	if (!folio)
 		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
 	mpol_cond_put(mpol);
-	return &folio->page;
+	return folio;
 }
 
 /* page migration callback function */
@@ -2874,7 +2873,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 {
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
-	struct page *page;
 	struct folio *folio;
 	long map_chg, map_commit;
 	long gbl_chg;
@@ -2938,34 +2937,34 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * from the global free pool (global change). gbl_chg == 0 indicates
 	 * a reservation exists for the allocation.
 	 */
-	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
-	if (!page) {
+	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+	if (!folio) {
 		spin_unlock_irq(&hugetlb_lock);
-		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
-		if (!page)
+		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+		if (!folio)
 			goto out_uncharge_cgroup;
 		spin_lock_irq(&hugetlb_lock);
 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-			SetHPageRestoreReserve(page);
+			folio_set_hugetlb_restore_reserve(folio);
 			h->resv_huge_pages--;
 		}
-		list_add(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
+		list_add(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
 		/* Fall through */
 	}
-	folio = page_folio(page);
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+
+	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
 	 */
 	if (deferred_reserve) {
 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
-						  h_cg, page);
+						  h_cg, folio);
 	}
 
 	spin_unlock_irq(&hugetlb_lock);
 
-	hugetlb_set_page_subpool(page, spool);
+	hugetlb_set_folio_subpool(folio, spool);
 
 	map_commit = vma_commit_reservation(h, vma, addr);
 	if (unlikely(map_chg > map_commit)) {
@@ -2986,7 +2985,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
 				pages_per_huge_page(h), folio);
 	}
-	return page;
+	return &folio->page;
 
 out_uncharge_cgroup:
 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index d9e4425d81ac..dedd2edb076e 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -331,19 +331,15 @@ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
-				  struct page *page)
+				  struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
 }
 
 void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 				       struct hugetlb_cgroup *h_cg,
-				       struct page *page)
+				       struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
 }

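One hunk above is worth a remark: set_page_refcounted(page) becomes
folio_ref_unfreeze(folio, 1). Both would BUG on a nonzero refcount, and
both take the frozen (zero) refcount of a newly handed-out hugetlb folio
to exactly one; folio_ref_unfreeze() is the folio-native spelling and
uses a release store. A minimal sketch of that hand-off, with a
hypothetical demo_* name:

	#include <linux/mmdebug.h>	/* VM_BUG_ON_FOLIO() */
	#include <linux/page_ref.h>	/* folio_ref_count(), folio_ref_unfreeze() */

	/* Sketch only: give a pool-owned, frozen hugetlb folio to its
	 * first user. */
	static void demo_hand_out_folio(struct folio *folio)
	{
		/* refcount stays frozen at 0 while the folio is in the pool */
		VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
		folio_ref_unfreeze(folio, 1);	/* 0 -> 1, release store */
	}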
-- 
2.39.0