Subject: Re: [PATCH v14 6/8] mm: hugetlb: introduce nr_free_vmemmap_pages in the struct hstate
On 2021/2/4 11:50, Muchun Song wrote:
> All the infrastructure is ready, so we introduce nr_free_vmemmap_pages
> field in the hstate to indicate how many vmemmap pages associated with
> a HugeTLB page that can be freed to buddy allocator. And initialize it
> in the hugetlb_vmemmap_init(). This patch is actual enablement of the
> feature.
>
> Signed-off-by: Muchun Song <songmuchun@bytedance.com>
> Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
> Reviewed-by: Oscar Salvador <osalvador@suse.de>
> ---
> include/linux/hugetlb.h | 3 +++
> mm/hugetlb.c | 1 +
> mm/hugetlb_vmemmap.c | 30 ++++++++++++++++++++++++++----
> mm/hugetlb_vmemmap.h | 5 +++++
> 4 files changed, 35 insertions(+), 4 deletions(-)
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index ad249e56ac49..775aea53669a 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -560,6 +560,9 @@ struct hstate {
> unsigned int nr_huge_pages_node[MAX_NUMNODES];
> unsigned int free_huge_pages_node[MAX_NUMNODES];
> unsigned int surplus_huge_pages_node[MAX_NUMNODES];
> +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
> + unsigned int nr_free_vmemmap_pages;
> +#endif
> #ifdef CONFIG_CGROUP_HUGETLB
> /* cgroup control files */
> struct cftype cgroup_files_dfl[7];
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 5518283aa667..04dde2b71f3e 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -3220,6 +3220,7 @@ void __init hugetlb_add_hstate(unsigned int order)
> h->next_nid_to_free = first_memory_node;
> snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
> huge_page_size(h)/1024);
> + hugetlb_vmemmap_init(h);
>
> parsed_hstate = h;
> }
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index 224a3cb69bf9..36ebd677e606 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -208,13 +208,10 @@ early_param("hugetlb_free_vmemmap", early_hugetlb_free_vmemmap_param);
> /*
> * How many vmemmap pages associated with a HugeTLB page that can be freed
> * to the buddy allocator.
> - *
> - * Todo: Returns zero for now, which means the feature is disabled. We will
> - * enable it once all the infrastructure is there.
> */
> static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
> {
> - return 0;
> + return h->nr_free_vmemmap_pages;
> }
>
> static inline unsigned long free_vmemmap_pages_size_per_hpage(struct hstate *h)
> @@ -269,3 +266,28 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
> */
> vmemmap_remap_free(vmemmap_addr, vmemmap_end, vmemmap_reuse);
> }
> +
> +void __init hugetlb_vmemmap_init(struct hstate *h)
> +{
> + unsigned int nr_pages = pages_per_huge_page(h);
> + unsigned int vmemmap_pages;
> +
> + if (!hugetlb_free_vmemmap_enabled)
> + return;
> +
> + vmemmap_pages = (nr_pages * sizeof(struct page)) >> PAGE_SHIFT;
> + /*
> + * The head page and the first tail page are not to be freed to buddy
> + * allocator, the other pages will map to the first tail page, so they
> + * can be freed.
> + *
> + * Could RESERVE_VMEMMAP_NR be greater than @vmemmap_pages? It is true
> + * on some architectures (e.g. aarch64). See Documentation/arm64/
> + * hugetlbpage.rst for more details.
> + */
> + if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
> + h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;

Not a problem. Should we set h->nr_free_vmemmap_pages to 0 explicitly in the 'else' case?
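
Just to make the intent explicit, something like the below is what I had in
mind (only a sketch against this patch, not a requirement):

	if (likely(vmemmap_pages > RESERVE_VMEMMAP_NR))
		h->nr_free_vmemmap_pages = vmemmap_pages - RESERVE_VMEMMAP_NR;
	else
		/*
		 * Not enough vmemmap pages to free any (e.g. the aarch64
		 * case mentioned in the comment above); state the zero
		 * explicitly instead of relying on the zeroed hstate.
		 */
		h->nr_free_vmemmap_pages = 0;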

Anyway, looks good to me. Thanks.
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>

> +
> + pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
> + h->name);
> +}
> diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
> index 6f89a9eed02c..02a21604ef1d 100644
> --- a/mm/hugetlb_vmemmap.h
> +++ b/mm/hugetlb_vmemmap.h
> @@ -14,6 +14,7 @@
> int alloc_huge_page_vmemmap(struct hstate *h, struct page *head,
> gfp_t gfp_mask);
> void free_huge_page_vmemmap(struct hstate *h, struct page *head);
> +void hugetlb_vmemmap_init(struct hstate *h);
> #else
> static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head,
> gfp_t gfp_mask)
> @@ -24,5 +25,9 @@ static inline int alloc_huge_page_vmemmap(struct hstate *h, struct page *head,
> static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
> {
> }
> +
> +static inline void hugetlb_vmemmap_init(struct hstate *h)
> +{
> +}
> #endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
> #endif /* _LINUX_HUGETLB_VMEMMAP_H */
>
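
For reference, a quick sanity check of the arithmetic in
hugetlb_vmemmap_init(), assuming 4K base pages, sizeof(struct page) == 64,
and RESERVE_VMEMMAP_NR == 2 (head page plus first tail page, as the comment
above describes). This is a standalone userspace sketch that just mirrors
the formula, not kernel code:

	#include <stdio.h>

	#define PAGE_SHIFT		12	/* 4K base pages assumed */
	#define STRUCT_PAGE_SIZE	64	/* sizeof(struct page) assumed */
	#define RESERVE_VMEMMAP_NR	2	/* head page + first tail page */

	/* Mirrors the computation done in hugetlb_vmemmap_init() above. */
	static unsigned int nr_free_vmemmap(unsigned int pages_per_hpage)
	{
		unsigned int vmemmap_pages =
			(pages_per_hpage * STRUCT_PAGE_SIZE) >> PAGE_SHIFT;

		return vmemmap_pages > RESERVE_VMEMMAP_NR ?
			vmemmap_pages - RESERVE_VMEMMAP_NR : 0;
	}

	int main(void)
	{
		/* 2MB huge page: 512 base pages -> 8 vmemmap pages, 6 freeable */
		printf("2MB: %u\n", nr_free_vmemmap(512));
		/* 1GB huge page: 262144 base pages -> 4096 vmemmap pages, 4094 freeable */
		printf("1GB: %u\n", nr_free_vmemmap(262144));
		return 0;
	}

Under these assumptions that works out to about 24KB returned to the buddy
allocator per 2MB HugeTLB page and roughly 16MB per 1GB HugeTLB page.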
