From: Muchun Song <songmuchun@bytedance.com>
Subject: [PATCH v3 11/21] mm/hugetlb: Allocate the vmemmap pages associated with each hugetlb page
When we free a HugeTLB page to the buddy allocator, we should allocate
the vmemmap pages associated with it again, since they were freed when
the HugeTLB page was allocated. We do that in __free_hugepage().
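
For example (assuming x86-64 with 4KiB base pages and a 64-byte struct
page): a 2MiB HugeTLB page is described by 512 struct pages, which
occupy 8 pages of vmemmap. RESERVE_VMEMMAP_NR (2) of those pages stay
mapped, so the remaining 6 pages per HugeTLB page must be allocated
here before the page can be handed back to the buddy allocator.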

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
mm/hugetlb.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 109 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ded7f0fbde35..8295911fe76e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1307,6 +1307,8 @@ static void __free_hugepage(struct hstate *h, struct page *page);
  * reserve at least 2 pages as vmemmap areas.
  */
 #define RESERVE_VMEMMAP_NR		2U
+#define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
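+/*
+ * Allocations here must not fail: a HugeTLB page cannot be returned to
+ * the buddy allocator without its vmemmap pages. __GFP_NOFAIL makes the
+ * allocator retry indefinitely, and __GFP_MEMALLOC permits dipping into
+ * memory reserves, as the allocation is needed to free memory shortly.
+ */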
+#define GFP_VMEMMAP_PAGE		(GFP_KERNEL | __GFP_NOFAIL | __GFP_MEMALLOC)

 #define page_huge_pte(page)	((page)->pmd_huge_pte)

@@ -1490,7 +1492,7 @@ static void __free_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
                                          struct list_head *free_pages)
 {
         unsigned long next;
-        unsigned long start = addr + RESERVE_VMEMMAP_NR * PAGE_SIZE;
+        unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
         unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
         struct page *reuse = NULL;

@@ -1578,6 +1580,106 @@ static void free_huge_page_vmemmap(struct hstate *h, struct page *head)
         free_vmemmap_page_list(&free_pages);
 }

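+/*
+ * Remap @nr_remap vmemmap pages starting at @start to the newly
+ * allocated pages on @remap_pages. The contents of the shared @reuse
+ * page are first copied into each new page, then the PTE is pointed
+ * at the new page.
+ */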
+static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
+                                          unsigned long start,
+                                          unsigned int nr_remap,
+                                          struct list_head *remap_pages)
+{
+        pgprot_t pgprot = PAGE_KERNEL;
+        void *from = (void *)page_private(reuse);
+        unsigned long addr, end = start + (nr_remap << PAGE_SHIFT);
+
+        for (addr = start; addr < end; addr += PAGE_SIZE) {
+                void *to;
+                struct page *page;
+                pte_t entry, old = *ptep;
+
+                /* The list cannot be empty, see alloc_vmemmap_pages(). */
+                page = list_first_entry(remap_pages, struct page, lru);
+                list_del(&page->lru);
+                to = page_to_virt(page);
+                copy_page(to, from);
+
+                /*
+                 * Make sure that any data written to @to is made visible
+                 * to the physical page.
+                 */
+                flush_kernel_vmap_range(to, PAGE_SIZE);
+
+                prepare_vmemmap_page(page);
+
+                entry = mk_pte(page, pgprot);
+                set_pte_at(&init_mm, addr, ptep++, entry);
+
+                VM_BUG_ON(!pte_present(old) || pte_page(old) != reuse);
+        }
+}
+
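+/*
+ * Walk the vmemmap area of the huge page at @addr (minus the reserved
+ * RESERVE_VMEMMAP_SIZE head, which always stays mapped) and remap it
+ * chunk by chunk, flushing the TLB for the whole range once at the end.
+ */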
+static void __remap_huge_page_pmd_vmemmap(struct hstate *h, pmd_t *pmd,
+                                          unsigned long addr,
+                                          struct list_head *remap_pages)
+{
+        unsigned long next;
+        unsigned long start = addr + RESERVE_VMEMMAP_SIZE;
+        unsigned long end = addr + vmemmap_pages_size_per_hpage(h);
+        struct page *reuse = NULL;
+
+        addr = start;
+        do {
+                unsigned int nr_pages;
+                pte_t *ptep;
+
+                ptep = pte_offset_kernel(pmd, addr);
+                if (!reuse) {
+                        reuse = pte_page(ptep[-1]);
+                        set_page_private(reuse, addr - PAGE_SIZE);
+                }
+
+                next = vmemmap_hpage_addr_end(addr, end);
+                nr_pages = (next - addr) >> PAGE_SHIFT;
+                __remap_huge_page_pte_vmemmap(reuse, ptep, addr, nr_pages,
+                                              remap_pages);
+        } while (pmd++, addr = next, addr != end);
+
+        flush_tlb_kernel_range(start, end);
+}
+
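+/* Allocate the vmemmap pages that will be remapped for one huge page of @h. */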
+static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
+{
+        int i;
+
+        for (i = 0; i < free_vmemmap_pages_per_hpage(h); i++) {
+                struct page *page;
+
+                /* This cannot fail because of __GFP_NOFAIL in GFP_VMEMMAP_PAGE. */
+                page = alloc_page(GFP_VMEMMAP_PAGE);
+                list_add_tail(&page->lru, list);
+        }
+}
+
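+/*
+ * Allocate and remap the vmemmap pages of @head again, undoing
+ * free_huge_page_vmemmap(), so the huge page can be returned to the
+ * buddy allocator.
+ */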
+static void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+        pmd_t *pmd;
+        spinlock_t *ptl;
+        LIST_HEAD(remap_pages);
+
+        if (!free_vmemmap_pages_per_hpage(h))
+                return;
+
+        alloc_vmemmap_pages(h, &remap_pages);
+
+        pmd = vmemmap_to_pmd(head);
+        ptl = vmemmap_pmd_lock(pmd);
+        __remap_huge_page_pmd_vmemmap(h, pmd, (unsigned long)head,
+                                      &remap_pages);
+        if (!freed_vmemmap_hpage_dec(pmd_page(*pmd))) {
+                /*
+                 * TODO: merge the PTEs back into a huge PMD if the PMD
+                 * mapping has ever been split.
+                 */
+        }
+        spin_unlock(ptl);
+}
+
 /*
  * As update_and_free_page() can be called from a non-task context (and
  * with hugetlb_lock held), we can defer the actual freeing to a
  * workqueue to prevent
@@ -1653,6 +1755,10 @@ static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }

+static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
+
 static inline void __update_and_free_page(struct hstate *h, struct page *page)
 {
         __free_hugepage(h, page);
@@ -1685,6 +1791,8 @@ static void __free_hugepage(struct hstate *h, struct page *page)
 {
         int i;

+        alloc_huge_page_vmemmap(h, page);
+
         for (i = 0; i < pages_per_huge_page(h); i++) {
                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                    1 << PG_referenced | 1 << PG_dirty |
--
2.11.0