Subject: [PATCH v4 37/73] mm: Convert huge_memory to XArray
From: Matthew Wilcox <mawilcox@microsoft.com>

Quite a straightforward conversion.
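The only non-mechanical part is the lookup in split_huge_page_to_list(),
where the radix_tree_lookup_slot() / radix_tree_deref_slot_protected()
pair becomes an on-stack XA_STATE plus a single xas_load() under the
xa_lock. A minimal sketch of that pattern follows, assuming the mapping's
XArray is named 'pages' as in this series; the helper name is made up for
illustration, and unlike the real code it drops the lock again instead of
holding it across the rest of the split:

	#include <linux/pagemap.h>
	#include <linux/xarray.h>

	/*
	 * Illustrative only: check that @head is still the entry stored
	 * in the page cache at its index, taking and dropping the XArray
	 * lock around the lookup.
	 */
	static bool head_still_in_cache(struct address_space *mapping,
					struct page *head)
	{
		/* On-stack cursor for a single index in mapping->pages. */
		XA_STATE(xas, &mapping->pages, page_index(head));
		bool present;

		xa_lock(&mapping->pages);
		/* xas_load() walks the XArray without modifying it. */
		present = (xas_load(&xas) == head);
		xa_unlock(&mapping->pages);

		return present;
	}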

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
mm/huge_memory.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 28909c475ee5..5a41b00d86bd 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2379,7 +2379,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
if (PageAnon(head) && !PageSwapCache(head)) {
page_ref_inc(page_tail);
} else {
- /* Additional pin to radix tree */
+ /* Additional pin to page cache */
page_ref_add(page_tail, 2);
}

@@ -2450,13 +2450,13 @@ static void __split_huge_page(struct page *page, struct list_head *list,
ClearPageCompound(head);
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
- /* Additional pin to radix tree of swap cache */
+ /* Additional pin to swap cache */
if (PageSwapCache(head))
page_ref_add(head, 2);
else
page_ref_inc(head);
} else {
- /* Additional pin to radix tree */
+ /* Additional pin to page cache */
page_ref_add(head, 2);
xa_unlock(&head->mapping->pages);
}
@@ -2568,7 +2568,7 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
{
int extra_pins;

- /* Additional pins from radix tree */
+ /* Additional pins from page cache */
if (PageAnon(page))
extra_pins = PageSwapCache(page) ? HPAGE_PMD_NR : 0;
else
@@ -2664,17 +2664,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
spin_lock_irqsave(zone_lru_lock(page_zone(head)), flags);

if (mapping) {
- void **pslot;
+ XA_STATE(xas, &mapping->pages, page_index(head));

- xa_lock(&mapping->pages);
- pslot = radix_tree_lookup_slot(&mapping->pages,
- page_index(head));
/*
- * Check if the head page is present in radix tree.
+ * Check if the head page is present in page cache.
* We assume all tail are present too, if head is there.
*/
- if (radix_tree_deref_slot_protected(pslot,
- &mapping->pages.xa_lock) != head)
+ xa_lock(&mapping->pages);
+ if (xas_load(&xas) != head)
goto fail;
}

--
2.15.0