From: Mel Gorman <mel@csn.ul.ie>
Subject: [PATCH 15/20] Do not disable interrupts in free_page_mlock()
Date: 22 Feb 2009

free_page_mlock() tests and clears PG_mlocked. If the bit was set, it disables
interrupts to update counters, and this happens on every page free even though
interrupts are disabled again very shortly afterwards for the free itself. This
is wasteful.
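
To make the cost concrete, here is a minimal standalone C sketch of the
pre-patch flow. This is illustrative userspace code, not the kernel code:
irq_save()/irq_restore() and the *_old() functions are made-up stand-ins for
local_irq_save()/local_irq_restore() and the real free path.

#include <stdbool.h>
#include <stdio.h>

static int irq_disables;		/* counts irq-disabled sections */

static void irq_save(void)    { irq_disables++; }	/* stand-in for local_irq_save() */
static void irq_restore(void) { }			/* stand-in for local_irq_restore() */

/* Old free_page_mlock(): takes its own irq-disabled section for the counters. */
static void free_page_mlock_old(bool mlocked)
{
	if (mlocked) {
		irq_save();
		/* __dec_zone_page_state(page, NR_MLOCK); */
		/* __count_vm_event(UNEVICTABLE_MLOCKFREED); */
		irq_restore();
	}
}

/* Old __free_pages_ok(): disables interrupts a second time for the free. */
static void free_pages_ok_old(bool mlocked)
{
	free_page_mlock_old(mlocked);	/* called via free_pages_check() */
	irq_save();
	/* free_one_page(page_zone(page), page, order, migratetype); */
	irq_restore();
}

int main(void)
{
	free_pages_ok_old(true);
	printf("irq-disabled sections per mlocked free: %d\n", irq_disables); /* 2 */
	return 0;
}

Run, the sketch reports two interrupt-disable round trips for a single
mlocked page free; the patch below collapses them into one.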

This patch splits what free_page_mlock() does. The bit test-and-clear is still
made, but it moves to the callers; the update of the counters is delayed until
interrupts have been disabled for the free anyway. One potential weirdness with
this split is that the counters do not get updated if the bad_page() check is
triggered, but a system showing bad pages is getting screwed already.
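
Again as an illustrative standalone sketch rather than the real kernel code
(the names are stand-ins and the bad_page() handling is reduced to a flag),
the post-patch flow looks like this, including the path where a bad page
skips the counter update:

#include <stdbool.h>

static void irq_save(void)    { }	/* stand-in for local_irq_save() */
static void irq_restore(void) { }	/* stand-in for local_irq_restore() */

static void free_pages_ok_new(bool mlocked, bool bad)
{
	bool clearMlocked = mlocked;	/* models TestClearPageMlocked(page) */

	/* models free_pages_check() flagging a bad page: the PG_mlocked bit
	 * has already been cleared, so the counters are never updated */
	if (bad)
		return;

	irq_save();			/* the one section that already existed */
	if (clearMlocked) {
		/* free_page_mlock(page), now just the two counter updates */
	}
	/* __count_vm_events(PGFREE, 1 << order); free_one_page(...); */
	irq_restore();
}

int main(void)
{
	free_pages_ok_new(true, false);	/* normal free: counters updated once */
	free_pages_ok_new(true, true);	/* bad page: counter update skipped */
	return 0;
}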

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
---
 mm/internal.h   |   10 ++--------
 mm/page_alloc.c |    8 +++++++-
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 478223b..b52bf86 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -155,14 +155,8 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
  */
 static inline void free_page_mlock(struct page *page)
 {
-	if (unlikely(TestClearPageMlocked(page))) {
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__dec_zone_page_state(page, NR_MLOCK);
-		__count_vm_event(UNEVICTABLE_MLOCKFREED);
-		local_irq_restore(flags);
-	}
+	__dec_zone_page_state(page, NR_MLOCK);
+	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
 
 #else /* CONFIG_UNEVICTABLE_LRU */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9e9466..9adafba 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -501,7 +501,6 @@ static inline void __free_one_page(struct page *page,
 
 static inline int free_pages_check(struct page *page)
 {
-	free_page_mlock(page);
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL) |
 		(page_count(page) != 0) |
@@ -559,6 +558,7 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	unsigned long flags;
 	int i;
 	int bad = 0;
+	int clearMlocked = TestClearPageMlocked(page);
 
 	for (i = 0 ; i < (1 << order) ; ++i)
 		bad += free_pages_check(page + i);
@@ -574,6 +574,8 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	kernel_map_pages(page, 1 << order, 0);
 
 	local_irq_save(flags);
+	if (clearMlocked)
+		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order, migratetype);
 	local_irq_restore(flags);
@@ -1023,6 +1025,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
+	int clearMlocked = TestClearPageMlocked(page);
 
 	if (PageAnon(page))
 		page->mapping = NULL;
@@ -1039,6 +1042,9 @@ static void free_hot_cold_page(struct page *page, int cold)
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	local_irq_save(flags);
 	__count_vm_event(PGFREE);
+	if (clearMlocked)
+		free_page_mlock(page);
+
 	if (cold)
 		list_add_tail(&page->lru, &pcp->list);
 	else
--
1.5.6.5

