Subject: Re: [PATCH 15/22] Do not disable interrupts in free_page_mlock()
> > @@ -157,14 +157,9 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
> > */
> > static inline void free_page_mlock(struct page *page)
> > {
> > - if (unlikely(TestClearPageMlocked(page))) {
> > - unsigned long flags;
> > -
> > - local_irq_save(flags);
> > - __dec_zone_page_state(page, NR_MLOCK);
> > - __count_vm_event(UNEVICTABLE_MLOCKFREED);
> > - local_irq_restore(flags);
> > - }
> > + __ClearPageMlocked(page);
> > + __dec_zone_page_state(page, NR_MLOCK);
> > + __count_vm_event(UNEVICTABLE_MLOCKFREED);
> > }
>
> The conscientious reviewer runs around and checks for free_page_mlock()
> callers in other .c files which might be affected.
>
> Only there are no such callers.
>
> The reviewer's job would be reduced if free_page_mlock() wasn't
> needlessly placed in a header file!
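
(For illustration only, not part of the patch: a minimal, self-contained
sketch of the point above about header placement.  The file names
helper.h and only_user.c are made up, not kernel code.  A static inline
helper defined in a shared header can be called from every .c file that
includes the header, so a reviewer changing it has to check all of
them; the same helper made static inside the one .c file that uses it
can only be called from that file.)

/* helper.h -- hypothetical shared header, analogous to mm/internal.h */
#ifndef HELPER_H
#define HELPER_H

/* Callable from every .c file that includes this header, so a change
 * here means auditing all of those files for callers. */
static inline int helper_in_header(int x)
{
	return x + 1;
}

#endif /* HELPER_H */

/* only_user.c -- hypothetical lone caller, analogous to mm/page_alloc.c */
#include <stdio.h>
#include "helper.h"

/* Callable only from within this file; every caller sits in this one
 * translation unit, so the review stays local. */
static int helper_in_c_file(int x)
{
	return x + 1;
}

int main(void)
{
	printf("%d %d\n", helper_in_header(1), helper_in_c_file(2));
	return 0;
}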

Very sorry.

How about this?

=============================================
Subject: [PATCH] move free_page_mlock() to page_alloc.c

Currently, free_page_mlock() is called only from page_alloc.c, so it
can be moved out of mm/internal.h and into page_alloc.c.

Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
---
mm/internal.h | 18 ------------------
mm/page_alloc.c | 21 +++++++++++++++++++++
2 files changed, 21 insertions(+), 18 deletions(-)

Index: b/mm/internal.h
===================================================================
--- a/mm/internal.h 2009-04-24 09:12:03.000000000 +0900
+++ b/mm/internal.h 2009-04-24 09:12:10.000000000 +0900
@@ -150,23 +150,6 @@ static inline void mlock_migrate_page(st
}
}

-/*
- * free_page_mlock() -- clean up attempts to free and mlocked() page.
- * Page should not be on lru, so no need to fix that up.
- * free_pages_check() will verify...
- */
-static inline void free_page_mlock(struct page *page)
-{
- if (unlikely(TestClearPageMlocked(page))) {
- unsigned long flags;
-
- local_irq_save(flags);
- __dec_zone_page_state(page, NR_MLOCK);
- __count_vm_event(UNEVICTABLE_MLOCKFREED);
- local_irq_restore(flags);
- }
-}
-
#else /* CONFIG_HAVE_MLOCKED_PAGE_BIT */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
@@ -175,7 +158,6 @@ static inline int is_mlocked_vma(struct
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
-static inline void free_page_mlock(struct page *page) { }

#endif /* CONFIG_HAVE_MLOCKED_PAGE_BIT */

Index: b/mm/page_alloc.c
===================================================================
--- a/mm/page_alloc.c 2009-04-24 09:12:03.000000000 +0900
+++ b/mm/page_alloc.c 2009-04-24 09:13:25.000000000 +0900
@@ -491,6 +491,27 @@ static inline void __free_one_page(struc
zone->free_area[order].nr_free++;
}

+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+/*
+ * free_page_mlock() -- clean up attempts to free and mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static void free_page_mlock(struct page *page)
+{
+ if (unlikely(TestClearPageMlocked(page))) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __dec_zone_page_state(page, NR_MLOCK);
+ __count_vm_event(UNEVICTABLE_MLOCKFREED);
+ local_irq_restore(flags);
+ }
+}
+#else
+static void free_page_mlock(struct page *page) { }
+#endif
+
static inline int free_pages_check(struct page *page)
{
free_page_mlock(page);


