From: Nikita Danilov <nikita@clusterfs.com>
Date: 21 Nov 2004
Subject: [PATCH]: 2/4 mm/swap.c cleanup
Add a pagevec_for_each_page() macro to iterate over all pages in a
pagevec. The non-trivial part is keeping track of the current page's
zone and re-taking zone->lru_lock when switching to a new zone.

This simplifies the functions in mm/swap.c that process the pages of a
pvec in a batch.
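
Purely for illustration (not part of the patch): a hypothetical caller
could look like the sketch below. example_clear_referenced() is made up;
only the pagevec_for_each_page() macro added here and the usual page
flag helpers are assumed.

static void example_clear_referenced(struct pagevec *pvec)
{
	int i;
	struct page *page;
	struct zone *zone;

	/*
	 * The body runs with zone == page_zone(page) and zone->lru_lock
	 * held; the lock is re-taken only when the zone changes and is
	 * dropped after the last page.
	 */
	pagevec_for_each_page(pvec, i, page, zone) {
		if (!PageActive(page))
			ClearPageReferenced(page);
	}
}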

(Patch is for 2.6.10-rc2)

Signed-off-by: Nikita Danilov <nikita@clusterfs.com>

include/linux/pagevec.h | 45 +++++++++++++++++++++++++++++++++++++++++++
mm/swap.c | 50 +++++++++---------------------------------------
2 files changed, 55 insertions(+), 40 deletions(-)
diff -puN include/linux/pagevec.h~pvec-cleanup include/linux/pagevec.h
--- bk-linux/include/linux/pagevec.h~pvec-cleanup 2004-11-21 17:01:02.606535952 +0300
+++ bk-linux-nikita/include/linux/pagevec.h 2004-11-21 17:01:02.611535192 +0300
@@ -83,3 +83,48 @@ static inline void pagevec_lru_add(struc
if (pagevec_count(pvec))
__pagevec_lru_add(pvec);
}
+
+/*
+ * Helper macro for pagevec_for_each_page(). This is called on each
+ * iteration before entering the loop body. It checks whether we are
+ * switching to a new zone, in which case ->lru_lock has to be re-taken.
+ */
+#define __guardloop(_v, _i, _p, _z)				\
+({								\
+	struct zone *pagezone;					\
+								\
+	_p = (_v)->pages[_i];					\
+	pagezone = page_zone(_p);				\
+								\
+	if (pagezone != (_z)) {					\
+		if (_z)						\
+			spin_unlock_irq(&(_z)->lru_lock);	\
+		_z = pagezone;					\
+		spin_lock_irq(&(_z)->lru_lock);			\
+	}							\
+})
+
+/*
+ * Helper macro for pagevec_for_each_page(). This is called after the
+ * last iteration to release zone->lru_lock if necessary.
+ */
+#define __postloop(_v, _i, _p, _z)				\
+({								\
+	if ((_z) != NULL)					\
+		spin_unlock_irq(&(_z)->lru_lock);		\
+})
+
+/*
+ * Macro to iterate over all pages in a pvec. The loop body is invoked
+ * with the page's zone->lru_lock held. This is used by various functions
+ * in mm/swap.c to batch per-page operations that would otherwise have to
+ * acquire the hot zone->lru_lock for each page.
+ *
+ * This uses ugly preprocessor magic, but that's what the preprocessor is
+ * all about.
+ */
+#define pagevec_for_each_page(_v, _i, _p, _z)			\
+for (_i = 0, _z = NULL;					\
+     ((_i) < pagevec_count(_v) && (__guardloop(_v, _i, _p, _z), 1)) || \
+     (__postloop(_v, _i, _p, _z), 0);				\
+     (_i)++)
diff -puN mm/swap.c~pvec-cleanup mm/swap.c
--- bk-linux/mm/swap.c~pvec-cleanup 2004-11-21 17:01:02.609535496 +0300
+++ bk-linux-nikita/mm/swap.c 2004-11-21 17:01:02.611535192 +0300
@@ -116,19 +116,11 @@ void fastcall activate_page(struct page
static void __pagevec_mark_accessed(struct pagevec *pvec)
{
int i;
- struct zone *zone = NULL;
+ struct zone *zone;
+ struct page *page;

- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- struct zone *pagezone = page_zone(page);
-
- if (pagezone != zone) {
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- zone = pagezone;
- spin_lock_irq(&zone->lru_lock);
- }
- if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
+ pagevec_for_each_page(pvec, i, page, zone) {
+ if (!PageActive(page) && PageReferenced(page) && PageLRU(page)){
del_page_from_inactive_list(zone, page);
SetPageActive(page);
add_page_to_active_list(zone, page);
@@ -138,8 +130,6 @@ static void __pagevec_mark_accessed(stru
SetPageReferenced(page);
}
}
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
release_pages(pvec->pages, pvec->nr, pvec->cold);
pagevec_reinit(pvec);
}
@@ -322,25 +312,15 @@ void __pagevec_release_nonlru(struct pag
void __pagevec_lru_add(struct pagevec *pvec)
{
int i;
- struct zone *zone = NULL;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- struct zone *pagezone = page_zone(page);
+ struct page *page;
+ struct zone *zone;

- if (pagezone != zone) {
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- zone = pagezone;
- spin_lock_irq(&zone->lru_lock);
- }
+ pagevec_for_each_page(pvec, i, page, zone) {
if (TestSetPageLRU(page))
BUG();
ClearPageSkipped(page);
add_page_to_inactive_list(zone, page);
}
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
release_pages(pvec->pages, pvec->nr, pvec->cold);
pagevec_reinit(pvec);
}
@@ -350,26 +330,16 @@ EXPORT_SYMBOL(__pagevec_lru_add);
void __pagevec_lru_add_active(struct pagevec *pvec)
{
int i;
- struct zone *zone = NULL;
-
- for (i = 0; i < pagevec_count(pvec); i++) {
- struct page *page = pvec->pages[i];
- struct zone *pagezone = page_zone(page);
+ struct page *page;
+ struct zone *zone;

- if (pagezone != zone) {
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
- zone = pagezone;
- spin_lock_irq(&zone->lru_lock);
- }
+ pagevec_for_each_page(pvec, i, page, zone) {
if (TestSetPageLRU(page))
BUG();
if (TestSetPageActive(page))
BUG();
add_page_to_active_list(zone, page);
}
- if (zone)
- spin_unlock_irq(&zone->lru_lock);
release_pages(pvec->pages, pvec->nr, pvec->cold);
pagevec_reinit(pvec);
}
_