Date: 19 Jul 1998
Subject: revised patch for memory defragmentation
I've made a few improvements to the memory defragmentation patch, and it
now seems to be working somewhat more effectively. The goal for order-1
pages has been raised to 16, as these are the most important, and a
surplus of order-1 pages is also needed if there's to be any hope of
coalescing order-2 pages.
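
(For reference: in the buddy allocator an order-n block can only become
part of an order-(n+1) block by coalescing with its buddy, so the order-1
surplus is the raw material for orders 2 and 3. A rough illustration of
the buddy relation -- not code from the patch:

	/*
	 * Illustration only, not part of the patch.  An order-n block
	 * starting at page frame map_nr coalesces with the block
	 * starting at (map_nr ^ (1 << n)) to form one order-(n+1)
	 * block, which is why a surplus of order-1 blocks has to
	 * exist before order-2 blocks can appear.
	 */
	static inline unsigned long buddy_of(unsigned long map_nr, int order)
	{
		return map_nr ^ (1UL << order);
	}
)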

The patch now uses vmalloc() for its bitmap when system memory is over
128M, so it should work on all machines now. (That path is untested,
though.)
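
The 128M figure just falls out of the bitmap size: one bit per page frame
is num_physpages/8 bytes, and (assuming i386 4K pages) 128M of RAM is
32768 frames, i.e. exactly one 4096-byte page. The allocation in
anneal_mem() below reflects this:

	map_size = num_physpages >> 3;	/* one bit per page frame */
	if (map_size <= PAGE_SIZE)	/* fits in one page: <= 128M with 4K pages */
		bitmap = (unsigned char *) __get_free_page(GFP_KERNEL);
	else				/* larger boxes need vmalloc() */
		bitmap = (unsigned char *) vmalloc(map_size);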

The defragger reports the before-and-after counts of free blocks on the
order-1, -2, and -3 lists, so you'll see messages like:

Jul 19 10:08:36 acer kernel: anneal_mem: before defragging: 5 1 0
Jul 19 10:08:36 acer kernel: anneal_mem: after defragging: 30 5 0
Jul 19 10:45:40 acer kernel: anneal_mem: before defragging: 3 5 2
Jul 19 10:45:40 acer kernel: anneal_mem: after defragging: 15 7 2

Thanks to all who helped with testing and suggestions; I'll keep trying
to improve its operation. The current patch appears stable and safe,
though not yet as effective as I'd like.

Regards,
Bill

--- linux-2.1.109/include/linux/mm.h.old Fri Jul 17 09:34:42 1998
+++ linux-2.1.109/include/linux/mm.h Fri Jul 17 21:03:39 1998
@@ -251,20 +251,24 @@
return page;
}

-/* memory.c & swap.c*/
+/* page_alloc.c */

/*
* Decide if we should try to do some swapout..
*/
extern int free_memory_available(void);

+extern int need_defrag(void);
+extern void anneal_mem(void);
+
#define free_page(addr) free_pages((addr),0)
extern void FASTCALL(free_pages(unsigned long addr, unsigned long order));
extern void FASTCALL(__free_page(struct page *));
-
extern void show_free_areas(void);
-extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
- unsigned long address);
+
+/* memory.c & swap.c*/
+extern unsigned long put_dirty_page(struct task_struct *, unsigned long,
+ unsigned long);

extern void free_page_tables(struct mm_struct * mm);
extern void clear_page_tables(struct task_struct * tsk);
--- linux-2.1.109/mm/page_alloc.c.old Fri Jul 17 09:10:45 1998
+++ linux-2.1.109/mm/page_alloc.c Sun Jul 19 10:36:23 1998
@@ -399,3 +399,278 @@
set_pte(page_table, pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))));
return;
}
+
+/* ==================== Memory Defragmentation ======================== */
+
+#define DEFRAG_MAX_ORDER 3
+#define DEFRAG_PAGES 32
+
+/* These are the goals for the defragmentation code */
+int free_mem_goal[NR_MEM_LISTS] = {0, 16, 4, 2, 0, };
+int free_mem_curr[NR_MEM_LISTS]; /* what we have */
+
+/*
+ * Check whether we need to run the memory defragger.
+ * We only care about the order 1, 2, and 3 lists, but
+ * allow a credit to trickle down from the higher orders.
+ */
+int need_defrag(void)
+{
+ int order = NR_MEM_LISTS, credit = 0, defrag = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&page_alloc_lock, flags);
+ while (--order) {
+ struct free_area_struct *list = free_area + order;
+ struct page *next = list->next;
+
+ credit <<= 1;
+ /*
+ * Count the nodes on the memory list, but
+ * stop after one node on the higher orders.
+ */
+ free_mem_curr[order] = 0;
+ while (next != memory_head(list)) {
+ credit++;
+ free_mem_curr[order]++;
+ if (order > DEFRAG_MAX_ORDER)
+ break;
+ next = next->next;
+ }
+ /*
+ * Check whether we've met the goal.
+ */
+ if (credit >= free_mem_goal[order]) {
+ credit -= free_mem_goal[order];
+ } else {
+ credit = 0;
+ defrag = 1;
+ }
+ }
+ spin_unlock_irqrestore(&page_alloc_lock, flags);
+ return defrag;
+}
+
+/*
+ * Builds a bitmap of the free pages in the memory lists
+ * up to and including the specified order.
+ */
+static void build_free_map(unsigned char *map, int size, int max_order)
+{
+ unsigned long flags;
+ int order;
+
+ /* Clear the bitmap */
+ memset((void *) map, 0, size);
+
+ spin_lock_irqsave(&page_alloc_lock, flags);
+
+ for (order = 0; order <= max_order; order++) {
+ struct free_area_struct *area = free_area + order;
+ struct page *next = memory_head(area), *node;
+ unsigned long index, map_nr;
+
+ while ((node = next->next) != memory_head(area)) {
+ next = node;
+ map_nr = node->map_nr;
+ index = map_nr + (1 << order) - 1;
+ while (index >= map_nr) {
+ set_bit(index, map);
+ index--;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&page_alloc_lock, flags);
+}
+
+/*
+ * Look for candidate pages to anneal the specified order.
+ */
+static int find_candidate_pages(unsigned char *map, int size, int order,
+ int *list)
+{
+ int nbits = (1 << order);
+ int num = 0, i, offset, index, map_nr;
+
+ for (i = 0; i < size; i++, map++) {
+ if (!*map)
+ continue;
+ for (offset = 0; offset < 8; offset += nbits) {
+ /*
+ * Look for page groups needing only one
+ * page to be coalesced.
+ */
+ map_nr = 0;
+ for (index = 0; index < nbits; index++) {
+ if (test_bit(offset + index, map))
+ continue;
+ /* More than one page missing? */
+ if (map_nr)
+ goto next;
+ /* Save the index of the missing page */
+ map_nr = (i << 3) + offset + index;
+#ifdef DEFRAG_DEBUG
+printk("find_candidate_pages: mask=%x, bit=%d, map_nr=%d\n",
+*map, offset + index, map_nr);
+#endif
+ }
+ /*
+ * Check whether we found a good candidate.
+ */
+ if (map_nr) {
+ struct page *page = mem_map + map_nr;
+ if (atomic_read(&page->count) > 1)
+ goto next;
+ /* This is all we handle so far */
+ if (page->buffers) {
+ if (page->buffers->b_count)
+ goto next;
+ } else if (!page->inode)
+ goto next;
+ list[num] = map_nr;
+ num++;
+ if (num >= DEFRAG_PAGES)
+ goto out;
+ }
+ next: ;
+ }
+
+ }
+out:
+#ifdef DEFRAG_DEBUG
+printk("find_candidate_pages: order %d, found %d\n", order, num);
+#endif
+ return num;
+}
+
+/*
+ * Attempt to free or replace the specified page.
+ */
+static int replace_page(unsigned char *bitmap, int map_nr)
+{
+ struct page *page = mem_map + map_nr;
+ int result = 0;
+
+ /*
+ * Check whether it's a page we can handle ...
+ */
+ if (PageLocked(page))
+ goto out;
+
+ switch (atomic_read(&page->count)) {
+ default:
+ /* may handle this case later */
+ goto out;
+ case 0:
+ /* shouldn't happen ... */
+ printk("replace_page: page not in use??\n");
+ goto out;
+ case 1:
+ break;
+ }
+
+ /* The easiest case of all ... */
+ if (page->inode) {
+#ifdef DEFRAG_DEBUG
+printk("replace_page: page %p in page cache\n", page);
+#endif
+ if (PageSwapCache(page))
+ delete_from_swap_cache(page);
+ else {
+ /* N.B. make a routine in filemap.c for this */
+ remove_page_from_hash_queue(page);
+ remove_page_from_inode_queue(page);
+ __free_page(page);
+ }
+ goto out_set;
+ }
+ /* Not too hard if it works ... */
+ if (page->buffers) {
+ struct buffer_head *bh = page->buffers;
+ if (try_to_free_buffer(bh, &bh, 6)) {
+#ifdef DEFRAG_DEBUG
+printk("replace_page: page %p in buffer cache\n", page);
+#endif
+ goto out_set;
+ }
+ goto out;
+ }
+ /* Possibly a mapped page? Not handled yet ... */
+ goto out;
+
+ /*
+ * We freed the page, so update our bitmap.
+ */
+out_set:
+ set_bit(page->map_nr, bitmap);
+ result = 1;
+out:
+ return result;
+}
+
+/*
+ * Top-level entry point for memory defragmentation.
+ * Attempts to repopulate the higher order memory lists
+ * by searching for page groups missing only a single
+ * page.
+ */
+void anneal_mem(void)
+{
+ int map_size = (num_physpages >> 3);
+ unsigned char *bitmap;
+ int order, i, num, progress;
+ int list[DEFRAG_PAGES];
+
+printk("anneal_mem: before defragging: %d %d %d\n",
+free_mem_curr[1], free_mem_curr[2], free_mem_curr[3]);
+
+ /*
+ * Allocate memory for the page bitmap,
+ * using vmalloc() if memory > 128M.
+ */
+ if (map_size <= PAGE_SIZE)
+ bitmap = (unsigned char *) __get_free_page(GFP_KERNEL);
+ else
+ bitmap = (unsigned char *) vmalloc(map_size);
+ if (!bitmap)
+ return;
+ /*
+ * Build the bitmap for the lists to be defragged.
+ * Only need to do this once ... map is updated as
+ * pages are freed.
+ */
+ build_free_map(bitmap, map_size, DEFRAG_MAX_ORDER);
+
+repeat:
+ progress = 0;
+ /* Iterate over the orders to be defragged. */
+ for (order = 1; order <= DEFRAG_MAX_ORDER; order++) {
+ /*
+ * Select the pages to build page groups of this order.
+ */
+ num = find_candidate_pages(bitmap, map_size, order, list);
+ /*
+ * Try to free or replace the candidate pages.
+ */
+ for (i = 0; i < num; i++) {
+ if (replace_page(bitmap, list[i])) {
+ free_mem_curr[order]++;
+ list[i] = -1;
+ progress = 1;
+ }
+ }
+ }
+ /* Keep trying if we made progress. */
+ if (progress) {
+ if (need_defrag())
+ goto repeat;
+ }
+printk("anneal_mem: after defragging: %d %d %d\n",
+free_mem_curr[1], free_mem_curr[2], free_mem_curr[3]);
+
+ /* Free the bitmap */
+ if (map_size <= PAGE_SIZE)
+ free_page((unsigned long) bitmap);
+ else
+ vfree((void *) bitmap);
+}
--- linux-2.1.109/mm/vmscan.c.old Fri Jul 17 09:10:45 1998
+++ linux-2.1.109/mm/vmscan.c Sat Jul 18 22:03:41 1998
@@ -561,20 +571,21 @@
tries >>= 4*free_memory_available();

while (tries--) {
- int gfp_mask;
-
if (free_memory_available() > 1)
break;
- gfp_mask = __GFP_IO;
- do_try_to_free_page(gfp_mask);
+ do_try_to_free_page(__GFP_IO);
/*
* Syncing large chunks is faster than swapping
* synchronously (less head movement). -- Rik.
*/
if (atomic_read(&nr_async_pages) >= pager_daemon.swap_cluster)
run_task_queue(&tq_disk);
-
}
+ /*
+ * Check whether we need to call the memory defragger.
+ */
+ if (need_defrag())
+ anneal_mem();
}
/* As if we could ever get here - maybe we want to make this killable */
remove_wait_queue(&kswapd_wait, &wait);