Subject: Re: [RFC] Tight check of pfn_valid on sparsemem
On Wed, 14 Jul 2010 01:44:23 +0900
Minchan Kim <minchan.kim@gmail.com> wrote:

> > If you _really_ can't make the section size smaller, and the vast
> > majority of the sections are fully populated, you could hack something
> > in. We could, for instance, have a global list that's mostly readonly
> > which tells you which sections need to be have their sizes closely
> > inspected. That would work OK if, for instance, you only needed to
> > check a couple of memory sections in the system. It'll start to suck if
> > you made the lists very long.
>
> Thanks for the advice. As I said, I hope Russell accepts the 16MB section size.
>

It seems what I needed was a good night's sleep...
How about this, if a 16MB section size is not acceptable?

== NOT TESTED AT ALL, EVEN NOT COMPILED ==

Register the address of the mem_section in the ->private field of the page structs
that back the section's memmap. If the field matches, the page is used as the memmap
of that section; otherwise the page is used for some other purpose and the memmap has
a hole there.
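
In other words (a condensed, untested sketch of the check; the helper name below is
made up, the real change is in the patch further down):

	/*
	 * pfn_to_page(pfn) gives the memmap entry for pfn.  That entry itself
	 * lives in some kernel page, so virt_to_page() on the entry address
	 * gives the page struct of the page backing the memmap.  If that
	 * backing page was marked with the owning mem_section, the memmap
	 * entry is real; if the memmap there was freed as a hole, the marker
	 * is absent.
	 */
	static inline bool memmap_entry_is_valid(struct mem_section *ms, unsigned long pfn)
	{
		struct page *entry = pfn_to_page(pfn);		/* entry in the sparse memmap */
		struct page *backing = virt_to_page(entry);	/* page that stores that entry */

		return backing->private == (unsigned long)ms;	/* set by mark_memmap_pit()? */
	}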

---
arch/arm/mm/init.c | 11 ++++++++++-
include/linux/mmzone.h | 19 ++++++++++++++++++-
mm/sparse.c | 37 +++++++++++++++++++++++++++++++++++++
3 files changed, 65 insertions(+), 2 deletions(-)

Index: mmotm-2.6.35-0701/include/linux/mmzone.h
===================================================================
--- mmotm-2.6.35-0701.orig/include/linux/mmzone.h
+++ mmotm-2.6.35-0701/include/linux/mmzone.h
@@ -1047,11 +1047,28 @@ static inline struct mem_section *__pfn_
return __nr_to_section(pfn_to_section_nr(pfn));
}

+#ifdef CONFIG_SPARSEMEM_HAS_PIT
+void mark_memmap_pit(unsigned long start, unsigned long end, bool valid);
+static inline int page_valid(struct mem_section *ms, unsigned long pfn)
+{
+ struct page *page = pfn_to_page(pfn);
+ struct page *__pg = virt_to_page(page);
+ return __pg->private == (unsigned long)ms;
+}
+#else
+static inline int page_valid(struct mem_section *ms, unsigned long pfn)
+{
+ return 1;
+}
+#endif
+
static inline int pfn_valid(unsigned long pfn)
{
+ struct mem_section *ms;
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
- return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
+ ms = __nr_to_section(pfn_to_section_nr(pfn));
+ return valid_section(ms) && page_valid(ms, pfn);
}

static inline int pfn_present(unsigned long pfn)
Index: mmotm-2.6.35-0701/mm/sparse.c
===================================================================
--- mmotm-2.6.35-0701.orig/mm/sparse.c
+++ mmotm-2.6.35-0701/mm/sparse.c
@@ -615,6 +615,43 @@ void __init sparse_init(void)
free_bootmem(__pa(usemap_map), size);
}

+#ifdef CONFIG_SPARSEMEM_HAS_PIT
+/*
+ * Fill the pg->private of the pages backing a section's memmap with a
+ * pointer to the mem_section.  pfn_valid() checks this later (see
+ * include/linux/mmzone.h).  The caller should call
+ *   mark_memmap_pit(start, end, true)   # for every allocated mem_map range
+ * and, after that,
+ *   mark_memmap_pit(start, end, false)  # for every pit (hole) in mem_map.
+ * Please see the usage in ARM.
+ */
+void mark_memmap_pit(unsigned long start, unsigned long end, bool valid)
+{
+ struct mem_section *ms;
+ unsigned long pos, next;
+ struct page *pg;
+ void *memmap, *mmap_end;
+
+ for (pos = start; pos < end; pos = next) {
+ next = (pos + PAGES_PER_SECTION) & PAGE_SECTION_MASK;
+ ms = __pfn_to_section(pos);
+ if (!valid_section(ms))
+ continue;
+ /* walk the pages that back the memmap of [pos, next) */
+ for (memmap = (void *)((unsigned long)pfn_to_page(pos) & PAGE_MASK),
+ mmap_end = pfn_to_page(next - 1);
+ memmap <= mmap_end;
+ memmap += PAGE_SIZE) {
+ pg = virt_to_page(memmap);
+ if (valid)
+ pg->private = (unsigned long)ms;
+ else
+ pg->private = 0;
+ }
+ }
+}
+#endif
+
#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
Index: mmotm-2.6.35-0701/arch/arm/mm/init.c
===================================================================
--- mmotm-2.6.35-0701.orig/arch/arm/mm/init.c
+++ mmotm-2.6.35-0701/arch/arm/mm/init.c
@@ -234,6 +234,13 @@ static void __init arm_bootmem_free(stru
arch_adjust_zones(zone_size, zhole_size);

free_area_init_node(0, zone_size, min, zhole_size);
+
+#ifdef CONFIG_SPARSEMEM_HAS_PIT
+ for_each_bank(i, mi) {
+ mark_memmap_pit(bank_pfn_start(&mi->bank[i]),
+ bank_pfn_end(&mi->bank[i]), true);
+ }
+#endif
}

#ifndef CONFIG_SPARSEMEM
@@ -386,8 +393,10 @@ free_memmap(unsigned long start_pfn, uns
* If there are free pages between these,
* free the section of the memmap array.
*/
- if (pg < pgend)
+ if (pg < pgend) {
+ mark_memmap_pit(start_pfn, end_pfn, false);
free_bootmem(pg, pgend - pg);
+ }
}

/*


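For reference, this is the kind of caller the tight check is meant to protect
(an illustrative sketch only, not part of the patch): a pfn walker that trusts
pfn_valid() before touching the page struct.

	/*
	 * Illustrative only: a typical pfn walker that relies on pfn_valid().
	 * With the tight check above, pfns whose memmap was freed by ARM's
	 * free_memmap() are rejected here instead of handing back a page
	 * struct that points into a hole.
	 */
	static void walk_pfn_range(unsigned long start_pfn, unsigned long end_pfn)
	{
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))	/* now also catches in-section holes */
				continue;
			page = pfn_to_page(pfn);
			/* ... inspect or operate on 'page' ... */
		}
	}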