Date: 21 Oct 2004
From: Nick Piggin <nickpiggin@yahoo.com.au>
Subject: Re: ZONE_PADDING wastes 4 bytes of the new cacheline

Andrew Morton wrote:
> Nick Piggin <nickpiggin@yahoo.com.au> wrote:
>
>> >  #if defined(CONFIG_SMP)
>> >  struct zone_padding {
>> > -	int x;
>> >  } ____cacheline_maxaligned_in_smp;
>> >  #define ZONE_PADDING(name) struct zone_padding name;
>> >  #else
>>
>> Perhaps to keep old compilers working? Not sure.
>
>
> gcc-2.95 is OK with it.
>
> Stock 2.6.9:
>
> sizeof(struct zone) = 1920
>
> With Andrea's patch:
>
> sizeof(struct zone) = 1536
>
> With ZONE_PADDING removed:
>
> sizeof(struct zone) = 1408
>
>

How about this patch? For me, sizeof(struct zone) is 1536 before and 1152 afterwards.
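
The numbers can be reproduced with a throwaway debug line; a minimal
sketch, assuming it is dropped into some boot-time code such as
mm/page_alloc.c:

	/* hypothetical one-off check, not part of the patch */
	printk("sizeof(struct zone) = %lu\n",
	       (unsigned long)sizeof(struct zone));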

It uses a zero-length array, which already appears in abundance
throughout the tree (although maybe that causes problems when the
padding struct is itself placed in an array?).
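
To see why the zero-length member still does the job, here is a
userspace sketch (not from the patch; 64 stands in for the SMP
cacheline size that ____cacheline_maxaligned_in_smp uses):

#include <stdio.h>

#define CACHELINE 64	/* stand-in for the arch cacheline size */

struct pad_int  { int x;     } __attribute__((__aligned__(CACHELINE)));
struct pad_zero { char x[0]; } __attribute__((__aligned__(CACHELINE)));

struct demo_int  { unsigned long a; struct pad_int  pad; unsigned long b; };
struct demo_zero { unsigned long a; struct pad_zero pad; unsigned long b; };

int main(void)
{
	/* Both pads push 'b' onto its own cacheline, but the int pad
	 * also occupies a whole aligned slot of its own: on a 64-bit
	 * box this prints 192 for the int pad vs 128 for the
	 * zero-length one. */
	printf("int pad:  %zu\n", sizeof(struct demo_int));
	printf("zero pad: %zu\n", sizeof(struct demo_zero));
	return 0;
}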

It also tries to be a bit smarter about grouping commonly accessed
fields together, which surely can't be worse than before.
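
The point of the grouping is that fields touched under zone->lock
share cachelines with each other, while the zone->lru_lock fields
start on a different line. A toy illustration (again not the real
struct zone; 64 again stands in for the cacheline size):

#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64

struct pad { char x[0]; } __attribute__((__aligned__(CACHELINE)));

struct toy_zone {
	int lock;			/* stand-in for spinlock_t */
	unsigned long free_pages;	/* allocator side, under 'lock' */

	struct pad _pad1_;

	int lru_lock;			/* reclaim side, under 'lru_lock' */
	unsigned long nr_active;
};

int main(void)
{
	/* Prints 0 and 64: the allocator and the reclaim scanner
	 * hammer on different cachelines, so SMP CPUs don't bounce
	 * one hot line between them. */
	printf("lock     @ %zu\n", offsetof(struct toy_zone, lock));
	printf("lru_lock @ %zu\n", offsetof(struct toy_zone, lru_lock));
	return 0;
}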



---

linux-2.6-npiggin/include/linux/mmzone.h | 37 +++++++++++++++----------------
1 files changed, 19 insertions(+), 18 deletions(-)

diff -puN include/linux/mmzone.h~mm-help-zone-padding include/linux/mmzone.h
--- linux-2.6/include/linux/mmzone.h~mm-help-zone-padding 2004-10-22 08:21:30.000000000 +1000
+++ linux-2.6-npiggin/include/linux/mmzone.h 2004-10-22 08:24:09.000000000 +1000
@@ -35,7 +35,7 @@ struct pglist_data;
  */
 #if defined(CONFIG_SMP)
 struct zone_padding {
-	int x;
+	char x[0];
 } ____cacheline_maxaligned_in_smp;
 #define ZONE_PADDING(name) struct zone_padding name;
 #else
@@ -108,10 +108,7 @@ struct per_cpu_pageset {
  */
 
 struct zone {
-	/*
-	 * Commonly accessed fields:
-	 */
-	spinlock_t lock;
+	/* Fields commonly accessed by the page allocator */
 	unsigned long free_pages;
 	unsigned long pages_min, pages_low, pages_high;
 	/*
@@ -128,8 +125,18 @@ struct zone {
 	 */
 	unsigned long protection[MAX_NR_ZONES];
 
+	struct per_cpu_pageset pageset[NR_CPUS];
+
+	/*
+	 * free areas of different sizes
+	 */
+	spinlock_t lock;
+	struct free_area free_area[MAX_ORDER];
+
+
 	ZONE_PADDING(_pad1_)
 
+	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t lru_lock;
 	struct list_head active_list;
 	struct list_head inactive_list;
@@ -137,10 +144,8 @@ struct zone {
 	unsigned long nr_scan_inactive;
 	unsigned long nr_active;
 	unsigned long nr_inactive;
-	int all_unreclaimable; /* All pages pinned */
 	unsigned long pages_scanned; /* since last reclaim */
-
-	ZONE_PADDING(_pad2_)
+	int all_unreclaimable; /* All pages pinned */
 
 	/*
 	 * prev_priority holds the scanning priority for this zone. It is
@@ -161,10 +166,9 @@ struct zone {
 	int temp_priority;
 	int prev_priority;
 
-	/*
-	 * free areas of different sizes
-	 */
-	struct free_area free_area[MAX_ORDER];
+
+	ZONE_PADDING(_pad2_)
+	/* Rarely used or read-mostly fields */
 
 	/*
 	 * wait_table -- the array holding the hash table
@@ -194,10 +198,6 @@ struct zone {
 	unsigned long wait_table_size;
 	unsigned long wait_table_bits;
 
-	ZONE_PADDING(_pad3_)
-
-	struct per_cpu_pageset pageset[NR_CPUS];
-
 	/*
 	 * Discontig memory support fields.
 	 */
@@ -206,12 +206,13 @@ struct zone {
 	struct pglist_data *zone_pgdat;
 	struct page *zone_mem_map;
 	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
 	unsigned long zone_start_pfn;
 
+	unsigned long spanned_pages;	/* total size, including holes */
+	unsigned long present_pages;	/* amount of memory (excluding holes) */
+
 	/*
 	 * rarely used fields:
 	 */
 	char *name;
-	unsigned long spanned_pages;	/* total size, including holes */
-	unsigned long present_pages;	/* amount of memory (excluding holes) */
 } ____cacheline_maxaligned_in_smp;


_