Subject: Re: ZONE_PADDING wastes 4 bytes of the new cacheline
On Fri, Oct 22, 2004 at 08:26:47AM +1000, Nick Piggin wrote:
> Uses the zero-length array, which seems to be quite abundant throughout
> the tree (although maybe that also causes problems when it is by itself
> in an array?).

Then we could use this on the spinlock too, to drop the #ifdef. It
doesn't really matter either way; in practice it's the same.
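
For reference, the zero-length-array variant would look roughly like this
(sketched from memory of include/linux/mmzone.h of that era, so treat it as
illustrative rather than the exact tree contents):

#ifdef CONFIG_SMP
/*
 * The padding struct has zero size on its own, but the cacheline
 * alignment forces the next struct zone field onto a fresh line.
 */
struct zone_padding {
	char x[0];
} ____cacheline_maxaligned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
/* on UP there is no false sharing to avoid, so it compiles away */
#define ZONE_PADDING(name)
#endif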

> @@ -108,10 +108,7 @@ struct per_cpu_pageset {
> */
>
> struct zone {
> - /*
> - * Commonly accessed fields:
> - */
> - spinlock_t lock;
> + /* Fields commonly accessed by the page allocator */
> unsigned long free_pages;
> unsigned long pages_min, pages_low, pages_high;
> /*
> @@ -128,8 +125,18 @@ struct zone {
> */
> unsigned long protection[MAX_NR_ZONES];
>
> + struct per_cpu_pageset pageset[NR_CPUS];
> +
> + /*
> + * free areas of different sizes
> + */
> + spinlock_t lock;
> + struct free_area free_area[MAX_ORDER];
> +
> +
> ZONE_PADDING(_pad1_)
>
> + /* Fields commonly accessed by the page reclaim scanner */
> spinlock_t lru_lock;
> struct list_head active_list;
> struct list_head inactive_list;
> @@ -137,10 +144,8 @@ struct zone {
> unsigned long nr_scan_inactive;
> unsigned long nr_active;
> unsigned long nr_inactive;
> - int all_unreclaimable; /* All pages pinned */
> unsigned long pages_scanned; /* since last reclaim */
> -
> - ZONE_PADDING(_pad2_)
> + int all_unreclaimable; /* All pages pinned */
>
> /*
> * prev_priority holds the scanning priority for this zone. It is
> @@ -161,10 +166,9 @@ struct zone {
> int temp_priority;
> int prev_priority;
>
> - /*
> - * free areas of different sizes
> - */
> - struct free_area free_area[MAX_ORDER];
> +
> + ZONE_PADDING(_pad2_)
> + /* Rarely used or read-mostly fields */
>
> /*
> * wait_table -- the array holding the hash table
> @@ -194,10 +198,6 @@ struct zone {
> unsigned long wait_table_size;
> unsigned long wait_table_bits;
>
> - ZONE_PADDING(_pad3_)
> -
> - struct per_cpu_pageset pageset[NR_CPUS];
> -
> /*
> * Discontig memory support fields.
> */
> @@ -206,12 +206,13 @@ struct zone {
> /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
> unsigned long zone_start_pfn;
>
> + unsigned long spanned_pages; /* total size, including holes */
> + unsigned long present_pages; /* amount of memory (excluding holes) */
> +
> /*
> * rarely used fields:
> */
> char *name;
> - unsigned long spanned_pages; /* total size, including holes */
> - unsigned long present_pages; /* amount of memory (excluding holes) */
> } ____cacheline_maxaligned_in_smp;

Looks reasonable. The only con is that this rejects on my tree ;). pages_*
and protection are gone in my tree, replaced by watermarks[] using the
very same optimal and proven algorithm from 2.4 (enabled by default, of
course). I'll reevaluate the false sharing later on.
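
FWIW, here is a quick userspace sketch of the same trick, just to show how
the aligned zero-size member pushes the reclaim fields onto their own
cacheline. The field names and the 64-byte line size are assumptions for
illustration, not the real struct zone:

#include <stdio.h>
#include <stddef.h>

#define CACHELINE 64	/* assumed L1 line size for the example */

struct zone_sketch {
	/* hot page allocator fields */
	unsigned long free_pages;
	unsigned long pages_min, pages_low, pages_high;
	/* aligned zero-size member: same idea as ZONE_PADDING(_pad1_) */
	struct { char x[0]; } _pad1_ __attribute__((aligned(CACHELINE)));
	/* hot page reclaim fields, now on a fresh cacheline */
	unsigned long lru_lock;	/* stand-in for spinlock_t */
	unsigned long nr_active, nr_inactive;
};

int main(void)
{
	/* lru_lock should land at offset 64, not right after pages_high */
	printf("lru_lock at offset %zu, sizeof %zu\n",
	       offsetof(struct zone_sketch, lru_lock),
	       sizeof(struct zone_sketch));
	return 0;
}

With gcc on a typical 64-bit target this prints "lru_lock at offset 64,
sizeof 128", i.e. the allocator fields and the reclaim lock can never
false-share a cacheline.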
