Subject: [PATCH 01/24] Rename .data.cacheline_aligned to .data..cacheline_aligned.
    From: Tim Abbott <tabbott@ksplice.com>

    Signed-off-by: Tim Abbott <tabbott@ksplice.com>
    Cc: Sam Ravnborg <sam@ravnborg.org>
    Signed-off-by: Denys Vlasenko <vda.linux@googlemail.com>
    ---
 arch/powerpc/kernel/vmlinux.lds.S |    2 +-
 arch/x86/kernel/init_task.c       |    2 +-
 include/asm-generic/vmlinux.lds.h |    2 +-
 include/linux/cache.h             |    2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
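
For context (illustration only, not part of the diff below): the macro touched in
include/linux/cache.h is what routes C objects into this section. Anything declared
__cacheline_aligned is padded to SMP_CACHE_BYTES and, after this rename, emitted into
.data..cacheline_aligned, which the linker script collects via CACHELINE_ALIGNED_DATA().
The ".." infix keeps the kernel's own special section names distinct from the
.data.<identifier> sections gcc emits under -ffunction-sections/-fdata-sections, since C
identifiers cannot contain dots. A minimal sketch of the usage, with a made-up variable
name (hot_counters):

    #include <linux/cache.h>

    /*
     * hot_counters is a made-up example.  The attribute pads the object to
     * SMP_CACHE_BYTES and places it in .data..cacheline_aligned, so CPUs
     * touching neighbouring data do not false-share its cache line.
     */
    static struct {
            unsigned long hits;
            unsigned long misses;
    } hot_counters __cacheline_aligned;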

    diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
    index dcd01c8..3229c06 100644
    --- a/arch/powerpc/kernel/vmlinux.lds.S
    +++ b/arch/powerpc/kernel/vmlinux.lds.S
    @@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}

    diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
    index 3a54dcb..43e9ccf 100644
    --- a/arch/x86/kernel/init_task.c
    +++ b/arch/x86/kernel/init_task.c
    @@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
    diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
    index 67e6520..78450aa 100644
    --- a/include/asm-generic/vmlinux.lds.h
    +++ b/include/asm-generic/vmlinux.lds.h
    @@ -189,7 +189,7 @@
 
 #define CACHELINE_ALIGNED_DATA(align)				\
 	. = ALIGN(align);					\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)					\
 	. = ALIGN(align);					\
    diff --git a/include/linux/cache.h b/include/linux/cache.h
    index 97e2488..4c57065 100644
    --- a/include/linux/cache.h
    +++ b/include/linux/cache.h
    @@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned					\
   __attribute__((__aligned__(SMP_CACHE_BYTES),		\
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
    --
    1.6.2.4

