From: David Howells <dhowells@redhat.com>
Subject: Re: FRV/ARM unaligned access question
Date: 2008-10-08
Harvey Harrison <harvey.harrison@gmail.com> wrote:

> If there isn't an issue I'm missing, could ARM/FRV move over to the
> packed-struct version?

Using this source:

typedef unsigned char u8;
typedef unsigned int u32;

struct __una_u32 { u32 x __attribute__((packed)); };

#if 0 // packed struct
static inline u32 __get_unaligned_cpu32(const void *p)
{
	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
	return ptr->x;
}

static inline u32 get_unaligned_be32(const void *p)
{
	return __get_unaligned_cpu32((const u8 *)p);
}

#else // manual byteshift
static inline u32 __get_unaligned_be32(const u8 *p)
{
	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static inline u32 get_unaligned_be32(const void *p)
{
	return __get_unaligned_be32((const u8 *)p);
}
#endif

u32 jump(u32 *p)
{
	return get_unaligned_be32(p);
}
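
For reference, a hypothetical caller (not part of the test above; the buffer and
value are made up) that reads from a deliberately misaligned offset would look
like this with either variant:

/* Example only: read a big-endian u32 starting at an odd, and therefore
 * unaligned, offset within a byte buffer. */
static const u8 example_buf[8] = { 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x00, 0x00 };

u32 read_misaligned(void)
{
	/* &example_buf[1] is not 4-byte aligned; both helpers must still
	 * return 0x12345678 here. */
	return get_unaligned_be32(&example_buf[1]);
}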

I see the packed struct version compile (with -O2) to:

jump:
	ldub @(gr8,gr0),gr6
	ldubi @(gr8,1),gr4
	ldubi @(gr8,2),gr5
	ldubi @(gr8,3),gr8
	slli gr6,#24,gr6
	slli gr4,#16,gr4
	or.p gr4, gr6, gr4
	slli gr5,#8,gr5
	or gr5, gr4, gr5
	or.p gr8, gr5, gr8
	ret

and the byteshift version compile to:

jump:
	ldubi.p @(gr8,1),gr7
	mov gr8, gr5
	ldubi @(gr5,2),gr6
	ldub @(gr8,gr0),gr8
	ldubi @(gr5,3),gr9
	slli gr7,#16,gr7
	slli gr6,#8,gr6
	slli.p gr8,#24,gr8
	or gr6, gr7, gr6
	or gr8, gr9, gr8
	or.p gr8, gr6, gr8
	ret

so they're more or less equivalent, give or take the extra "mov" the compiler
emits unnecessarily in the byteshift version.


Switching to the packed struct algorithms also reduces the kernel size very
slightly. Before:

warthog>size vmlinux
   text    data     bss     dec     hex filename
2207836   66588  150189 2424613  24ff25 vmlinux

After:

warthog>size vmlinux
   text    data     bss     dec     hex filename
2207804   66588  150189 2424581  24ff05 vmlinux
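
That is a text saving of 2207836 - 2207804 = 32 bytes; data and bss are
unchanged.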

The attached patch boots okay over NFS.

David
---
[PATCH] FRV: Use packed-struct unalignment rather than manual-shift

From: David Howells <dhowells@redhat.com>

Use the packed-struct unalignment algorithms rather than the manual-shift
unalignment algorithms.

This makes the kernel very slightly smaller.

Signed-off-by: David Howells <dhowells@redhat.com>
---

include/asm-frv/unaligned.h | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)


diff --git a/include/asm-frv/unaligned.h b/include/asm-frv/unaligned.h
index 839a2fb..d06b9bc 100644
--- a/include/asm-frv/unaligned.h
+++ b/include/asm-frv/unaligned.h
@@ -12,8 +12,8 @@
 #ifndef _ASM_UNALIGNED_H
 #define _ASM_UNALIGNED_H
 
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_struct.h>
 #include <linux/unaligned/generic.h>
 
 #define get_unaligned __get_unaligned_be
