Date:    Thu, 18 Jun 2009 15:45:21 -0400
From:    Christoph Hellwig <>
Subject: Re: [PATCH] m68k: merge the mmu and non-mmu versions of checksum.h
On Wed, Jun 17, 2009 at 05:11:15PM +1000, Greg Ungerer wrote:
> +#ifdef CONFIG_MMU
>  /*
>   * This is a version of ip_compute_csum() optimized for IP headers,
>   * which always checksum on 4 octet boundaries.
> @@ -59,6 +61,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
>  		: "memory");
>  	return (__force __sum16)~sum;
>  }
> +#else
> +__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
> +#endif
Any good reason this is inline for all mmu processors and out of line for nommu, independent of the actual cpu variant?
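
For reference, I'd expect an out-of-line nommu version to look roughly
like the generic C summation below.  This is only a sketch of mine, not
code from the patch; it assumes the usual kernel types from
<linux/types.h> and that ihl counts 32-bit words:

	__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
	{
		const u16 *p = iph;
		u32 sum = 0;
		unsigned int n;

		/* sum the IP header as 16-bit words */
		for (n = 0; n < ihl * 2; n++)
			sum += p[n];

		/* fold the carries back in twice, then complement */
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (__force __sum16)~sum;
	}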
>  static inline __sum16 csum_fold(__wsum sum)
>  {
>  	unsigned int tmp = (__force u32)sum;
> +#ifdef CONFIG_COLDFIRE
> +	tmp = (tmp & 0xffff) + (tmp >> 16);
> +	tmp = (tmp & 0xffff) + (tmp >> 16);
> +	return (__force __sum16)~tmp;
> +#else
>  	__asm__("swap %1\n\t"
>  		"addw %1, %0\n\t"
>  		"clrw %1\n\t"
> @@ -74,6 +84,7 @@ static inline __sum16 csum_fold(__wsum sum)
>  		: "=&d" (sum), "=&d" (tmp)
>  		: "0" (sum), "1" (tmp));
>  	return (__force __sum16)~sum;
> +#endif
>  }
I think this would be cleaner by having totally separate functions for both cases, e.g.
#ifdef CONFIG_COLDFIRE
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	tmp = (tmp & 0xffff) + (tmp >> 16);
	tmp = (tmp & 0xffff) + (tmp >> 16);

	return (__force __sum16)~tmp;
}
#else
...
#endif
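
A worked example of the fold, just to illustrate why it is done twice:
for sum = 0x1ffff the first pass gives 0xffff + 0x1 = 0x10000, which
still has a carry in bit 16; the second pass reduces that to
0x0 + 0x1 = 0x1, and the complement then yields 0xfffe.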