Subject: Re: [PATCH 05/16] sched: SCHED_DEADLINE policy implementation.
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Tue, 24 Apr 2012
On Tue, 2012-04-24 at 00:21 +0100, Tommaso Cucinotta wrote:
> > Yes I can do it for x86_64, but people tend to get mighty upset if you
> > break the compile for all other arches...
>
> rather than breaking the compile, I was thinking more of using the
> optimization for a more accurate comparison on archs that have 64-bit
> mul and 128-bit cmp, and leaving the overflow on other archs. Though,
> that would imply a difference in behavior in those borderline cases
> (very big periods, I guess).
>
> However, I'm also puzzled about what would happen when compiling the
> current code on mostly 16-bit micros, which have very limited 32-bit
> operations...

We don't support 16-bit archs, and 32-bit is almost useless as it is :-)

Anyway, how about something like this? I guess archs can go wild and add
an asm/math128.h override if they want, etc.

Completely untested, hasn't even seen a compiler..
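
As an aside: on an arch where the compiler provides a native 128-bit
type (e.g. gcc's unsigned __int128 on x86_64), such an asm/math128.h
override could collapse mult_u128 into a single widening multiply. A
rough sketch of what that might look like -- equally untested, and not
part of the patch below:

/* asm/math128.h -- hypothetical override for a 64-bit arch with __int128 */
static inline u128 mult_u128(u64 a, u64 b)
{
	unsigned __int128 p = (unsigned __int128)a * b;
	u128 res;

	res.hi = (u64)(p >> 64);	/* upper 64 bits of the product */
	res.lo = (u64)p;		/* lower 64 bits of the product */

	return res;
}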

---
Subject: math128: Add {add,mult,cmp}_u128
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Date: Tue Apr 24 11:47:12 CEST 2012


Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
include/linux/math128.h | 79 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 79 insertions(+)

--- /dev/null
+++ b/include/linux/math128.h
@@ -0,0 +1,79 @@
+#ifndef _LINUX_MATH128_H
+#define _LINUX_MATH128_H
+
+#include <linux/types.h>
+
+typedef struct {
+ u64 hi, lo;
+} u128;
+
+static inline u128 add_u128(u128 a, u128 b)
+{
+ u128 res;
+
+ res.hi = a.hi + b.hi;
+ res.lo = a.lo + b.lo;
+
+ /* unsigned wrap in the low word means a carry into the high word */
+ if (res.lo < a.lo)
+ res.hi++;
+
+ return res;
+}
+
+/*
+ * a * b = (ah * 2^32 + al) * (bh * 2^32 + bl) =
+ * ah*bh * 2^64 + (ah*bl + bh*al) * 2^32 + al*bl
+ */
+static inline u128 mult_u128(u64 a, u64 b)
+{
+ u128 res;
+ u64 ah, al;
+ u64 bh, bl;
+ u128 t1, t2, t3, t4;
+
+ ah = a >> 32;
+ al = a & ((1ULL << 32) - 1);
+
+ bh = b >> 32;
+ bl = b & ((1ULL << 32) - 1);
+
+ /* ah*bh contributes to the high 64 bits only */
+ t1.lo = 0;
+ t1.hi = ah * bh;
+
+ /* the two cross products straddle the 64-bit boundary */
+ t2.lo = ah * bl;
+ t2.hi = t2.lo >> 32;
+ t2.lo <<= 32;
+
+ t3.lo = al * bh;
+ t3.hi = t3.lo >> 32;
+ t3.lo <<= 32;
+
+ /* al*bl contributes to the low 64 bits only */
+ t4.lo = al * bl;
+ t4.hi = 0;
+
+ res = add_u128(t1, t2);
+ res = add_u128(res, t3);
+ res = add_u128(res, t4);
+
+ return res;
+}
+
+static inline int cmp_u128(u128 a, u128 b)
+{
+ if (a.hi > b.hi)
+ return 1;
+ if (a.hi < b.hi)
+ return -1;
+ if (a.lo > b.lo)
+ return 1;
+ if (a.lo < b.lo)
+ return -1;
+
+ return 0;
+}
+
+#endif /* _LINUX_MATH128_H */
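
For the scheduler use case that started this thread, the point of these
helpers is to compare two runtime/period ratios without dividing and
without overflowing u64 on very big periods. A sketch of the intended
usage -- the function name and parameters here are illustrative, not
part of the patch:

/* Illustrative only: is a_runtime/a_period > b_runtime/b_period?
 * Cross-multiply instead of dividing; the products can exceed
 * 64 bits, hence mult_u128()/cmp_u128().
 */
static inline int dl_bandwidth_gt(u64 a_runtime, u64 a_period,
				  u64 b_runtime, u64 b_period)
{
	return cmp_u128(mult_u128(a_runtime, b_period),
			mult_u128(b_runtime, a_period)) > 0;
}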

