author		Brian Behlendorf <behlendorf1@llnl.gov>	2010-10-26 23:23:10 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-10-27 01:52:19 +0200
commit		658716d19f8f155c67d4677ba68034b8e492dfbe (patch)
tree		8743b9a16606a63dc33f93c580367baf36692454
parent		lib/parser: cleanup match_number() (diff)
div64_u64(): improve precision on 32bit platforms
The current implementation of div64_u64 for 32bit systems returns an
approximately correct result when the divisor exceeds 32bits. Since doing
64bit division using 32bit hardware is a long since solved problem we just
use one of the existing proven methods.

Additionally, add a div64_s64 function to correctly handle doing signed
64bit division.

Addresses https://bugzilla.redhat.com/show_bug.cgi?id=616105

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ben Woodard <bwoodard@llnl.gov>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Mark Grondona <mgrondona@llnl.gov>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
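For context, the old 32-bit fallback shifted both operands right by
fls(divisor >> 32) bits and then did a 64-by-32 divide, so the discarded
low divisor bits could push the quotient too high. A minimal user-space
sketch of that failure mode; old_div64_u64 is an illustrative model of the
removed code, with __builtin_clz() standing in for the kernel's fls():

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative model of the old approximate path: shift the high
	 * bits out of the divisor, then do a 64-by-32 divide. */
	static uint64_t old_div64_u64(uint64_t dividend, uint64_t divisor)
	{
		uint32_t high = divisor >> 32;
		uint32_t d = divisor;

		if (high) {
			unsigned int shift = 32 - __builtin_clz(high); /* fls(high) */
			d = divisor >> shift;
			dividend >>= shift;
		}
		return dividend / d;
	}

	int main(void)
	{
		uint64_t n = 1ULL << 63, d = 0x100000001ULL;	/* 2^63, 2^32 + 1 */

		printf("approx: %llx\n", (unsigned long long)old_div64_u64(n, d));
		printf("exact:  %llx\n", (unsigned long long)(n / d));
		/* Prints 80000000 vs 7fffffff: the old result is one too large. */
		return 0;
	}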
-rw-r--r--	include/linux/kernel.h	5
-rw-r--r--	include/linux/math64.h	12
-rw-r--r--	lib/div64.c	52
3 files changed, 59 insertions(+), 10 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 77b04ed037df..450092c1e35f 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -173,6 +173,11 @@ extern int _cond_resched(void);
 	(__x < 0) ? -__x : __x;		\
 })
 
+#define abs64(x) ({				\
+		s64 __x = (x);			\
+		(__x < 0) ? -__x : __x;		\
+	})
+
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void);
 #else
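The new abs64() is needed by the div64_s64() implementation added below:
the existing abs() macro stores its argument in a long, which is 32 bits
on the platforms this patch targets, so an s64 argument would be truncated
before the sign test. A user-space illustration; abs32 here models the
long-based macro and both names are illustrative, not kernel API:

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-ins: abs32 keeps its argument in a 32-bit type, the way
	 * abs() behaves on a 32-bit kernel; abs64 keeps all 64 bits. */
	#define abs32(x) ({ int32_t __x = (x); (__x < 0) ? -__x : __x; })
	#define abs64(x) ({ int64_t __x = (x); (__x < 0) ? -__x : __x; })

	int main(void)
	{
		int64_t v = -0x123456789LL;	/* does not fit in 32 bits */

		printf("%llx\n", (long long)abs32(v));	/* 23456789: truncated */
		printf("%llx\n", (long long)abs64(v));	/* 123456789: correct */
		return 0;
	}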
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c87f1528703a..23fcdfcba81b 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -35,6 +35,14 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
 	return dividend / divisor;
 }
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+	return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
 #ifndef div_u64_rem
@@ -53,6 +61,10 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
 
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
 #endif /* BITS_PER_LONG */
 
 /**
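With these declarations, generic code sees one div64_s64() signature on
both word sizes: the BITS_PER_LONG == 64 branch above compiles to a native
divide, while 32-bit builds pick up the out-of-line version added to
lib/div64.c below. A hypothetical caller might look like this; scale_s64
is illustrative only and not part of the patch:

	#include <linux/math64.h>

	/* Hypothetical helper: scale a signed 64-bit value by num/den.
	 * Resolves to a plain '/' on 64-bit kernels and to the new
	 * lib/div64.c routine on 32-bit ones. */
	static s64 scale_s64(s64 val, s64 num, s64 den)
	{
		return div64_s64(val * num, den);
	}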
diff --git a/lib/div64.c b/lib/div64.c
index a111eb8de9cf..5b4919191778 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'.  The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
 #ifndef div64_u64
 u64 div64_u64(u64 dividend, u64 divisor)
 {
-	u32 high, d;
+	u32 high = divisor >> 32;
+	u64 quot;
 
-	high = divisor >> 32;
-	if (high) {
-		unsigned int shift = fls(high);
+	if (high == 0) {
+		quot = div_u64(dividend, divisor);
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
 
-		d = divisor >> shift;
-		dividend >>= shift;
-	} else
-		d = divisor;
+		if (quot != 0)
+			quot--;
+		if ((dividend - quot * divisor) >= divisor)
+			quot++;
+	}
 
-	return div_u64(dividend, d);
+	return quot;
 }
 EXPORT_SYMBOL(div64_u64);
 #endif
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+	s64 quot, t;
+
+	quot = div64_u64(abs64(dividend), abs64(divisor));
+	t = (dividend ^ divisor) >> 63;
+
+	return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
 #endif /* BITS_PER_LONG == 32 */
 
 /*
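For reference, the new div64_u64() works by normalization: shifting both
operands right by n = 1 + fls(high) makes the divisor fit in 32 bits, and
per the proof referenced in the comment the trial quotient is then either
exact or one too large, so decrementing it and applying a single correction
step yields the exact result. A user-space transcription for spot-checking
against native 64-bit division; __builtin_clz() again stands in for fls():

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t new_div64_u64(uint64_t dividend, uint64_t divisor)
	{
		uint32_t high = divisor >> 32;
		uint64_t quot;

		if (high == 0) {
			quot = dividend / (uint32_t)divisor;
		} else {
			/* Shift so the divisor fits in 32 bits; the trial
			 * quotient is then exact or one too large. */
			int n = 1 + (32 - __builtin_clz(high)); /* 1 + fls(high) */
			quot = (dividend >> n) / (uint32_t)(divisor >> n);
			if (quot != 0)
				quot--;		/* now exact or one too small */
			if (dividend - quot * divisor >= divisor)
				quot++;		/* single exact correction step */
		}
		return quot;
	}

	int main(void)
	{
		uint64_t n = 1ULL << 63, d = 0x100000001ULL;

		/* Both print 7fffffff: the estimate is corrected exactly. */
		printf("%llx\n", (unsigned long long)new_div64_u64(n, d));
		printf("%llx\n", (unsigned long long)(n / d));
		return 0;
	}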
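The (quot ^ t) - t line in div64_s64() is branch-free conditional negation:
t is 0 when the operands have the same sign and -1 (all ones) when they
differ, because the arithmetic right shift replicates the sign bit of
dividend ^ divisor, and XORing with -1 then subtracting -1 is exactly
two's-complement negation. A small demonstration, assuming the usual
arithmetic (sign-extending) right shift for signed types, as the kernel does:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t dividend = -100, divisor = 7;

		/* t is -1 iff exactly one operand is negative. */
		int64_t t = (dividend ^ divisor) >> 63;
		int64_t quot = 100 / 7;		/* divide of magnitudes: 14 */

		/* (x ^ 0) - 0 == x;  (x ^ -1) - (-1) == ~x + 1 == -x. */
		printf("%lld\n", (long long)((quot ^ t) - t));	/* prints -14 */
		return 0;
	}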