return bitLen(x) - 1
}
-// Number of leading zeros in x.
-func leadingZeros(x Word) uint {
+// nlz returns the number of leading zeros in x.
+func nlz(x Word) uint {
return uint(_W - bitLen(x))
}
+// nlz64 returns the number of leading zeros in x.
+func nlz64(x uint64) uint {
+ switch _W {
+ case 32:
+ w := x >> 32
+ if w == 0 {
+ return 32 + nlz(Word(x))
+ }
+ return nlz(Word(w))
+ case 64:
+ return nlz(Word(x))
+ }
+ panic("unreachable")
+}
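+
+// A rough worked example of the split above: nlz64(1<<32) is 31 on either
+// word size. With _W == 32 the high word x>>32 is 1, so the result is
+// nlz(1) == 32-1 == 31; with _W == 64 it is nlz(Word(1<<32)) == 64-33 == 31.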
+
// q = (u1<<_W + u0 - r)/v
// Adapted from Warren, Hacker's Delight, p. 152.
func divWW_g(u1, u0, v Word) (q, r Word) {
return 1<<_W - 1, 1<<_W - 1
}
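	// Normalize the divisor: the shift below sets its most significant bit,
	// which Warren's algorithm relies on when estimating the half-word
	// quotient digits. For example, with _W == 64 and v == 3, s == 62 and
	// v<<s has its top bit set.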
- s := leadingZeros(v)
+ s := nlz(v)
v <<= s
vn1 := v >> _W2
return
}
-// nlz returns the number of leading zero bits in x.
-func nlz(x Word) uint {
- return _W - uint(bitLen(x))
-}
-
-func nlz64(x uint64) uint {
- // TODO(gri) this can be done more nicely
- if _W == 32 {
- if x>>32 == 0 {
- return 32 + nlz(Word(x))
- }
- return nlz(Word(x >> 32))
- }
- if _W == 64 {
- return nlz(Word(x))
- }
- panic("unreachable")
-}
-
func (z *Float) setBits64(neg bool, x uint64) *Float {
if z.prec == 0 {
z.prec = 64
return z
}
-func high32(x nat) uint32 {
- // TODO(gri) This can be done more efficiently on 32bit platforms.
- return uint32(high64(x) >> 32)
+// msb32 returns the 32 most significant bits of x.
+func msb32(x nat) uint32 {
+ i := len(x) - 1
+ if i < 0 {
+ return 0
+ }
+ if debugFloat && x[i]&(1<<(_W-1)) == 0 {
+ panic("x not normalized")
+ }
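+	// Since x is normalized, the top bit of x[i] is set, and the 32 most
+	// significant bits of x sit in the top (or, with _W == 32, the only)
+	// half of x[i]. For example, for a mantissa representing 1.0 (a single
+	// word with just its top bit set) the result is 0x80000000.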
+ switch _W {
+ case 32:
+ return uint32(x[i])
+ case 64:
+ return uint32(x[i] >> 32)
+ }
+ panic("unreachable")
}
-func high64(x nat) uint64 {
- i := len(x)
- if i == 0 {
+// msb64 returns the 64 most significant bits of x.
+func msb64(x nat) uint64 {
+ i := len(x) - 1
+ if i < 0 {
return 0
}
- // i > 0
- v := uint64(x[i-1])
- if _W == 32 {
- v <<= 32
- if i > 1 {
- v |= uint64(x[i-2])
+ if debugFloat && x[i]&(1<<(_W-1)) == 0 {
+ panic("x not normalized")
+ }
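+	// With _W == 32 the top word supplies the high 32 bits and the next
+	// word, if any, the low 32 bits (a one-word x is padded with zeros);
+	// with _W == 64 the top word is the answer. For a mantissa of 1.0 the
+	// result is 1<<63 in both cases.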
+ switch _W {
+ case 32:
+ v := uint64(x[i]) << 32
+ if i > 0 {
+ v |= uint64(x[i-1])
}
+ return v
+ case 64:
+ return uint64(x[i])
}
- return v
+ panic("unreachable")
}
// Uint64 returns the unsigned integer resulting from truncating x
// 1 <= x < Inf
if x.exp <= 64 {
// u = trunc(x) fits into a uint64
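	// (msb64 yields the top 64 bits of the mantissa; shifting right by
	// 64-exp discards the fraction bits. As a rough example, for x == 10
	// the top bits are 0xa000000000000000 and x.exp is 4, so the shift by
	// 60 yields 0xa.)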
- u := high64(x.mant) >> (64 - uint32(x.exp))
+ u := msb64(x.mant) >> (64 - uint32(x.exp))
if x.MinPrec() <= 64 {
return u, Exact
}
// 1 <= |x| < +Inf
if x.exp <= 63 {
// i = trunc(x) fits into an int64 (excluding math.MinInt64)
- i := int64(high64(x.mant) >> (64 - uint32(x.exp)))
+ i := int64(msb64(x.mant) >> (64 - uint32(x.exp)))
if x.neg {
i = -i
}
return 0.0, Below
}
// bexp = 0
- mant = high32(r.mant) >> (fbits - r.prec)
+ mant = msb32(r.mant) >> (fbits - r.prec)
} else {
// normal number: emin <= e <= emax
bexp = uint32(e+bias) << mbits
- mant = high32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+ mant = msb32(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
}
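	// As a rough sanity check of the packing, assuming the usual IEEE
	// single-precision constants (mbits == 23, ebits == 8, bias == 127):
	// for r == 1.5 the top 32 mantissa bits are 0xc0000000 and e == 0, so
	// bexp == 127<<23 and mant == 0xc0000000>>8 & 0x7fffff == 0x400000,
	// giving bits 0x3fc00000, i.e. 1.5.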
return math.Float32frombits(sign | bexp | mant), r.acc
return 0.0, Below
}
// bexp = 0
- mant = high64(r.mant) >> (fbits - r.prec)
+ mant = msb64(r.mant) >> (fbits - r.prec)
} else {
// normal number: emin <= e <= emax
bexp = uint64(e+bias) << mbits
- mant = high64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
+ mant = msb64(r.mant) >> ebits & (1<<mbits - 1) // cut off msb (implicit 1 bit)
}
return math.Float64frombits(sign | bexp | mant), r.acc
}
}
-func TestLeadingZeros(t *testing.T) {
+func TestNLZ(t *testing.T) {
var x Word = _B >> 1
for i := 0; i <= _W; i++ {
- if int(leadingZeros(x)) != i {
- t.Errorf("failed at %x: got %d want %d", x, leadingZeros(x), i)
+ if int(nlz(x)) != i {
+ t.Errorf("failed at %x: got %d want %d", x, nlz(x), i)
}
x >>= 1
}