From: Jorropo Date: Thu, 4 Dec 2025 05:46:19 +0000 (+0100) Subject: cmd/compile: cleanup isUnsignedPowerOfTwo X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=82ef9f5b2130cb4cc88c52c68b7bd45764ab2200;p=gostls13.git cmd/compile: cleanup isUnsignedPowerOfTwo Merge the signed and unsigned generic functions. The only implementation difference between the two is: n > 0 vs n != 0 check. For unsigned numbers n > 0 == n != 0 and we in fact optimize the first to the second. Change-Id: Ia2f6c3e3d4eb098d98f85e06dc2e81baa60bad4e Reviewed-on: https://go-review.googlesource.com/c/go/+/726720 Reviewed-by: Keith Randall Reviewed-by: Keith Randall Reviewed-by: Carlos Amedee LUCI-TryBot-Result: Go LUCI --- diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules index b49e85b53c..ac3a4b0a07 100644 --- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules @@ -598,30 +598,30 @@ // mutandis, for UGE and SETAE, and CC and SETCC. 
((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y)) ((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y)) -((NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c)) +((NE|EQ) (TESTLconst [c] x)) && isPowerOfTwo(uint32(c)) => ((ULT|UGE) (BTLconst [int8(log32u(uint32(c)))] x)) -((NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c)) +((NE|EQ) (TESTQconst [c] x)) && isPowerOfTwo(uint64(c)) => ((ULT|UGE) (BTQconst [int8(log32u(uint32(c)))] x)) -((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c)) +((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(uint64(c)) => ((ULT|UGE) (BTQconst [int8(log64u(uint64(c)))] x)) (SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y)) (SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y)) -(SET(NE|EQ) (TESTLconst [c] x)) && isUnsignedPowerOfTwo(uint32(c)) +(SET(NE|EQ) (TESTLconst [c] x)) && isPowerOfTwo(uint32(c)) => (SET(B|AE) (BTLconst [int8(log32u(uint32(c)))] x)) -(SET(NE|EQ) (TESTQconst [c] x)) && isUnsignedPowerOfTwo(uint64(c)) +(SET(NE|EQ) (TESTQconst [c] x)) && isPowerOfTwo(uint64(c)) => (SET(B|AE) (BTQconst [int8(log32u(uint32(c)))] x)) -(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUnsignedPowerOfTwo(uint64(c)) +(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isPowerOfTwo(uint64(c)) => (SET(B|AE) (BTQconst [int8(log64u(uint64(c)))] x)) // SET..store variant (SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem) (SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem) -(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUnsignedPowerOfTwo(uint32(c)) +(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isPowerOfTwo(uint32(c)) => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem) -(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && 
isUnsignedPowerOfTwo(uint64(c)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isPowerOfTwo(uint64(c)) => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem) -(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUnsignedPowerOfTwo(uint64(c)) +(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isPowerOfTwo(uint64(c)) => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem) // Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules @@ -647,14 +647,14 @@ (XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y) // Note: only convert OR/XOR to BTS/BTC if the constant wouldn't fit in // the constant field of the OR/XOR instruction. See issue 61694. -((OR|XOR)Q (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64u(uint64(c)))] x) +((OR|XOR)Q (MOVQconst [c]) x) && isPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 => (BT(S|C)Qconst [int8(log64u(uint64(c)))] x) // Recognize bit clearing: a &^= 1< (BTR(Q|L) x y) (ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y) // Note: only convert AND to BTR if the constant wouldn't fit in // the constant field of the AND instruction. See issue 61694. -(ANDQ (MOVQconst [c]) x) && isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64u(uint64(^c)))] x) +(ANDQ (MOVQconst [c]) x) && isPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 => (BTRQconst [int8(log64u(uint64(^c)))] x) // Special-case bit patterns on first/last bit. 
// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts, diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS.rules b/src/cmd/compile/internal/ssa/_gen/MIPS.rules index fe1e00a4e4..b707a3ad16 100644 --- a/src/cmd/compile/internal/ssa/_gen/MIPS.rules +++ b/src/cmd/compile/internal/ssa/_gen/MIPS.rules @@ -617,13 +617,13 @@ (Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0]) (Select1 (MULTU (MOVWconst [-1]) x )) => (NEG x) (Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst [-1] x) (MOVWconst [0]) x) -(Select1 (MULTU (MOVWconst [c]) x )) && isUnsignedPowerOfTwo(uint32(c)) => (SLLconst [int32(log32u(uint32(c)))] x) -(Select0 (MULTU (MOVWconst [c]) x )) && isUnsignedPowerOfTwo(uint32(c)) => (SRLconst [int32(32-log32u(uint32(c)))] x) +(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(uint32(c)) => (SLLconst [int32(log32u(uint32(c)))] x) +(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo(uint32(c)) => (SRLconst [int32(32-log32u(uint32(c)))] x) (MUL (MOVWconst [0]) _ ) => (MOVWconst [0]) (MUL (MOVWconst [1]) x ) => x (MUL (MOVWconst [-1]) x ) => (NEG x) -(MUL (MOVWconst [c]) x ) && isUnsignedPowerOfTwo(uint32(c)) => (SLLconst [int32(log32u(uint32(c)))] x) +(MUL (MOVWconst [c]) x ) && isPowerOfTwo(uint32(c)) => (SLLconst [int32(log32u(uint32(c)))] x) // generic simplifications (ADD x (NEG y)) => (SUB x y) diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index b62a1ee219..b34fee095e 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -1063,10 +1063,10 @@ (Div64 x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 x (Neg64 x)) (Const64 [63])) // Unsigned divide by power of 2. Strength reduce to a shift. 
-(Div8u n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (Rsh8Ux64 n (Const64 [log8u(uint8(c))])) -(Div16u n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (Rsh16Ux64 n (Const64 [log16u(uint16(c))])) -(Div32u n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (Rsh32Ux64 n (Const64 [log32u(uint32(c))])) -(Div64u n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (Rsh64Ux64 n (Const64 [log64u(uint64(c))])) +(Div8u n (Const8 [c])) && isPowerOfTwo(uint8(c)) => (Rsh8Ux64 n (Const64 [log8u(uint8(c))])) +(Div16u n (Const16 [c])) && isPowerOfTwo(uint16(c)) => (Rsh16Ux64 n (Const64 [log16u(uint16(c))])) +(Div32u n (Const32 [c])) && isPowerOfTwo(uint32(c)) => (Rsh32Ux64 n (Const64 [log32u(uint32(c))])) +(Div64u n (Const64 [c])) && isPowerOfTwo(uint64(c)) => (Rsh64Ux64 n (Const64 [log64u(uint64(c))])) // Strength reduce multiplication by a power of two to a shift. // Excluded from early opt so that prove can recognize mod @@ -1093,10 +1093,10 @@ // Strength reduction of div to mul is delayed to divmod.rules. // Unsigned mod by power of 2 constant. -(Mod8u n (Const8 [c])) && isUnsignedPowerOfTwo(uint8(c)) => (And8 n (Const8 [c-1])) -(Mod16u n (Const16 [c])) && isUnsignedPowerOfTwo(uint16(c)) => (And16 n (Const16 [c-1])) -(Mod32u n (Const32 [c])) && isUnsignedPowerOfTwo(uint32(c)) => (And32 n (Const32 [c-1])) -(Mod64u n (Const64 [c])) && isUnsignedPowerOfTwo(uint64(c)) => (And64 n (Const64 [c-1])) +(Mod8u n (Const8 [c])) && isPowerOfTwo(uint8(c)) => (And8 n (Const8 [c-1])) +(Mod16u n (Const16 [c])) && isPowerOfTwo(uint16(c)) => (And16 n (Const16 [c-1])) +(Mod32u n (Const32 [c])) && isPowerOfTwo(uint32(c)) => (And32 n (Const32 [c-1])) +(Mod64u n (Const64 [c])) && isPowerOfTwo(uint64(c)) => (And64 n (Const64 [c-1])) // Signed non-negative mod by power of 2 constant. // TODO: Replace ModN with ModNu in prove. 
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index de16dfb340..93443a3d3c 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -2888,9 +2888,9 @@ func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) { xl := ft.limits[x.ID] y := v.Args[1] yl := ft.limits[y.ID] - if xl.umin == xl.umax && isUnsignedPowerOfTwo(xl.umin) || + if xl.umin == xl.umax && isPowerOfTwo(xl.umin) || xl.min == xl.max && isPowerOfTwo(xl.min) || - yl.umin == yl.umax && isUnsignedPowerOfTwo(yl.umin) || + yl.umin == yl.umax && isPowerOfTwo(yl.umin) || yl.min == yl.max && isPowerOfTwo(yl.min) { // 0,1 * a power of two is better done as a shift break diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index 6f415e9760..b4e1a7fd33 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -518,22 +518,17 @@ func log32(n int32) int64 { return log32u(uint32(n)) } func log64(n int64) int64 { return log64u(uint64(n)) } // logXu returns the logarithm of n base 2. -// n must be a power of 2 (isUnsignedPowerOfTwo returns true) +// n must be a power of 2 (isPowerOfTwo returns true) func log8u(n uint8) int64 { return int64(bits.Len8(n)) - 1 } func log16u(n uint16) int64 { return int64(bits.Len16(n)) - 1 } func log32u(n uint32) int64 { return int64(bits.Len32(n)) - 1 } func log64u(n uint64) int64 { return int64(bits.Len64(n)) - 1 } // isPowerOfTwoX functions report whether n is a power of 2. -func isPowerOfTwo[T int8 | int16 | int32 | int64](n T) bool { +func isPowerOfTwo[T int8 | int16 | int32 | int64 | uint8 | uint16 | uint32 | uint64](n T) bool { return n > 0 && n&(n-1) == 0 } -// isUnsignedPowerOfTwo reports whether n is an unsigned power of 2. 
-func isUnsignedPowerOfTwo[T uint8 | uint16 | uint32 | uint64](n T) bool { - return n != 0 && n&(n-1) == 0 -} - // is32Bit reports whether n can be represented as a signed 32 bit integer. func is32Bit(n int64) bool { return n == int64(int32(n)) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index b1b1c84046..459c33017a 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -8311,7 +8311,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { break } // match: (ANDQ (MOVQconst [c]) x) - // cond: isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 + // cond: isPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31 // result: (BTRQconst [int8(log64u(uint64(^c)))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -8320,7 +8320,7 @@ func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool { } c := auxIntToInt64(v_0.AuxInt) x := v_1 - if !(isUnsignedPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31) { + if !(isPowerOfTwo(uint64(^c)) && uint64(^c) >= 1<<31) { continue } v.reset(OpAMD64BTRQconst) @@ -19802,7 +19802,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { break } // match: (ORQ (MOVQconst [c]) x) - // cond: isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 + // cond: isPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 // result: (BTSQconst [int8(log64u(uint64(c)))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -19811,7 +19811,7 @@ func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool { } c := auxIntToInt64(v_0.AuxInt) x := v_1 - if !(isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) { + if !(isPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) { continue } v.reset(OpAMD64BTSQconst) @@ -22769,7 +22769,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { break } // match: (SETEQ (TESTLconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (SETAE (BTLconst 
[int8(log32u(uint32(c)))] x)) for { if v_0.Op != OpAMD64TESTLconst { @@ -22777,7 +22777,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v.reset(OpAMD64SETAE) @@ -22788,7 +22788,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { return true } // match: (SETEQ (TESTQconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETAE (BTQconst [int8(log32u(uint32(c)))] x)) for { if v_0.Op != OpAMD64TESTQconst { @@ -22796,7 +22796,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } v.reset(OpAMD64SETAE) @@ -22807,7 +22807,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { return true } // match: (SETEQ (TESTQ (MOVQconst [c]) x)) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETAE (BTQconst [int8(log64u(uint64(c)))] x)) for { if v_0.Op != OpAMD64TESTQ { @@ -22822,7 +22822,7 @@ func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool { } c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { continue } v.reset(OpAMD64SETAE) @@ -23430,7 +23430,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { break } // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem) for { off := auxIntToInt32(v.AuxInt) @@ -23442,7 +23442,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v.reset(OpAMD64SETAEstore) @@ -23455,7 +23455,7 
@@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { return true } // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem) for { off := auxIntToInt32(v.AuxInt) @@ -23467,7 +23467,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } v.reset(OpAMD64SETAEstore) @@ -23480,7 +23480,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { return true } // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem) for { off := auxIntToInt32(v.AuxInt) @@ -23499,7 +23499,7 @@ func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool { c := auxIntToInt64(v_1_0.AuxInt) x := v_1_1 mem := v_2 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { continue } v.reset(OpAMD64SETAEstore) @@ -24999,7 +24999,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { break } // match: (SETNE (TESTLconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (SETB (BTLconst [int8(log32u(uint32(c)))] x)) for { if v_0.Op != OpAMD64TESTLconst { @@ -25007,7 +25007,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v.reset(OpAMD64SETB) @@ -25018,7 +25018,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { return true } // match: (SETNE (TESTQconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETB (BTQconst [int8(log32u(uint32(c)))] x)) for { 
if v_0.Op != OpAMD64TESTQconst { @@ -25026,7 +25026,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } v.reset(OpAMD64SETB) @@ -25037,7 +25037,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { return true } // match: (SETNE (TESTQ (MOVQconst [c]) x)) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETB (BTQconst [int8(log64u(uint64(c)))] x)) for { if v_0.Op != OpAMD64TESTQ { @@ -25052,7 +25052,7 @@ func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool { } c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { continue } v.reset(OpAMD64SETB) @@ -25476,7 +25476,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { break } // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32u(uint32(c)))] x) mem) for { off := auxIntToInt32(v.AuxInt) @@ -25488,7 +25488,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v.reset(OpAMD64SETBstore) @@ -25501,7 +25501,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { return true } // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32u(uint32(c)))] x) mem) for { off := auxIntToInt32(v.AuxInt) @@ -25513,7 +25513,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { c := auxIntToInt32(v_1.AuxInt) x := v_1.Args[0] mem := v_2 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } 
v.reset(OpAMD64SETBstore) @@ -25526,7 +25526,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { return true } // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64u(uint64(c)))] x) mem) for { off := auxIntToInt32(v.AuxInt) @@ -25545,7 +25545,7 @@ func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool { c := auxIntToInt64(v_1_0.AuxInt) x := v_1_1 mem := v_2 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { continue } v.reset(OpAMD64SETBstore) @@ -65170,7 +65170,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { break } // match: (XORQ (MOVQconst [c]) x) - // cond: isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 + // cond: isPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31 // result: (BTCQconst [int8(log64u(uint64(c)))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -65179,7 +65179,7 @@ func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool { } c := auxIntToInt64(v_0.AuxInt) x := v_1 - if !(isUnsignedPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) { + if !(isPowerOfTwo(uint64(c)) && uint64(c) >= 1<<31) { continue } v.reset(OpAMD64BTCQconst) @@ -77147,13 +77147,13 @@ func rewriteBlockAMD64(b *Block) bool { break } // match: (EQ (TESTLconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (UGE (BTLconst [int8(log32u(uint32(c)))] x)) for b.Controls[0].Op == OpAMD64TESTLconst { v_0 := b.Controls[0] c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) @@ -77163,13 +77163,13 @@ func rewriteBlockAMD64(b *Block) bool { return true } // match: (EQ (TESTQconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (UGE (BTQconst 
[int8(log32u(uint32(c)))] x)) for b.Controls[0].Op == OpAMD64TESTQconst { v_0 := b.Controls[0] c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) @@ -77179,7 +77179,7 @@ func rewriteBlockAMD64(b *Block) bool { return true } // match: (EQ (TESTQ (MOVQconst [c]) x)) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (UGE (BTQconst [int8(log64u(uint64(c)))] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] @@ -77192,7 +77192,7 @@ func rewriteBlockAMD64(b *Block) bool { } c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) @@ -78311,13 +78311,13 @@ func rewriteBlockAMD64(b *Block) bool { break } // match: (NE (TESTLconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (ULT (BTLconst [int8(log32u(uint32(c)))] x)) for b.Controls[0].Op == OpAMD64TESTLconst { v_0 := b.Controls[0] c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags) @@ -78327,13 +78327,13 @@ func rewriteBlockAMD64(b *Block) bool { return true } // match: (NE (TESTQconst [c] x)) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (ULT (BTQconst [int8(log32u(uint32(c)))] x)) for b.Controls[0].Op == OpAMD64TESTQconst { v_0 := b.Controls[0] c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) @@ -78343,7 +78343,7 @@ func rewriteBlockAMD64(b *Block) bool { return true } // match: (NE (TESTQ (MOVQconst [c]) x)) - // cond: 
isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (ULT (BTQconst [int8(log64u(uint64(c)))] x)) for b.Controls[0].Op == OpAMD64TESTQ { v_0 := b.Controls[0] @@ -78356,7 +78356,7 @@ func rewriteBlockAMD64(b *Block) bool { } c := auxIntToInt64(v_0_0.AuxInt) x := v_0_1 - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { continue } v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags) diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go index ff696337ef..ffb7d93586 100644 --- a/src/cmd/compile/internal/ssa/rewriteMIPS.go +++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go @@ -4197,7 +4197,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { break } // match: (MUL (MOVWconst [c]) x ) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (SLLconst [int32(log32u(uint32(c)))] x) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -4206,7 +4206,7 @@ func rewriteValueMIPS_OpMIPSMUL(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_1 - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { continue } v.reset(OpMIPSSLLconst) @@ -6655,7 +6655,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { break } // match: (Select0 (MULTU (MOVWconst [c]) x )) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (SRLconst [int32(32-log32u(uint32(c)))] x) for { if v_0.Op != OpMIPSMULTU { @@ -6670,7 +6670,7 @@ func rewriteValueMIPS_OpSelect0(v *Value) bool { } c := auxIntToInt32(v_0_0.AuxInt) x := v_0_1 - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { continue } v.reset(OpMIPSSRLconst) @@ -6874,7 +6874,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { break } // match: (Select1 (MULTU (MOVWconst [c]) x )) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (SLLconst [int32(log32u(uint32(c)))] x) for { if 
v_0.Op != OpMIPSMULTU { @@ -6889,7 +6889,7 @@ func rewriteValueMIPS_OpSelect1(v *Value) bool { } c := auxIntToInt32(v_0_0.AuxInt) x := v_0_1 - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { continue } v.reset(OpMIPSSLLconst) diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index 49c5facc32..ee74d7c971 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -7106,7 +7106,7 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { return true } // match: (Div16u n (Const16 [c])) - // cond: isUnsignedPowerOfTwo(uint16(c)) + // cond: isPowerOfTwo(uint16(c)) // result: (Rsh16Ux64 n (Const64 [log16u(uint16(c))])) for { n := v_0 @@ -7114,7 +7114,7 @@ func rewriteValuegeneric_OpDiv16u(v *Value) bool { break } c := auxIntToInt16(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint16(c))) { + if !(isPowerOfTwo(uint16(c))) { break } v.reset(OpRsh16Ux64) @@ -7259,7 +7259,7 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { return true } // match: (Div32u n (Const32 [c])) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (Rsh32Ux64 n (Const64 [log32u(uint32(c))])) for { n := v_0 @@ -7267,7 +7267,7 @@ func rewriteValuegeneric_OpDiv32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v.reset(OpRsh32Ux64) @@ -7424,7 +7424,7 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { return true } // match: (Div64u n (Const64 [c])) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (Rsh64Ux64 n (Const64 [log64u(uint64(c))])) for { n := v_0 @@ -7432,7 +7432,7 @@ func rewriteValuegeneric_OpDiv64u(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } v.reset(OpRsh64Ux64) @@ -7533,7 +7533,7 @@ func 
rewriteValuegeneric_OpDiv8u(v *Value) bool { return true } // match: (Div8u n (Const8 [c])) - // cond: isUnsignedPowerOfTwo(uint8(c)) + // cond: isPowerOfTwo(uint8(c)) // result: (Rsh8Ux64 n (Const64 [log8u(uint8(c))])) for { n := v_0 @@ -7541,7 +7541,7 @@ func rewriteValuegeneric_OpDiv8u(v *Value) bool { break } c := auxIntToInt8(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint8(c))) { + if !(isPowerOfTwo(uint8(c))) { break } v.reset(OpRsh8Ux64) @@ -15264,7 +15264,7 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool { return true } // match: (Mod16u n (Const16 [c])) - // cond: isUnsignedPowerOfTwo(uint16(c)) + // cond: isPowerOfTwo(uint16(c)) // result: (And16 n (Const16 [c-1])) for { t := v.Type @@ -15273,7 +15273,7 @@ func rewriteValuegeneric_OpMod16u(v *Value) bool { break } c := auxIntToInt16(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint16(c))) { + if !(isPowerOfTwo(uint16(c))) { break } v.reset(OpAnd16) @@ -15418,7 +15418,7 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool { return true } // match: (Mod32u n (Const32 [c])) - // cond: isUnsignedPowerOfTwo(uint32(c)) + // cond: isPowerOfTwo(uint32(c)) // result: (And32 n (Const32 [c-1])) for { t := v.Type @@ -15427,7 +15427,7 @@ func rewriteValuegeneric_OpMod32u(v *Value) bool { break } c := auxIntToInt32(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint32(c))) { + if !(isPowerOfTwo(uint32(c))) { break } v.reset(OpAnd32) @@ -15583,7 +15583,7 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { return true } // match: (Mod64u n (Const64 [c])) - // cond: isUnsignedPowerOfTwo(uint64(c)) + // cond: isPowerOfTwo(uint64(c)) // result: (And64 n (Const64 [c-1])) for { t := v.Type @@ -15592,7 +15592,7 @@ func rewriteValuegeneric_OpMod64u(v *Value) bool { break } c := auxIntToInt64(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint64(c))) { + if !(isPowerOfTwo(uint64(c))) { break } v.reset(OpAnd64) @@ -15737,7 +15737,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { return true } // match: (Mod8u n (Const8 [c])) - // cond: 
isUnsignedPowerOfTwo(uint8(c)) + // cond: isPowerOfTwo(uint8(c)) // result: (And8 n (Const8 [c-1])) for { t := v.Type @@ -15746,7 +15746,7 @@ func rewriteValuegeneric_OpMod8u(v *Value) bool { break } c := auxIntToInt8(v_1.AuxInt) - if !(isUnsignedPowerOfTwo(uint8(c))) { + if !(isPowerOfTwo(uint8(c))) { break } v.reset(OpAnd8)