From bcdd5d002431a3354e95c8745f4d5771cee22483 Mon Sep 17 00:00:00 2001
From: Josh Bleecher Snyder
Date: Thu, 23 Jan 2020 21:47:42 -0800
Subject: [PATCH] cmd/compile: use shift boundedness when lowering shifts on 386
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Minor improvements to generated code.

file                                           before    after     Δ      %
runtime.s                                      451117    450977    -140   -0.031%
compress/bzip2.s                               10202     10194     -8     -0.078%
compress/lzw.s                                 5924      5904      -20    -0.338%
compress/flate.s                               45053     45032     -21    -0.047%
net.s                                          236980    236970    -10    -0.004%
vendor/golang.org/x/crypto/cryptobyte.s        29450     29439     -11    -0.037%
crypto/x509.s                                  107854    107840    -14    -0.013%
cmd/vendor/golang.org/x/arch/arm64/arm64asm.s  102448    102434    -14    -0.014%
cmd/internal/obj/arm.s                         60536     60528     -8     -0.013%
cmd/vendor/golang.org/x/mod/sumdb/tlog.s       38273     38276     +3     +0.008%
net/http.s                                     462215    462201    -14    -0.003%
cmd/compile/internal/ssa.s                     3951732   3954683   +2951  +0.075%
total                                          16946051  16948745  +2694  +0.016%

Change-Id: I9f6df1a90a295dce6fe86c8eb7576a8c96f8bb0a
Reviewed-on: https://go-review.googlesource.com/c/go/+/217000
Run-TryBot: Josh Bleecher Snyder
Reviewed-by: Cherry Zhang
---
 src/cmd/compile/internal/ssa/gen/386.rules |  30 +-
 src/cmd/compile/internal/ssa/rewrite386.go | 549 +++++++++++++++++++++
 2 files changed, 570 insertions(+), 9 deletions(-)

diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
index e1b15d3ad3..23fbd59b8a 100644
--- a/src/cmd/compile/internal/ssa/gen/386.rules
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -104,20 +104,32 @@
 // Lowering shifts
 // Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
 //   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
-(Lsh32x(32|16|8) x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
-(Lsh16x(32|16|8) x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
-(Lsh8x(32|16|8) x y) -> (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
+(Lsh32x(32|16|8) x y) && !shiftIsBounded(v) -> (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
+(Lsh16x(32|16|8) x y) && !shiftIsBounded(v) -> (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
+(Lsh8x(32|16|8) x y) && !shiftIsBounded(v) -> (ANDL (SHLL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
 
-(Rsh32Ux(32|16|8) x y) -> (ANDL (SHRL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
-(Rsh16Ux(32|16|8) x y) -> (ANDL (SHRW x y) (SBBLcarrymask (CMP(L|W|B)const y [16])))
-(Rsh8Ux(32|16|8) x y) -> (ANDL (SHRB x y) (SBBLcarrymask (CMP(L|W|B)const y [8])))
+(Lsh32x(32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
+(Lsh16x(32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
+(Lsh8x(32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
+
+(Rsh32Ux(32|16|8) x y) && !shiftIsBounded(v) -> (ANDL (SHRL x y) (SBBLcarrymask (CMP(L|W|B)const y [32])))
+(Rsh16Ux(32|16|8) x y) && !shiftIsBounded(v) -> (ANDL (SHRW x y) (SBBLcarrymask (CMP(L|W|B)const y [16])))
+(Rsh8Ux(32|16|8) x y) && !shiftIsBounded(v) -> (ANDL (SHRB x y) (SBBLcarrymask (CMP(L|W|B)const y [8])))
+
+(Rsh32Ux(32|16|8) x y) && shiftIsBounded(v) -> (SHRL x y)
+(Rsh16Ux(32|16|8) x y) && shiftIsBounded(v) -> (SHRW x y)
+(Rsh8Ux(32|16|8) x y) && shiftIsBounded(v) -> (SHRB x y)
 
 // Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
 // We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
-(Rsh32x(32|16|8) x y) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [32])))))
-(Rsh16x(32|16|8) x y) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [16])))))
-(Rsh8x(32|16|8) x y) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [8])))))
+(Rsh32x(32|16|8) x y) && !shiftIsBounded(v) -> (SARL x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [32])))))
+(Rsh16x(32|16|8) x y) && !shiftIsBounded(v) -> (SARW x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [16])))))
+(Rsh8x(32|16|8) x y) && !shiftIsBounded(v) -> (SARB x (ORL y (NOTL (SBBLcarrymask (CMP(L|W|B)const y [8])))))
+
+(Rsh32x(32|16|8) x y) && shiftIsBounded(v) -> (SARL x y)
+(Rsh16x(32|16|8) x y) && shiftIsBounded(v) -> (SARW x y)
+(Rsh8x(32|16|8) x y) && shiftIsBounded(v) -> (SARB x y)
 
 // constant shifts
 // generic opt rewrites all constant shifts to shift by Const64
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 02716a3e30..14a7780f8d 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -14969,11 +14969,15 @@ func rewriteValue386_OpLsh16x16(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh16x16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -14987,17 +14991,38 @@ func rewriteValue386_OpLsh16x16(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh16x16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh16x32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh16x32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15011,6 +15036,23 @@ func rewriteValue386_OpLsh16x32(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh16x32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh16x64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -15054,11 +15096,15 @@ func rewriteValue386_OpLsh16x8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh16x8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15072,17 +15118,38 @@ func rewriteValue386_OpLsh16x8(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh16x8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh32x16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh32x16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15096,17 +15163,38 @@ func rewriteValue386_OpLsh32x16(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh32x16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh32x32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh32x32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15120,6 +15208,23 @@ func rewriteValue386_OpLsh32x32(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh32x32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh32x64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -15163,11 +15268,15 @@ func rewriteValue386_OpLsh32x8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh32x8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15181,17 +15290,38 @@ func rewriteValue386_OpLsh32x8(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh32x8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh8x16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh8x16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPWconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15205,17 +15335,38 @@ func rewriteValue386_OpLsh8x16(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh8x16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh8x32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh8x32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPLconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15229,6 +15380,23 @@ func rewriteValue386_OpLsh8x32(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh8x32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpLsh8x64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -15272,11 +15440,15 @@ func rewriteValue386_OpLsh8x8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Lsh8x8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHLL x y) (SBBLcarrymask (CMPBconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHLL, t)
 		v0.AddArg(x)
@@ -15290,6 +15462,23 @@ func rewriteValue386_OpLsh8x8(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Lsh8x8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHLL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHLL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpMod16(v *Value) bool {
 	v_1 := v.Args[1]
@@ -16279,11 +16468,15 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh16Ux16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRW x y) (SBBLcarrymask (CMPWconst y [16])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRW, t)
 		v0.AddArg(x)
@@ -16297,17 +16490,38 @@ func rewriteValue386_OpRsh16Ux16(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh16Ux16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRW x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRW)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh16Ux32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh16Ux32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRW x y) (SBBLcarrymask (CMPLconst y [16])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRW, t)
 		v0.AddArg(x)
@@ -16321,6 +16535,23 @@ func rewriteValue386_OpRsh16Ux32(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh16Ux32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRW x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRW)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh16Ux64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -16364,11 +16595,15 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh16Ux8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRW x y) (SBBLcarrymask (CMPBconst y [16])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRW, t)
 		v0.AddArg(x)
@@ -16382,17 +16617,38 @@ func rewriteValue386_OpRsh16Ux8(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh16Ux8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRW x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRW)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh16x16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh16x16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [16])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARW)
 		v.Type = t
 		v.AddArg(x)
@@ -16409,17 +16665,36 @@ func rewriteValue386_OpRsh16x16(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh16x16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARW x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh16x32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh16x32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [16])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARW)
 		v.Type = t
 		v.AddArg(x)
@@ -16436,6 +16711,21 @@ func rewriteValue386_OpRsh16x32(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh16x32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARW x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh16x64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -16481,11 +16771,15 @@ func rewriteValue386_OpRsh16x8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh16x8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARW x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [16])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARW)
 		v.Type = t
 		v.AddArg(x)
@@ -16502,17 +16796,36 @@ func rewriteValue386_OpRsh16x8(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh16x8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARW x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARW)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh32Ux16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh32Ux16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRL x y) (SBBLcarrymask (CMPWconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRL, t)
 		v0.AddArg(x)
@@ -16526,17 +16839,38 @@ func rewriteValue386_OpRsh32Ux16(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh32Ux16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh32Ux32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh32Ux32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRL x y) (SBBLcarrymask (CMPLconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRL, t)
 		v0.AddArg(x)
@@ -16550,6 +16884,23 @@ func rewriteValue386_OpRsh32Ux32(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh32Ux32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh32Ux64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -16593,11 +16944,15 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh32Ux8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRL x y) (SBBLcarrymask (CMPBconst y [32])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRL, t)
 		v0.AddArg(x)
@@ -16611,17 +16966,38 @@ func rewriteValue386_OpRsh32Ux8(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh32Ux8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRL x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRL)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh32x16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh32x16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [32])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARL)
 		v.Type = t
 		v.AddArg(x)
@@ -16638,17 +17014,36 @@ func rewriteValue386_OpRsh32x16(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh32x16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARL x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh32x32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh32x32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [32])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARL)
 		v.Type = t
 		v.AddArg(x)
@@ -16665,6 +17060,21 @@ func rewriteValue386_OpRsh32x32(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh32x32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARL x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh32x64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -16710,11 +17120,15 @@ func rewriteValue386_OpRsh32x8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh32x8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARL x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [32])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARL)
 		v.Type = t
 		v.AddArg(x)
@@ -16731,17 +17145,36 @@ func rewriteValue386_OpRsh32x8(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh32x8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARL x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARL)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh8Ux16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh8Ux16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPWconst y [8])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRB, t)
 		v0.AddArg(x)
@@ -16755,17 +17188,38 @@ func rewriteValue386_OpRsh8Ux16(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh8Ux16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRB x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRB)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh8Ux32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh8Ux32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPLconst y [8])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRB, t)
 		v0.AddArg(x)
@@ -16779,6 +17233,23 @@ func rewriteValue386_OpRsh8Ux32(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh8Ux32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRB x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRB)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh8Ux64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -16822,11 +17293,15 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh8Ux8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (ANDL (SHRB x y) (SBBLcarrymask (CMPBconst y [8])))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386ANDL)
 		v0 := b.NewValue0(v.Pos, Op386SHRB, t)
 		v0.AddArg(x)
@@ -16840,17 +17315,38 @@ func rewriteValue386_OpRsh8Ux8(v *Value) bool {
 		v.AddArg(v1)
 		return true
 	}
+	// match: (Rsh8Ux8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SHRB x y)
+	for {
+		t := v.Type
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SHRB)
+		v.Type = t
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh8x16(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh8x16 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPWconst y [8])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARB)
 		v.Type = t
 		v.AddArg(x)
@@ -16867,17 +17363,36 @@ func rewriteValue386_OpRsh8x16(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh8x16 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARB x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh8x32(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh8x32 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPLconst y [8])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARB)
 		v.Type = t
 		v.AddArg(x)
@@ -16894,6 +17409,21 @@ func rewriteValue386_OpRsh8x32(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh8x32 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARB x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpRsh8x64(v *Value) bool {
 	v_1 := v.Args[1]
@@ -16939,11 +17469,15 @@ func rewriteValue386_OpRsh8x8(v *Value) bool {
 	v_0 := v.Args[0]
 	b := v.Block
 	// match: (Rsh8x8 x y)
+	// cond: !shiftIsBounded(v)
 	// result: (SARB x (ORL y (NOTL (SBBLcarrymask (CMPBconst y [8])))))
 	for {
 		t := v.Type
 		x := v_0
 		y := v_1
+		if !(!shiftIsBounded(v)) {
+			break
+		}
 		v.reset(Op386SARB)
 		v.Type = t
 		v.AddArg(x)
@@ -16960,6 +17494,21 @@ func rewriteValue386_OpRsh8x8(v *Value) bool {
 		v.AddArg(v0)
 		return true
 	}
+	// match: (Rsh8x8 x y)
+	// cond: shiftIsBounded(v)
+	// result: (SARB x y)
+	for {
+		x := v_0
+		y := v_1
+		if !(shiftIsBounded(v)) {
+			break
+		}
+		v.reset(Op386SARB)
+		v.AddArg(x)
+		v.AddArg(y)
+		return true
+	}
+	return false
 }
 func rewriteValue386_OpSelect0(v *Value) bool {
 	v_0 := v.Args[0]
-- 
2.50.0
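Illustrative note, not part of the patch: a minimal Go sketch of the kind of source the bounded rules target; the package and function names are hypothetical. When the shift amount is masked so the compiler can prove it is less than the operand width, the shift can be marked bounded and the new rules may lower it to a bare SHLL/SHRL/SARL, while an unproven shift keeps the CMP/SBBLcarrymask/ANDL sequence because Go requires oversized shifts to yield 0 (or 0/-1 for signed right shifts).

// Package shiftdemo is a hypothetical example, not part of this change.
package shiftdemo

// shlBounded masks the shift amount, so y&31 is provably < 32; such a shift
// may be marked bounded and lower to a single SHLL on 386 (exact codegen
// depends on the compiler version and earlier optimization passes).
func shlBounded(x uint32, y uint) uint32 {
	return x << (y & 31)
}

// shlUnbounded has no such proof: y may be >= 32, and the spec requires a
// zero result then, so the masking sequence with SBBLcarrymask remains.
func shlUnbounded(x uint32, y uint) uint32 {
	return x << y
}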