cmd/compile: fold constant shifts into (SHL|SHR|SAR)Xload ops
author    Keith Randall <khr@golang.org>
Thu, 14 Apr 2022 00:33:24 +0000 (17:33 -0700)
committer Keith Randall <khr@google.com>
Thu, 14 Apr 2022 15:54:48 +0000 (15:54 +0000)
We should prefer a constant shift op to an X shift op.
That way we don't have to materialize the shift count in a register.

Should fix the GOAMD64=v3 builder.
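
As an illustration (not taken from the CL itself), the shape of code these rules target is a shift whose operand has been merged from a load while the shift count is, or later turns out to be, a compile-time constant. A minimal, hypothetical Go sketch of that shape:

	// shiftdemo.go - illustrative only; package and function names are made
	// up for this note. With GOAMD64=v3 a variable-count shift of a loaded
	// value can lower to a SHLXQload op; if the count then turns out to be
	// constant (for example after inlining), the new rules rewrite it to
	// SHLQconst(MOVQload ...) so no register is needed for the count.
	package shiftdemo

	func ShiftLoaded(p *uint64, n uint) uint64 {
		return *p << (n & 63)
	}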

Change-Id: I56b45d2940c959382b970e3f962ed4a09cc2a239
Reviewed-on: https://go-review.googlesource.com/c/go/+/400254
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Wayne Zuo <wdvxdr@golangcn.org>
Run-TryBot: Dmitri Shuralyov <dmitshur@golang.org>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
src/cmd/compile/internal/ssa/gen/AMD64.rules
src/cmd/compile/internal/ssa/rewriteAMD64.go

index 1bee810fbf30ed0635a4c7f80969765f3558824c..1fd36bfc887741254bcbdadeca81c70a08b23ce2 100644 (file)
 (SARX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SARX(Q|L)load [off] {sym} ptr x mem)
 (SHLX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SHLX(Q|L)load [off] {sym} ptr x mem)
 (SHRX(Q|L) l:(MOV(Q|L)load [off] {sym} ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (SHRX(Q|L)load [off] {sym} ptr x mem)
+
+((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVQconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+((SHL|SHR|SAR)XQload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Qconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+((SHL|SHR|SAR)XLload [off] {sym} ptr (MOVLconst [c]) mem) => ((SHL|SHR|SAR)Lconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
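
A note on the int8(c&63) / int8(c&31) conversions in the results: the BMI2 SHLX/SHRX/SARX instructions only consume the low 6 bits (64-bit operands) or 5 bits (32-bit operands) of the count, so masking first makes the truncation to the int8 auxint of the *const ops lossless. A small, self-contained check of that arithmetic (illustrative only):

	// maskdemo.go - illustrative only, not part of the CL.
	package main

	import "fmt"

	func main() {
		// Every int64 count c maps to an effective count in [0, 63], which
		// fits the int8 auxint used by SHLQconst/SHRQconst/SARQconst.
		for _, c := range []int64{3, 63, 64, 67, -1} {
			fmt.Printf("c=%3d -> c&63 = %2d\n", c, c&63)
		}
	}
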
index f5ec7dc00375305c47abaac139d332a649dcbc4e..67ccc99679a1b3e6172324ee02bf0e253c919eac 100644 (file)
@@ -384,8 +384,12 @@ func rewriteValueAMD64(v *Value) bool {
                return rewriteValueAMD64_OpAMD64SARWconst(v)
        case OpAMD64SARXL:
                return rewriteValueAMD64_OpAMD64SARXL(v)
+       case OpAMD64SARXLload:
+               return rewriteValueAMD64_OpAMD64SARXLload(v)
        case OpAMD64SARXQ:
                return rewriteValueAMD64_OpAMD64SARXQ(v)
+       case OpAMD64SARXQload:
+               return rewriteValueAMD64_OpAMD64SARXQload(v)
        case OpAMD64SBBLcarrymask:
                return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
        case OpAMD64SBBQ:
@@ -444,8 +448,12 @@ func rewriteValueAMD64(v *Value) bool {
                return rewriteValueAMD64_OpAMD64SHLQconst(v)
        case OpAMD64SHLXL:
                return rewriteValueAMD64_OpAMD64SHLXL(v)
+       case OpAMD64SHLXLload:
+               return rewriteValueAMD64_OpAMD64SHLXLload(v)
        case OpAMD64SHLXQ:
                return rewriteValueAMD64_OpAMD64SHLXQ(v)
+       case OpAMD64SHLXQload:
+               return rewriteValueAMD64_OpAMD64SHLXQload(v)
        case OpAMD64SHRB:
                return rewriteValueAMD64_OpAMD64SHRB(v)
        case OpAMD64SHRBconst:
@@ -464,8 +472,12 @@ func rewriteValueAMD64(v *Value) bool {
                return rewriteValueAMD64_OpAMD64SHRWconst(v)
        case OpAMD64SHRXL:
                return rewriteValueAMD64_OpAMD64SHRXL(v)
+       case OpAMD64SHRXLload:
+               return rewriteValueAMD64_OpAMD64SHRXLload(v)
        case OpAMD64SHRXQ:
                return rewriteValueAMD64_OpAMD64SHRXQ(v)
+       case OpAMD64SHRXQload:
+               return rewriteValueAMD64_OpAMD64SHRXQload(v)
        case OpAMD64SUBL:
                return rewriteValueAMD64_OpAMD64SUBL(v)
        case OpAMD64SUBLconst:
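
For orientation, and as an assumption about the surrounding machinery rather than code from this change: the per-op cases added to this switch are reached from the generic rewrite pass, which keeps applying rewriteValueAMD64 to every value until it reports no further change. A rough sketch of such a driver (the real loop in rewrite.go also prunes dead values and rewrites blocks):

	// Hedged sketch, assuming the cmd/compile/internal/ssa package context
	// (the Func, Block, and Value types used elsewhere in this file).
	func applyValueRewrites(f *Func) {
		for changed := true; changed; {
			changed = false
			for _, b := range f.Blocks {
				for _, v := range b.Values {
					if rewriteValueAMD64(v) {
						changed = true
					}
				}
			}
		}
	}
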
@@ -21625,6 +21637,34 @@ func rewriteValueAMD64_OpAMD64SARXL(v *Value) bool {
        }
        return false
 }
+func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
+       v_2 := v.Args[2]
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
+       // result: (SARLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := auxIntToInt32(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SARLconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 31))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       return false
+}
 func rewriteValueAMD64_OpAMD64SARXQ(v *Value) bool {
        v_1 := v.Args[1]
        v_0 := v.Args[0]
@@ -21843,6 +21883,54 @@ func rewriteValueAMD64_OpAMD64SARXQ(v *Value) bool {
        }
        return false
 }
+func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
+       v_2 := v.Args[2]
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
+       // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVQconst {
+                       break
+               }
+               c := auxIntToInt64(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SARQconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 63))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
+       // result: (SARQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := auxIntToInt32(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SARQconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 63))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       return false
+}
 func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
        v_0 := v.Args[0]
        // match: (SBBLcarrymask (FlagEQ))
@@ -26913,6 +27001,34 @@ func rewriteValueAMD64_OpAMD64SHLXL(v *Value) bool {
        }
        return false
 }
+func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
+       v_2 := v.Args[2]
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SHLXLload [off] {sym} ptr (MOVLconst [c]) mem)
+       // result: (SHLLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := auxIntToInt32(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SHLLconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 31))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       return false
+}
 func rewriteValueAMD64_OpAMD64SHLXQ(v *Value) bool {
        v_1 := v.Args[1]
        v_0 := v.Args[0]
@@ -27131,6 +27247,54 @@ func rewriteValueAMD64_OpAMD64SHLXQ(v *Value) bool {
        }
        return false
 }
+func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
+       v_2 := v.Args[2]
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SHLXQload [off] {sym} ptr (MOVQconst [c]) mem)
+       // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVQconst {
+                       break
+               }
+               c := auxIntToInt64(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 63))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SHLXQload [off] {sym} ptr (MOVLconst [c]) mem)
+       // result: (SHLQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := auxIntToInt32(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SHLQconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 63))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       return false
+}
 func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
        v_1 := v.Args[1]
        v_0 := v.Args[0]
@@ -27985,6 +28149,34 @@ func rewriteValueAMD64_OpAMD64SHRXL(v *Value) bool {
        }
        return false
 }
+func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
+       v_2 := v.Args[2]
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
+       // result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := auxIntToInt32(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SHRLconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 31))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       return false
+}
 func rewriteValueAMD64_OpAMD64SHRXQ(v *Value) bool {
        v_1 := v.Args[1]
        v_0 := v.Args[0]
@@ -28203,6 +28395,54 @@ func rewriteValueAMD64_OpAMD64SHRXQ(v *Value) bool {
        }
        return false
 }
+func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
+       v_2 := v.Args[2]
+       v_1 := v.Args[1]
+       v_0 := v.Args[0]
+       b := v.Block
+       typ := &b.Func.Config.Types
+       // match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
+       // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVQconst {
+                       break
+               }
+               c := auxIntToInt64(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SHRQconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 63))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
+       // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
+       for {
+               off := auxIntToInt32(v.AuxInt)
+               sym := auxToSym(v.Aux)
+               ptr := v_0
+               if v_1.Op != OpAMD64MOVLconst {
+                       break
+               }
+               c := auxIntToInt32(v_1.AuxInt)
+               mem := v_2
+               v.reset(OpAMD64SHRQconst)
+               v.AuxInt = int8ToAuxInt(int8(c & 63))
+               v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+               v0.AuxInt = int32ToAuxInt(off)
+               v0.Aux = symToAux(sym)
+               v0.AddArg2(ptr, mem)
+               v.AddArg(v0)
+               return true
+       }
+       return false
+}
 func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
        v_1 := v.Args[1]
        v_0 := v.Args[0]
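
One detail worth noting across the Q-sized load variants above (SARXQload, SHLXQload, SHRXQload): each has two match arms because the shift count may reach the op either as a MOVQconst or, typically when it was computed in a 32-bit type, as a MOVLconst; after masking with 63 both arms yield the same constant shift. A tiny, illustrative check of that equivalence:

	// equivdemo.go - illustrative only, not part of the CL.
	package main

	import "fmt"

	func main() {
		c32 := int32(67) // a count the compiler could hold as a MOVLconst
		c64 := int64(67) // the same count held as a MOVQconst
		fmt.Println(int8(c32&63) == int8(c64&63)) // true: both mask to 3
	}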