cmd/compile: fold constant shift with extension on riscv64
author    Wayne Zuo <wdvxdr@golangcn.org>
          Tue, 6 Sep 2022 03:43:28 +0000 (11:43 +0800)
committer Joel Sing <joel@sing.id.au>
          Thu, 6 Oct 2022 05:21:04 +0000 (05:21 +0000)
For example:

  movb a0, a0
  srai $1, a0, a0

the assembler will expand this to:

  slli $56, a0, a0
  srai $56, a0, a0
  srai $1, a0, a0

this CL optimizes it to:

  slli $56, a0, a0
  srai $57, a0, a0
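
A Go-level pattern that produces this instruction pair (an illustrative
sketch, not taken from this CL) is a constant arithmetic shift of a
sign-extended narrow integer:

  // Hypothetical example: v is sign-extended to 64 bits (movb) before
  // the constant shift (srai); this CL folds the pair into slli+srai.
  func rsh8(v int8) int8 {
          return v >> 1
  }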

This removes 270+ instructions from the Go binary on linux/riscv64.

Change-Id: I375e19f9d3bd54f2781791d8cbe5970191297dc8
Reviewed-on: https://go-review.googlesource.com/c/go/+/428496
Reviewed-by: Keith Randall <khr@google.com>
Run-TryBot: Wayne Zuo <wdvxdr@golangcn.org>
Reviewed-by: Joel Sing <joel@sing.id.au>
Reviewed-by: Cherry Mui <cherryyz@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>

src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules [new file with mode: 0644]
src/cmd/compile/internal/ssa/config.go
src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go [new file with mode: 0644]
test/codegen/shift.go

diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64latelower.rules
new file mode 100644 (file)
index 0000000..c44a837
--- /dev/null
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fold constant shift with extension.
+(SRAI <t> [c] (MOVBreg  x)) && c <   8 => (SRAI [56+c] (SLLI <t> [56] x))
+(SRAI <t> [c] (MOVHreg  x)) && c <  16 => (SRAI [48+c] (SLLI <t> [48] x))
+(SRAI <t> [c] (MOVWreg  x)) && c <  32 => (SRAI [32+c] (SLLI <t> [32] x))
+(SRLI <t> [c] (MOVBUreg x)) && c <   8 => (SRLI [56+c] (SLLI <t> [56] x))
+(SRLI <t> [c] (MOVHUreg x)) && c <  16 => (SRLI [48+c] (SLLI <t> [48] x))
+(SRLI <t> [c] (MOVWUreg x)) && c <  32 => (SRLI [32+c] (SLLI <t> [32] x))
+(SLLI <t> [c] (MOVBUreg x)) && c <= 56 => (SRLI [56-c] (SLLI <t> [56] x))
+(SLLI <t> [c] (MOVHUreg x)) && c <= 48 => (SRLI [48-c] (SLLI <t> [48] x))
+(SLLI <t> [c] (MOVWUreg x)) && c <= 32 => (SRLI [32-c] (SLLI <t> [32] x))
+
+// Shift by zero.
+(SRAI [0] x) => x
+(SRLI [0] x) => x
+(SLLI [0] x) => x
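
To see why the signed rules are sound, take (SRAI [c] (MOVBreg x)) with c < 8:
sign-extending the low byte and then shifting right arithmetically by c gives
the same result as moving the byte to the top with SLLI [56] and shifting back
down arithmetically by 56+c. A quick self-contained check in Go (an
illustrative sketch, not part of this CL):

  package main

  import "fmt"

  func main() {
          x := int64(0x7edcba9876543281) // arbitrary value; low byte 0x81 is negative as int8
          c := uint(1)                   // any constant c < 8
          before := int64(int8(x)) >> c  // MOVBreg x, then SRAI [c]
          after := (x << 56) >> (56 + c) // SLLI [56], then SRAI [56+c]
          fmt.Println(before == after)   // prints: true
  }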
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
index 0ad2d94dce85b29abc216a63c478461e61d62e3d..5f39a6dfb3fa9e560c4d3d5ce49e946e0094040e 100644 (file)
@@ -309,6 +309,7 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo
                c.RegSize = 8
                c.lowerBlock = rewriteBlockRISCV64
                c.lowerValue = rewriteValueRISCV64
+               c.lateLowerValue = rewriteValueRISCV64latelower
                c.registers = registersRISCV64[:]
                c.gpRegMask = gpRegMaskRISCV64
                c.fpRegMask = fpRegMaskRISCV64
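
The added line registers the riscv64 rewriter with the SSA backend's late
lowering pass, which runs these rules after the main lowering. Conceptually
the pass applies lateLowerValue to every value until no rule fires; a minimal
sketch of that driver shape (hypothetical names; the real loop lives in the
shared SSA rewrite framework):

  // applyLateLower repeatedly rewrites values until a fixed point is
  // reached, mirroring how the other SSA rewrite passes are driven.
  func applyLateLower(f *Func, rewriteValue func(*Value) bool) {
          for changed := true; changed; {
                  changed = false
                  for _, b := range f.Blocks {
                          for _, v := range b.Values {
                                  if rewriteValue(v) {
                                          changed = true
                                  }
                          }
                  }
          }
  }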
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go b/src/cmd/compile/internal/ssa/rewriteRISCV64latelower.go
new file mode 100644 (file)
index 0000000..bde0164
--- /dev/null
@@ -0,0 +1,253 @@
+// Code generated from gen/RISCV64latelower.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+func rewriteValueRISCV64latelower(v *Value) bool {
+       switch v.Op {
+       case OpRISCV64SLLI:
+               return rewriteValueRISCV64latelower_OpRISCV64SLLI(v)
+       case OpRISCV64SRAI:
+               return rewriteValueRISCV64latelower_OpRISCV64SRAI(v)
+       case OpRISCV64SRLI:
+               return rewriteValueRISCV64latelower_OpRISCV64SRLI(v)
+       }
+       return false
+}
+func rewriteValueRISCV64latelower_OpRISCV64SLLI(v *Value) bool {
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (SLLI <t> [c] (MOVBUreg x))
+       // cond: c <= 56
+       // result: (SRLI [56-c] (SLLI <t> [56] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVBUreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c <= 56) {
+                       break
+               }
+               v.reset(OpRISCV64SRLI)
+               v.AuxInt = int64ToAuxInt(56 - c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(56)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SLLI <t> [c] (MOVHUreg x))
+       // cond: c <= 48
+       // result: (SRLI [48-c] (SLLI <t> [48] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVHUreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c <= 48) {
+                       break
+               }
+               v.reset(OpRISCV64SRLI)
+               v.AuxInt = int64ToAuxInt(48 - c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(48)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SLLI <t> [c] (MOVWUreg x))
+       // cond: c <= 32
+       // result: (SRLI [32-c] (SLLI <t> [32] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVWUreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c <= 32) {
+                       break
+               }
+               v.reset(OpRISCV64SRLI)
+               v.AuxInt = int64ToAuxInt(32 - c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(32)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SLLI [0] x)
+       // result: x
+       for {
+               if auxIntToInt64(v.AuxInt) != 0 {
+                       break
+               }
+               x := v_0
+               v.copyOf(x)
+               return true
+       }
+       return false
+}
+func rewriteValueRISCV64latelower_OpRISCV64SRAI(v *Value) bool {
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (SRAI <t> [c] (MOVBreg x))
+       // cond: c < 8
+       // result: (SRAI [56+c] (SLLI <t> [56] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVBreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c < 8) {
+                       break
+               }
+               v.reset(OpRISCV64SRAI)
+               v.AuxInt = int64ToAuxInt(56 + c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(56)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SRAI <t> [c] (MOVHreg x))
+       // cond: c < 16
+       // result: (SRAI [48+c] (SLLI <t> [48] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVHreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c < 16) {
+                       break
+               }
+               v.reset(OpRISCV64SRAI)
+               v.AuxInt = int64ToAuxInt(48 + c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(48)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SRAI <t> [c] (MOVWreg x))
+       // cond: c < 32
+       // result: (SRAI [32+c] (SLLI <t> [32] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVWreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c < 32) {
+                       break
+               }
+               v.reset(OpRISCV64SRAI)
+               v.AuxInt = int64ToAuxInt(32 + c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(32)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SRAI [0] x)
+       // result: x
+       for {
+               if auxIntToInt64(v.AuxInt) != 0 {
+                       break
+               }
+               x := v_0
+               v.copyOf(x)
+               return true
+       }
+       return false
+}
+func rewriteValueRISCV64latelower_OpRISCV64SRLI(v *Value) bool {
+       v_0 := v.Args[0]
+       b := v.Block
+       // match: (SRLI <t> [c] (MOVBUreg x))
+       // cond: c < 8
+       // result: (SRLI [56+c] (SLLI <t> [56] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVBUreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c < 8) {
+                       break
+               }
+               v.reset(OpRISCV64SRLI)
+               v.AuxInt = int64ToAuxInt(56 + c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(56)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SRLI <t> [c] (MOVHUreg x))
+       // cond: c < 16
+       // result: (SRLI [48+c] (SLLI <t> [48] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVHUreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c < 16) {
+                       break
+               }
+               v.reset(OpRISCV64SRLI)
+               v.AuxInt = int64ToAuxInt(48 + c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(48)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SRLI <t> [c] (MOVWUreg x))
+       // cond: c < 32
+       // result: (SRLI [32+c] (SLLI <t> [32] x))
+       for {
+               t := v.Type
+               c := auxIntToInt64(v.AuxInt)
+               if v_0.Op != OpRISCV64MOVWUreg {
+                       break
+               }
+               x := v_0.Args[0]
+               if !(c < 32) {
+                       break
+               }
+               v.reset(OpRISCV64SRLI)
+               v.AuxInt = int64ToAuxInt(32 + c)
+               v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+               v0.AuxInt = int64ToAuxInt(32)
+               v0.AddArg(x)
+               v.AddArg(v0)
+               return true
+       }
+       // match: (SRLI [0] x)
+       // result: x
+       for {
+               if auxIntToInt64(v.AuxInt) != 0 {
+                       break
+               }
+               x := v_0
+               v.copyOf(x)
+               return true
+       }
+       return false
+}
+func rewriteBlockRISCV64latelower(b *Block) bool {
+       return false
+}
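
The unsigned SLLI rules work the same way in the other direction: zero-extending
the low byte and then shifting left by c (with c <= 56) equals moving the byte
to the top with SLLI [56] and logically shifting back down by 56-c. A quick
check in Go (an illustrative sketch, not part of this CL):

  package main

  import "fmt"

  func main() {
          var x uint64 = 0x1234567890abcdef
          c := uint(8)                    // any constant c <= 56
          before := uint64(uint8(x)) << c // MOVBUreg x, then SLLI [c]
          after := (x << 56) >> (56 - c)  // SLLI [56], then SRLI [56-c]
          fmt.Println(before == after)    // prints: true
  }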
diff --git a/test/codegen/shift.go b/test/codegen/shift.go
index c82566bb100d91a2116290869e0232001dbd1f23..4a9f5d4356433930e0cfffd2c786a0e76c5d7237 100644 (file)
@@ -34,21 +34,21 @@ func rshConst64x64(v int64) int64 {
 func lshConst32x64(v int32) int32 {
        // ppc64:"SLW"
        // ppc64le:"SLW"
-       // riscv64:"SLLI",-"AND",-"SLTIU"
+       // riscv64:"SLLI",-"AND",-"SLTIU", -"MOVW"
        return v << uint64(29)
 }
 
 func rshConst32Ux64(v uint32) uint32 {
        // ppc64:"SRW"
        // ppc64le:"SRW"
-       // riscv64:"SRLI",-"AND",-"SLTIU"
+       // riscv64:"SRLI",-"AND",-"SLTIU", -"MOVW"
        return v >> uint64(29)
 }
 
 func rshConst32x64(v int32) int32 {
        // ppc64:"SRAW"
        // ppc64le:"SRAW"
-       // riscv64:"SRAI",-"OR",-"SLTIU"
+       // riscv64:"SRAI",-"OR",-"SLTIU", -"MOVW"
        return v >> uint64(29)
 }