Constant-fold sign-extending loads of data from read-only symbols,
in addition to the unsigned loads which are already handled.

This lets code that switches on strings constant-fold the switch
away when the string being switched on is constant.
Fixes #71699
Change-Id: If3051af0f7255d2a573da6f96b153a987a7f159d
Reviewed-on: https://go-review.googlesource.com/c/go/+/649295
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Cuong Manh Le <cuong.manhle.vn@gmail.com>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Keith Randall <khr@google.com>
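
For context, here is a minimal sketch (ordinary Go, not compiler code) of the arithmetic behind the new rules. The read8 below stands in for the compiler helper of the same name that reads a byte out of a read-only symbol's data (its real signature takes a symbol, not a slice); the conversion through int8 is what supplies the sign extension that distinguishes the new folds from the existing unsigned ones.

package main

import "fmt"

// read8 stands in for the compiler helper that reads one byte from a
// read-only symbol's data (simplified signature for illustration).
func read8(data []byte, off int64) uint8 { return data[off] }

func main() {
	data := []byte{0x80} // contents of a read-only symbol
	fmt.Println(int64(read8(data, 0)))       // existing unsigned fold: 128
	fmt.Println(int64(int8(read8(data, 0)))) // new signed fold: -128
}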
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVBLSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(int8(read8(sym, int64(off))))])
+(MOVWLSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
-(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVBQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int8(read8(sym, int64(off))))])
+(MOVWQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+(MOVLQSXload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+
+
(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
(MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
(MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
{name: "MOVLQSX", argLength: 1, reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64
{name: "MOVLQZX", argLength: 1, reg: gp11, asm: "MOVL"}, // zero extend arg0 from int32 to int64
- {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint (upper 32 are zeroed)
{name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
{name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(int8(read8(sym, int64(off))))])
+(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int8(read8(sym, int64(off))))])
+(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
// Prefetch instructions (aux is option: 0 - PLDL1KEEP; 1 - PLDL1STRM)
(PrefetchCache addr mem) => (PRFM [0] addr mem)
(SGTU x x) => (MOVVconst [0])
// fold readonly sym load
-(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
-(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
-(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int8(read8(sym, int64(off))))])
+(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
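
Note that the MIPS64 rules removed above folded the sign-extending MOVBload/MOVHload/MOVWload as if they were zero-extending, so a negative value in read-only data folded to the wrong constant; the replacements fix that by adding the unsigned forms and converting through the signed type for the signed forms. A minimal sketch of the difference, assuming a 0xFF byte in the symbol's data:

package main

import "fmt"

func main() {
	var b byte = 0xFF // byte stored in a read-only symbol
	fmt.Println(int64(b))       // old MOVBload fold: 255 (incorrect)
	fmt.Println(int64(int8(b))) // new MOVBload fold: -1 (sign-extended)
}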
(I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
(I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
(I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read8(sym, off+int64(off2)))])
+(I64Load32S [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(int32(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))])
+(I64Load16S [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(int16(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))])
+(I64Load8S [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(int8(read8(sym, off+int64(off2))))])
v.AddArg2(base, mem)
return true
}
+ // match: (MOVBLSXload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(int8(read8(sym, int64(off))))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(read8(sym, int64(off)))))
+ return true
+ }
return false
}
func rewriteValue386_Op386MOVBLZX(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWLSXload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValue386_Op386MOVWLZX(v *Value) bool {
v.AddArg2(base, mem)
return true
}
+ // match: (MOVBQSXload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(int8(read8(sym, int64(off))))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVLQSX x)
v.AddArg2(base, mem)
return true
}
+ // match: (MOVLQSXload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
}
// match: (MOVLload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ // result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
if v_0.Op != OpSB || !(symIsRO(sym)) {
break
}
- v.reset(OpAMD64MOVQconst)
- v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
return true
}
return false
func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWQSX x)
v.AddArg2(base, mem)
return true
}
+ // match: (MOVWQSXload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
v.AddArg3(ptr, idx, mem)
return true
}
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(int8(read8(sym, int64(off))))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(read8(sym, int64(off)))))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool {
func rewriteValueARM_OpARMMOVHload(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
// result: (MOVHload [off1+off2] {sym} ptr mem)
for {
v.AddArg3(ptr, idx, mem)
return true
}
+ // match: (MOVHload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(int8(read8(sym, int64(off))))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
+ // match: (MOVHload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool {
v.AuxInt = int64ToAuxInt(0)
return true
}
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool {
v.AddArg2(ptr, mem)
return true
}
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
+ return true
+ }
return false
}
func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool {
}
// match: (MOVBload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVVconst [int64(read8(sym, int64(off)))])
+ // result: (MOVVconst [int64(int8(read8(sym, int64(off))))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
+ v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
return true
}
return false
v.AddArg2(ptr, mem)
return true
}
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
return false
}
func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool {
}
// match: (MOVHload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ // result: (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
return true
}
return false
v.AddArg2(ptr, mem)
return true
}
+ // match: (MOVWUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
return false
}
func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool {
}
// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
- // result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ // result: (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
for {
off := auxIntToInt32(v.AuxInt)
sym := auxToSym(v.Aux)
break
}
v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
return true
}
return false
func rewriteValueWasm_OpWasmI64Load16S(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (I64Load16S [off] (I64AddConst [off2] ptr) mem)
// cond: isU32Bit(off+off2)
// result: (I64Load16S [off+off2] ptr mem)
v.AddArg2(ptr, mem)
return true
}
+ // match: (I64Load16S [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(int16(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValueWasm_OpWasmI64Load16U(v *Value) bool {
func rewriteValueWasm_OpWasmI64Load32S(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
// match: (I64Load32S [off] (I64AddConst [off2] ptr) mem)
// cond: isU32Bit(off+off2)
// result: (I64Load32S [off+off2] ptr mem)
v.AddArg2(ptr, mem)
return true
}
+ // match: (I64Load32S [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(int32(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))))
+ return true
+ }
return false
}
func rewriteValueWasm_OpWasmI64Load32U(v *Value) bool {
v.AddArg2(ptr, mem)
return true
}
+ // match: (I64Load8S [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(int8(read8(sym, off+int64(off2))))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, off+int64(off2)))))
+ return true
+ }
return false
}
func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool {
// arm64:`CALL\truntime.typeAssert`,`LDAR`,`MOVWU\t16\(R0\)`,`MOVD\t\(R.*\)\(R.*\)`
return x
}
+
+// Make sure we can constant fold after inlining. See issue 71699.
+func stringSwitchInlineable(s string) {
+ switch s {
+ case "foo", "bar", "baz", "goo":
+ default:
+ println("no")
+ }
+}
+
+func stringSwitch() {
+ // amd64:-"CMP",-"CALL"
+ // arm64:-"CMP",-"CALL"
+ stringSwitchInlineable("foo")
+}
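
If the folding works end to end, stringSwitch should compile down to an essentially empty function: the negative codegen checks assert that neither the string comparisons (CMP) nor the println call in the default branch (CALL) survive once the inlined switch is constant-folded away.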