p.To.Type = obj.TYPE_MEM
p.To.Reg = v.Args[0].Reg()
gc.AddAux(&p.To, v)
+ case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
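+ // The stored value comes from REG_ZERO, RISC-V's hardwired zero
+ // register, so no constant needs to be materialized first.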
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ gc.AddAux(&p.To, v)
case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_REG
(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
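+// Fold MOVaddr offsets into the zeroing stores as well.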
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(off1+off2) ->
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) ->
(MOVBUload [off1+off2] {sym} base mem)
(MOVWstore [off1+off2] {sym} base val mem)
(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) ->
(MOVDstore [off1+off2] {sym} base val mem)
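+// Likewise fold ADDI offsets into the zeroing stores.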
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVWstorezero [off1+off2] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(off1+off2) -> (MOVDstorezero [off1+off2] {sym} ptr mem)
// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
// Absorb SNEZ into branch.
(BNE (SNEZ x) yes no) -> (BNE x yes no)
+// Store zero
+(MOVBstore [off] {sym} ptr (MOVBconst [0]) mem) -> (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVHconst [0]) mem) -> (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) -> (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) -> (MOVDstorezero [off] {sym} ptr mem)
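+// For example (a sketch; the actual registers depend on allocation), a
+// store such as *p = 0 for a 64-bit p can now lower to
+//
+//	MOV ZERO, (A0)
+//
+// instead of materializing the constant first:
+//
+//	MOV $0, A1
+//	MOV A1, (A0)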
+
// Fold ADD+MOVDconst into ADDI where possible.
(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr)
callerSave := gpMask | fpMask | regNamed["g"]
var (
- gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
- gp01 = regInfo{outputs: []regMask{gpMask}}
- gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
- gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
- gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
- gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
+ gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
+ gpstore0 = regInfo{inputs: []regMask{gpspsbMask}} // the zero register supplies the stored value, so only the address is an input
+ gp01 = regInfo{outputs: []regMask{gpMask}}
+ gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
+ gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
+ gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
+ gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+ // Zeroing stores: store <size> of zero to arg0+auxint+aux; arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
// Shift ops
{name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << aux1
{name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> aux1, signed
OpRISCV64MOVHstore
OpRISCV64MOVWstore
OpRISCV64MOVDstore
+ OpRISCV64MOVBstorezero
+ OpRISCV64MOVHstorezero
+ OpRISCV64MOVWstorezero
+ OpRISCV64MOVDstorezero
OpRISCV64SLL
OpRISCV64SRA
OpRISCV64SRL
},
},
},
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB
+ },
+ },
+ },
{
name: "SLL",
argLen: 2,
return rewriteValueRISCV64_OpRISCV64MOVBload(v)
case OpRISCV64MOVBstore:
return rewriteValueRISCV64_OpRISCV64MOVBstore(v)
+ case OpRISCV64MOVBstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v)
case OpRISCV64MOVDconst:
return rewriteValueRISCV64_OpRISCV64MOVDconst(v)
case OpRISCV64MOVDload:
return rewriteValueRISCV64_OpRISCV64MOVDload(v)
case OpRISCV64MOVDstore:
return rewriteValueRISCV64_OpRISCV64MOVDstore(v)
+ case OpRISCV64MOVDstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v)
case OpRISCV64MOVHUload:
return rewriteValueRISCV64_OpRISCV64MOVHUload(v)
case OpRISCV64MOVHload:
return rewriteValueRISCV64_OpRISCV64MOVHload(v)
case OpRISCV64MOVHstore:
return rewriteValueRISCV64_OpRISCV64MOVHstore(v)
+ case OpRISCV64MOVHstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v)
case OpRISCV64MOVWUload:
return rewriteValueRISCV64_OpRISCV64MOVWUload(v)
case OpRISCV64MOVWload:
return rewriteValueRISCV64_OpRISCV64MOVWload(v)
case OpRISCV64MOVWstore:
return rewriteValueRISCV64_OpRISCV64MOVWstore(v)
+ case OpRISCV64MOVWstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVWstorezero(v)
case OpRISCV64SUB:
return rewriteValueRISCV64_OpRISCV64SUB(v)
case OpRISCV64SUBW:
v.AddArg3(base, val, mem)
return true
}
+ // match: (MOVBstore [off] {sym} ptr (MOVBconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBconst || v_1.AuxInt != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVDconst(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || v_1.AuxInt != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVDstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
+ // match: (MOVHstore [off] {sym} ptr (MOVHconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHconst || v_1.AuxInt != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
v.AddArg3(base, val, mem)
return true
}
+ // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWconst || v_1.AuxInt != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = off
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(off1+off2)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg2(ptr, mem)
+ return true
+ }
return false
}
func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {