v = v.Args[0]
continue
}
- case ssa.OpRISCV64SUB:
- // RISCV64 lowers Neq32 to include a SUB with multiple arguments.
+ case ssa.OpRISCV64SUBW:
+ // RISCV64 lowers Neq32 to include a SUBW with multiple arguments.
// TODO(jsing): it would be preferable not to use Neq32 for
// writeBarrier.enabled checks on this platform.
v = v.Args[0]
gc.AddrAuto(&p.To, v)
case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// nothing to do
- case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
+ case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
(EqPtr x y) -> (SEQZ (SUB <x.Type> x y))
(Eq64 x y) -> (SEQZ (SUB <x.Type> x y))
-(Eq32 x y) -> (SEQZ (ZeroExt32to64 (SUB <x.Type> x y)))
+(Eq32 x y) -> (SEQZ (SUBW <x.Type> x y))
(Eq16 x y) -> (SEQZ (ZeroExt16to64 (SUB <x.Type> x y)))
(Eq8 x y) -> (SEQZ (ZeroExt8to64 (SUB <x.Type> x y)))
(Eq64F ...) -> (FEQD ...)
(NeqPtr x y) -> (SNEZ (SUB <x.Type> x y))
(Neq64 x y) -> (SNEZ (SUB <x.Type> x y))
-(Neq32 x y) -> (SNEZ (ZeroExt32to64 (SUB <x.Type> x y)))
+(Neq32 x y) -> (SNEZ (SUBW <x.Type> x y))
(Neq16 x y) -> (SNEZ (ZeroExt16to64 (SUB <x.Type> x y)))
(Neq8 x y) -> (SNEZ (ZeroExt8to64 (SUB <x.Type> x y)))
(Neq64F ...) -> (FNED ...)
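The substance of the rules change: the old Eq32/Neq32 forms zero-extended a full-width SUB before testing for zero, and ZeroExt32to64 costs an SLLI/SRLI pair on riscv64. SUBW already reduces the difference to its low 32 bits and sign-extends it, so its result is zero exactly when the two operands agree in their low 32 bits, which is all Eq32/Neq32 care about. A minimal standalone check of that equivalence (plain Go, not part of this change; the sample values are arbitrary):

package main

import "fmt"

func main() {
	pairs := [][2]uint32{{0, 0}, {1, 2}, {1 << 31, 1 << 31}, {0xffffffff, 0}, {7, 0xffffffff}}
	for _, p := range pairs {
		a, b := p[0], p[1]
		subw := int64(int32(a - b)) // what SUBW leaves in the register: low 32 bits of a-b, sign extended
		zext := uint64(a - b)       // what ZeroExt32to64(SUB ...) produced before this change
		fmt.Println(a == b, subw == 0, zext == 0) // all three always agree
	}
}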
{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
{name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"}, // arg0 + auxint
{name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW"}, // 32 low bits of arg 0 - 32 low bits of arg 1, sign extended to 64 bits
// M extension. H means high (i.e., it returns the top bits of
// the result). U means unsigned. W means word (i.e., 32-bit).
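Since the one-line comment on the new SUBW entry is terse, here is its semantics spelled out as ordinary Go (a throwaway illustration; the helper name and program are not part of the ops table or of this change):

package main

import "fmt"

// subw models RISC-V SUBW applied to two 64-bit register values:
// 32 low bits of a minus 32 low bits of b, sign extended to 64 bits.
func subw(a, b uint64) int64 {
	return int64(int32(uint32(a) - uint32(b)))
}

func main() {
	fmt.Println(subw(0x1_0000_0005, 3))         // 2: upper input bits are ignored
	fmt.Println(subw(0, 1))                     // -1: the 32-bit result is sign extended
	fmt.Println(subw(0xffffffff, 0x1_ffffffff)) // 0: equal low words give zero
}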
OpRISCV64ADD
OpRISCV64ADDI
OpRISCV64SUB
+ OpRISCV64SUBW
OpRISCV64MUL
OpRISCV64MULW
OpRISCV64MULH
},
},
},
+ {
+ name: "SUBW",
+ argLen: 2,
+ asm: riscv.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30
+ },
+ },
+ },
{
name: "MUL",
argLen: 2,
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- typ := &b.Func.Config.Types
// match: (Eq32 x y)
- // result: (SEQZ (ZeroExt32to64 (SUB <x.Type> x y)))
+ // result: (SEQZ (SUBW <x.Type> x y))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SEQZ)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1.AddArg(x)
- v1.AddArg(y)
- v0.AddArg(v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
b := v.Block
- typ := &b.Func.Config.Types
// match: (Neq32 x y)
- // result: (SNEZ (ZeroExt32to64 (SUB <x.Type> x y)))
+ // result: (SNEZ (SUBW <x.Type> x y))
for {
x := v_0
y := v_1
v.reset(OpRISCV64SNEZ)
- v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
- v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
- v1.AddArg(x)
- v1.AddArg(y)
- v0.AddArg(v1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUBW, x.Type)
+ v0.AddArg(x)
+ v0.AddArg(y)
v.AddArg(v0)
return true
}
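At the assembly level the win is that Eq32/Neq32 now need only SUBW plus SEQZ/SNEZ, where the old lowering needed SUB plus the SLLI/SRLI pair from ZeroExt32to64. A test in the style of test/codegen could pin that down; the riscv64 check strings below are assumptions about the emitted assembly rather than verified output from this change:

// Hypothetical test/codegen-style checks, not included in this change.
package codegen

func eq32(x, y uint32) bool {
	// riscv64:"SUBW","SEQZ",-"SLLI",-"SRLI"
	return x == y
}

func neq32(x, y uint32) bool {
	// riscv64:"SUBW","SNEZ",-"SLLI",-"SRLI"
	return x != y
}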