(Signmask x) -> (SARLconst x [31])
(Zeromask <t> x) -> (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
-(Slicemask <t> x) -> (XORLconst [-1] (SARLconst <t> (SUBLconst <t> x [1]) [31]))
+(Slicemask <t> x) -> (SARLconst (NEGL <t> x) [31])
// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
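
Slicemask produces 0 when its argument is 0 and all ones when it is positive (the argument is a slice length or capacity, so it is never negative). The old 386 lowering computed ^((x-1)>>31); the new one computes (-x)>>31 with an arithmetic shift of the negated value, which typically saves an instruction. A minimal Go sketch of the identity for the 32-bit case (the function names are illustrative, not the compiler's):

package main

import "fmt"

// slicemaskOld models the previous lowering: ^((x - 1) >> 31).
// Go's >> on a signed operand is an arithmetic shift, matching SARLconst.
func slicemaskOld(x int32) int32 { return ^((x - 1) >> 31) }

// slicemaskNew models the new lowering: (-x) >> 31.
func slicemaskNew(x int32) int32 { return (-x) >> 31 }

func main() {
	// Only non-negative inputs matter; both forms yield 0 for 0 and -1 otherwise.
	for _, x := range []int32{0, 1, 2, 1 << 20, 1<<31 - 1} {
		fmt.Println(x, slicemaskOld(x), slicemaskNew(x))
	}
}
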
(ZeroExt16to64 x) -> (MOVWQZX x)
(ZeroExt32to64 x) -> (MOVLQZX x)
-(Slicemask <t> x) -> (XORQconst [-1] (SARQconst <t> (SUBQconst <t> x [1]) [63]))
+(Slicemask <t> x) -> (SARQconst (NEGQ <t> x) [63])
// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
(Signmask x) -> (SRAconst x [31])
(Zeromask x) -> (SRAconst (RSBshiftRL <config.fe.TypeInt32()> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
-(Slicemask <t> x) -> (MVN (SRAconst <t> (SUBconst <t> x [1]) [31]))
+(Slicemask <t> x) -> (SRAconst (RSBconst <t> [0] x) [31])
// float <-> int conversion
(Cvt32to32F x) -> (MOVWF x)
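
On ARM the negation is spelled RSBconst [0] (reverse subtract from the constant 0), which is how the backend expresses -x, so the rule above is the same shift-of-negation form as the other ports. A small sketch, with rsbconst as an illustrative model of the SSA op rather than compiler code:

package armnote

// rsbconst models ARM's RSBconst, which computes auxInt - arg
// (reverse subtract), so RSBconst [0] x is simply -x.
func rsbconst(aux, x int32) int32 { return aux - x }

// slicemaskARM mirrors (SRAconst (RSBconst <t> [0] x) [31]).
func slicemaskARM(x int32) int32 { return rsbconst(0, x) >> 31 }
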
(ConstNil) -> (MOVDconst [0])
(ConstBool [b]) -> (MOVDconst [b])
-(Slicemask <t> x) -> (MVN (SRAconst <t> (SUBconst <t> x [1]) [63]))
+(Slicemask <t> x) -> (SRAconst (NEG <t> x) [63])
// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Signmask x) -> (SRAconst x [31])
(Zeromask x) -> (NEG (SGTU x (MOVWconst [0])))
-(Slicemask x) -> (NEG (SGT x (MOVWconst [0])))
+(Slicemask <t> x) -> (SRAconst (NEG <t> x) [31])
// float <-> int conversion
(Cvt32to32F x) -> (MOVWF x)
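
The 32-bit MIPS port previously built the mask from a comparison rather than the shift idiom: NEG (SGT x 0) negates the 0/1 result of a signed greater-than-zero test. For the non-negative inputs Slicemask sees, that agrees with the new shift-of-negation form; a rough sketch (names are illustrative):

package mipsnote

// slicemaskSGT models the old lowering (NEG (SGT x (MOVWconst [0]))):
// negate the 0/1 result of a signed x > 0 comparison.
func slicemaskSGT(x int32) int32 {
	if x > 0 {
		return -1
	}
	return 0
}

// slicemaskShift models the new lowering (SRAconst (NEG <t> x) [31]).
func slicemaskShift(x int32) int32 { return (-x) >> 31 }
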
(ConstNil) -> (MOVVconst [0])
(ConstBool [b]) -> (MOVVconst [b])
-(Slicemask <t> x) -> (NORconst [0] (SRAVconst <t> (SUBVconst <t> x [1]) [63]))
+(Slicemask <t> x) -> (SRAVconst (NEGV <t> x) [63])
// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc64to16 x) -> (MOVHreg x)
(Trunc64to32 x) -> (MOVWreg x)
-(Slicemask <t> x) -> (XORconst [-1] (SRADconst <t> (ADDconst <t> x [-1]) [63]))
+(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
// Note that MOV??reg returns a 64-bit int, x is not necessarily that wide
// This may interact with other patterns in the future. (Compare with arm64)
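
The surrounding comments capture two conventions these rules rely on: MOV??reg ops produce a canonical 64-bit value (sign- or zero-extended, depending on the op) even when x carries junk in its upper bits, and truncations can be plain copies because later ops simply ignore the high parts of the register. A small sketch of the extension semantics (movhreg and truncate64to16 are illustrative models, not compiler functions):

package extnote

// movhreg models a MOVHreg-style op: keep the low 16 bits and
// sign-extend them to 64 bits, discarding whatever the upper bits held.
func movhreg(x uint64) int64 { return int64(int16(x)) }

// truncate64to16 models a truncation that is "just a copy": the value
// is unchanged and only its low 16 bits are meaningful to later ops.
func truncate64to16(x uint64) uint64 { return x }
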
(ZeroExt16to64 x) -> (MOVHZreg x)
(ZeroExt32to64 x) -> (MOVWZreg x)
-(Slicemask <t> x) -> (XOR (MOVDconst [-1]) (SRADconst <t> (SUBconst <t> x [1]) [63]))
+(Slicemask <t> x) -> (SRADconst (NEG <t> x) [63])
// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
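
For context, Slicemask is used when lowering slice expressions: the mask is ANDed into the pointer adjustment so that an empty result does not acquire a pointer just past the underlying array. A rough, hedged sketch of that pattern follows; the names and layout are illustrative only and differ from the compiler's actual lowering.

package slicenote

import "unsafe"

// adjustedPtr sketches the slicemask pattern: the byte offset added to
// the base pointer is masked to zero whenever the resulting slice is
// empty, so the pointer never advances past the underlying array.
func adjustedPtr(base unsafe.Pointer, elemSize, i, newLen uintptr) unsafe.Pointer {
	mask := uintptr(int64(-int64(newLen)) >> 63) // 0 if newLen == 0, all ones otherwise
	return unsafe.Pointer(uintptr(base) + (i*elemSize)&mask)
}
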
_ = b
// match: (Slicemask <t> x)
// cond:
- // result: (XORLconst [-1] (SARLconst <t> (SUBLconst <t> x [1]) [31]))
+ // result: (SARLconst (NEGL <t> x) [31])
for {
t := v.Type
x := v.Args[0]
- v.reset(Op386XORLconst)
- v.AuxInt = -1
- v0 := b.NewValue0(v.Pos, Op386SARLconst, t)
- v0.AuxInt = 31
- v1 := b.NewValue0(v.Pos, Op386SUBLconst, t)
- v1.AuxInt = 1
- v1.AddArg(x)
- v0.AddArg(v1)
+ v.reset(Op386SARLconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Pos, Op386NEGL, t)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
_ = b
// match: (Slicemask <t> x)
// cond:
- // result: (XORQconst [-1] (SARQconst <t> (SUBQconst <t> x [1]) [63]))
+ // result: (SARQconst (NEGQ <t> x) [63])
for {
t := v.Type
x := v.Args[0]
- v.reset(OpAMD64XORQconst)
- v.AuxInt = -1
- v0 := b.NewValue0(v.Pos, OpAMD64SARQconst, t)
- v0.AuxInt = 63
- v1 := b.NewValue0(v.Pos, OpAMD64SUBQconst, t)
- v1.AuxInt = 1
- v1.AddArg(x)
- v0.AddArg(v1)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
_ = b
// match: (Slicemask <t> x)
// cond:
- // result: (MVN (SRAconst <t> (SUBconst <t> x [1]) [31]))
+ // result: (SRAconst (RSBconst <t> [0] x) [31])
for {
t := v.Type
x := v.Args[0]
- v.reset(OpARMMVN)
- v0 := b.NewValue0(v.Pos, OpARMSRAconst, t)
- v0.AuxInt = 31
- v1 := b.NewValue0(v.Pos, OpARMSUBconst, t)
- v1.AuxInt = 1
- v1.AddArg(x)
- v0.AddArg(v1)
+ v.reset(OpARMSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Pos, OpARMRSBconst, t)
+ v0.AuxInt = 0
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
_ = b
// match: (Slicemask <t> x)
// cond:
- // result: (MVN (SRAconst <t> (SUBconst <t> x [1]) [63]))
+ // result: (SRAconst (NEG <t> x) [63])
for {
t := v.Type
x := v.Args[0]
- v.reset(OpARM64MVN)
- v0 := b.NewValue0(v.Pos, OpARM64SRAconst, t)
- v0.AuxInt = 63
- v1 := b.NewValue0(v.Pos, OpARM64SUBconst, t)
- v1.AuxInt = 1
- v1.AddArg(x)
- v0.AddArg(v1)
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
func rewriteValueMIPS_OpSlicemask(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Slicemask x)
+ // match: (Slicemask <t> x)
// cond:
- // result: (NEG (SGT x (MOVWconst [0])))
+ // result: (SRAconst (NEG <t> x) [31])
for {
+ t := v.Type
x := v.Args[0]
- v.reset(OpMIPSNEG)
- v0 := b.NewValue0(v.Pos, OpMIPSSGT, config.fe.TypeBool())
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = 31
+ v0 := b.NewValue0(v.Pos, OpMIPSNEG, t)
v0.AddArg(x)
- v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, config.fe.TypeUInt32())
- v1.AuxInt = 0
- v0.AddArg(v1)
v.AddArg(v0)
return true
}
_ = b
// match: (Slicemask <t> x)
// cond:
- // result: (NORconst [0] (SRAVconst <t> (SUBVconst <t> x [1]) [63]))
+ // result: (SRAVconst (NEGV <t> x) [63])
for {
t := v.Type
x := v.Args[0]
- v.reset(OpMIPS64NORconst)
- v.AuxInt = 0
- v0 := b.NewValue0(v.Pos, OpMIPS64SRAVconst, t)
- v0.AuxInt = 63
- v1 := b.NewValue0(v.Pos, OpMIPS64SUBVconst, t)
- v1.AuxInt = 1
- v1.AddArg(x)
- v0.AddArg(v1)
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
_ = b
// match: (Slicemask <t> x)
// cond:
- // result: (XORconst [-1] (SRADconst <t> (ADDconst <t> x [-1]) [63]))
+ // result: (SRADconst (NEG <t> x) [63])
for {
t := v.Type
x := v.Args[0]
- v.reset(OpPPC64XORconst)
- v.AuxInt = -1
- v0 := b.NewValue0(v.Pos, OpPPC64SRADconst, t)
- v0.AuxInt = 63
- v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, t)
- v1.AuxInt = -1
- v1.AddArg(x)
- v0.AddArg(v1)
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Pos, OpPPC64NEG, t)
+ v0.AddArg(x)
v.AddArg(v0)
return true
}
_ = b
// match: (Slicemask <t> x)
// cond:
- // result: (XOR (MOVDconst [-1]) (SRADconst <t> (SUBconst <t> x [1]) [63]))
+ // result: (SRADconst (NEG <t> x) [63])
for {
t := v.Type
x := v.Args[0]
- v.reset(OpS390XXOR)
- v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, config.fe.TypeUInt64())
- v0.AuxInt = -1
+ v.reset(OpS390XSRADconst)
+ v.AuxInt = 63
+ v0 := b.NewValue0(v.Pos, OpS390XNEG, t)
+ v0.AddArg(x)
v.AddArg(v0)
- v1 := b.NewValue0(v.Pos, OpS390XSRADconst, t)
- v1.AuxInt = 63
- v2 := b.NewValue0(v.Pos, OpS390XSUBconst, t)
- v2.AuxInt = 1
- v2.AddArg(x)
- v1.AddArg(v2)
- v.AddArg(v1)
return true
}
}