ssa.OpMIPSANDconst,
ssa.OpMIPSORconst,
ssa.OpMIPSXORconst,
- ssa.OpMIPSNORconst,
ssa.OpMIPSSLLconst,
ssa.OpMIPSSRLconst,
ssa.OpMIPSSRAconst,
ssa.OpMIPS64ANDconst,
ssa.OpMIPS64ORconst,
ssa.OpMIPS64XORconst,
- ssa.OpMIPS64NORconst,
ssa.OpMIPS64SLLVconst,
ssa.OpMIPS64SRLVconst,
ssa.OpMIPS64SRAVconst,
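Both op lists are cases of ssaGenValue's shared "register op constant" arm in cmd/compile/internal/mips{,64}/ssa.go; NORconst simply drops out and the remaining const ops keep using it. For orientation, that arm looks roughly like this (a sketch of the surrounding code, not part of the change):

	p := s.Prog(v.Op.Asm())  // e.g. AND/OR/XOR $c, Rarg0, Rout
	p.From.Type = obj.TYPE_CONST
	p.From.Offset = v.AuxInt // the [c] carried by the *const op
	p.Reg = v.Args[0].Reg()
	p.To.Type = obj.TYPE_REG
	p.To.Reg = v.Reg()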
(Neg(32|16|8) ...) => (NEG ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)
-(Com(32|16|8) x) => (NORconst [0] x)
+(Com(32|16|8) x) => (NOR (MOVWconst [0]) x)
(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)
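The new Com form relies on the identity ^x == ^(0|x): bitwise complement is NOR with zero. A minimal plain-Go check of the identity (illustrative, not from the change):

	func norZero(x uint32) uint32 { return ^(0 | x) } // what (NOR (MOVWconst [0]) x) computes
	// norZero(x) == ^x for every x, so the NOR form is exactly Com.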
(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
(SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3] ptr)))
- (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32>
(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3] ptr))))) mem)
(SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3]
(XORconst <typ.UInt32> [3] ptr))))
- (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32>
(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
(ANDconst <typ.UInt32> [3]
(XORconst <typ.UInt32> [3] ptr)))))) mem)
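In both endianness variants the rewritten NOR subtree is the keep-mask for the three bytes the atomic AND must leave intact (the big-endian form first flips the lane index with (XORconst [3] ptr)); the MIPS64 rules below are the same modulo V-suffixed ops. Sketched in plain Go with illustrative names, the second LoweredAtomicAnd operand computes:

	func atomicAnd8Operand(val uint8, ptr uintptr) uint32 {
		shift := uint32(ptr&3) << 3      // (SLLconst [3] (ANDconst [3] ptr)): byte offset * 8
		lane := uint32(val) << shift     // (SLL (ZeroExt8to32 val) shift)
		keep := ^(uint32(0xff) << shift) // (NOR (MOVWconst [0]) (SLL (MOVWconst [0xff]) shift))
		return lane | keep               // val in its lane, ones everywhere else
	}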
(AND x (MOVWconst [c])) => (ANDconst [c] x)
(OR x (MOVWconst [c])) => (ORconst [c] x)
(XOR x (MOVWconst [c])) => (XORconst [c] x)
-(NOR x (MOVWconst [c])) => (NORconst [c] x)
(SLL x (MOVWconst [c])) => (SLLconst x [c&31])
(SRL x (MOVWconst [c])) => (SRLconst x [c&31])
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVWconst [-1])
(XORconst [0] x) => x
-(XORconst [-1] x) => (NORconst [0] x)
// generic constant folding
(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)])
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
-(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)])
(NEG (MOVWconst [c])) => (MOVWconst [-c])
(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
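The folds deleted here are plain integer identities: x ^ -1 == ^x == ^(0|x) (the XORconst [-1] rule), and NOR of two constants is just ^(c|d) (for example c=12, d=10: ^(12|10) == ^14 == -15). The MIPS64 block below loses the same rules.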
(OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val)
(SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3] ptr)))
- (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
+ (NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64>
(MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3] ptr))))) mem)
(SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3]
(XORconst <typ.UInt64> [3] ptr))))
- (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
+ (NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64>
(MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
(ANDconst <typ.UInt64> [3]
(XORconst <typ.UInt64> [3] ptr)))))) mem)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
-(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)
(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVVconst [-1])
(XORconst [0] x) => x
-(XORconst [-1] x) => (NORconst [0] x)
// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
-(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt
{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
- {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt)
{name: "NEGV", argLength: 1, reg: gp11}, // -arg0
{name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt32"}, // arg0 ^ arg1
{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", typ: "UInt32"}, // arg0 ^ auxInt
{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
- {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int32"}, // ^(arg0 | auxInt)
{name: "NEG", argLength: 1, reg: gp11}, // -arg0
{name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
OpMIPSXOR
OpMIPSXORconst
OpMIPSNOR
- OpMIPSNORconst
OpMIPSNEG
OpMIPSNEGF
OpMIPSNEGD
OpMIPS64XOR
OpMIPS64XORconst
OpMIPS64NOR
- OpMIPS64NORconst
OpMIPS64NEGV
OpMIPS64NEGF
OpMIPS64NEGD
},
},
},
- {
- name: "NORconst",
- auxType: auxInt32,
- argLen: 1,
- asm: mips.ANOR,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
- },
- outputs: []outputInfo{
- {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
- },
- },
- },
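The opGen masks decode as one bit per entry in the port's register list, and the deleted entry's two masks differ in exactly one bit: 469762046 == 0x1bfffffe (inputs) versus 335544318 == 0x13fffffe (outputs), with 0x1bfffffe ^ 0x13fffffe == 0x08000000, the slot for g, which may be read but never written. The MIPS64 entry below differs by its g bit in the same way.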
{
name: "NEG",
argLen: 1,
},
},
},
- {
- name: "NORconst",
- auxType: auxInt64,
- argLen: 1,
- asm: mips.ANOR,
- reg: regInfo{
- inputs: []inputInfo{
- {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
- },
- outputs: []outputInfo{
- {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
- },
- },
- },
{
name: "NEGV",
argLen: 1,
return rewriteValueMIPS_OpMIPSMUL(v)
case OpMIPSNEG:
return rewriteValueMIPS_OpMIPSNEG(v)
- case OpMIPSNOR:
- return rewriteValueMIPS_OpMIPSNOR(v)
- case OpMIPSNORconst:
- return rewriteValueMIPS_OpMIPSNORconst(v)
case OpMIPSOR:
return rewriteValueMIPS_OpMIPSOR(v)
case OpMIPSORconst:
typ := &b.Func.Config.Types
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
for {
ptr := v_0
val := v_1
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
- v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
- v7.AuxInt = int32ToAuxInt(0)
- v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
- v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v9.AuxInt = int32ToAuxInt(0xff)
- v8.AddArg2(v9, v5)
- v7.AddArg(v8)
+ v7 := b.NewValue0(v.Pos, OpMIPSNOR, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(0)
+ v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v10.AuxInt = int32ToAuxInt(0xff)
+ v9.AddArg2(v10, v5)
+ v7.AddArg2(v8, v9)
v2.AddArg2(v3, v7)
v.AddArg3(v0, v2, mem)
return true
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NOR (MOVWconst [0]) <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
for {
ptr := v_0
val := v_1
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
- v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
- v8.AuxInt = int32ToAuxInt(0)
- v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
- v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
- v10.AuxInt = int32ToAuxInt(0xff)
- v9.AddArg2(v10, v5)
- v8.AddArg(v9)
+ v8 := b.NewValue0(v.Pos, OpMIPSNOR, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v9.AuxInt = int32ToAuxInt(0)
+ v10 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v11.AuxInt = int32ToAuxInt(0xff)
+ v10.AddArg2(v11, v5)
+ v8.AddArg2(v9, v10)
v2.AddArg2(v3, v8)
v.AddArg3(v0, v2, mem)
return true
}
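rulegen numbers values in order of first appearance in the result expression, so replacing NORconst [0] with NOR plus an explicit zero introduces one extra value and shifts the later v-numbers up by one; the tree shape is otherwise unchanged.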
func rewriteValueMIPS_OpCom16(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (Com16 x)
- // result: (NORconst [0] x)
+ // result: (NOR (MOVWconst [0]) x)
for {
x := v_0
- v.reset(OpMIPSNORconst)
- v.AuxInt = int32ToAuxInt(0)
- v.AddArg(x)
+ v.reset(OpMIPSNOR)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
return true
}
}
func rewriteValueMIPS_OpCom32(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (Com32 x)
- // result: (NORconst [0] x)
+ // result: (NOR (MOVWconst [0]) x)
for {
x := v_0
- v.reset(OpMIPSNORconst)
- v.AuxInt = int32ToAuxInt(0)
- v.AddArg(x)
+ v.reset(OpMIPSNOR)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
return true
}
}
func rewriteValueMIPS_OpCom8(v *Value) bool {
v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
// match: (Com8 x)
- // result: (NORconst [0] x)
+ // result: (NOR (MOVWconst [0]) x)
for {
x := v_0
- v.reset(OpMIPSNORconst)
- v.AuxInt = int32ToAuxInt(0)
- v.AddArg(x)
+ v.reset(OpMIPSNOR)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
return true
}
}
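All three Com rewrites now declare b and typ because the zero operand must be built as a separate typed value; with NORconst the 0 lived in AuxInt and no auxiliary value was needed.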
}
return false
}
-func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (NOR x (MOVWconst [c]))
- // result: (NORconst [c] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpMIPSMOVWconst {
- continue
- }
- c := auxIntToInt32(v_1.AuxInt)
- v.reset(OpMIPSNORconst)
- v.AuxInt = int32ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- return false
-}
-func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
- v_0 := v.Args[0]
- // match: (NORconst [c] (MOVWconst [d]))
- // result: (MOVWconst [^(c|d)])
- for {
- c := auxIntToInt32(v.AuxInt)
- if v_0.Op != OpMIPSMOVWconst {
- break
- }
- d := auxIntToInt32(v_0.AuxInt)
- v.reset(OpMIPSMOVWconst)
- v.AuxInt = int32ToAuxInt(^(c | d))
- return true
- }
- return false
-}
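With the (NOR x (MOVWconst [c])) and NORconst patterns gone from MIPS.rules, rulegen stops emitting these two match functions, and the switch cases earlier in the file go with them; the MIPS64 twins below disappear for the same reason.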
func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
v.copyOf(x)
return true
}
- // match: (XORconst [-1] x)
- // result: (NORconst [0] x)
- for {
- if auxIntToInt32(v.AuxInt) != -1 {
- break
- }
- x := v_0
- v.reset(OpMIPSNORconst)
- v.AuxInt = int32ToAuxInt(0)
- v.AddArg(x)
- return true
- }
// match: (XORconst [c] (MOVWconst [d]))
// result: (MOVWconst [c^d])
for {
return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
case OpMIPS64NEGV:
return rewriteValueMIPS64_OpMIPS64NEGV(v)
- case OpMIPS64NOR:
- return rewriteValueMIPS64_OpMIPS64NOR(v)
- case OpMIPS64NORconst:
- return rewriteValueMIPS64_OpMIPS64NORconst(v)
case OpMIPS64OR:
return rewriteValueMIPS64_OpMIPS64OR(v)
case OpMIPS64ORconst:
typ := &b.Func.Config.Types
// match: (AtomicAnd8 ptr val mem)
// cond: !config.BigEndian
- // result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))))) mem)
+ // result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) (NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))))) mem)
for {
ptr := v_0
val := v_1
v6.AddArg(ptr)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
- v7 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64)
- v7.AuxInt = int64ToAuxInt(0)
- v8 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
- v9 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v9.AuxInt = int64ToAuxInt(0xff)
- v8.AddArg2(v9, v5)
- v7.AddArg(v8)
+ v7 := b.NewValue0(v.Pos, OpMIPS64NOR, typ.UInt64)
+ v8 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v8.AuxInt = int64ToAuxInt(0)
+ v9 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
+ v10 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v10.AuxInt = int64ToAuxInt(0xff)
+ v9.AddArg2(v10, v5)
+ v7.AddArg2(v8, v9)
v2.AddArg2(v3, v7)
v.AddArg3(v0, v2, mem)
return true
}
// match: (AtomicAnd8 ptr val mem)
// cond: config.BigEndian
- // result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))) (NORconst [0] <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))))) mem)
+ // result: (LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr) (OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))) (NOR (MOVVconst [0]) <typ.UInt64> (SLLV <typ.UInt64> (MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] (XORconst <typ.UInt64> [3] ptr)))))) mem)
for {
ptr := v_0
val := v_1
v6.AddArg(v7)
v5.AddArg(v6)
v3.AddArg2(v4, v5)
- v8 := b.NewValue0(v.Pos, OpMIPS64NORconst, typ.UInt64)
- v8.AuxInt = int64ToAuxInt(0)
- v9 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
- v10 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
- v10.AuxInt = int64ToAuxInt(0xff)
- v9.AddArg2(v10, v5)
- v8.AddArg(v9)
+ v8 := b.NewValue0(v.Pos, OpMIPS64NOR, typ.UInt64)
+ v9 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v9.AuxInt = int64ToAuxInt(0)
+ v10 := b.NewValue0(v.Pos, OpMIPS64SLLV, typ.UInt64)
+ v11 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v11.AuxInt = int64ToAuxInt(0xff)
+ v10.AddArg2(v11, v5)
+ v8.AddArg2(v9, v10)
v2.AddArg2(v3, v8)
v.AddArg3(v0, v2, mem)
return true
}
return false
}
-func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (NOR x (MOVVconst [c]))
- // cond: is32Bit(c)
- // result: (NORconst [c] x)
- for {
- for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
- x := v_0
- if v_1.Op != OpMIPS64MOVVconst {
- continue
- }
- c := auxIntToInt64(v_1.AuxInt)
- if !(is32Bit(c)) {
- continue
- }
- v.reset(OpMIPS64NORconst)
- v.AuxInt = int64ToAuxInt(c)
- v.AddArg(x)
- return true
- }
- break
- }
- return false
-}
-func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
- v_0 := v.Args[0]
- // match: (NORconst [c] (MOVVconst [d]))
- // result: (MOVVconst [^(c|d)])
- for {
- c := auxIntToInt64(v.AuxInt)
- if v_0.Op != OpMIPS64MOVVconst {
- break
- }
- d := auxIntToInt64(v_0.AuxInt)
- v.reset(OpMIPS64MOVVconst)
- v.AuxInt = int64ToAuxInt(^(c | d))
- return true
- }
- return false
-}
func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
v_1 := v.Args[1]
v_0 := v.Args[0]
v.copyOf(x)
return true
}
- // match: (XORconst [-1] x)
- // result: (NORconst [0] x)
- for {
- if auxIntToInt64(v.AuxInt) != -1 {
- break
- }
- x := v_0
- v.reset(OpMIPS64NORconst)
- v.AuxInt = int64ToAuxInt(0)
- v.AddArg(x)
- return true
- }
// match: (XORconst [c] (MOVVconst [d]))
// result: (MOVVconst [c^d])
for {