From: Cherry Zhang
Date: Mon, 13 Jun 2016 20:49:09 +0000 (-0400)
Subject: [dev.ssa] cmd/compile: add some ARM optimization rewriting rules
X-Git-Tag: go1.8beta1~1892^2^2~41
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=8599fdd9b66a384ac1e82f301a9ff4adfe448b08;p=gostls13.git

[dev.ssa] cmd/compile: add some ARM optimization rewriting rules

Mostly constant folding rules, analogous to AMD64 ones.
Along with some simplifications.

Updates #15365.

Change-Id: If83bc1188bb05acb982ef3a1c21704c187e3eb24
Reviewed-on: https://go-review.googlesource.com/24210
Run-TryBot: David Chase
TryBot-Result: Gobot Gobot
Reviewed-by: David Chase
---

diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
index bb7f6c5957..51722c4f35 100644
--- a/src/cmd/compile/internal/arm/ssa.go
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -117,7 +117,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// input args need no code
 	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
 		// nothing to do
-	case ssa.OpCopy, ssa.OpARMMOVWconvert:
+	case ssa.OpCopy, ssa.OpARMMOVWconvert, ssa.OpARMMOVWreg:
 		if v.Type.IsMemory() {
 			return
 		}
@@ -290,8 +290,11 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
 	case ssa.OpARMADDconst,
+		ssa.OpARMADCconst,
 		ssa.OpARMSUBconst,
+		ssa.OpARMSBCconst,
 		ssa.OpARMRSBconst,
+		ssa.OpARMRSCconst,
 		ssa.OpARMANDconst,
 		ssa.OpARMORconst,
 		ssa.OpARMXORconst,
@@ -305,6 +308,16 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.Reg = gc.SSARegNum(v.Args[0])
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = gc.SSARegNum(v)
+	case ssa.OpARMADDSconst,
+		ssa.OpARMSUBSconst,
+		ssa.OpARMRSBSconst:
+		p := gc.Prog(v.Op.Asm())
+		p.Scond = arm.C_SBIT
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = v.AuxInt
+		p.Reg = gc.SSARegNum(v.Args[0])
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = gc.SSARegNum(v)
 	case ssa.OpARMSRRconst:
 		p := gc.Prog(arm.AMOVW)
 		p.From.Type = obj.TYPE_SHIFT
@@ -710,6 +723,14 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 	case ssa.OpARMLoweredGetClosurePtr:
 		// Closure pointer is R7 (arm.REGCTXT).
gc.CheckLoweredGetClosurePtr(v) + case ssa.OpARMFlagEQ, + ssa.OpARMFlagLT_ULT, + ssa.OpARMFlagLT_UGT, + ssa.OpARMFlagGT_ULT, + ssa.OpARMFlagGT_UGT: + v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString()) + case ssa.OpARMInvertFlags: + v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString()) default: v.Unimplementedf("genValue not implemented: %s", v.LongString()) } diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules index f869c738b8..ee68ad540f 100644 --- a/src/cmd/compile/internal/ssa/gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/gen/ARM.rules @@ -399,11 +399,10 @@ // Optimizations -(ADD (MOVWconst [c]) x) -> (ADDconst [c] x) -(ADD x (MOVWconst [c])) -> (ADDconst [c] x) - +// fold offset into address (ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) -> (MOVWaddr [off1+off2] {sym} ptr) +// fold address into load/store (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBload [off1+off2] {sym} ptr mem) (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVBUload [off1+off2] {sym} ptr mem) (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) -> (MOVHload [off1+off2] {sym} ptr mem) @@ -444,5 +443,314 @@ (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) -> (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) +// replace load from same location as preceding store with copy +(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) -> x +(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) -> x +(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) -> x +(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) -> x +(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x +(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x + +// fold constant into arithmatic ops +(ADD (MOVWconst [c]) x) -> (ADDconst [c] x) +(ADD x (MOVWconst [c])) -> (ADDconst [c] x) +(SUB (MOVWconst [c]) x) -> (RSBconst [c] x) +(SUB x (MOVWconst [c])) -> (SUBconst [c] x) +(RSB (MOVWconst [c]) x) -> (SUBconst [c] x) +(RSB x (MOVWconst [c])) -> (RSBconst [c] x) + +(ADDS (MOVWconst [c]) x) -> (ADDSconst [c] x) +(ADDS x (MOVWconst [c])) -> (ADDSconst [c] x) +(SUBS (MOVWconst [c]) x) -> (RSBSconst [c] x) +(SUBS x (MOVWconst [c])) -> (SUBSconst [c] x) + +(ADC (MOVWconst [c]) x flags) -> (ADCconst [c] x flags) +(ADC x (MOVWconst [c]) flags) -> (ADCconst [c] x flags) +(SBC (MOVWconst [c]) x flags) -> (RSCconst [c] x flags) +(SBC x (MOVWconst [c]) flags) -> (SBCconst [c] x flags) + +(AND (MOVWconst [c]) x) -> (ANDconst [c] x) +(AND x (MOVWconst [c])) -> (ANDconst [c] x) +(OR (MOVWconst [c]) x) -> (ORconst [c] x) +(OR x (MOVWconst [c])) -> (ORconst [c] x) +(XOR (MOVWconst [c]) x) -> (XORconst [c] x) +(XOR x (MOVWconst [c])) -> (XORconst [c] x) +(BIC x (MOVWconst [c])) -> (BICconst [c] x) + +(SLL x (MOVWconst [c])) -> (SLLconst x [c&31]) // Note: I don't think we ever generate bad constant shifts (i.e. 
c>=32) +(SRL x (MOVWconst [c])) -> (SRLconst x [c&31]) +(SRA x (MOVWconst [c])) -> (SRAconst x [c&31]) + +(CMP x (MOVWconst [c])) -> (CMPconst [c] x) +(CMP (MOVWconst [c]) x) -> (InvertFlags (CMPconst [c] x)) + +(LoweredZeromask (MOVWconst [0])) -> (MOVWconst [0]) +(LoweredZeromask (MOVWconst [c])) && c != 0 -> (MOVWconst [0xffffffff]) + +// don't extend after proper load +// MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type. +(MOVBreg x:(MOVBload _ _)) -> (MOVWreg x) +(MOVBUreg x:(MOVBUload _ _)) -> (MOVWreg x) +(MOVHreg x:(MOVBload _ _)) -> (MOVWreg x) +(MOVHreg x:(MOVBUload _ _)) -> (MOVWreg x) +(MOVHreg x:(MOVHload _ _)) -> (MOVWreg x) +(MOVHUreg x:(MOVBUload _ _)) -> (MOVWreg x) +(MOVHUreg x:(MOVHUload _ _)) -> (MOVWreg x) + +// fold extensions and ANDs together +(MOVBUreg (ANDconst [c] x)) -> (ANDconst [c&0xff] x) +(MOVHUreg (ANDconst [c] x)) -> (ANDconst [c&0xffff] x) +(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 -> (ANDconst [c&0x7f] x) +(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 -> (ANDconst [c&0x7fff] x) + +// don't extend before store +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) -> (MOVHstore [off] {sym} ptr x mem) + +// mul by constant +(MUL x (MOVWconst [-1])) -> (RSBconst [0] x) +(MUL _ (MOVWconst [0])) -> (MOVWconst [0]) +(MUL x (MOVWconst [1])) -> x +(MUL x (MOVWconst [c])) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) + +(MUL (MOVWconst [-1]) x) -> (RSBconst [0] x) +(MUL (MOVWconst [0]) _) -> (MOVWconst [0]) +(MUL (MOVWconst [1]) x) -> x +(MUL (MOVWconst [c]) x) && isPowerOfTwo(c) -> (SLLconst [log2(c)] x) + +(MULA x (MOVWconst [-1]) a) -> (SUB a x) +(MULA _ (MOVWconst [0]) a) -> a +(MULA x (MOVWconst [1]) a) -> (ADD x a) +(MULA x (MOVWconst [c]) a) && isPowerOfTwo(c) -> (ADD (SLLconst [log2(c)] x) a) + +(MULA (MOVWconst [-1]) x a) -> (SUB a x) +(MULA (MOVWconst [0]) _ a) -> a +(MULA (MOVWconst [1]) x a) -> (ADD x a) +(MULA (MOVWconst [c]) x a) && isPowerOfTwo(c) -> (ADD (SLLconst [log2(c)] x) a) + +// div by constant +(DIVU x (MOVWconst [1])) -> x +(DIVU x (MOVWconst [c])) && isPowerOfTwo(c) -> (SRLconst [log2(c)] x) + +// constant comparisons +(CMPconst (MOVWconst [x]) [y]) && int32(x)==int32(y) -> (FlagEQ) +(CMPconst (MOVWconst [x]) [y]) && int32(x) (FlagLT_ULT) +(CMPconst (MOVWconst [x]) [y]) && int32(x)uint32(y) -> (FlagLT_UGT) +(CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x) (FlagGT_ULT) +(CMPconst (MOVWconst [x]) [y]) && int32(x)>int32(y) && uint32(x)>uint32(y) -> (FlagGT_UGT) + +// other known comparisons +(CMPconst (MOVBUreg _) [c]) && 0xff < c -> (FlagLT_ULT) +(CMPconst (MOVHUreg _) [c]) && 0xffff < c -> (FlagLT_ULT) +(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) -> (FlagLT_ULT) +(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1< (FlagLT_ULT) + +// absorb flag constants into branches +(EQ (FlagEQ) yes no) -> (First nil yes no) +(EQ (FlagLT_ULT) yes no) -> (First nil no yes) +(EQ (FlagLT_UGT) yes no) -> (First nil no yes) +(EQ (FlagGT_ULT) yes no) -> (First nil no yes) +(EQ (FlagGT_UGT) yes no) -> (First nil no yes) + +(NE (FlagEQ) yes no) -> (First nil 
no yes) +(NE (FlagLT_ULT) yes no) -> (First nil yes no) +(NE (FlagLT_UGT) yes no) -> (First nil yes no) +(NE (FlagGT_ULT) yes no) -> (First nil yes no) +(NE (FlagGT_UGT) yes no) -> (First nil yes no) + +(LT (FlagEQ) yes no) -> (First nil no yes) +(LT (FlagLT_ULT) yes no) -> (First nil yes no) +(LT (FlagLT_UGT) yes no) -> (First nil yes no) +(LT (FlagGT_ULT) yes no) -> (First nil no yes) +(LT (FlagGT_UGT) yes no) -> (First nil no yes) + +(LE (FlagEQ) yes no) -> (First nil yes no) +(LE (FlagLT_ULT) yes no) -> (First nil yes no) +(LE (FlagLT_UGT) yes no) -> (First nil yes no) +(LE (FlagGT_ULT) yes no) -> (First nil no yes) +(LE (FlagGT_UGT) yes no) -> (First nil no yes) + +(GT (FlagEQ) yes no) -> (First nil no yes) +(GT (FlagLT_ULT) yes no) -> (First nil no yes) +(GT (FlagLT_UGT) yes no) -> (First nil no yes) +(GT (FlagGT_ULT) yes no) -> (First nil yes no) +(GT (FlagGT_UGT) yes no) -> (First nil yes no) + +(GE (FlagEQ) yes no) -> (First nil yes no) +(GE (FlagLT_ULT) yes no) -> (First nil no yes) +(GE (FlagLT_UGT) yes no) -> (First nil no yes) +(GE (FlagGT_ULT) yes no) -> (First nil yes no) +(GE (FlagGT_UGT) yes no) -> (First nil yes no) + +(ULT (FlagEQ) yes no) -> (First nil no yes) +(ULT (FlagLT_ULT) yes no) -> (First nil yes no) +(ULT (FlagLT_UGT) yes no) -> (First nil no yes) +(ULT (FlagGT_ULT) yes no) -> (First nil yes no) +(ULT (FlagGT_UGT) yes no) -> (First nil no yes) + +(ULE (FlagEQ) yes no) -> (First nil yes no) +(ULE (FlagLT_ULT) yes no) -> (First nil yes no) +(ULE (FlagLT_UGT) yes no) -> (First nil no yes) +(ULE (FlagGT_ULT) yes no) -> (First nil yes no) +(ULE (FlagGT_UGT) yes no) -> (First nil no yes) + +(UGT (FlagEQ) yes no) -> (First nil no yes) +(UGT (FlagLT_ULT) yes no) -> (First nil no yes) +(UGT (FlagLT_UGT) yes no) -> (First nil yes no) +(UGT (FlagGT_ULT) yes no) -> (First nil no yes) +(UGT (FlagGT_UGT) yes no) -> (First nil yes no) + +(UGE (FlagEQ) yes no) -> (First nil yes no) +(UGE (FlagLT_ULT) yes no) -> (First nil no yes) +(UGE (FlagLT_UGT) yes no) -> (First nil yes no) +(UGE (FlagGT_ULT) yes no) -> (First nil no yes) +(UGE (FlagGT_UGT) yes no) -> (First nil yes no) + +// absorb InvertFlags into branches +(LT (InvertFlags cmp) yes no) -> (GT cmp yes no) +(GT (InvertFlags cmp) yes no) -> (LT cmp yes no) +(LE (InvertFlags cmp) yes no) -> (GE cmp yes no) +(GE (InvertFlags cmp) yes no) -> (LE cmp yes no) +(ULT (InvertFlags cmp) yes no) -> (UGT cmp yes no) +(UGT (InvertFlags cmp) yes no) -> (ULT cmp yes no) +(ULE (InvertFlags cmp) yes no) -> (UGE cmp yes no) +(UGE (InvertFlags cmp) yes no) -> (ULE cmp yes no) +(EQ (InvertFlags cmp) yes no) -> (EQ cmp yes no) +(NE (InvertFlags cmp) yes no) -> (NE cmp yes no) + +// absorb flag constants into boolean values +(Equal (FlagEQ)) -> (MOVWconst [1]) +(Equal (FlagLT_ULT)) -> (MOVWconst [0]) +(Equal (FlagLT_UGT)) -> (MOVWconst [0]) +(Equal (FlagGT_ULT)) -> (MOVWconst [0]) +(Equal (FlagGT_UGT)) -> (MOVWconst [0]) + +(NotEqual (FlagEQ)) -> (MOVWconst [0]) +(NotEqual (FlagLT_ULT)) -> (MOVWconst [1]) +(NotEqual (FlagLT_UGT)) -> (MOVWconst [1]) +(NotEqual (FlagGT_ULT)) -> (MOVWconst [1]) +(NotEqual (FlagGT_UGT)) -> (MOVWconst [1]) + +(LessThan (FlagEQ)) -> (MOVWconst [0]) +(LessThan (FlagLT_ULT)) -> (MOVWconst [1]) +(LessThan (FlagLT_UGT)) -> (MOVWconst [1]) +(LessThan (FlagGT_ULT)) -> (MOVWconst [0]) +(LessThan (FlagGT_UGT)) -> (MOVWconst [0]) + +(LessThanU (FlagEQ)) -> (MOVWconst [0]) +(LessThanU (FlagLT_ULT)) -> (MOVWconst [1]) +(LessThanU (FlagLT_UGT)) -> (MOVWconst [0]) +(LessThanU (FlagGT_ULT)) -> (MOVWconst [1]) +(LessThanU 
(FlagGT_UGT)) -> (MOVWconst [0]) + +(LessEqual (FlagEQ)) -> (MOVWconst [1]) +(LessEqual (FlagLT_ULT)) -> (MOVWconst [1]) +(LessEqual (FlagLT_UGT)) -> (MOVWconst [1]) +(LessEqual (FlagGT_ULT)) -> (MOVWconst [0]) +(LessEqual (FlagGT_UGT)) -> (MOVWconst [0]) + +(LessEqualU (FlagEQ)) -> (MOVWconst [1]) +(LessEqualU (FlagLT_ULT)) -> (MOVWconst [1]) +(LessEqualU (FlagLT_UGT)) -> (MOVWconst [0]) +(LessEqualU (FlagGT_ULT)) -> (MOVWconst [1]) +(LessEqualU (FlagGT_UGT)) -> (MOVWconst [0]) + +(GreaterThan (FlagEQ)) -> (MOVWconst [0]) +(GreaterThan (FlagLT_ULT)) -> (MOVWconst [0]) +(GreaterThan (FlagLT_UGT)) -> (MOVWconst [0]) +(GreaterThan (FlagGT_ULT)) -> (MOVWconst [1]) +(GreaterThan (FlagGT_UGT)) -> (MOVWconst [1]) + +(GreaterThanU (FlagEQ)) -> (MOVWconst [0]) +(GreaterThanU (FlagLT_ULT)) -> (MOVWconst [0]) +(GreaterThanU (FlagLT_UGT)) -> (MOVWconst [1]) +(GreaterThanU (FlagGT_ULT)) -> (MOVWconst [0]) +(GreaterThanU (FlagGT_UGT)) -> (MOVWconst [1]) + +(GreaterEqual (FlagEQ)) -> (MOVWconst [1]) +(GreaterEqual (FlagLT_ULT)) -> (MOVWconst [0]) +(GreaterEqual (FlagLT_UGT)) -> (MOVWconst [0]) +(GreaterEqual (FlagGT_ULT)) -> (MOVWconst [1]) +(GreaterEqual (FlagGT_UGT)) -> (MOVWconst [1]) + +(GreaterEqualU (FlagEQ)) -> (MOVWconst [1]) +(GreaterEqualU (FlagLT_ULT)) -> (MOVWconst [0]) +(GreaterEqualU (FlagLT_UGT)) -> (MOVWconst [1]) +(GreaterEqualU (FlagGT_ULT)) -> (MOVWconst [0]) +(GreaterEqualU (FlagGT_UGT)) -> (MOVWconst [1]) + +// absorb InvertFlags into boolean values +(Equal (InvertFlags x)) -> (Equal x) +(NotEqual (InvertFlags x)) -> (NotEqual x) +(LessThan (InvertFlags x)) -> (GreaterThan x) +(LessThanU (InvertFlags x)) -> (GreaterThanU x) +(GreaterThan (InvertFlags x)) -> (LessThan x) +(GreaterThanU (InvertFlags x)) -> (LessThanU x) +(LessEqual (InvertFlags x)) -> (GreaterEqual x) +(LessEqualU (InvertFlags x)) -> (GreaterEqualU x) +(GreaterEqual (InvertFlags x)) -> (LessEqual x) +(GreaterEqualU (InvertFlags x)) -> (LessEqualU x) + +// remove redundant *const ops +(ADDconst [0] x) -> x +(SUBconst [0] x) -> x +(ANDconst [0] _) -> (MOVWconst [0]) +(ANDconst [c] x) && int32(c)==-1 -> x +(ORconst [0] x) -> x +(ORconst [c] _) && int32(c)==-1 -> (MOVWconst [-1]) +(XORconst [0] x) -> x +(BICconst [0] x) -> x +(BICconst [c] _) && int32(c)==-1 -> (MOVWconst [0]) + +// generic constant folding +(ADDconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c+d))]) +(ADDconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(c+d))] x) +(ADDconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x) +(ADDconst [c] (RSBconst [d] x)) -> (RSBconst [int64(int32(c+d))] x) +(ADCconst [c] (ADDconst [d] x) flags) -> (ADCconst [int64(int32(c+d))] x flags) +(ADCconst [c] (SUBconst [d] x) flags) -> (ADCconst [int64(int32(c-d))] x flags) +(SUBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d-c))]) +(SUBconst [c] (SUBconst [d] x)) -> (ADDconst [int64(int32(-c-d))] x) +(SUBconst [c] (ADDconst [d] x)) -> (ADDconst [int64(int32(-c+d))] x) +(SUBconst [c] (RSBconst [d] x)) -> (RSBconst [int64(int32(-c+d))] x) +(SBCconst [c] (ADDconst [d] x) flags) -> (SBCconst [int64(int32(c-d))] x flags) +(SBCconst [c] (SUBconst [d] x) flags) -> (SBCconst [int64(int32(c+d))] x flags) +(RSBconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(c-d))]) +(RSBconst [c] (RSBconst [d] x)) -> (ADDconst [int64(int32(c-d))] x) +(RSBconst [c] (ADDconst [d] x)) -> (RSBconst [int64(int32(c-d))] x) +(RSBconst [c] (SUBconst [d] x)) -> (RSBconst [int64(int32(c+d))] x) +(RSCconst [c] (ADDconst [d] x) flags) -> (RSCconst [int64(int32(c-d))] x flags) 
+(RSCconst [c] (SUBconst [d] x) flags) -> (RSCconst [int64(int32(c+d))] x flags) +(SLLconst [c] (MOVWconst [d])) -> (MOVWconst [int64(uint32(d)< (MOVWconst [int64(uint32(d)>>uint64(c))]) +(SRAconst [c] (MOVWconst [d])) -> (MOVWconst [int64(int32(d)>>uint64(c))]) +(MUL (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c*d))]) +(MULA (MOVWconst [c]) (MOVWconst [d]) a) -> (ADDconst [int64(int32(c*d))] a) +(DIV (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(int32(c)/int32(d))]) +(DIVU (MOVWconst [c]) (MOVWconst [d])) -> (MOVWconst [int64(uint32(c)/uint32(d))]) +(ANDconst [c] (MOVWconst [d])) -> (MOVWconst [c&d]) +(ANDconst [c] (ANDconst [d] x)) -> (ANDconst [c&d] x) +(ORconst [c] (MOVWconst [d])) -> (MOVWconst [c|d]) +(ORconst [c] (ORconst [d] x)) -> (ORconst [c|d] x) +(XORconst [c] (MOVWconst [d])) -> (MOVWconst [c^d]) +(XORconst [c] (XORconst [d] x)) -> (XORconst [c^d] x) +(BICconst [c] (MOVWconst [d])) -> (MOVWconst [d&^c]) +(MVN (MOVWconst [c])) -> (MOVWconst [^c]) + +// generic simplifications +(ADD x (RSBconst [0] y)) -> (SUB x y) +(SUB x x) -> (MOVWconst [0]) +(AND x x) -> x +(OR x x) -> x +(XOR x x) -> (MOVWconst [0]) +(BIC x x) -> (MOVWconst [0]) + (ADD (MUL x y) a) -> (MULA x y a) (ADD a (MUL x y)) -> (MULA x y a) + +(AND x (MVN y)) -> (BIC x y) diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go index 0a7bae4e9d..a58bdf8b58 100644 --- a/src/cmd/compile/internal/ssa/gen/ARMOps.go +++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go @@ -101,8 +101,10 @@ func init() { var ( gp01 = regInfo{inputs: []regMask{}, outputs: []regMask{gp}} gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11cf = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}, clobbers: flags} // cf: clobbers flags gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} gp1flags = regInfo{inputs: []regMask{gpg}, outputs: []regMask{flags}} + gp1flags1 = regInfo{inputs: []regMask{gp, flags}, outputs: []regMask{gp}} gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} gp21cf = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}, clobbers: flags} // cf: clobbers flags gp2flags = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{flags}} @@ -137,9 +139,15 @@ func init() { {name: "MODU", argLength: 2, reg: gp21cf, asm: "MODU"}, // arg0 % arg1, unsigned {name: "ADDS", argLength: 2, reg: gp21cf, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag + {name: "ADDSconst", argLength: 1, reg: gp11cf, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags + {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags {name: "SUBS", argLength: 2, reg: gp21cf, asm: "SUB"}, // arg0 - arg1, set carry flag + {name: "SUBSconst", argLength: 1, reg: gp11cf, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag + {name: "RSBSconst", argLength: 1, reg: gp11cf, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags + {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags + {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp &^ buildReg("R0")}, 
clobbers: buildReg("R0")}, asm: "MULLU", commutative: true}, // arg0 * arg1, results 64-bit, high 32-bit in R0 {name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2 @@ -211,6 +219,7 @@ func init() { {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"}, // move from arg0, sign-extended from half {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0 {name: "MOVWF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // int32 -> float32 {name: "MOVWD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // int32 -> float64 @@ -366,6 +375,23 @@ func init() { // gets correctly ordered with respect to GC safepoints. // arg0=ptr/int arg1=mem, output=int/ptr {name: "MOVWconvert", argLength: 2, reg: gp11, asm: "MOVW"}, + + // Constant flag values. For any comparison, there are 5 possible + // outcomes: the three from the signed total order (<,==,>) and the + // three from the unsigned total order. The == cases overlap. + // Note: there's a sixth "unordered" outcome for floating-point + // comparisons, but we don't use such a beast yet. + // These ops are for temporary use by rewrite rules. They + // cannot appear in the generated assembly. + {name: "FlagEQ"}, // equal + {name: "FlagLT_ULT"}, // signed < and unsigned < + {name: "FlagLT_UGT"}, // signed < and unsigned > + {name: "FlagGT_UGT"}, // signed > and unsigned < + {name: "FlagGT_ULT"}, // signed > and unsigned > + + // (InvertFlags (CMP a b)) == (CMP b a) + // InvertFlags is a pseudo-op which can't appear in assembly output. + {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 } blocks := []blockData{ diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go index 5e819240c5..07c439de4b 100644 --- a/src/cmd/compile/internal/ssa/op.go +++ b/src/cmd/compile/internal/ssa/op.go @@ -156,7 +156,8 @@ func MakeSizeAndAlign(size, align int64) SizeAndAlign { func (op Op) isTupleGenerator() bool { switch op { case OpAdd32carry, OpSub32carry, OpMul32uhilo, - OpARMADDS, OpARMSUBS, OpARMMULLU: + OpARMADDS, OpARMSUBS, OpARMMULLU, + OpARMADDSconst, OpARMSUBSconst, OpARMRSBSconst: return true } return false diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 053ba435ea..99982c0aab 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -360,9 +360,15 @@ const ( OpARMMOD OpARMMODU OpARMADDS + OpARMADDSconst OpARMADC + OpARMADCconst OpARMSUBS + OpARMSUBSconst + OpARMRSBSconst OpARMSBC + OpARMSBCconst + OpARMRSCconst OpARMMULLU OpARMMULA OpARMADDF @@ -420,6 +426,7 @@ const ( OpARMMOVBUreg OpARMMOVHreg OpARMMOVHUreg + OpARMMOVWreg OpARMMOVWF OpARMMOVWD OpARMMOVWUF @@ -458,6 +465,12 @@ const ( OpARMLoweredMoveU OpARMLoweredGetClosurePtr OpARMMOVWconvert + OpARMFlagEQ + OpARMFlagLT_ULT + OpARMFlagLT_UGT + OpARMFlagGT_UGT + OpARMFlagGT_ULT + OpARMInvertFlags OpPPC64ADD OpPPC64ADDconst @@ -4212,6 +4225,21 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "ADDSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 + }, + clobbers: 4294967296, // FLAGS + outputs: []regMask{ + 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, { name: "ADC", argLen: 3, @@ -4228,6 +4256,21 @@ var opcodeTable = [...]opInfo{ }, 
}, }, + { + name: "ADCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.AADC, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294967296}, // FLAGS + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []regMask{ + 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, { name: "SUBS", argLen: 2, @@ -4243,6 +4286,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SUBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 + }, + clobbers: 4294967296, // FLAGS + outputs: []regMask{ + 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "RSBSconst", + auxType: auxInt32, + argLen: 1, + asm: arm.ARSB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 + }, + clobbers: 4294967296, // FLAGS + outputs: []regMask{ + 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, { name: "SBC", argLen: 3, @@ -4258,6 +4331,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SBCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ASBC, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294967296}, // FLAGS + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []regMask{ + 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, + { + name: "RSCconst", + auxType: auxInt32, + argLen: 2, + asm: arm.ARSC, + reg: regInfo{ + inputs: []inputInfo{ + {1, 4294967296}, // FLAGS + {0, 5119}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + outputs: []regMask{ + 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, { name: "MULLU", argLen: 2, @@ -5049,6 +5152,19 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "MOVWreg", + argLen: 1, + asm: arm.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 6143}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 + }, + outputs: []regMask{ + 5119, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 + }, + }, + }, { name: "MOVWF", argLen: 1, @@ -5492,6 +5608,36 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "FlagEQ", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT_ULT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagLT_UGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT_UGT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "FlagGT_ULT", + argLen: 0, + reg: regInfo{}, + }, + { + name: "InvertFlags", + argLen: 1, + reg: regInfo{}, + }, { name: "ADD", diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index f8165f9473..c36976c953 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -8,10 +8,20 @@ import "math" var _ = math.MinInt8 // in case not otherwise used func rewriteValueARM(v *Value, config *Config) bool { switch v.Op { + case OpARMADC: + return rewriteValueARM_OpARMADC(v, config) + case OpARMADCconst: + return rewriteValueARM_OpARMADCconst(v, config) case OpARMADD: return rewriteValueARM_OpARMADD(v, config) + case OpARMADDS: + return rewriteValueARM_OpARMADDS(v, config) case OpARMADDconst: return rewriteValueARM_OpARMADDconst(v, config) + case OpARMAND: + return rewriteValueARM_OpARMAND(v, config) + case OpARMANDconst: + return rewriteValueARM_OpARMANDconst(v, config) case OpAdd16: return rewriteValueARM_OpAdd16(v, config) case OpAdd32: @@ -38,6 +48,14 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpAnd8(v, config) case OpAndB: return rewriteValueARM_OpAndB(v, config) + case OpARMBIC: + return rewriteValueARM_OpARMBIC(v, config) + case 
OpARMBICconst: + return rewriteValueARM_OpARMBICconst(v, config) + case OpARMCMP: + return rewriteValueARM_OpARMCMP(v, config) + case OpARMCMPconst: + return rewriteValueARM_OpARMCMPconst(v, config) case OpClosureCall: return rewriteValueARM_OpClosureCall(v, config) case OpCom16: @@ -82,6 +100,10 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpCvt64Fto32F(v, config) case OpCvt64Fto32U: return rewriteValueARM_OpCvt64Fto32U(v, config) + case OpARMDIV: + return rewriteValueARM_OpARMDIV(v, config) + case OpARMDIVU: + return rewriteValueARM_OpARMDIVU(v, config) case OpDeferCall: return rewriteValueARM_OpDeferCall(v, config) case OpDiv16: @@ -114,6 +136,8 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpEqB(v, config) case OpEqPtr: return rewriteValueARM_OpEqPtr(v, config) + case OpARMEqual: + return rewriteValueARM_OpARMEqual(v, config) case OpGeq16: return rewriteValueARM_OpGeq16(v, config) case OpGeq16U: @@ -150,6 +174,14 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpGreater8(v, config) case OpGreater8U: return rewriteValueARM_OpGreater8U(v, config) + case OpARMGreaterEqual: + return rewriteValueARM_OpARMGreaterEqual(v, config) + case OpARMGreaterEqualU: + return rewriteValueARM_OpARMGreaterEqualU(v, config) + case OpARMGreaterThan: + return rewriteValueARM_OpARMGreaterThan(v, config) + case OpARMGreaterThanU: + return rewriteValueARM_OpARMGreaterThanU(v, config) case OpHmul16: return rewriteValueARM_OpHmul16(v, config) case OpHmul16u: @@ -202,8 +234,18 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpLess8(v, config) case OpLess8U: return rewriteValueARM_OpLess8U(v, config) + case OpARMLessEqual: + return rewriteValueARM_OpARMLessEqual(v, config) + case OpARMLessEqualU: + return rewriteValueARM_OpARMLessEqualU(v, config) + case OpARMLessThan: + return rewriteValueARM_OpARMLessThan(v, config) + case OpARMLessThanU: + return rewriteValueARM_OpARMLessThanU(v, config) case OpLoad: return rewriteValueARM_OpLoad(v, config) + case OpARMLoweredZeromask: + return rewriteValueARM_OpARMLoweredZeromask(v, config) case OpLrot16: return rewriteValueARM_OpLrot16(v, config) case OpLrot32: @@ -236,8 +278,12 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpLsh8x8(v, config) case OpARMMOVBUload: return rewriteValueARM_OpARMMOVBUload(v, config) + case OpARMMOVBUreg: + return rewriteValueARM_OpARMMOVBUreg(v, config) case OpARMMOVBload: return rewriteValueARM_OpARMMOVBload(v, config) + case OpARMMOVBreg: + return rewriteValueARM_OpARMMOVBreg(v, config) case OpARMMOVBstore: return rewriteValueARM_OpARMMOVBstore(v, config) case OpARMMOVDload: @@ -250,14 +296,24 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpARMMOVFstore(v, config) case OpARMMOVHUload: return rewriteValueARM_OpARMMOVHUload(v, config) + case OpARMMOVHUreg: + return rewriteValueARM_OpARMMOVHUreg(v, config) case OpARMMOVHload: return rewriteValueARM_OpARMMOVHload(v, config) + case OpARMMOVHreg: + return rewriteValueARM_OpARMMOVHreg(v, config) case OpARMMOVHstore: return rewriteValueARM_OpARMMOVHstore(v, config) case OpARMMOVWload: return rewriteValueARM_OpARMMOVWload(v, config) case OpARMMOVWstore: return rewriteValueARM_OpARMMOVWstore(v, config) + case OpARMMUL: + return rewriteValueARM_OpARMMUL(v, config) + case OpARMMULA: + return rewriteValueARM_OpARMMULA(v, config) + case OpARMMVN: + return rewriteValueARM_OpARMMVN(v, config) case OpMod16: return 
rewriteValueARM_OpMod16(v, config) case OpMod16u: @@ -312,6 +368,12 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpNilCheck(v, config) case OpNot: return rewriteValueARM_OpNot(v, config) + case OpARMNotEqual: + return rewriteValueARM_OpARMNotEqual(v, config) + case OpARMOR: + return rewriteValueARM_OpARMOR(v, config) + case OpARMORconst: + return rewriteValueARM_OpARMORconst(v, config) case OpOffPtr: return rewriteValueARM_OpOffPtr(v, config) case OpOr16: @@ -322,6 +384,12 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpOr8(v, config) case OpOrB: return rewriteValueARM_OpOrB(v, config) + case OpARMRSB: + return rewriteValueARM_OpARMRSB(v, config) + case OpARMRSBconst: + return rewriteValueARM_OpARMRSBconst(v, config) + case OpARMRSCconst: + return rewriteValueARM_OpARMRSCconst(v, config) case OpRsh16Ux16: return rewriteValueARM_OpRsh16Ux16(v, config) case OpRsh16Ux32: @@ -370,6 +438,28 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpRsh8x64(v, config) case OpRsh8x8: return rewriteValueARM_OpRsh8x8(v, config) + case OpARMSBC: + return rewriteValueARM_OpARMSBC(v, config) + case OpARMSBCconst: + return rewriteValueARM_OpARMSBCconst(v, config) + case OpARMSLL: + return rewriteValueARM_OpARMSLL(v, config) + case OpARMSLLconst: + return rewriteValueARM_OpARMSLLconst(v, config) + case OpARMSRA: + return rewriteValueARM_OpARMSRA(v, config) + case OpARMSRAconst: + return rewriteValueARM_OpARMSRAconst(v, config) + case OpARMSRL: + return rewriteValueARM_OpARMSRL(v, config) + case OpARMSRLconst: + return rewriteValueARM_OpARMSRLconst(v, config) + case OpARMSUB: + return rewriteValueARM_OpARMSUB(v, config) + case OpARMSUBS: + return rewriteValueARM_OpARMSUBS(v, config) + case OpARMSUBconst: + return rewriteValueARM_OpARMSUBconst(v, config) case OpSelect0: return rewriteValueARM_OpSelect0(v, config) case OpSelect1: @@ -410,6 +500,10 @@ func rewriteValueARM(v *Value, config *Config) bool { return rewriteValueARM_OpTrunc32to16(v, config) case OpTrunc32to8: return rewriteValueARM_OpTrunc32to8(v, config) + case OpARMXOR: + return rewriteValueARM_OpARMXOR(v, config) + case OpARMXORconst: + return rewriteValueARM_OpARMXORconst(v, config) case OpXor16: return rewriteValueARM_OpXor16(v, config) case OpXor32: @@ -429,6 +523,86 @@ func rewriteValueARM(v *Value, config *Config) bool { } return false } +func rewriteValueARM_OpARMADC(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ADC (MOVWconst [c]) x flags) + // cond: + // result: (ADCconst [c] x flags) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + flags := v.Args[2] + v.reset(OpARMADCconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(flags) + return true + } + // match: (ADC x (MOVWconst [c]) flags) + // cond: + // result: (ADCconst [c] x flags) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + flags := v.Args[2] + v.reset(OpARMADCconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(flags) + return true + } + return false +} +func rewriteValueARM_OpARMADCconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ADCconst [c] (ADDconst [d] x) flags) + // cond: + // result: (ADCconst [int64(int32(c+d))] x flags) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + flags := v.Args[1] + v.reset(OpARMADCconst) + v.AuxInt = int64(int32(c + d)) + 
v.AddArg(x) + v.AddArg(flags) + return true + } + // match: (ADCconst [c] (SUBconst [d] x) flags) + // cond: + // result: (ADCconst [int64(int32(c-d))] x flags) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + flags := v.Args[1] + v.reset(OpARMADCconst) + v.AuxInt = int64(int32(c - d)) + v.AddArg(x) + v.AddArg(flags) + return true + } + return false +} func rewriteValueARM_OpARMADD(v *Value, config *Config) bool { b := v.Block _ = b @@ -462,6 +636,24 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool { v.AddArg(x) return true } + // match: (ADD x (RSBconst [0] y)) + // cond: + // result: (SUB x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMRSBconst { + break + } + if v_1.AuxInt != 0 { + break + } + y := v_1.Args[0] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } // match: (ADD (MUL x y) a) // cond: // result: (MULA x y a) @@ -498,6 +690,41 @@ func rewriteValueARM_OpARMADD(v *Value, config *Config) bool { } return false } +func rewriteValueARM_OpARMADDS(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ADDS (MOVWconst [c]) x) + // cond: + // result: (ADDSconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMADDSconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (ADDS x (MOVWconst [c])) + // cond: + // result: (ADDSconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMADDSconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool { b := v.Block _ = b @@ -519,6 +746,204 @@ func rewriteValueARM_OpARMADDconst(v *Value, config *Config) bool { v.AddArg(ptr) return true } + // match: (ADDconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ADDconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(int32(c+d))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(c + d)) + return true + } + // match: (ADDconst [c] (ADDconst [d] x)) + // cond: + // result: (ADDconst [int64(int32(c+d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int64(int32(c + d)) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (SUBconst [d] x)) + // cond: + // result: (ADDconst [int64(int32(c-d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int64(int32(c - d)) + v.AddArg(x) + return true + } + // match: (ADDconst [c] (RSBconst [d] x)) + // cond: + // result: (RSBconst [int64(int32(c+d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMRSBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int64(int32(c + d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMAND(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (AND (MOVWconst [c]) x) + // cond: + // result: (ANDconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := 
v_0.AuxInt + x := v.Args[1] + v.reset(OpARMANDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (AND x (MOVWconst [c])) + // cond: + // result: (ANDconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMANDconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (AND x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (AND x (MVN y)) + // cond: + // result: (BIC x y) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMVN { + break + } + y := v_1.Args[0] + v.reset(OpARMBIC) + v.AddArg(x) + v.AddArg(y) + return true + } + return false +} +func rewriteValueARM_OpARMANDconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ANDconst [0] _) + // cond: + // result: (MOVWconst [0]) + for { + if v.AuxInt != 0 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (ANDconst [c] x) + // cond: int32(c)==-1 + // result: x + for { + c := v.AuxInt + x := v.Args[0] + if !(int32(c) == -1) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (ANDconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [c&d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = c & d + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // cond: + // result: (ANDconst [c&d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMANDconst) + v.AuxInt = c & d + v.AddArg(x) + return true + } return false } func rewriteValueARM_OpAdd16(v *Value, config *Config) bool { @@ -718,23 +1143,283 @@ func rewriteValueARM_OpAndB(v *Value, config *Config) bool { return true } } -func rewriteValueARM_OpClosureCall(v *Value, config *Config) bool { +func rewriteValueARM_OpARMBIC(v *Value, config *Config) bool { b := v.Block _ = b - // match: (ClosureCall [argwid] entry closure mem) + // match: (BIC x (MOVWconst [c])) // cond: - // result: (CALLclosure [argwid] entry closure mem) + // result: (BICconst [c] x) for { - argwid := v.AuxInt - entry := v.Args[0] - closure := v.Args[1] - mem := v.Args[2] - v.reset(OpARMCALLclosure) - v.AuxInt = argwid - v.AddArg(entry) - v.AddArg(closure) - v.AddArg(mem) - return true + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMBICconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (BIC x x) + // cond: + // result: (MOVWconst [0]) + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM_OpARMBICconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (BICconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (BICconst [c] _) + // cond: int32(c)==-1 + // result: (MOVWconst [0]) + for { + c := v.AuxInt + if !(int32(c) == -1) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (BICconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [d&^c]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + 
v.reset(OpARMMOVWconst) + v.AuxInt = d &^ c + return true + } + return false +} +func rewriteValueARM_OpARMCMP(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMP x (MOVWconst [c])) + // cond: + // result: (CMPconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMCMPconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (CMP (MOVWconst [c]) x) + // cond: + // result: (InvertFlags (CMPconst [c] x)) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMInvertFlags) + v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags) + v0.AuxInt = c + v0.AddArg(x) + v.AddArg(v0) + return true + } + return false +} +func rewriteValueARM_OpARMCMPconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)==int32(y) + // result: (FlagEQ) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + y := v.AuxInt + if !(int32(x) == int32(y)) { + break + } + v.reset(OpARMFlagEQ) + return true + } + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)uint32(y) + // result: (FlagLT_UGT) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + y := v.AuxInt + if !(int32(x) < int32(y) && uint32(x) > uint32(y)) { + break + } + v.reset(OpARMFlagLT_UGT) + return true + } + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(y) && uint32(x) int32(y) && uint32(x) < uint32(y)) { + break + } + v.reset(OpARMFlagGT_ULT) + return true + } + // match: (CMPconst (MOVWconst [x]) [y]) + // cond: int32(x)>int32(y) && uint32(x)>uint32(y) + // result: (FlagGT_UGT) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + x := v_0.AuxInt + y := v.AuxInt + if !(int32(x) > int32(y) && uint32(x) > uint32(y)) { + break + } + v.reset(OpARMFlagGT_UGT) + return true + } + // match: (CMPconst (MOVBUreg _) [c]) + // cond: 0xff < c + // result: (FlagLT_ULT) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVBUreg { + break + } + c := v.AuxInt + if !(0xff < c) { + break + } + v.reset(OpARMFlagLT_ULT) + return true + } + // match: (CMPconst (MOVHUreg _) [c]) + // cond: 0xffff < c + // result: (FlagLT_ULT) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVHUreg { + break + } + c := v.AuxInt + if !(0xffff < c) { + break + } + v.reset(OpARMFlagLT_ULT) + return true + } + // match: (CMPconst (ANDconst _ [m]) [n]) + // cond: 0 <= int32(m) && int32(m) < int32(n) + // result: (FlagLT_ULT) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { + break + } + m := v_0.AuxInt + n := v.AuxInt + if !(0 <= int32(m) && int32(m) < int32(n)) { + break + } + v.reset(OpARMFlagLT_ULT) + return true + } + // match: (CMPconst (SRLconst _ [c]) [n]) + // cond: 0 <= n && 0 < c && c <= 32 && (1< (SignExt16to32 x) (SignExt16to32 y)) [16]) + // result: (MOVWconst [1]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRAconst) - v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32()) - v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v.AuxInt = 16 + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } -} -func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: 
(Hmul16u x y) + // match: (GreaterEqual (FlagLT_ULT)) // cond: - // result: (SRLconst (MUL (ZeroExt16to32 x) (ZeroExt16to32 y)) [16]) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRLconst) - v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt32()) - v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v.AuxInt = 16 + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpHmul32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Hmul32 x y) + // match: (GreaterEqual (FlagLT_UGT)) // cond: - // result: (HMUL x y) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMHMUL) - v.AddArg(x) - v.AddArg(y) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Hmul32u x y) + // match: (GreaterEqual (FlagGT_ULT)) // cond: - // result: (HMULU x y) + // result: (MOVWconst [1]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMHMULU) - v.AddArg(x) - v.AddArg(y) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } -} -func rewriteValueARM_OpHmul8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Hmul8 x y) + // match: (GreaterEqual (FlagGT_UGT)) // cond: - // result: (SRAconst (MUL (SignExt8to32 x) (SignExt8to32 y)) [8]) + // result: (MOVWconst [1]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRAconst) - v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16()) - v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v.AuxInt = 8 + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } -} -func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Hmul8u x y) + // match: (GreaterEqual (InvertFlags x)) // cond: - // result: (SRLconst (MUL (ZeroExt8to32 x) (ZeroExt8to32 y)) [8]) + // result: (LessEqual x) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRLconst) - v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16()) - v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - v.AuxInt = 8 + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessEqual) + v.AddArg(x) return true } + return false } -func rewriteValueARM_OpInterCall(v *Value, config *Config) bool { +func rewriteValueARM_OpARMGreaterEqualU(v *Value, config *Config) bool { b := v.Block _ = b - // match: (InterCall [argwid] entry mem) + // match: (GreaterEqualU (FlagEQ)) // cond: - // result: (CALLinter [argwid] entry mem) + // result: (MOVWconst [1]) for { - argwid := v.AuxInt - entry := v.Args[0] - mem := v.Args[1] - v.reset(OpARMCALLinter) - v.AuxInt = argwid - v.AddArg(entry) - v.AddArg(mem) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { + break + } + v.reset(OpARMMOVWconst) + 
v.AuxInt = 1 return true } -} -func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (IsInBounds idx len) + // match: (GreaterEqualU (FlagLT_ULT)) // cond: - // result: (LessThanU (CMP idx len)) + // result: (MOVWconst [0]) for { - idx := v.Args[0] - len := v.Args[1] - v.reset(OpARMLessThanU) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (IsNonNil ptr) + // match: (GreaterEqualU (FlagLT_UGT)) // cond: - // result: (NotEqual (CMPconst [0] ptr)) + // result: (MOVWconst [1]) for { - ptr := v.Args[0] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags) - v0.AuxInt = 0 - v0.AddArg(ptr) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } -} -func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (IsSliceInBounds idx len) + // match: (GreaterEqualU (FlagGT_ULT)) // cond: - // result: (LessEqualU (CMP idx len)) + // result: (MOVWconst [0]) for { - idx := v.Args[0] - len := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v0.AddArg(idx) - v0.AddArg(len) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (GreaterEqualU (FlagGT_UGT)) + // cond: + // result: (MOVWconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (GreaterEqualU (InvertFlags x)) + // cond: + // result: (LessEqualU x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessEqualU) + v.AddArg(x) return true } + return false } -func rewriteValueARM_OpLeq16(v *Value, config *Config) bool { +func rewriteValueARM_OpARMGreaterThan(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq16 x y) + // match: (GreaterThan (FlagEQ)) // cond: - // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessEqual) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (GreaterThan (FlagLT_ULT)) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (GreaterThan (FlagLT_UGT)) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (GreaterThan (FlagGT_ULT)) + // cond: + // result: (MOVWconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (GreaterThan (FlagGT_UGT)) + // cond: + // result: (MOVWconst [1]) + 
for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (GreaterThan (InvertFlags x)) + // cond: + // result: (LessThan x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessThan) + v.AddArg(x) return true } + return false } -func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool { +func rewriteValueARM_OpARMGreaterThanU(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq16U x y) + // match: (GreaterThanU (FlagEQ)) // cond: - // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (GreaterThanU (FlagLT_ULT)) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (GreaterThanU (FlagLT_UGT)) + // cond: + // result: (MOVWconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (GreaterThanU (FlagGT_ULT)) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (GreaterThanU (FlagGT_UGT)) + // cond: + // result: (MOVWconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (GreaterThanU (InvertFlags x)) + // cond: + // result: (LessThanU x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMLessThanU) + v.AddArg(x) return true } + return false } -func rewriteValueARM_OpLeq32(v *Value, config *Config) bool { +func rewriteValueARM_OpHmul16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq32 x y) + // match: (Hmul16 x y) // cond: - // result: (LessEqual (CMP x y)) + // result: (SRAconst (MUL (SignExt16to32 x) (SignExt16to32 y)) [16]) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMLessEqual) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) + v.reset(OpARMSRAconst) + v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt32()) + v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) + v.AuxInt = 16 return true } } -func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool { +func rewriteValueARM_OpHmul16u(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq32F x y) + // match: (Hmul16u x y) // cond: - // result: (GreaterEqual (CMPF y x)) + // result: (SRLconst (MUL (ZeroExt16to32 x) (ZeroExt16to32 y)) [16]) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags) - v0.AddArg(y) - v0.AddArg(x) + v.reset(OpARMSRLconst) + v0 := b.NewValue0(v.Line, OpARMMUL, 
config.fe.TypeUInt32()) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) + v.AuxInt = 16 return true } } -func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool { +func rewriteValueARM_OpHmul32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq32U x y) + // match: (Hmul32 x y) // cond: - // result: (LessEqualU (CMP x y)) + // result: (HMUL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + v.reset(OpARMHMUL) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool { +func rewriteValueARM_OpHmul32u(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq64F x y) + // match: (Hmul32u x y) // cond: - // result: (GreaterEqual (CMPD y x)) + // result: (HMULU x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMGreaterEqual) - v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags) - v0.AddArg(y) - v0.AddArg(x) - v.AddArg(v0) + v.reset(OpARMHMULU) + v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpLeq8(v *Value, config *Config) bool { +func rewriteValueARM_OpHmul8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq8 x y) + // match: (Hmul8 x y) // cond: - // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) + // result: (SRAconst (MUL (SignExt8to32 x) (SignExt8to32 y)) [8]) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMLessEqual) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v.reset(OpARMSRAconst) + v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeInt16()) v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) v1.AddArg(x) v0.AddArg(v1) @@ -1933,20 +2868,21 @@ func rewriteValueARM_OpLeq8(v *Value, config *Config) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) + v.AuxInt = 8 return true } } -func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool { +func rewriteValueARM_OpHmul8u(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Leq8U x y) + // match: (Hmul8u x y) // cond: - // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + // result: (SRLconst (MUL (ZeroExt8to32 x) (ZeroExt8to32 y)) [8]) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMLessEqualU) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v.reset(OpARMSRLconst) + v0 := b.NewValue0(v.Line, OpARMMUL, config.fe.TypeUInt16()) v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) v1.AddArg(x) v0.AddArg(v1) @@ -1954,10 +2890,230 @@ func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool { v2.AddArg(y) v0.AddArg(v2) v.AddArg(v0) + v.AuxInt = 8 return true } } -func rewriteValueARM_OpLess16(v *Value, config *Config) bool { +func rewriteValueARM_OpInterCall(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (InterCall [argwid] entry mem) + // cond: + // result: (CALLinter [argwid] entry mem) + for { + argwid := v.AuxInt + entry := v.Args[0] + mem := v.Args[1] + v.reset(OpARMCALLinter) + v.AuxInt = argwid + v.AddArg(entry) + v.AddArg(mem) + return true + } +} +func rewriteValueARM_OpIsInBounds(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (IsInBounds idx len) + // cond: + // result: (LessThanU (CMP idx len)) + for { + idx := v.Args[0] + len := v.Args[1] + v.reset(OpARMLessThanU) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v0.AddArg(idx) 
+ v0.AddArg(len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpIsNonNil(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (IsNonNil ptr) + // cond: + // result: (NotEqual (CMPconst [0] ptr)) + for { + ptr := v.Args[0] + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags) + v0.AuxInt = 0 + v0.AddArg(ptr) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpIsSliceInBounds(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (IsSliceInBounds idx len) + // cond: + // result: (LessEqualU (CMP idx len)) + for { + idx := v.Args[0] + len := v.Args[1] + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v0.AddArg(idx) + v0.AddArg(len) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq16 x y) + // cond: + // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMLessEqual) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq16U(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq16U x y) + // cond: + // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq32 x y) + // cond: + // result: (LessEqual (CMP x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMLessEqual) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq32F x y) + // cond: + // result: (GreaterEqual (CMPF y x)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq32U(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq32U x y) + // cond: + // result: (LessEqualU (CMP x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq64F x y) + // cond: + // result: (GreaterEqual (CMPD y x)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMGreaterEqual) + v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq8 x y) + // cond: + // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMLessEqual) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v1 := 
b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLeq8U(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Leq8U x y) + // cond: + // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMLessEqualU) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpLess16(v *Value, config *Config) bool { b := v.Block _ = b // match: (Less16 x y) @@ -2109,3603 +3265,6545 @@ func rewriteValueARM_OpLess8U(v *Value, config *Config) bool { return true } } -func rewriteValueARM_OpLoad(v *Value, config *Config) bool { +func rewriteValueARM_OpARMLessEqual(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Load ptr mem) - // cond: t.IsBoolean() - // result: (MOVBUload ptr mem) + // match: (LessEqual (FlagEQ)) + // cond: + // result: (MOVWconst [1]) for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(t.IsBoolean()) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { break } - v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Load ptr mem) - // cond: (is8BitInt(t) && isSigned(t)) - // result: (MOVBload ptr mem) + // match: (LessEqual (FlagLT_ULT)) + // cond: + // result: (MOVWconst [1]) for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(is8BitInt(t) && isSigned(t)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { break } - v.reset(OpARMMOVBload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Load ptr mem) - // cond: (is8BitInt(t) && !isSigned(t)) - // result: (MOVBUload ptr mem) + // match: (LessEqual (FlagLT_UGT)) + // cond: + // result: (MOVWconst [1]) for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(is8BitInt(t) && !isSigned(t)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { break } - v.reset(OpARMMOVBUload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Load ptr mem) - // cond: (is16BitInt(t) && isSigned(t)) - // result: (MOVHload ptr mem) + // match: (LessEqual (FlagGT_ULT)) + // cond: + // result: (MOVWconst [0]) for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(is16BitInt(t) && isSigned(t)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { break } - v.reset(OpARMMOVHload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (Load ptr mem) - // cond: (is16BitInt(t) && !isSigned(t)) - // result: (MOVHUload ptr mem) + // match: (LessEqual (FlagGT_UGT)) + // cond: + // result: (MOVWconst [0]) for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(is16BitInt(t) && !isSigned(t)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { break } - v.reset(OpARMMOVHUload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (Load ptr mem) - // cond: (is32BitInt(t) || isPtr(t)) - // result: (MOVWload ptr mem) + // match: (LessEqual (InvertFlags x)) + // cond: + // result: (GreaterEqual x) for { - t := v.Type - ptr := v.Args[0] - mem := 
v.Args[1] - if !(is32BitInt(t) || isPtr(t)) { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { break } - v.reset(OpARMMOVWload) - v.AddArg(ptr) - v.AddArg(mem) + x := v_0.Args[0] + v.reset(OpARMGreaterEqual) + v.AddArg(x) return true } - // match: (Load ptr mem) - // cond: is32BitFloat(t) - // result: (MOVFload ptr mem) + return false +} +func rewriteValueARM_OpARMLessEqualU(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (LessEqualU (FlagEQ)) + // cond: + // result: (MOVWconst [1]) for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(is32BitFloat(t)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { break } - v.reset(OpARMMOVFload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Load ptr mem) - // cond: is64BitFloat(t) - // result: (MOVDload ptr mem) + // match: (LessEqualU (FlagLT_ULT)) + // cond: + // result: (MOVWconst [1]) for { - t := v.Type - ptr := v.Args[0] - mem := v.Args[1] - if !(is64BitFloat(t)) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { break } - v.reset(OpARMMOVDload) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - return false -} -func rewriteValueARM_OpLrot16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lrot16 x [c]) + // match: (LessEqualU (FlagLT_UGT)) // cond: - // result: (OR (SLLconst x [c&15]) (SRLconst x [16-c&15])) + // result: (MOVWconst [0]) for { - t := v.Type - x := v.Args[0] - c := v.AuxInt - v.reset(OpARMOR) - v0 := b.NewValue0(v.Line, OpARMSLLconst, t) - v0.AddArg(x) - v0.AuxInt = c & 15 - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMSRLconst, t) - v1.AddArg(x) - v1.AuxInt = 16 - c&15 - v.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpLrot32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lrot32 x [c]) + // match: (LessEqualU (FlagGT_ULT)) // cond: - // result: (SRRconst x [32-c&31]) + // result: (MOVWconst [1]) for { - x := v.Args[0] - c := v.AuxInt - v.reset(OpARMSRRconst) - v.AddArg(x) - v.AuxInt = 32 - c&31 + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } -} -func rewriteValueARM_OpLrot8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lrot8 x [c]) + // match: (LessEqualU (FlagGT_UGT)) // cond: - // result: (OR (SLLconst x [c&7]) (SRLconst x [8-c&7])) + // result: (MOVWconst [0]) for { - t := v.Type - x := v.Args[0] - c := v.AuxInt - v.reset(OpARMOR) - v0 := b.NewValue0(v.Line, OpARMSLLconst, t) - v0.AddArg(x) - v0.AuxInt = c & 7 - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMSRLconst, t) - v1.AddArg(x) - v1.AuxInt = 8 - c&7 - v.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh16x16 x y) + // match: (LessEqualU (InvertFlags x)) // cond: - // result: (SLL x (ZeroExt16to32 y)) + // result: (GreaterEqualU x) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterEqualU) v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) return true } + return false } -func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool { +func 
rewriteValueARM_OpARMLessThan(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Lsh16x32 x y) + // match: (LessThan (FlagEQ)) // cond: - // result: (SLL x y) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v.AddArg(y) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh16x64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SLLconst x [c]) + // match: (LessThan (FlagLT_ULT)) + // cond: + // result: (MOVWconst [1]) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { break } - c := v_1.AuxInt - if !(uint64(c) < 16) { + v.reset(OpARMMOVWconst) + v.AuxInt = 1 + return true + } + // match: (LessThan (FlagLT_UGT)) + // cond: + // result: (MOVWconst [1]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { break } - v.reset(OpARMSLLconst) - v.AddArg(x) - v.AuxInt = c + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Lsh16x64 _ (Const64 [c])) - // cond: uint64(c) >= 16 - // result: (Const16 [0]) + // match: (LessThan (FlagGT_ULT)) + // cond: + // result: (MOVWconst [0]) for { - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { break } - c := v_1.AuxInt - if !(uint64(c) >= 16) { + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + // match: (LessThan (FlagGT_UGT)) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { break } - v.reset(OpConst16) + v.reset(OpARMMOVWconst) v.AuxInt = 0 return true } - return false -} -func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh16x8 x y) + // match: (LessThan (InvertFlags x)) // cond: - // result: (SLL x (ZeroExt8to32 y)) + // result: (GreaterThan x) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterThan) v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) return true } + return false } -func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool { +func rewriteValueARM_OpARMLessThanU(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Lsh32x16 x y) + // match: (LessThanU (FlagEQ)) // cond: - // result: (SLL x (ZeroExt16to32 y)) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh32x32 x y) + // match: (LessThanU (FlagLT_ULT)) // cond: - // result: (SLL x y) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SLLconst x [c]) + // result: (MOVWconst [1]) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { + v_0 := v.Args[0] + if 
v_0.Op != OpARMFlagLT_ULT { break } - v.reset(OpARMSLLconst) - v.AddArg(x) - v.AuxInt = c + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Lsh32x64 _ (Const64 [c])) - // cond: uint64(c) >= 32 - // result: (Const32 [0]) + // match: (LessThanU (FlagLT_UGT)) + // cond: + // result: (MOVWconst [0]) for { - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 32) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { break } - v.reset(OpConst32) + v.reset(OpARMMOVWconst) v.AuxInt = 0 return true } - return false -} -func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh32x8 x y) - // cond: - // result: (SLL x (ZeroExt8to32 y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh8x16 x y) - // cond: - // result: (SLL x (ZeroExt16to32 y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh8x32 x y) + // match: (LessThanU (FlagGT_ULT)) // cond: - // result: (SLL x y) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SLLconst x [c]) + // result: (MOVWconst [1]) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 8) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { break } - v.reset(OpARMSLLconst) - v.AddArg(x) - v.AuxInt = c + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Lsh8x64 _ (Const64 [c])) - // cond: uint64(c) >= 8 - // result: (Const8 [0]) + // match: (LessThanU (FlagGT_UGT)) + // cond: + // result: (MOVWconst [0]) for { - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 8) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { break } - v.reset(OpConst8) + v.reset(OpARMMOVWconst) v.AuxInt = 0 return true } - return false -} -func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Lsh8x8 x y) + // match: (LessThanU (InvertFlags x)) // cond: - // result: (SLL x (ZeroExt8to32 y)) + // result: (GreaterThanU x) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSLL) + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { + break + } + x := v_0.Args[0] + v.reset(OpARMGreaterThanU) v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) return true } + return false } -func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool { +func rewriteValueARM_OpLoad(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: - // result: (MOVBUload [off1+off2] {sym} ptr mem) + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + t := v.Type + ptr := v.Args[0] + mem := 
v.Args[1] + if !(t.IsBoolean()) { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] v.reset(OpARMMOVBUload) - v.AuxInt = off1 + off2 - v.Aux = sym v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (Load ptr mem) + // cond: (is8BitInt(t) && isSigned(t)) + // result: (MOVBload ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + t := v.Type + ptr := v.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(is8BitInt(t) && isSigned(t)) { break } - v.reset(OpARMMOVBUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.reset(OpARMMOVBload) v.AddArg(ptr) v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: - // result: (MOVBload [off1+off2] {sym} ptr mem) + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !isSigned(t)) + // result: (MOVBUload ptr mem) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + t := v.Type + ptr := v.Args[0] + mem := v.Args[1] + if !(is8BitInt(t) && !isSigned(t)) { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVBload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.reset(OpARMMOVBUload) v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (Load ptr mem) + // cond: (is16BitInt(t) && isSigned(t)) + // result: (MOVHload ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + t := v.Type + ptr := v.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(is16BitInt(t) && isSigned(t)) { break } - v.reset(OpARMMOVBload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.reset(OpARMMOVHload) v.AddArg(ptr) v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: - // result: (MOVBstore [off1+off2] {sym} ptr val mem) + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !isSigned(t)) + // result: (MOVHUload ptr mem) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + t := v.Type + ptr := v.Args[0] + mem := v.Args[1] + if !(is16BitInt(t) && !isSigned(t)) { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AuxInt = off1 + off2 - v.Aux = sym + v.reset(OpARMMOVHUload) v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // match: (Load ptr mem) + // cond: (is32BitInt(t) || isPtr(t)) + // result: (MOVWload ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := 
v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + t := v.Type + ptr := v.Args[0] + mem := v.Args[1] + if !(is32BitInt(t) || isPtr(t)) { break } - v.reset(OpARMMOVBstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) + v.reset(OpARMMOVWload) v.AddArg(ptr) - v.AddArg(val) v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) - // cond: - // result: (MOVDload [off1+off2] {sym} ptr mem) + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVFload ptr mem) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + t := v.Type + ptr := v.Args[0] + mem := v.Args[1] + if !(is32BitFloat(t)) { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVDload) - v.AuxInt = off1 + off2 - v.Aux = sym + v.reset(OpARMMOVFload) v.AddArg(ptr) v.AddArg(mem) return true } - // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVDload ptr mem) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] + t := v.Type + ptr := v.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(is64BitFloat(t)) { break } v.reset(OpARMMOVDload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(mem) return true } return false } -func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool { +func rewriteValueARM_OpARMLoweredZeromask(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // match: (LoweredZeromask (MOVWconst [0])) // cond: - // result: (MOVDstore [off1+off2] {sym} ptr val mem) + // result: (MOVWconst [0]) for { - off1 := v.AuxInt - sym := v.Aux v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + if v_0.Op != OpARMMOVWconst { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + if v_0.AuxInt != 0 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // match: (LoweredZeromask (MOVWconst [c])) + // cond: c != 0 + // result: (MOVWconst [0xffffffff]) for { - off1 := v.AuxInt - sym1 := v.Aux v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { + if v_0.Op != OpARMMOVWconst { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + c := v_0.AuxInt + if !(c != 0) { break } - v.reset(OpARMMOVDstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 0xffffffff return true } return false } -func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool { +func rewriteValueARM_OpLrot16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (Lrot16 x [c]) // cond: - // result: (MOVFload 
[off1+off2] {sym} ptr mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVFload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) - return true - } - // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // result: (OR (SLLconst x [c&15]) (SRLconst x [16-c&15])) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpARMMOVFload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + t := v.Type + x := v.Args[0] + c := v.AuxInt + v.reset(OpARMOR) + v0 := b.NewValue0(v.Line, OpARMSLLconst, t) + v0.AddArg(x) + v0.AuxInt = c & 15 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMSRLconst, t) + v1.AddArg(x) + v1.AuxInt = 16 - c&15 + v.AddArg(v1) return true } - return false } -func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool { +func rewriteValueARM_OpLrot32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // match: (Lrot32 x [c]) // cond: - // result: (MOVFstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVFstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // result: (SRRconst x [32-c&31]) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpARMMOVFstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + x := v.Args[0] + c := v.AuxInt + v.reset(OpARMSRRconst) + v.AddArg(x) + v.AuxInt = 32 - c&31 return true } - return false } -func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool { +func rewriteValueARM_OpLrot8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (Lrot8 x [c]) // cond: - // result: (MOVHUload [off1+off2] {sym} ptr mem) + // result: (OR (SLLconst x [c&7]) (SRLconst x [8-c&7])) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVHUload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + t := v.Type + x := v.Args[0] + c := v.AuxInt + v.reset(OpARMOR) + v0 := b.NewValue0(v.Line, OpARMSLLconst, t) + v0.AddArg(x) + v0.AuxInt = c & 7 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMSRLconst, t) + v1.AddArg(x) + v1.AuxInt = 8 - c&7 + v.AddArg(v1) return true } - // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVHUload [off1+off2] 
{mergeSym(sym1,sym2)} ptr mem) +} +func rewriteValueARM_OpLsh16x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Lsh16x16 x y) + // cond: + // result: (SLL x (ZeroExt16to32 y)) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpARMMOVHUload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSLL) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(y) + v.AddArg(v0) return true } - return false } -func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh16x32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (Lsh16x32 x y) // cond: - // result: (MOVHload [off1+off2] {sym} ptr mem) + // result: (SLL x y) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSLL) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpLsh16x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Lsh16x64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SLLconst x [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVHload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + c := v_1.AuxInt + if !(uint64(c) < 16) { + break + } + v.reset(OpARMSLLconst) + v.AddArg(x) + v.AuxInt = c return true } - // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) + // match: (Lsh16x64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + c := v_1.AuxInt + if !(uint64(c) >= 16) { break } - v.reset(OpARMMOVHload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpConst16) + v.AuxInt = 0 return true } return false } -func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh16x8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) + // match: (Lsh16x8 x y) // cond: - // result: (MOVHstore [off1+off2] {sym} ptr val mem) + // result: (SLL x (ZeroExt8to32 y)) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVHstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSLL) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val 
mem) +} +func rewriteValueARM_OpLsh32x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Lsh32x16 x y) + // cond: + // result: (SLL x (ZeroExt16to32 y)) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { - break - } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { - break - } - v.reset(OpARMMOVHstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSLL) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(y) + v.AddArg(v0) return true } - return false } -func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh32x32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) + // match: (Lsh32x32 x y) // cond: - // result: (MOVWload [off1+off2] {sym} ptr mem) + // result: (SLL x y) for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - mem := v.Args[1] - v.reset(OpARMMOVWload) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSLL) + v.AddArg(x) + v.AddArg(y) return true } - // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) +} +func rewriteValueARM_OpLsh32x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Lsh32x64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SLLconst x [c]) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + c := v_1.AuxInt + if !(uint64(c) < 32) { break } - v.reset(OpARMMOVWload) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - v.AddArg(mem) + v.reset(OpARMSLLconst) + v.AddArg(x) + v.AuxInt = c return true } - return false -} -func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) - // cond: - // result: (MOVWstore [off1+off2] {sym} ptr val mem) - for { - off1 := v.AuxInt - sym := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMADDconst { - break - } - off2 := v_0.AuxInt - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVWstore) - v.AuxInt = off1 + off2 - v.Aux = sym - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) - return true - } - // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) - // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + // match: (Lsh32x64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) for { - off1 := v.AuxInt - sym1 := v.Aux - v_0 := v.Args[0] - if v_0.Op != OpARMMOVWaddr { + v_1 := v.Args[1] + if v_1.Op != OpConst64 { break } - off2 := v_0.AuxInt - sym2 := v_0.Aux - ptr := v_0.Args[0] - val := v.Args[1] - mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + c := v_1.AuxInt + if !(uint64(c) >= 32) { break } - v.reset(OpARMMOVWstore) - v.AuxInt = off1 + off2 - v.Aux = mergeSym(sym1, sym2) - v.AddArg(ptr) - 
v.AddArg(val) - v.AddArg(mem) + v.reset(OpConst32) + v.AuxInt = 0 return true } return false } -func rewriteValueARM_OpMod16(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh32x8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Mod16 x y) + // match: (Lsh32x8 x y) // cond: - // result: (MOD (SignExt16to32 x) (SignExt16to32 y)) + // result: (SLL x (ZeroExt8to32 y)) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMMOD) - v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v0.AddArg(x) + v.reset(OpARMSLL) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpMod16u(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh8x16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Mod16u x y) + // match: (Lsh8x16 x y) // cond: - // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y)) + // result: (SLL x (ZeroExt16to32 y)) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMMODU) + v.reset(OpARMSLL) + v.AddArg(x) v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpMod32(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh8x32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Mod32 x y) + // match: (Lsh8x32 x y) // cond: - // result: (MOD x y) + // result: (SLL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMMOD) + v.reset(OpARMSLL) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpMod32u(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh8x64(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Mod32u x y) - // cond: - // result: (MODU x y) + // match: (Lsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SLLconst x [c]) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMMODU) + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSLLconst) v.AddArg(x) - v.AddArg(y) + v.AuxInt = c return true } -} -func rewriteValueARM_OpMod8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Mod8 x y) - // cond: - // result: (MOD (SignExt8to32 x) (SignExt8to32 y)) + // match: (Lsh8x64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMMOD) - v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) - v1.AddArg(y) - v.AddArg(v1) + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = 0 return true } + return false } -func rewriteValueARM_OpMod8u(v *Value, config *Config) bool { +func rewriteValueARM_OpLsh8x8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Mod8u x y) + // match: (Lsh8x8 x y) // cond: - // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y)) + // result: (SLL x (ZeroExt8to32 y)) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMMODU) + v.reset(OpARMSLL) + v.AddArg(x) v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt8to32, 
config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) return true } } -func rewriteValueARM_OpMove(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVBUload(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Move [s] _ _ mem) - // cond: SizeAndAlign(s).Size() == 0 - // result: mem + // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: + // result: (MOVBUload [off1+off2] {sym} ptr mem) for { - s := v.AuxInt - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 0) { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { break } - v.reset(OpCopy) - v.Type = mem.Type + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVBUload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() == 1 - // result: (MOVBstore dst (MOVBUload src mem) mem) + // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 1) { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { break } - v.reset(OpARMMOVBstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVBUload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 - // result: (MOVHstore dst (MOVHUload src mem) mem) + // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) + // result: x for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVBstore { break } - v.reset(OpARMMOVHstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16()) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) { + break + } + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() == 2 - // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)) + return false +} +func rewriteValueARM_OpARMMOVBUreg(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBUreg x:(MOVBUload _ _)) + // cond: + // result: (MOVWreg x) for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 2) { + x := v.Args[0] + if x.Op != OpARMMOVBUload { break } - v.reset(OpARMMOVBstore) - v.AuxInt = 1 - v.AddArg(dst) - v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v0.AuxInt = 1 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v1.AddArg(dst) - v2 := 
b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) - return true - } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 - // result: (MOVWstore dst (MOVWload src mem) mem) - for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) { - break - } - v.reset(OpARMMOVWstore) - v.AddArg(dst) - v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32()) - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v.AddArg(mem) + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 - // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) + // match: (MOVBUreg (ANDconst [c] x)) + // cond: + // result: (ANDconst [c&0xff] x) for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) { + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { break } - v.reset(OpARMMOVHstore) - v.AuxInt = 2 - v.AddArg(dst) - v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16()) - v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem) - v1.AddArg(dst) - v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16()) - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMANDconst) + v.AuxInt = c & 0xff + v.AddArg(x) return true } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() == 4 - // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))) + return false +} +func rewriteValueARM_OpARMMOVBload(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) + // cond: + // result: (MOVBload [off1+off2] {sym} ptr mem) for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 4) { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { break } - v.reset(OpARMMOVBstore) - v.AuxInt = 3 - v.AddArg(dst) - v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v0.AuxInt = 3 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v1.AuxInt = 2 - v1.AddArg(dst) - v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v2.AuxInt = 2 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v3.AuxInt = 1 - v3.AddArg(dst) - v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v4.AuxInt = 1 - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v5.AddArg(dst) - v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v6.AddArg(src) - v6.AddArg(mem) - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVBload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() == 
3 - // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))) + // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() == 3) { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { break } - v.reset(OpARMMOVBstore) - v.AuxInt = 2 - v.AddArg(dst) - v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v0.AuxInt = 2 - v0.AddArg(src) - v0.AddArg(mem) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v1.AuxInt = 1 - v1.AddArg(dst) - v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v2.AuxInt = 1 - v2.AddArg(src) - v2.AddArg(mem) - v1.AddArg(v2) - v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v3.AddArg(dst) - v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) - v4.AddArg(src) - v4.AddArg(mem) - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) - return true - } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 - // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem) - for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMDUFFCOPY) - v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4)) - v.AddArg(dst) - v.AddArg(src) + v.reset(OpARMMOVBload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) v.AddArg(mem) return true } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0 - // result: (LoweredMove dst src (ADDconst src [SizeAndAlign(s).Size()]) mem) + // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) + // result: x for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVBstore { break } - v.reset(OpARMLoweredMove) - v.AddArg(dst) - v.AddArg(src) - v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type) - v0.AddArg(src) - v0.AuxInt = SizeAndAlign(s).Size() - v.AddArg(v0) - v.AddArg(mem) - return true - } - // match: (Move [s] dst src mem) - // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0 - // result: (LoweredMoveU dst src (ADDconst src [SizeAndAlign(s).Size()]) mem) - for { - s := v.AuxInt - dst := v.Args[0] - src := v.Args[1] - mem := v.Args[2] - if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) { break } - v.reset(OpARMLoweredMoveU) - v.AddArg(dst) - v.AddArg(src) - v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type) - 
v0.AddArg(src) - v0.AuxInt = SizeAndAlign(s).Size() - v.AddArg(v0) - v.AddArg(mem) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } return false } -func rewriteValueARM_OpMul16(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVBreg(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Mul16 x y) + // match: (MOVBreg x:(MOVBload _ _)) // cond: - // result: (MUL x y) + // result: (MOVWreg x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMMUL) + if x.Op != OpARMMOVBload { + break + } + v.reset(OpARMMOVWreg) v.AddArg(x) - v.AddArg(y) return true } -} -func rewriteValueARM_OpMul32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Mul32 x y) - // cond: - // result: (MUL x y) + // match: (MOVBreg (ANDconst [c] x)) + // cond: c & 0x80 == 0 + // result: (ANDconst [c&0x7f] x) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMMUL) + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x80 == 0) { + break + } + v.reset(OpARMANDconst) + v.AuxInt = c & 0x7f v.AddArg(x) - v.AddArg(y) return true } + return false } -func rewriteValueARM_OpMul32F(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVBstore(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Mul32F x y) + // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: - // result: (MULF x y) + // result: (MOVBstore [off1+off2] {sym} ptr val mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMMULF) - v.AddArg(x) - v.AddArg(y) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Mul32uhilo x y) - // cond: - // result: (MULLU x y) + // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMMULLU) - v.AddArg(x) - v.AddArg(y) + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVBstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpMul64F(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Mul64F x y) + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) // cond: - // result: (MULD x y) + // result: (MOVBstore [off] {sym} ptr x mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMMULD) + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVBreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) - v.AddArg(y) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpMul8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Mul8 x y) + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) // cond: - // result: (MUL x y) + // result: (MOVBstore [off] {sym} ptr x mem) for { - x := v.Args[0] - y := 
v.Args[1] - v.reset(OpARMMUL) + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVBUreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) - v.AddArg(y) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpNeg16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neg16 x) + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) // cond: - // result: (RSBconst [0] x) + // result: (MOVBstore [off] {sym} ptr x mem) for { - x := v.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = 0 + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpNeg32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neg32 x) + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) // cond: - // result: (RSBconst [0] x) + // result: (MOVBstore [off] {sym} ptr x mem) for { - x := v.Args[0] - v.reset(OpARMRSBconst) - v.AuxInt = 0 + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHUreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) + v.AddArg(mem) return true } + return false } -func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVDload(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Neg32F x) + // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (MULF (MOVFconst [int64(math.Float64bits(-1))]) x) + // result: (MOVDload [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - v.reset(OpARMMULF) - v0 := b.NewValue0(v.Line, OpARMMOVFconst, config.fe.TypeFloat32()) - v0.AuxInt = int64(math.Float64bits(-1)) - v.AddArg(v0) - v.AddArg(x) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neg64F x) - // cond: - // result: (MULD (MOVDconst [int64(math.Float64bits(-1))]) x) + // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - x := v.Args[0] - v.reset(OpARMMULD) - v0 := b.NewValue0(v.Line, OpARMMOVDconst, config.fe.TypeFloat64()) - v0.AuxInt = int64(math.Float64bits(-1)) - v.AddArg(v0) - v.AddArg(x) + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpNeg8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neg8 x) - // cond: - // result: (RSBconst [0] x) + // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x for { - x := v.Args[0] - v.reset(OpARMRSBconst) - 
v.AuxInt = 0 + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVDstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } + return false } -func rewriteValueARM_OpNeq16(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVDstore(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Neq16 x y) + // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: - // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) + // result: (MOVDstore [off1+off2] {sym} ptr val mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpNeq32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neq32 x y) - // cond: - // result: (NotEqual (CMP x y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neq32F x y) - // cond: - // result: (NotEqual (CMPF x y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neq64F x y) - // cond: - // result: (NotEqual (CMPD x y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpNeq8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Neq8 x y) - // cond: - // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v1.AddArg(x) - v0.AddArg(v1) - v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v2.AddArg(y) - v0.AddArg(v2) - v.AddArg(v0) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpNeqB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NeqB x y) - // cond: - // result: (XOR x y) + // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMXOR) - v.AddArg(x) - v.AddArg(y) + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, 
sym2)) { + break + } + v.reset(OpARMMOVDstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } + return false } -func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVFload(v *Value, config *Config) bool { b := v.Block _ = b - // match: (NeqPtr x y) + // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (NotEqual (CMP x y)) + // result: (MOVFload [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMNotEqual) - v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) - v0.AddArg(x) - v0.AddArg(y) - v.AddArg(v0) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVFload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (NilCheck ptr mem) - // cond: - // result: (LoweredNilCheck ptr mem) + // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - ptr := v.Args[0] + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] mem := v.Args[1] - v.reset(OpARMLoweredNilCheck) + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVFload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) v.AddArg(mem) return true } -} -func rewriteValueARM_OpNot(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Not x) - // cond: - // result: (XORconst [1] x) + // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x for { - x := v.Args[0] - v.reset(OpARMXORconst) - v.AuxInt = 1 + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVFstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { + break + } + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } + return false } -func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVFstore(v *Value, config *Config) bool { b := v.Block _ = b - // match: (OffPtr [off] ptr:(SP)) + // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: - // result: (MOVWaddr [off] ptr) + // result: (MOVFstore [off1+off2] {sym} ptr val mem) for { - off := v.AuxInt - ptr := v.Args[0] - if ptr.Op != OpSP { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { break } - v.reset(OpARMMOVWaddr) - v.AuxInt = off + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVFstore) + v.AuxInt = off1 + off2 + v.Aux = sym v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (OffPtr [off] ptr) - // cond: - // result: (ADDconst [off] ptr) + // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { - off := v.AuxInt - ptr := v.Args[0] - v.reset(OpARMADDconst) - v.AuxInt = off + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if 
v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVFstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } + return false } -func rewriteValueARM_OpOr16(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVHUload(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Or16 x y) + // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (OR x y) + // result: (MOVHUload [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMOR) - v.AddArg(x) - v.AddArg(y) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVHUload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpOr32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Or32 x y) - // cond: - // result: (OR x y) + // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMOR) + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVHUload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type) + // result: x + for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHstore { + break + } + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && !isSigned(x.Type)) { + break + } + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) - v.AddArg(y) return true } + return false } -func rewriteValueARM_OpOr8(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVHUreg(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Or8 x y) + // match: (MOVHUreg x:(MOVBUload _ _)) // cond: - // result: (OR x y) + // result: (MOVWreg x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMOR) + if x.Op != OpARMMOVBUload { + break + } + v.reset(OpARMMOVWreg) v.AddArg(x) - v.AddArg(y) return true } -} -func rewriteValueARM_OpOrB(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (OrB x y) + // match: (MOVHUreg x:(MOVHUload _ _)) // cond: - // result: (OR x y) + // result: (MOVWreg x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMOR) + if x.Op != OpARMMOVHUload { + break + } + v.reset(OpARMMOVWreg) v.AddArg(x) - v.AddArg(y) return true } -} -func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh16Ux16 x y) + // match: (MOVHUreg (ANDconst [c] x)) // cond: - // result: (SRL (ZeroExt16to32 x) (ZeroExt16to32 y)) + // result: (ANDconst [c&0xffff] x) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := 
b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { + break + } + c := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMANDconst) + v.AuxInt = c & 0xffff + v.AddArg(x) return true } + return false } -func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVHload(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Rsh16Ux32 x y) + // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (SRL (ZeroExt16to32 x) y) + // result: (MOVHload [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVHload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh16Ux64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SRLconst (SLLconst x [16]) [c+16]) + // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { break } - c := v_1.AuxInt - if !(uint64(c) < 16) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMSRLconst) - v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) - v0.AddArg(x) - v0.AuxInt = 16 - v.AddArg(v0) - v.AuxInt = c + 16 + v.reset(OpARMMOVHload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (Rsh16Ux64 _ (Const64 [c])) - // cond: uint64(c) >= 16 - // result: (Const16 [0]) + // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type) + // result: x for { + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpARMMOVHstore { break } - c := v_1.AuxInt - if !(uint64(c) >= 16) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) && isSigned(x.Type)) { break } - v.reset(OpConst16) - v.AuxInt = 0 + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } return false } -func rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVHreg(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Rsh16Ux8 x y) + // match: (MOVHreg x:(MOVBload _ _)) // cond: - // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) + // result: (MOVWreg x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) + if x.Op != OpARMMOVBload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } -} -func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: 
(Rsh16x16 x y) + // match: (MOVHreg x:(MOVBUload _ _)) // cond: - // result: (SRA (SignExt16to32 x) (ZeroExt16to32 y)) + // result: (MOVWreg x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) + if x.Op != OpARMMOVBUload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } -} -func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh16x32 x y) + // match: (MOVHreg x:(MOVHload _ _)) // cond: - // result: (SRA (SignExt16to32 x) y) + // result: (MOVWreg x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + if x.Op != OpARMMOVHload { + break + } + v.reset(OpARMMOVWreg) + v.AddArg(x) return true } -} -func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh16x64 x (Const64 [c])) - // cond: uint64(c) < 16 - // result: (SRAconst (SLLconst x [16]) [c+16]) + // match: (MOVHreg (ANDconst [c] x)) + // cond: c & 0x8000 == 0 + // result: (ANDconst [c&0x7fff] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v_0 := v.Args[0] + if v_0.Op != OpARMANDconst { break } - c := v_1.AuxInt - if !(uint64(c) < 16) { + c := v_0.AuxInt + x := v_0.Args[0] + if !(c&0x8000 == 0) { break } - v.reset(OpARMSRAconst) - v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) - v0.AddArg(x) - v0.AuxInt = 16 - v.AddArg(v0) - v.AuxInt = c + 16 - return true - } - // match: (Rsh16x64 x (Const64 [c])) - // cond: uint64(c) >= 16 - // result: (SRAconst (SLLconst x [16]) [31]) - for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) >= 16) { - break - } - v.reset(OpARMSRAconst) - v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) - v0.AddArg(x) - v0.AuxInt = 16 - v.AddArg(v0) - v.AuxInt = 31 + v.reset(OpARMANDconst) + v.AuxInt = c & 0x7fff + v.AddArg(x) return true } return false } -func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh16x8 x y) - // cond: - // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) - return true - } -} -func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh32Ux16 x y) - // cond: - // result: (SRL x (ZeroExt16to32 y)) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) - return true - } -} -func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVHstore(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Rsh32Ux32 x y) + // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: - // result: (SRL x y) - for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v.AddArg(x) - v.AddArg(y) - return true - } -} -func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh32Ux64 x (Const64 
[c])) - // cond: uint64(c) < 32 - // result: (SRLconst x [c]) + // result: (MOVHstore [off1+off2] {sym} ptr val mem) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { - break - } - c := v_1.AuxInt - if !(uint64(c) < 32) { + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { break } - v.reset(OpARMSRLconst) - v.AddArg(x) - v.AuxInt = c + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - // match: (Rsh32Ux64 _ (Const64 [c])) - // cond: uint64(c) >= 32 - // result: (Const32 [0]) + // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) for { - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { break } - c := v_1.AuxInt - if !(uint64(c) >= 32) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpConst32) - v.AuxInt = 0 + v.reset(OpARMMOVHstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } - return false -} -func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh32Ux8 x y) + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) // cond: - // result: (SRL x (ZeroExt8to32 y)) + // result: (MOVHstore [off] {sym} ptr x mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh32x16 x y) + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) // cond: - // result: (SRA x (ZeroExt16to32 y)) + // result: (MOVHstore [off] {sym} ptr x mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVHUreg { + break + } + x := v_1.Args[0] + mem := v.Args[2] + v.reset(OpARMMOVHstore) + v.AuxInt = off + v.Aux = sym + v.AddArg(ptr) v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) + v.AddArg(mem) return true } + return false } -func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVWload(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Rsh32x32 x y) + // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) // cond: - // result: (SRA x y) + // result: (MOVWload [off1+off2] {sym} ptr mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v.AddArg(x) - v.AddArg(y) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + mem := v.Args[1] + v.reset(OpARMMOVWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool { - b := 
v.Block - _ = b - // match: (Rsh32x64 x (Const64 [c])) - // cond: uint64(c) < 32 - // result: (SRAconst x [c]) + // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { break } - c := v_1.AuxInt - if !(uint64(c) < 32) { + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + mem := v.Args[1] + if !(canMergeSym(sym1, sym2)) { break } - v.reset(OpARMSRAconst) - v.AddArg(x) - v.AuxInt = c + v.reset(OpARMMOVWload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(mem) return true } - // match: (Rsh32x64 x (Const64 [c])) - // cond: uint64(c) >= 32 - // result: (SRAconst x [31]) + // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) + // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) + // result: x for { - x := v.Args[0] + off := v.AuxInt + sym := v.Aux + ptr := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpARMMOVWstore { break } - c := v_1.AuxInt - if !(uint64(c) >= 32) { + off2 := v_1.AuxInt + sym2 := v_1.Aux + ptr2 := v_1.Args[0] + x := v_1.Args[1] + if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) { break } - v.reset(OpARMSRAconst) + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) - v.AuxInt = 31 return true } return false } -func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMOVWstore(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Rsh32x8 x y) + // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) // cond: - // result: (SRA x (ZeroExt8to32 y)) + // result: (MOVWstore [off1+off2] {sym} ptr val mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v.AddArg(x) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(y) - v.AddArg(v0) + off1 := v.AuxInt + sym := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + off2 := v_0.AuxInt + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVWstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + ptr := v_0.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(canMergeSym(sym1, sym2)) { + break + } + v.reset(OpARMMOVWstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) return true } + return false } -func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMUL(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Rsh8Ux16 x y) + // match: (MUL x (MOVWconst [-1])) // cond: - // result: (SRL (ZeroExt8to32 x) (ZeroExt16to32 y)) + // result: (RSBconst [0] x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if 
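The MOVWload and MOVWstore cases above include the new load-after-store elimination: a load whose sym, offset, and pointer match an immediately preceding store of the same width (and whose signedness agrees with the stored value) is replaced by a copy of that value. A standalone Go sketch of the idea, illustrative only and not part of the generated file:

package main

import "fmt"

func main() {
	buf := [4]int16{}
	p := &buf[2]

	*p = -5          // MOVHstore [off] {sym} ptr x mem
	got := *p        // a MOVHload from the same sym/off/ptr ...
	fmt.Println(got) // ... can reuse x directly; prints -5

	// The signedness condition matters: a signed reload must see a value
	// whose type is signed, otherwise the extension could differ.
	var u uint16 = 0xffff
	fmt.Println(int16(u)) // -1 when the same bits are reloaded as signed
}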
v_1.AuxInt != -1 { + break + } + v.reset(OpARMRSBconst) + v.AuxInt = 0 + v.AddArg(x) return true } -} -func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh8Ux32 x y) + // match: (MUL _ (MOVWconst [0])) // cond: - // result: (SRL (ZeroExt8to32 x) y) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if v_1.AuxInt != 0 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh8Ux64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRLconst (SLLconst x [24]) [c+24]) + // match: (MUL x (MOVWconst [1])) + // cond: + // result: x for { x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpARMMOVWconst { break } - c := v_1.AuxInt - if !(uint64(c) < 8) { + if v_1.AuxInt != 1 { break } - v.reset(OpARMSRLconst) - v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) - v0.AddArg(x) - v0.AuxInt = 24 - v.AddArg(v0) - v.AuxInt = c + 24 + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } - // match: (Rsh8Ux64 _ (Const64 [c])) - // cond: uint64(c) >= 8 - // result: (Const8 [0]) + // match: (MUL x (MOVWconst [c])) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) for { + x := v.Args[0] v_1 := v.Args[1] - if v_1.Op != OpConst64 { + if v_1.Op != OpARMMOVWconst { break } c := v_1.AuxInt - if !(uint64(c) >= 8) { + if !(isPowerOfTwo(c)) { break } - v.reset(OpConst8) - v.AuxInt = 0 + v.reset(OpARMSLLconst) + v.AuxInt = log2(c) + v.AddArg(x) return true } - return false -} -func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh8Ux8 x y) + // match: (MUL (MOVWconst [-1]) x) // cond: - // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) + // result: (RSBconst [0] x) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRL) - v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != -1 { + break + } + x := v.Args[1] + v.reset(OpARMRSBconst) + v.AuxInt = 0 + v.AddArg(x) return true } -} -func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh8x16 x y) + // match: (MUL (MOVWconst [0]) _) // cond: - // result: (SRA (SignExt8to32 x) (ZeroExt16to32 y)) + // result: (MOVWconst [0]) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != 0 { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } -} -func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh8x32 x y) + // match: (MUL (MOVWconst [1]) x) // cond: - // result: (SRA (SignExt8to32 x) y) + // result: x for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) - v0.AddArg(x) - v.AddArg(v0) - v.AddArg(y) + v_0 := 
v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != 1 { + break + } + x := v.Args[1] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) return true } -} -func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Rsh8x64 x (Const64 [c])) - // cond: uint64(c) < 8 - // result: (SRAconst (SLLconst x [24]) [c+24]) + // match: (MUL (MOVWconst [c]) x) + // cond: isPowerOfTwo(c) + // result: (SLLconst [log2(c)] x) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt - if !(uint64(c) < 8) { + c := v_0.AuxInt + x := v.Args[1] + if !(isPowerOfTwo(c)) { break } - v.reset(OpARMSRAconst) - v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) - v0.AddArg(x) - v0.AuxInt = 24 - v.AddArg(v0) - v.AuxInt = c + 24 + v.reset(OpARMSLLconst) + v.AuxInt = log2(c) + v.AddArg(x) return true } - // match: (Rsh8x64 x (Const64 [c])) - // cond: uint64(c) >= 8 - // result: (SRAconst (SLLconst x [24]) [31]) + // match: (MUL (MOVWconst [c]) (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(int32(c*d))]) for { - x := v.Args[0] - v_1 := v.Args[1] - if v_1.Op != OpConst64 { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { break } - c := v_1.AuxInt - if !(uint64(c) >= 8) { + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - v.reset(OpARMSRAconst) - v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) - v0.AddArg(x) - v0.AuxInt = 24 - v.AddArg(v0) - v.AuxInt = 31 + d := v_1.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(c * d)) return true } return false } -func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMULA(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Rsh8x8 x y) + // match: (MULA x (MOVWconst [-1]) a) // cond: - // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y)) + // result: (SUB a x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSRA) - v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) - v0.AddArg(x) - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) - v1.AddArg(y) - v.AddArg(v1) + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if v_1.AuxInt != -1 { + break + } + a := v.Args[2] + v.reset(OpARMSUB) + v.AddArg(a) + v.AddArg(x) return true } -} -func rewriteValueARM_OpSelect0(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Select0 x) - // cond: t.IsFlags() - // result: (Carry x) + // match: (MULA _ (MOVWconst [0]) a) + // cond: + // result: a + for { + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + if v_1.AuxInt != 0 { + break + } + a := v.Args[2] + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MULA x (MOVWconst [1]) a) + // cond: + // result: (ADD x a) for { - t := v.Type x := v.Args[0] - if !(t.IsFlags()) { + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - v.reset(OpARMCarry) + if v_1.AuxInt != 1 { + break + } + a := v.Args[2] + v.reset(OpARMADD) v.AddArg(x) + v.AddArg(a) return true } - // match: (Select0 x) - // cond: !t.IsFlags() - // result: (LoweredSelect0 x) + // match: (MULA x (MOVWconst [c]) a) + // cond: isPowerOfTwo(c) + // result: (ADD (SLLconst [log2(c)] x) a) for { - t := v.Type x := v.Args[0] - if !(!t.IsFlags()) { + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { break } - v.reset(OpARMLoweredSelect0) + c := v_1.AuxInt + a := v.Args[2] + if !(isPowerOfTwo(c)) { + break + } + 
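The MUL and MULA rules fold constant operands: multiply by -1 becomes a reverse-subtract from zero, by 0 becomes the constant 0, by 1 disappears, a power of two becomes a left shift, and two constants fold completely (truncated to 32 bits); MULA additionally folds the accumulate into ADD/ADDconst. A quick standalone check of the arithmetic identities these rely on, illustrative and not part of the patch:

package main

import "fmt"

func main() {
	x, a := int32(123456789), int32(-987)
	fmt.Println(x*-1 == 0-x)     // MUL by -1  -> RSBconst [0] x
	fmt.Println(x*8 == x<<3)     // MUL by 2^k -> SLLconst [k] x
	fmt.Println(x*8+a == x<<3+a) // MULA folds the same way, then ADDs a

	// Folding two constants truncates to 32 bits, matching a 32-bit multiply.
	c, d := int64(70000), int64(70000)
	fmt.Println(int32(c*d) == int32(c)*int32(d))
}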
v.reset(OpARMADD) + v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULA (MOVWconst [-1]) x a) + // cond: + // result: (SUB a x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != -1 { + break + } + x := v.Args[1] + a := v.Args[2] + v.reset(OpARMSUB) + v.AddArg(a) + v.AddArg(x) + return true + } + // match: (MULA (MOVWconst [0]) _ a) + // cond: + // result: a + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != 0 { + break + } + a := v.Args[2] + v.reset(OpCopy) + v.Type = a.Type + v.AddArg(a) + return true + } + // match: (MULA (MOVWconst [1]) x a) + // cond: + // result: (ADD x a) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + if v_0.AuxInt != 1 { + break + } + x := v.Args[1] + a := v.Args[2] + v.reset(OpARMADD) v.AddArg(x) + v.AddArg(a) + return true + } + // match: (MULA (MOVWconst [c]) x a) + // cond: isPowerOfTwo(c) + // result: (ADD (SLLconst [log2(c)] x) a) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + a := v.Args[2] + if !(isPowerOfTwo(c)) { + break + } + v.reset(OpARMADD) + v0 := b.NewValue0(v.Line, OpARMSLLconst, x.Type) + v0.AuxInt = log2(c) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(a) + return true + } + // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a) + // cond: + // result: (ADDconst [int64(int32(c*d))] a) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + d := v_1.AuxInt + a := v.Args[2] + v.reset(OpARMADDconst) + v.AuxInt = int64(int32(c * d)) + v.AddArg(a) return true } return false } -func rewriteValueARM_OpSelect1(v *Value, config *Config) bool { +func rewriteValueARM_OpARMMVN(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Select1 x) + // match: (MVN (MOVWconst [c])) // cond: - // result: (LoweredSelect1 x) + // result: (MOVWconst [^c]) for { - x := v.Args[0] - v.reset(OpARMLoweredSelect1) - v.AddArg(x) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = ^c return true } + return false } -func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool { +func rewriteValueARM_OpMod16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (SignExt16to32 x) + // match: (Mod16 x y) // cond: - // result: (MOVHreg x) + // result: (MOD (SignExt16to32 x) (SignExt16to32 y)) for { x := v.Args[0] - v.reset(OpARMMOVHreg) - v.AddArg(x) + y := v.Args[1] + v.reset(OpARMMOD) + v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool { +func rewriteValueARM_OpMod16u(v *Value, config *Config) bool { b := v.Block _ = b - // match: (SignExt8to16 x) + // match: (Mod16u x y) // cond: - // result: (MOVBreg x) + // result: (MODU (ZeroExt16to32 x) (ZeroExt16to32 y)) for { x := v.Args[0] - v.reset(OpARMMOVBreg) - v.AddArg(x) + y := v.Args[1] + v.reset(OpARMMODU) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpSignExt8to32(v *Value, 
config *Config) bool { +func rewriteValueARM_OpMod32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (SignExt8to32 x) + // match: (Mod32 x y) // cond: - // result: (MOVBreg x) + // result: (MOD x y) for { x := v.Args[0] - v.reset(OpARMMOVBreg) + y := v.Args[1] + v.reset(OpARMMOD) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpSignmask(v *Value, config *Config) bool { +func rewriteValueARM_OpMod32u(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Signmask x) + // match: (Mod32u x y) // cond: - // result: (SRAconst x [31]) + // result: (MODU x y) for { x := v.Args[0] - v.reset(OpARMSRAconst) + y := v.Args[1] + v.reset(OpARMMODU) v.AddArg(x) - v.AuxInt = 31 + v.AddArg(y) return true } } -func rewriteValueARM_OpSqrt(v *Value, config *Config) bool { +func rewriteValueARM_OpMod8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Sqrt x) + // match: (Mod8 x y) // cond: - // result: (SQRTD x) + // result: (MOD (SignExt8to32 x) (SignExt8to32 y)) for { x := v.Args[0] - v.reset(OpARMSQRTD) - v.AddArg(x) + y := v.Args[1] + v.reset(OpARMMOD) + v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool { +func rewriteValueARM_OpMod8u(v *Value, config *Config) bool { b := v.Block _ = b - // match: (StaticCall [argwid] {target} mem) + // match: (Mod8u x y) // cond: - // result: (CALLstatic [argwid] {target} mem) + // result: (MODU (ZeroExt8to32 x) (ZeroExt8to32 y)) for { - argwid := v.AuxInt - target := v.Aux - mem := v.Args[0] - v.reset(OpARMCALLstatic) - v.AuxInt = argwid - v.Aux = target - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMMODU) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) return true } } -func rewriteValueARM_OpStore(v *Value, config *Config) bool { +func rewriteValueARM_OpMove(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Store [1] ptr val mem) - // cond: - // result: (MOVBstore ptr val mem) + // match: (Move [s] _ _ mem) + // cond: SizeAndAlign(s).Size() == 0 + // result: mem for { - if v.AuxInt != 1 { + s := v.AuxInt + mem := v.Args[2] + if !(SizeAndAlign(s).Size() == 0) { break } - ptr := v.Args[0] - val := v.Args[1] - mem := v.Args[2] - v.reset(OpARMMOVBstore) - v.AddArg(ptr) - v.AddArg(val) + v.reset(OpCopy) + v.Type = mem.Type v.AddArg(mem) return true } - // match: (Store [2] ptr val mem) - // cond: - // result: (MOVHstore ptr val mem) + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() == 1 + // result: (MOVBstore dst (MOVBUload src mem) mem) for { - if v.AuxInt != 2 { - break - } - ptr := v.Args[0] - val := v.Args[1] + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] mem := v.Args[2] - v.reset(OpARMMOVHstore) - v.AddArg(ptr) - v.AddArg(val) + if !(SizeAndAlign(s).Size() == 1) { + break + } + v.reset(OpARMMOVBstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (Store [4] ptr val mem) - // cond: !is32BitFloat(val.Type) - // result: (MOVWstore ptr val mem) + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 + // result: 
(MOVHstore dst (MOVHUload src mem) mem) for { - if v.AuxInt != 4 { - break - } - ptr := v.Args[0] - val := v.Args[1] + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] mem := v.Args[2] - if !(!is32BitFloat(val.Type)) { + if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) { break } - v.reset(OpARMMOVWstore) - v.AddArg(ptr) - v.AddArg(val) + v.reset(OpARMMOVHstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16()) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (Store [4] ptr val mem) - // cond: is32BitFloat(val.Type) - // result: (MOVFstore ptr val mem) + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() == 2 + // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)) for { - if v.AuxInt != 4 { + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(SizeAndAlign(s).Size() == 2) { break } - ptr := v.Args[0] - val := v.Args[1] + v.reset(OpARMMOVBstore) + v.AuxInt = 1 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v0.AuxInt = 1 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] mem := v.Args[2] - if !(is32BitFloat(val.Type)) { + if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) { break } - v.reset(OpARMMOVFstore) - v.AddArg(ptr) - v.AddArg(val) + v.reset(OpARMMOVWstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpARMMOVWload, config.fe.TypeUInt32()) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) v.AddArg(mem) return true } - // match: (Store [8] ptr val mem) - // cond: is64BitFloat(val.Type) - // result: (MOVDstore ptr val mem) + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 + // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem)) for { - if v.AuxInt != 8 { + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) { break } - ptr := v.Args[0] - val := v.Args[1] + v.reset(OpARMMOVHstore) + v.AuxInt = 2 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16()) + v0.AuxInt = 2 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem) + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpARMMOVHUload, config.fe.TypeUInt16()) + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() == 4 + // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))) + for { + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] mem := v.Args[2] - if !(is64BitFloat(val.Type)) { + if !(SizeAndAlign(s).Size() == 4) { break } - v.reset(OpARMMOVDstore) - v.AddArg(ptr) - v.AddArg(val) - v.AddArg(mem) + v.reset(OpARMMOVBstore) + v.AuxInt = 3 + v.AddArg(dst) + v0 := 
b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v0.AuxInt = 3 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v1.AuxInt = 2 + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v2.AuxInt = 2 + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v3.AuxInt = 1 + v3.AddArg(dst) + v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v4.AuxInt = 1 + v4.AddArg(src) + v4.AddArg(mem) + v3.AddArg(v4) + v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v5.AddArg(dst) + v6 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v6.AddArg(src) + v6.AddArg(mem) + v5.AddArg(v6) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) return true } - return false -} -func rewriteValueARM_OpSub16(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Sub16 x y) - // cond: - // result: (SUB x y) + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() == 3 + // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(SizeAndAlign(s).Size() == 3) { + break + } + v.reset(OpARMMOVBstore) + v.AuxInt = 2 + v.AddArg(dst) + v0 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v0.AuxInt = 2 + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v1.AuxInt = 1 + v1.AddArg(dst) + v2 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v2.AuxInt = 1 + v2.AddArg(src) + v2.AddArg(mem) + v1.AddArg(v2) + v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v3.AddArg(dst) + v4 := b.NewValue0(v.Line, OpARMMOVBUload, config.fe.TypeUInt8()) + v4.AddArg(src) + v4.AddArg(mem) + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) return true } -} -func rewriteValueARM_OpSub32(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Sub32 x y) - // cond: - // result: (SUB x y) + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 + // result: (DUFFCOPY [8 * (128 - int64(SizeAndAlign(s).Size()/4))] dst src mem) for { - x := v.Args[0] - y := v.Args[1] - v.reset(OpARMSUB) - v.AddArg(x) - v.AddArg(y) + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) { + break + } + v.reset(OpARMDUFFCOPY) + v.AuxInt = 8 * (128 - int64(SizeAndAlign(s).Size()/4)) + v.AddArg(dst) + v.AddArg(src) + v.AddArg(mem) return true } -} -func rewriteValueARM_OpSub32F(v *Value, config *Config) bool { - b := v.Block - _ = b - // match: (Sub32F x y) - // cond: - // result: (SUBF x y) + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0 + // result: (LoweredMove dst src (ADDconst src [SizeAndAlign(s).Size()]) mem) + for { + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) { + break + } + v.reset(OpARMLoweredMove) + v.AddArg(dst) + 
v.AddArg(src) + v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type) + v0.AddArg(src) + v0.AuxInt = SizeAndAlign(s).Size() + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [s] dst src mem) + // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0 + // result: (LoweredMoveU dst src (ADDconst src [SizeAndAlign(s).Size()]) mem) + for { + s := v.AuxInt + dst := v.Args[0] + src := v.Args[1] + mem := v.Args[2] + if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) { + break + } + v.reset(OpARMLoweredMoveU) + v.AddArg(dst) + v.AddArg(src) + v0 := b.NewValue0(v.Line, OpARMADDconst, src.Type) + v0.AddArg(src) + v0.AuxInt = SizeAndAlign(s).Size() + v.AddArg(v0) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpMul16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Mul16 x y) + // cond: + // result: (MUL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUBF) + v.reset(OpARMMUL) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool { +func rewriteValueARM_OpMul32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Sub32carry x y) + // match: (Mul32 x y) // cond: - // result: (SUBS x y) + // result: (MUL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUBS) + v.reset(OpARMMUL) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool { +func rewriteValueARM_OpMul32F(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Sub32withcarry x y c) + // match: (Mul32F x y) // cond: - // result: (SBC x y c) + // result: (MULF x y) for { x := v.Args[0] y := v.Args[1] - c := v.Args[2] - v.reset(OpARMSBC) + v.reset(OpARMMULF) v.AddArg(x) v.AddArg(y) - v.AddArg(c) return true } } -func rewriteValueARM_OpSub64F(v *Value, config *Config) bool { +func rewriteValueARM_OpMul32uhilo(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Sub64F x y) + // match: (Mul32uhilo x y) // cond: - // result: (SUBD x y) + // result: (MULLU x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUBD) + v.reset(OpARMMULLU) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpSub8(v *Value, config *Config) bool { +func rewriteValueARM_OpMul64F(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Sub8 x y) + // match: (Mul64F x y) // cond: - // result: (SUB x y) + // result: (MULD x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUB) + v.reset(OpARMMULD) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool { +func rewriteValueARM_OpMul8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (SubPtr x y) + // match: (Mul8 x y) // cond: - // result: (SUB x y) + // result: (MUL x y) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMSUB) + v.reset(OpARMMUL) v.AddArg(x) v.AddArg(y) return true } } -func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool { +func rewriteValueARM_OpNeg16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Trunc16to8 x) + // match: (Neg16 x) // cond: - // result: x + // result: (RSBconst [0] x) for { x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type + v.reset(OpARMRSBconst) + v.AuxInt = 0 v.AddArg(x) return true } } -func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool { +func rewriteValueARM_OpNeg32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Trunc32to16 x) + // match: (Neg32 x) // cond: - // result: x + // result: (RSBconst 
[0] x) for { x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type + v.reset(OpARMRSBconst) + v.AuxInt = 0 v.AddArg(x) return true } } -func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool { +func rewriteValueARM_OpNeg32F(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Trunc32to8 x) + // match: (Neg32F x) // cond: - // result: x + // result: (MULF (MOVFconst [int64(math.Float64bits(-1))]) x) for { x := v.Args[0] - v.reset(OpCopy) - v.Type = x.Type + v.reset(OpARMMULF) + v0 := b.NewValue0(v.Line, OpARMMOVFconst, config.fe.TypeFloat32()) + v0.AuxInt = int64(math.Float64bits(-1)) + v.AddArg(v0) v.AddArg(x) return true } } -func rewriteValueARM_OpXor16(v *Value, config *Config) bool { +func rewriteValueARM_OpNeg64F(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Xor16 x y) + // match: (Neg64F x) // cond: - // result: (XOR x y) + // result: (MULD (MOVDconst [int64(math.Float64bits(-1))]) x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMXOR) + v.reset(OpARMMULD) + v0 := b.NewValue0(v.Line, OpARMMOVDconst, config.fe.TypeFloat64()) + v0.AuxInt = int64(math.Float64bits(-1)) + v.AddArg(v0) v.AddArg(x) - v.AddArg(y) return true } } -func rewriteValueARM_OpXor32(v *Value, config *Config) bool { +func rewriteValueARM_OpNeg8(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Xor32 x y) + // match: (Neg8 x) // cond: - // result: (XOR x y) + // result: (RSBconst [0] x) for { x := v.Args[0] - y := v.Args[1] - v.reset(OpARMXOR) + v.reset(OpARMRSBconst) + v.AuxInt = 0 v.AddArg(x) - v.AddArg(y) return true } } -func rewriteValueARM_OpXor8(v *Value, config *Config) bool { +func rewriteValueARM_OpNeq16(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Xor8 x y) + // match: (Neq16 x y) // cond: - // result: (XOR x y) + // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y))) for { x := v.Args[0] y := v.Args[1] - v.reset(OpARMXOR) - v.AddArg(x) - v.AddArg(y) + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) return true } } -func rewriteValueARM_OpZero(v *Value, config *Config) bool { +func rewriteValueARM_OpNeq32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Zero [s] _ mem) - // cond: SizeAndAlign(s).Size() == 0 - // result: mem + // match: (Neq32 x y) + // cond: + // result: (NotEqual (CMP x y)) for { - s := v.AuxInt - mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 0) { - break - } - v.reset(OpCopy) - v.Type = mem.Type - v.AddArg(mem) + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() == 1 - // result: (MOVBstore ptr (MOVWconst [0]) mem) +} +func rewriteValueARM_OpNeq32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq32F x y) + // cond: + // result: (NotEqual (CMPF x y)) for { - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 1) { - break - } - v.reset(OpARMMOVBstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Line, OpARMCMPF, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v.AddArg(mem) 
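The Neg* and Neq* lowerings in this stretch follow one pattern: integer negation becomes a reverse-subtract from zero, float negation becomes a multiply by the constant -1, and sub-word inequality is decided by a 32-bit CMP after zero-extending both operands. A small standalone check of those equivalences, illustrative only:

package main

import "fmt"

func main() {
	x := int32(42)
	fmt.Println(-x == 0-x) // Neg32 -> RSBconst [0] x

	f := 3.75
	fmt.Println(-f == f*-1) // Neg64F -> MULD by -1 flips the sign bit

	// Neq16/Neq8 compare after zero-extending both sides to 32 bits,
	// which preserves (in)equality of the narrow values.
	p, q := int16(-1), int16(1)
	fmt.Println((p != q) == (uint32(uint16(p)) != uint32(uint16(q))))
}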
return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 - // result: (MOVHstore ptr (MOVWconst [0]) mem) +} +func rewriteValueARM_OpNeq64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq64F x y) + // cond: + // result: (NotEqual (CMPD x y)) for { - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) { - break - } - v.reset(OpARMMOVHstore) - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Line, OpARMCMPD, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) v.AddArg(v0) - v.AddArg(mem) return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() == 2 - // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) +} +func rewriteValueARM_OpNeq8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Neq8 x y) + // cond: + // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y))) for { - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 2) { - break - } - v.reset(OpARMMOVBstore) - v.AuxInt = 1 - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v2.AddArg(y) + v0.AddArg(v2) v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v1.AuxInt = 0 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 - // result: (MOVWstore ptr (MOVWconst [0]) mem) +} +func rewriteValueARM_OpNeqB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NeqB x y) + // cond: + // result: (XOR x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMXOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpNeqPtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NeqPtr x y) + // cond: + // result: (NotEqual (CMP x y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMNotEqual) + v0 := b.NewValue0(v.Line, OpARMCMP, TypeFlags) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpNilCheck(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NilCheck ptr mem) + // cond: + // result: (LoweredNilCheck ptr mem) for { - s := v.AuxInt ptr := v.Args[0] mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) { - break - } - v.reset(OpARMMOVWstore) + v.reset(OpARMLoweredNilCheck) v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 - v.AddArg(v0) v.AddArg(mem) return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 - // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) +} +func rewriteValueARM_OpNot(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Not x) + // cond: + // result: (XORconst [1] x) for { - s := v.AuxInt - ptr 
:= v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) { + x := v.Args[0] + v.reset(OpARMXORconst) + v.AuxInt = 1 + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpARMNotEqual(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (NotEqual (FlagEQ)) + // cond: + // result: (MOVWconst [0]) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagEQ { break } - v.reset(OpARMMOVHstore) - v.AuxInt = 2 - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem) - v1.AuxInt = 0 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v2.AuxInt = 0 - v1.AddArg(v2) - v1.AddArg(mem) - v.AddArg(v1) + v.reset(OpARMMOVWconst) + v.AuxInt = 0 return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() == 4 - // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))) + // match: (NotEqual (FlagLT_ULT)) + // cond: + // result: (MOVWconst [1]) for { - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 4) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_ULT { break } - v.reset(OpARMMOVBstore) - v.AuxInt = 3 - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v1.AuxInt = 2 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v3.AuxInt = 1 - v3.AddArg(ptr) - v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v4.AuxInt = 0 - v3.AddArg(v4) - v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v5.AuxInt = 0 - v5.AddArg(ptr) - v6 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v6.AuxInt = 0 - v5.AddArg(v6) - v5.AddArg(mem) - v3.AddArg(v5) - v1.AddArg(v3) - v.AddArg(v1) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() == 3 - // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))) + // match: (NotEqual (FlagLT_UGT)) + // cond: + // result: (MOVWconst [1]) for { - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size() == 3) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagLT_UGT { break } - v.reset(OpARMMOVBstore) - v.AuxInt = 2 - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v1.AuxInt = 1 - v1.AddArg(ptr) - v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v2.AuxInt = 0 - v1.AddArg(v2) - v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) - v3.AuxInt = 0 - v3.AddArg(ptr) - v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v4.AuxInt = 0 - v3.AddArg(v4) - v3.AddArg(mem) - v1.AddArg(v3) - v.AddArg(v1) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 - // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem) + // match: (NotEqual (FlagGT_ULT)) + // cond: + // result: (MOVWconst [1]) for 
{ - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_ULT { break } - v.reset(OpARMDUFFZERO) - v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/4)) - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v0.AuxInt = 0 - v.AddArg(v0) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0 - // result: (LoweredZero ptr (ADDconst ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem) + // match: (NotEqual (FlagGT_UGT)) + // cond: + // result: (MOVWconst [1]) for { - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) { + v_0 := v.Args[0] + if v_0.Op != OpARMFlagGT_UGT { break } - v.reset(OpARMLoweredZero) - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type) - v0.AddArg(ptr) - v0.AuxInt = SizeAndAlign(s).Size() - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + v.reset(OpARMMOVWconst) + v.AuxInt = 1 return true } - // match: (Zero [s] ptr mem) - // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0 - // result: (LoweredZeroU ptr (ADDconst ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem) + // match: (NotEqual (InvertFlags x)) + // cond: + // result: (NotEqual x) for { - s := v.AuxInt - ptr := v.Args[0] - mem := v.Args[1] - if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) { + v_0 := v.Args[0] + if v_0.Op != OpARMInvertFlags { break } - v.reset(OpARMLoweredZeroU) - v.AddArg(ptr) - v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type) - v0.AddArg(ptr) - v0.AuxInt = SizeAndAlign(s).Size() - v.AddArg(v0) - v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) - v1.AuxInt = 0 - v.AddArg(v1) - v.AddArg(mem) + x := v_0.Args[0] + v.reset(OpARMNotEqual) + v.AddArg(x) return true } return false } -func rewriteValueARM_OpZeroExt16to32(v *Value, config *Config) bool { +func rewriteValueARM_OpARMOR(v *Value, config *Config) bool { b := v.Block _ = b - // match: (ZeroExt16to32 x) + // match: (OR (MOVWconst [c]) x) // cond: - // result: (MOVHUreg x) + // result: (ORconst [c] x) for { - x := v.Args[0] - v.reset(OpARMMOVHUreg) + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR x (MOVWconst [c])) + // cond: + // result: (ORconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (OR x x) + // cond: + // result: x + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpCopy) + v.Type = x.Type v.AddArg(x) return true } + return false } -func rewriteValueARM_OpZeroExt8to16(v *Value, config *Config) bool { +func rewriteValueARM_OpARMORconst(v *Value, config *Config) bool { b := v.Block _ = b - // match: (ZeroExt8to16 x) + // match: (ORconst [0] x) // cond: - // result: (MOVBUreg x) + // result: x for { + if v.AuxInt != 0 { + break + } x := v.Args[0] - v.reset(OpARMMOVBUreg) + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + 
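The Zero rules pick a lowering from the requested size and alignment: small sizes expand into individual stores of a zero constant, word-aligned sizes up to 512 bytes go through DUFFZERO, larger aligned blocks use the LoweredZero loop, and anything over 4 bytes without word alignment falls back to the byte-wise LoweredZeroU loop (Move mirrors this with DUFFCOPY, LoweredMove, and LoweredMoveU). A rough sketch of that selection order in plain Go; the function name and result strings are illustrative, not compiler API:

package main

import "fmt"

// zeroLowering restates, in rule order, which lowering the Zero rules pick
// for a given size and alignment.
func zeroLowering(size, align int64) string {
	switch {
	case size == 0:
		return "no code (reuse incoming memory)"
	case size == 1:
		return "single MOVBstore"
	case size == 2 && align%2 == 0:
		return "single MOVHstore"
	case size == 2:
		return "two MOVBstores"
	case size == 4 && align%4 == 0:
		return "single MOVWstore"
	case size == 4 && align%2 == 0:
		return "two MOVHstores"
	case size == 4:
		return "four MOVBstores"
	case size == 3:
		return "three MOVBstores"
	case size%4 == 0 && size > 4 && size <= 512 && align%4 == 0:
		return "DUFFZERO"
	case size%4 == 0 && size > 512 && align%4 == 0:
		return "LoweredZero loop"
	case size > 4 && align%4 != 0:
		return "LoweredZeroU byte loop"
	}
	return "not handled by these rules"
}

func main() {
	fmt.Println(zeroLowering(64, 4))   // DUFFZERO
	fmt.Println(zeroLowering(1024, 4)) // LoweredZero loop
	fmt.Println(zeroLowering(7, 1))    // LoweredZeroU byte loop
}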
return true + } + // match: (ORconst [c] _) + // cond: int32(c)==-1 + // result: (MOVWconst [-1]) + for { + c := v.AuxInt + if !(int32(c) == -1) { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = -1 + return true + } + // match: (ORconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [c|d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = c | d + return true + } + // match: (ORconst [c] (ORconst [d] x)) + // cond: + // result: (ORconst [c|d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMORconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMORconst) + v.AuxInt = c | d v.AddArg(x) return true } + return false } -func rewriteValueARM_OpZeroExt8to32(v *Value, config *Config) bool { +func rewriteValueARM_OpOffPtr(v *Value, config *Config) bool { b := v.Block _ = b - // match: (ZeroExt8to32 x) + // match: (OffPtr [off] ptr:(SP)) // cond: - // result: (MOVBUreg x) + // result: (MOVWaddr [off] ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + if ptr.Op != OpSP { + break + } + v.reset(OpARMMOVWaddr) + v.AuxInt = off + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // cond: + // result: (ADDconst [off] ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = off + v.AddArg(ptr) + return true + } +} +func rewriteValueARM_OpOr16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Or16 x y) + // cond: + // result: (OR x y) for { x := v.Args[0] - v.reset(OpARMMOVBUreg) + y := v.Args[1] + v.reset(OpARMOR) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteValueARM_OpZeromask(v *Value, config *Config) bool { +func rewriteValueARM_OpOr32(v *Value, config *Config) bool { b := v.Block _ = b - // match: (Zeromask x) + // match: (Or32 x y) // cond: - // result: (LoweredZeromask x) + // result: (OR x y) for { x := v.Args[0] - v.reset(OpARMLoweredZeromask) + y := v.Args[1] + v.reset(OpARMOR) v.AddArg(x) + v.AddArg(y) return true } } -func rewriteBlockARM(b *Block) bool { - switch b.Kind { - case BlockIf: - // match: (If (Equal cc) yes no) +func rewriteValueARM_OpOr8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Or8 x y) + // cond: + // result: (OR x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpOrB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (OrB x y) + // cond: + // result: (OR x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpARMRSB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (RSB (MOVWconst [c]) x) + // cond: + // result: (SUBconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMSUBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (RSB x (MOVWconst [c])) + // cond: + // result: (RSBconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMRSBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMRSBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (RSBconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(int32(c-d))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break 
+ } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(c - d)) + return true + } + // match: (RSBconst [c] (RSBconst [d] x)) + // cond: + // result: (ADDconst [int64(int32(c-d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMRSBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int64(int32(c - d)) + v.AddArg(x) + return true + } + // match: (RSBconst [c] (ADDconst [d] x)) + // cond: + // result: (RSBconst [int64(int32(c-d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int64(int32(c - d)) + v.AddArg(x) + return true + } + // match: (RSBconst [c] (SUBconst [d] x)) + // cond: + // result: (RSBconst [int64(int32(c+d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int64(int32(c + d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMRSCconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (RSCconst [c] (ADDconst [d] x) flags) + // cond: + // result: (RSCconst [int64(int32(c-d))] x flags) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + flags := v.Args[1] + v.reset(OpARMRSCconst) + v.AuxInt = int64(int32(c - d)) + v.AddArg(x) + v.AddArg(flags) + return true + } + // match: (RSCconst [c] (SUBconst [d] x) flags) + // cond: + // result: (RSCconst [int64(int32(c+d))] x flags) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + flags := v.Args[1] + v.reset(OpARMRSCconst) + v.AuxInt = int64(int32(c + d)) + v.AddArg(x) + v.AddArg(flags) + return true + } + return false +} +func rewriteValueARM_OpRsh16Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux16 x y) + // cond: + // result: (SRL (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh16Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux32 x y) + // cond: + // result: (SRL (ZeroExt16to32 x) y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpRsh16Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SRLconst (SLLconst x [16]) [c+16]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 16) { + break + } + v.reset(OpARMSRLconst) + v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) + v0.AddArg(x) + v0.AuxInt = 16 + v.AddArg(v0) + v.AuxInt = c + 16 + return true + } + // match: (Rsh16Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (Const16 [0]) + for { + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 16) { + break + } + v.reset(OpConst16) + v.AuxInt = 0 + return true + } + return false +} +func 
rewriteValueARM_OpRsh16Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16Ux8 x y) + // cond: + // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh16x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x16 x y) + // cond: + // result: (SRA (SignExt16to32 x) (ZeroExt16to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh16x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x32 x y) + // cond: + // result: (SRA (SignExt16to32 x) y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpRsh16x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) < 16 + // result: (SRAconst (SLLconst x [16]) [c+16]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 16) { + break + } + v.reset(OpARMSRAconst) + v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) + v0.AddArg(x) + v0.AuxInt = 16 + v.AddArg(v0) + v.AuxInt = c + 16 + return true + } + // match: (Rsh16x64 x (Const64 [c])) + // cond: uint64(c) >= 16 + // result: (SRAconst (SLLconst x [16]) [31]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 16) { + break + } + v.reset(OpARMSRAconst) + v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) + v0.AddArg(x) + v0.AuxInt = 16 + v.AddArg(v0) + v.AuxInt = 31 + return true + } + return false +} +func rewriteValueARM_OpRsh16x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh16x8 x y) + // cond: + // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Line, OpSignExt16to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh32Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux16 x y) + // cond: + // result: (SRL x (ZeroExt16to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpRsh32Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux32 x y) + // cond: + // result: (SRL x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpRsh32Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SRLconst x [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] 
+ if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 32) { + break + } + v.reset(OpARMSRLconst) + v.AddArg(x) + v.AuxInt = c + return true + } + // match: (Rsh32Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (Const32 [0]) + for { + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 32) { + break + } + v.reset(OpConst32) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM_OpRsh32Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32Ux8 x y) + // cond: + // result: (SRL x (ZeroExt8to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpRsh32x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x16 x y) + // cond: + // result: (SRA x (ZeroExt16to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpRsh32x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x32 x y) + // cond: + // result: (SRA x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpRsh32x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) < 32 + // result: (SRAconst x [c]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 32) { + break + } + v.reset(OpARMSRAconst) + v.AddArg(x) + v.AuxInt = c + return true + } + // match: (Rsh32x64 x (Const64 [c])) + // cond: uint64(c) >= 32 + // result: (SRAconst x [31]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 32) { + break + } + v.reset(OpARMSRAconst) + v.AddArg(x) + v.AuxInt = 31 + return true + } + return false +} +func rewriteValueARM_OpRsh32x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh32x8 x y) + // cond: + // result: (SRA x (ZeroExt8to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v.AddArg(x) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueARM_OpRsh8Ux16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux16 x y) + // cond: + // result: (SRL (ZeroExt8to32 x) (ZeroExt16to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh8Ux32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux32 x y) + // cond: + // result: (SRL (ZeroExt8to32 x) y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpRsh8Ux64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SRLconst (SLLconst x [24]) [c+24]) + for { + x := 
v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSRLconst) + v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) + v0.AddArg(x) + v0.AuxInt = 24 + v.AddArg(v0) + v.AuxInt = c + 24 + return true + } + // match: (Rsh8Ux64 _ (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (Const8 [0]) + for { + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 8) { + break + } + v.reset(OpConst8) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM_OpRsh8Ux8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8Ux8 x y) + // cond: + // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRL) + v0 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh8x16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x16 x y) + // cond: + // result: (SRA (SignExt8to32 x) (ZeroExt16to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt16to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpRsh8x32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x32 x y) + // cond: + // result: (SRA (SignExt8to32 x) y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpRsh8x64(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) < 8 + // result: (SRAconst (SLLconst x [24]) [c+24]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) < 8) { + break + } + v.reset(OpARMSRAconst) + v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) + v0.AddArg(x) + v0.AuxInt = 24 + v.AddArg(v0) + v.AuxInt = c + 24 + return true + } + // match: (Rsh8x64 x (Const64 [c])) + // cond: uint64(c) >= 8 + // result: (SRAconst (SLLconst x [24]) [31]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpConst64 { + break + } + c := v_1.AuxInt + if !(uint64(c) >= 8) { + break + } + v.reset(OpARMSRAconst) + v0 := b.NewValue0(v.Line, OpARMSLLconst, config.fe.TypeUInt32()) + v0.AddArg(x) + v0.AuxInt = 24 + v.AddArg(v0) + v.AuxInt = 31 + return true + } + return false +} +func rewriteValueARM_OpRsh8x8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Rsh8x8 x y) + // cond: + // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y)) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSRA) + v0 := b.NewValue0(v.Line, OpSignExt8to32, config.fe.TypeInt32()) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpZeroExt8to32, config.fe.TypeUInt32()) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueARM_OpARMSBC(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SBC (MOVWconst [c]) x flags) + // cond: + // result: (RSCconst [c] x flags) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + flags := 
v.Args[2] + v.reset(OpARMRSCconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(flags) + return true + } + // match: (SBC x (MOVWconst [c]) flags) + // cond: + // result: (SBCconst [c] x flags) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + flags := v.Args[2] + v.reset(OpARMSBCconst) + v.AuxInt = c + v.AddArg(x) + v.AddArg(flags) + return true + } + return false +} +func rewriteValueARM_OpARMSBCconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SBCconst [c] (ADDconst [d] x) flags) + // cond: + // result: (SBCconst [int64(int32(c-d))] x flags) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + flags := v.Args[1] + v.reset(OpARMSBCconst) + v.AuxInt = int64(int32(c - d)) + v.AddArg(x) + v.AddArg(flags) + return true + } + // match: (SBCconst [c] (SUBconst [d] x) flags) + // cond: + // result: (SBCconst [int64(int32(c+d))] x flags) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + flags := v.Args[1] + v.reset(OpARMSBCconst) + v.AuxInt = int64(int32(c + d)) + v.AddArg(x) + v.AddArg(flags) + return true + } + return false +} +func rewriteValueARM_OpARMSLL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SLL x (MOVWconst [c])) + // cond: + // result: (SLLconst x [c&31]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMSLLconst) + v.AddArg(x) + v.AuxInt = c & 31 + return true + } + return false +} +func rewriteValueARM_OpARMSLLconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SLLconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(uint32(d)<<uint64(c))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(uint32(d) << uint64(c)) + return true + } + return false +} +func rewriteValueARM_OpARMSRA(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRA x (MOVWconst [c])) + // cond: + // result: (SRAconst x [c&31]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMSRAconst) + v.AddArg(x) + v.AuxInt = c & 31 + return true + } + return false +} +func rewriteValueARM_OpARMSRAconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRAconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(int32(d)>>uint64(c))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(d) >> uint64(c)) + return true + } + return false +} +func rewriteValueARM_OpARMSRL(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRL x (MOVWconst [c])) + // cond: + // result: (SRLconst x [c&31]) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMSRLconst) + v.AddArg(x) + v.AuxInt = c & 31 + return true + } + return false +} +func rewriteValueARM_OpARMSRLconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SRLconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(uint32(d)>>uint64(c))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(uint32(d) >> uint64(c)) + return true + } + return false +} +func rewriteValueARM_OpARMSUB(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUB (MOVWconst [c]) x) + // cond: + // result: (RSBconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMRSBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (SUB x (MOVWconst [c])) + // cond: + // result: (SUBconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMSUBconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (SUB x x) + // cond: + // result: (MOVWconst [0]) + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + 
v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM_OpARMSUBS(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBS (MOVWconst [c]) x) + // cond: + // result: (RSBSconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMRSBSconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (SUBS x (MOVWconst [c])) + // cond: + // result: (SUBSconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMSUBSconst) + v.AuxInt = c + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpARMSUBconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SUBconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (SUBconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [int64(int32(d-c))]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = int64(int32(d - c)) + return true + } + // match: (SUBconst [c] (SUBconst [d] x)) + // cond: + // result: (ADDconst [int64(int32(-c-d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMSUBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int64(int32(-c - d)) + v.AddArg(x) + return true + } + // match: (SUBconst [c] (ADDconst [d] x)) + // cond: + // result: (ADDconst [int64(int32(-c+d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMADDconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMADDconst) + v.AuxInt = int64(int32(-c + d)) + v.AddArg(x) + return true + } + // match: (SUBconst [c] (RSBconst [d] x)) + // cond: + // result: (RSBconst [int64(int32(-c+d))] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMRSBconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMRSBconst) + v.AuxInt = int64(int32(-c + d)) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpSelect0(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Select0 x) + // cond: t.IsFlags() + // result: (Carry x) + for { + t := v.Type + x := v.Args[0] + if !(t.IsFlags()) { + break + } + v.reset(OpARMCarry) + v.AddArg(x) + return true + } + // match: (Select0 x) + // cond: !t.IsFlags() + // result: (LoweredSelect0 x) + for { + t := v.Type + x := v.Args[0] + if !(!t.IsFlags()) { + break + } + v.reset(OpARMLoweredSelect0) + v.AddArg(x) + return true + } + return false +} +func rewriteValueARM_OpSelect1(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Select1 x) + // cond: + // result: (LoweredSelect1 x) + for { + x := v.Args[0] + v.reset(OpARMLoweredSelect1) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSignExt16to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SignExt16to32 x) + // cond: + // result: (MOVHreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVHreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSignExt8to16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SignExt8to16 x) + // cond: + // result: (MOVBreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSignExt8to32(v *Value, config *Config) bool { + b := v.Block 
+ _ = b + // match: (SignExt8to32 x) + // cond: + // result: (MOVBreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpSignmask(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Signmask x) + // cond: + // result: (SRAconst x [31]) + for { + x := v.Args[0] + v.reset(OpARMSRAconst) + v.AddArg(x) + v.AuxInt = 31 + return true + } +} +func rewriteValueARM_OpSqrt(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sqrt x) + // cond: + // result: (SQRTD x) + for { + x := v.Args[0] + v.reset(OpARMSQRTD) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpStaticCall(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (StaticCall [argwid] {target} mem) + // cond: + // result: (CALLstatic [argwid] {target} mem) + for { + argwid := v.AuxInt + target := v.Aux + mem := v.Args[0] + v.reset(OpARMCALLstatic) + v.AuxInt = argwid + v.Aux = target + v.AddArg(mem) + return true + } +} +func rewriteValueARM_OpStore(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Store [1] ptr val mem) + // cond: + // result: (MOVBstore ptr val mem) + for { + if v.AuxInt != 1 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVBstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [2] ptr val mem) + // cond: + // result: (MOVHstore ptr val mem) + for { + if v.AuxInt != 2 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + v.reset(OpARMMOVHstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [4] ptr val mem) + // cond: !is32BitFloat(val.Type) + // result: (MOVWstore ptr val mem) + for { + if v.AuxInt != 4 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(!is32BitFloat(val.Type)) { + break + } + v.reset(OpARMMOVWstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [4] ptr val mem) + // cond: is32BitFloat(val.Type) + // result: (MOVFstore ptr val mem) + for { + if v.AuxInt != 4 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is32BitFloat(val.Type)) { + break + } + v.reset(OpARMMOVFstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store [8] ptr val mem) + // cond: is64BitFloat(val.Type) + // result: (MOVDstore ptr val mem) + for { + if v.AuxInt != 8 { + break + } + ptr := v.Args[0] + val := v.Args[1] + mem := v.Args[2] + if !(is64BitFloat(val.Type)) { + break + } + v.reset(OpARMMOVDstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpSub16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub16 x y) + // cond: + // result: (SUB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub32 x y) + // cond: + // result: (SUB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub32F x y) + // cond: + // result: (SUBF x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUBF) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32carry(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: 
(Sub32carry x y) + // cond: + // result: (SUBS x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUBS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub32withcarry(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub32withcarry x y c) + // cond: + // result: (SBC x y c) + for { + x := v.Args[0] + y := v.Args[1] + c := v.Args[2] + v.reset(OpARMSBC) + v.AddArg(x) + v.AddArg(y) + v.AddArg(c) + return true + } +} +func rewriteValueARM_OpSub64F(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub64F x y) + // cond: + // result: (SUBD x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUBD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSub8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Sub8 x y) + // cond: + // result: (SUB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpSubPtr(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (SubPtr x y) + // cond: + // result: (SUB x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMSUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpTrunc16to8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc16to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpTrunc32to16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc32to16 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpTrunc32to8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Trunc32to8 x) + // cond: + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpARMXOR(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XOR (MOVWconst [c]) x) + // cond: + // result: (XORconst [c] x) + for { + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + c := v_0.AuxInt + x := v.Args[1] + v.reset(OpARMXORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XOR x (MOVWconst [c])) + // cond: + // result: (XORconst [c] x) + for { + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpARMMOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpARMXORconst) + v.AuxInt = c + v.AddArg(x) + return true + } + // match: (XOR x x) + // cond: + // result: (MOVWconst [0]) + for { + x := v.Args[0] + if x != v.Args[1] { + break + } + v.reset(OpARMMOVWconst) + v.AuxInt = 0 + return true + } + return false +} +func rewriteValueARM_OpARMXORconst(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (XORconst [0] x) + // cond: + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVWconst [d])) + // cond: + // result: (MOVWconst [c^d]) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMMOVWconst { + break + } + d := v_0.AuxInt + v.reset(OpARMMOVWconst) + v.AuxInt = c ^ d + return true + } + // match: (XORconst [c] (XORconst [d] x)) + // cond: + // result: (XORconst [c^d] x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpARMXORconst { + break + } + d := v_0.AuxInt + x := v_0.Args[0] + v.reset(OpARMXORconst) + v.AuxInt = c ^ d + v.AddArg(x) + 
return true + } + return false +} +func rewriteValueARM_OpXor16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Xor16 x y) + // cond: + // result: (XOR x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMXOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpXor32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Xor32 x y) + // cond: + // result: (XOR x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMXOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpXor8(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Xor8 x y) + // cond: + // result: (XOR x y) + for { + x := v.Args[0] + y := v.Args[1] + v.reset(OpARMXOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueARM_OpZero(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Zero [s] _ mem) + // cond: SizeAndAlign(s).Size() == 0 + // result: mem + for { + s := v.AuxInt + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 0) { + break + } + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() == 1 + // result: (MOVBstore ptr (MOVWconst [0]) mem) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 1) { + break + } + v.reset(OpARMMOVBstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0 + // result: (MOVHstore ptr (MOVWconst [0]) mem) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 2 && SizeAndAlign(s).Align()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() == 2 + // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 2) { + break + } + v.reset(OpARMMOVBstore) + v.AuxInt = 1 + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v1.AuxInt = 0 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0 + // result: (MOVWstore ptr (MOVWconst [0]) mem) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%4 == 0) { + break + } + v.reset(OpARMMOVWstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0 + // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 4 && SizeAndAlign(s).Align()%2 == 0) { + break + } + v.reset(OpARMMOVHstore) + v.AuxInt = 2 + v.AddArg(ptr) + v0 := 
b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVHstore, TypeMem) + v1.AuxInt = 0 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v2.AuxInt = 0 + v1.AddArg(v2) + v1.AddArg(mem) + v.AddArg(v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() == 4 + // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 4) { + break + } + v.reset(OpARMMOVBstore) + v.AuxInt = 3 + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v1.AuxInt = 2 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v3.AuxInt = 1 + v3.AddArg(ptr) + v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v4.AuxInt = 0 + v3.AddArg(v4) + v5 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v5.AuxInt = 0 + v5.AddArg(ptr) + v6 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v6.AuxInt = 0 + v5.AddArg(v6) + v5.AddArg(mem) + v3.AddArg(v5) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() == 3 + // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() == 3) { + break + } + v.reset(OpARMMOVBstore) + v.AuxInt = 2 + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v1.AuxInt = 1 + v1.AddArg(ptr) + v2 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v2.AuxInt = 0 + v1.AddArg(v2) + v3 := b.NewValue0(v.Line, OpARMMOVBstore, TypeMem) + v3.AuxInt = 0 + v3.AddArg(ptr) + v4 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v4.AuxInt = 0 + v3.AddArg(v4) + v3.AddArg(mem) + v1.AddArg(v3) + v.AddArg(v1) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0 + // result: (DUFFZERO [4 * (128 - int64(SizeAndAlign(s).Size()/4))] ptr (MOVWconst [0]) mem) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Size() <= 512 && SizeAndAlign(s).Align()%4 == 0) { + break + } + v.reset(OpARMDUFFZERO) + v.AuxInt = 4 * (128 - int64(SizeAndAlign(s).Size()/4)) + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v0.AuxInt = 0 + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0 + // result: (LoweredZero ptr (ADDconst ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size()%4 == 0 && SizeAndAlign(s).Size() > 512 && SizeAndAlign(s).Align()%4 == 0) { + break + } + v.reset(OpARMLoweredZero) + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, 
OpARMADDconst, ptr.Type) + v0.AddArg(ptr) + v0.AuxInt = SizeAndAlign(s).Size() + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v1.AuxInt = 0 + v.AddArg(v1) + v.AddArg(mem) + return true + } + // match: (Zero [s] ptr mem) + // cond: SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0 + // result: (LoweredZeroU ptr (ADDconst ptr [SizeAndAlign(s).Size()]) (MOVWconst [0]) mem) + for { + s := v.AuxInt + ptr := v.Args[0] + mem := v.Args[1] + if !(SizeAndAlign(s).Size() > 4 && SizeAndAlign(s).Align()%4 != 0) { + break + } + v.reset(OpARMLoweredZeroU) + v.AddArg(ptr) + v0 := b.NewValue0(v.Line, OpARMADDconst, ptr.Type) + v0.AddArg(ptr) + v0.AuxInt = SizeAndAlign(s).Size() + v.AddArg(v0) + v1 := b.NewValue0(v.Line, OpARMMOVWconst, config.fe.TypeUInt32()) + v1.AuxInt = 0 + v.AddArg(v1) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueARM_OpZeroExt16to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt16to32 x) + // cond: + // result: (MOVHUreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVHUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpZeroExt8to16(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt8to16 x) + // cond: + // result: (MOVBUreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpZeroExt8to32(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (ZeroExt8to32 x) + // cond: + // result: (MOVBUreg x) + for { + x := v.Args[0] + v.reset(OpARMMOVBUreg) + v.AddArg(x) + return true + } +} +func rewriteValueARM_OpZeromask(v *Value, config *Config) bool { + b := v.Block + _ = b + // match: (Zeromask x) + // cond: + // result: (LoweredZeromask x) + for { + x := v.Args[0] + v.reset(OpARMLoweredZeromask) + v.AddArg(x) + return true + } +} +func rewriteBlockARM(b *Block) bool { + switch b.Kind { + case BlockARMEQ: + // match: (EQ (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (EQ (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (EQ (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (EQ (FlagGT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (EQ (FlagGT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (EQ (InvertFlags cmp) yes no) + // cond: + // result: (EQ cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := 
b.Succs[1] + b.Kind = BlockARMEQ + b.SetControl(cmp) + _ = yes + _ = no + return true + } + case BlockARMGE: + // match: (GE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (GE (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GE (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GE (FlagGT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (GE (FlagGT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (GE (InvertFlags cmp) yes no) + // cond: + // result: (LE cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMLE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + case BlockARMGT: + // match: (GT (FlagEQ) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GT (FlagLT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GT (FlagLT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (GT (FlagGT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (GT (FlagGT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (GT (InvertFlags cmp) yes no) + // cond: + // result: (LT cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMLT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + case BlockIf: + // match: (If (Equal cc) yes no) + // cond: + // result: (EQ cc yes no) + for { + v 
:= b.Control + if v.Op != OpARMEqual { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMEQ + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (NotEqual cc) yes no) + // cond: + // result: (NE cc yes no) + for { + v := b.Control + if v.Op != OpARMNotEqual { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMNE + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (LessThan cc) yes no) + // cond: + // result: (LT cc yes no) + for { + v := b.Control + if v.Op != OpARMLessThan { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMLT + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (LessThanU cc) yes no) + // cond: + // result: (ULT cc yes no) + for { + v := b.Control + if v.Op != OpARMLessThanU { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMULT + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (LessEqual cc) yes no) + // cond: + // result: (LE cc yes no) + for { + v := b.Control + if v.Op != OpARMLessEqual { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMLE + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (LessEqualU cc) yes no) + // cond: + // result: (ULE cc yes no) + for { + v := b.Control + if v.Op != OpARMLessEqualU { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMULE + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (GreaterThan cc) yes no) + // cond: + // result: (GT cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterThan { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMGT + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (GreaterThanU cc) yes no) + // cond: + // result: (UGT cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterThanU { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMUGT + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (GreaterEqual cc) yes no) + // cond: + // result: (GE cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterEqual { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMGE + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If (GreaterEqualU cc) yes no) + // cond: + // result: (UGE cc yes no) + for { + v := b.Control + if v.Op != OpARMGreaterEqualU { + break + } + cc := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMUGE + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (If cond yes no) + // cond: + // result: (NE (CMPconst [0] cond) yes no) + for { + v := b.Control + cond := b.Control + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMNE + v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags) + v0.AuxInt = 0 + v0.AddArg(cond) + b.SetControl(v0) + _ = yes + _ = no + return true + } + case BlockARMLE: + // match: (LE (FlagEQ) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LE (FlagLT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = 
BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LE (FlagLT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LE (FlagGT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (LE (FlagGT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (LE (InvertFlags cmp) yes no) + // cond: + // result: (GE cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMGE + b.SetControl(cmp) + _ = yes + _ = no + return true + } + case BlockARMLT: + // match: (LT (FlagEQ) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagEQ { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (LT (FlagLT_ULT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagLT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LT (FlagLT_UGT) yes no) + // cond: + // result: (First nil yes no) + for { + v := b.Control + if v.Op != OpARMFlagLT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + _ = yes + _ = no + return true + } + // match: (LT (FlagGT_ULT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_ULT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (LT (FlagGT_UGT) yes no) + // cond: + // result: (First nil no yes) + for { + v := b.Control + if v.Op != OpARMFlagGT_UGT { + break + } + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockFirst + b.SetControl(nil) + b.swapSuccessors() + _ = no + _ = yes + return true + } + // match: (LT (InvertFlags cmp) yes no) + // cond: + // result: (GT cmp yes no) + for { + v := b.Control + if v.Op != OpARMInvertFlags { + break + } + cmp := v.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMGT + b.SetControl(cmp) + _ = yes + _ = no + return true + } + case BlockARMNE: + // match: (NE (CMPconst [0] (Equal cc)) yes no) + // cond: + // result: (EQ cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMEqual { + break + } + cc := v_0.Args[0] + yes := b.Succs[0] + no := b.Succs[1] + b.Kind = BlockARMEQ + b.SetControl(cc) + _ = yes + _ = no + return true + } + // match: (NE (CMPconst [0] (NotEqual cc)) yes no) + // cond: + // result: (NE cc yes no) + for { + v := b.Control + if v.Op != OpARMCMPconst { + break + } + if v.AuxInt != 0 { + break + } + v_0 := v.Args[0] + if v_0.Op != OpARMNotEqual { + break + } + cc := 
v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMNE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // cond:
+ // result: (LT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // cond:
+ // result: (ULT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // cond:
+ // result: (LE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMLE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // cond:
+ // result: (ULE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMULE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // cond:
+ // result: (GT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // cond:
+ // result: (UGT cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGT
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // cond:
+ // result: (GE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // cond:
+ // result: (UGE cc yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMCMPconst {
+ break
+ }
+ if v.AuxInt != 0 {
+ break
+ }
+ v_0 := v.Args[0]
+ if v_0.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v_0.Args[0]
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockARMUGE
+ b.SetControl(cc)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagEQ {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
+ break
+ }
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
 // cond:
- // result: (EQ cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMEqual {
+ if v.Op != OpARMFlagGT_ULT {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMEQ
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (If (NotEqual cc) yes no)
+ // match: (NE (FlagGT_UGT) yes no)
 // cond:
- // result: (NE cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMNotEqual {
+ if v.Op != OpARMFlagGT_UGT {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMNE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (If (LessThan cc) yes no)
+ // match: (NE (InvertFlags cmp) yes no)
 // cond:
- // result: (LT cc yes no)
+ // result: (NE cmp yes no)
 for {
 v := b.Control
- if v.Op != OpARMLessThan {
+ if v.Op != OpARMInvertFlags {
 break
 }
- cc := v.Args[0]
+ cmp := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMLT
- b.SetControl(cc)
+ b.Kind = BlockARMNE
+ b.SetControl(cmp)
 _ = yes
 _ = no
 return true
 }
- // match: (If (LessThanU cc) yes no)
+ case BlockARMUGE:
+ // match: (UGE (FlagEQ) yes no)
 // cond:
- // result: (ULT cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMLessThanU {
+ if v.Op != OpARMFlagEQ {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMULT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (If (LessEqual cc) yes no)
+ // match: (UGE (FlagLT_ULT) yes no)
 // cond:
- // result: (LE cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMLessEqual {
+ if v.Op != OpARMFlagLT_ULT {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMLE
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
 _ = no
+ _ = yes
 return true
 }
- // match: (If (LessEqualU cc) yes no)
+ // match: (UGE (FlagLT_UGT) yes no)
 // cond:
- // result: (ULE cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMLessEqualU {
+ if v.Op != OpARMFlagLT_UGT {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMULE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (If (GreaterThan cc) yes no)
+ // match: (UGE (FlagGT_ULT) yes no)
 // cond:
- // result: (GT cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMGreaterThan {
+ if v.Op != OpARMFlagGT_ULT {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMGT
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
 _ = no
+ _ = yes
 return true
 }
- // match: (If (GreaterThanU cc) yes no)
+ // match: (UGE (FlagGT_UGT) yes no)
 // cond:
- // result: (UGT cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMGreaterThanU {
+ if v.Op != OpARMFlagGT_UGT {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMUGT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (If (GreaterEqual cc) yes no)
+ // match: (UGE (InvertFlags cmp) yes no)
 // cond:
- // result: (GE cc yes no)
+ // result: (ULE cmp yes no)
 for {
 v := b.Control
- if v.Op != OpARMGreaterEqual {
+ if v.Op != OpARMInvertFlags {
 break
 }
- cc := v.Args[0]
+ cmp := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMGE
- b.SetControl(cc)
+ b.Kind = BlockARMULE
+ b.SetControl(cmp)
 _ = yes
 _ = no
 return true
 }
- // match: (If (GreaterEqualU cc) yes no)
+ case BlockARMUGT:
+ // match: (UGT (FlagEQ) yes no)
 // cond:
- // result: (UGE cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMGreaterEqualU {
+ if v.Op != OpARMFlagEQ {
 break
 }
- cc := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMUGE
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
 _ = no
+ _ = yes
 return true
 }
- // match: (If cond yes no)
+ // match: (UGT (FlagLT_ULT) yes no)
 // cond:
- // result: (NE (CMPconst [0] cond) yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- cond := b.Control
+ if v.Op != OpARMFlagLT_ULT {
+ break
+ }
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMNE
- v0 := b.NewValue0(v.Line, OpARMCMPconst, TypeFlags)
- v0.AuxInt = 0
- v0.AddArg(cond)
- b.SetControl(v0)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
 _ = no
+ _ = yes
 return true
 }
- case BlockARMNE:
- // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // match: (UGT (FlagLT_UGT) yes no)
 // cond:
- // result: (EQ cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMEqual {
+ if v.Op != OpARMFlagLT_UGT {
 break
 }
- cc := v_0.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMEQ
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // match: (UGT (FlagGT_ULT) yes no)
 // cond:
- // result: (NE cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMNotEqual {
+ if v.Op != OpARMFlagGT_ULT {
 break
 }
- cc := v_0.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMNE
- b.SetControl(cc)
- _ = yes
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
 _ = no
+ _ = yes
 return true
 }
- // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // match: (UGT (FlagGT_UGT) yes no)
 // cond:
- // result: (LT cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
+ if v.Op != OpARMFlagGT_UGT {
 break
 }
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessThan {
- break
- }
- cc := v_0.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMLT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // match: (UGT (InvertFlags cmp) yes no)
 // cond:
- // result: (ULT cc yes no)
+ // result: (ULT cmp yes no)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMInvertFlags {
 break
 }
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessThanU {
- break
- }
- cc := v_0.Args[0]
+ cmp := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
 b.Kind = BlockARMULT
- b.SetControl(cc)
+ b.SetControl(cmp)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ case BlockARMULE:
+ // match: (ULE (FlagEQ) yes no)
 // cond:
- // result: (LE cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
- break
- }
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessEqual {
+ if v.Op != OpARMFlagEQ {
 break
 }
- cc := v_0.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMLE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // match: (ULE (FlagLT_ULT) yes no)
 // cond:
- // result: (ULE cc yes no)
+ // result: (First nil yes no)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
+ if v.Op != OpARMFlagLT_ULT {
 break
 }
- if v.AuxInt != 0 {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ _ = yes
+ _ = no
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // cond:
+ // result: (First nil no yes)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_UGT {
 break
 }
- v_0 := v.Args[0]
- if v_0.Op != OpARMLessEqualU {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
 break
 }
- cc := v_0.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMULE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // match: (ULE (FlagGT_UGT) yes no)
 // cond:
- // result: (GT cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagGT_UGT {
 break
 }
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterThan {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGE cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
 break
 }
- cc := v_0.Args[0]
+ cmp := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMGT
- b.SetControl(cc)
+ b.Kind = BlockARMUGE
+ b.SetControl(cmp)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ case BlockARMULT:
+ // match: (ULT (FlagEQ) yes no)
 // cond:
- // result: (UGT cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagEQ {
 break
 }
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterThanU {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagLT_ULT {
 break
 }
- cc := v_0.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMUGT
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // match: (ULT (FlagLT_UGT) yes no)
 // cond:
- // result: (GE cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagLT_UGT {
 break
 }
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterEqual {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // cond:
+ // result: (First nil yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMFlagGT_ULT {
 break
 }
- cc := v_0.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMGE
- b.SetControl(cc)
+ b.Kind = BlockFirst
+ b.SetControl(nil)
 _ = yes
 _ = no
 return true
 }
- // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // match: (ULT (FlagGT_UGT) yes no)
 // cond:
- // result: (UGE cc yes no)
+ // result: (First nil no yes)
 for {
 v := b.Control
- if v.Op != OpARMCMPconst {
- break
- }
- if v.AuxInt != 0 {
+ if v.Op != OpARMFlagGT_UGT {
 break
 }
- v_0 := v.Args[0]
- if v_0.Op != OpARMGreaterEqualU {
+ yes := b.Succs[0]
+ no := b.Succs[1]
+ b.Kind = BlockFirst
+ b.SetControl(nil)
+ b.swapSuccessors()
+ _ = no
+ _ = yes
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // cond:
+ // result: (UGT cmp yes no)
+ for {
+ v := b.Control
+ if v.Op != OpARMInvertFlags {
 break
 }
- cc := v_0.Args[0]
+ cmp := v.Args[0]
 yes := b.Succs[0]
 no := b.Succs[1]
- b.Kind = BlockARMUGE
- b.SetControl(cc)
+ b.Kind = BlockARMUGT
+ b.SetControl(cmp)
 _ = yes
 _ = no
 return true