// Lowering extension
// Note: we always extend to 64 bits even though some ops don't need that many result bits.
-(SignExt8to16 ...) -> (MOVBQSX ...)
-(SignExt8to32 ...) -> (MOVBQSX ...)
-(SignExt8to64 ...) -> (MOVBQSX ...)
-(SignExt16to32 ...) -> (MOVWQSX ...)
-(SignExt16to64 ...) -> (MOVWQSX ...)
-(SignExt32to64 ...) -> (MOVLQSX ...)
+(SignExt8to16 ...) => (MOVBQSX ...)
+(SignExt8to32 ...) => (MOVBQSX ...)
+(SignExt8to64 ...) => (MOVBQSX ...)
+(SignExt16to32 ...) => (MOVWQSX ...)
+(SignExt16to64 ...) => (MOVWQSX ...)
+(SignExt32to64 ...) => (MOVLQSX ...)
-(ZeroExt8to16 ...) -> (MOVBQZX ...)
-(ZeroExt8to32 ...) -> (MOVBQZX ...)
-(ZeroExt8to64 ...) -> (MOVBQZX ...)
-(ZeroExt16to32 ...) -> (MOVWQZX ...)
-(ZeroExt16to64 ...) -> (MOVWQZX ...)
-(ZeroExt32to64 ...) -> (MOVLQZX ...)
+(ZeroExt8to16 ...) => (MOVBQZX ...)
+(ZeroExt8to32 ...) => (MOVBQZX ...)
+(ZeroExt8to64 ...) => (MOVBQZX ...)
+(ZeroExt16to32 ...) => (MOVWQZX ...)
+(ZeroExt16to64 ...) => (MOVWQZX ...)
+(ZeroExt32to64 ...) => (MOVLQZX ...)
-(Slicemask <t> x) -> (SARQconst (NEGQ <t> x) [63])
+(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])
-(SpectreIndex <t> x y) -> (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
-(SpectreSliceIndex <t> x y) -> (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
// Lowering truncation
// Because we ignore high parts of registers, truncates are just copies.
-(Trunc16to8 ...) -> (Copy ...)
-(Trunc32to8 ...) -> (Copy ...)
-(Trunc32to16 ...) -> (Copy ...)
-(Trunc64to8 ...) -> (Copy ...)
-(Trunc64to16 ...) -> (Copy ...)
-(Trunc64to32 ...) -> (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
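// For example, (Trunc64to8 x) only guarantees the low 8 bits of the result;
// consumers such as MOVBstore or MOVBQSX read just those low bits, so the
// truncation itself needs no instruction.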
// Lowering float <-> int
-(Cvt32to32F ...) -> (CVTSL2SS ...)
-(Cvt32to64F ...) -> (CVTSL2SD ...)
-(Cvt64to32F ...) -> (CVTSQ2SS ...)
-(Cvt64to64F ...) -> (CVTSQ2SD ...)
+(Cvt32to32F ...) => (CVTSL2SS ...)
+(Cvt32to64F ...) => (CVTSL2SD ...)
+(Cvt64to32F ...) => (CVTSQ2SS ...)
+(Cvt64to64F ...) => (CVTSQ2SD ...)
-(Cvt32Fto32 ...) -> (CVTTSS2SL ...)
-(Cvt32Fto64 ...) -> (CVTTSS2SQ ...)
-(Cvt64Fto32 ...) -> (CVTTSD2SL ...)
-(Cvt64Fto64 ...) -> (CVTTSD2SQ ...)
+(Cvt32Fto32 ...) => (CVTTSS2SL ...)
+(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
+(Cvt64Fto32 ...) => (CVTTSD2SL ...)
+(Cvt64Fto64 ...) => (CVTTSD2SQ ...)
-(Cvt32Fto64F ...) -> (CVTSS2SD ...)
-(Cvt64Fto32F ...) -> (CVTSD2SS ...)
+(Cvt32Fto64F ...) => (CVTSS2SD ...)
+(Cvt64Fto32F ...) => (CVTSD2SS ...)
-(Round(32|64)F ...) -> (Copy ...)
+(Round(32|64)F ...) => (Copy ...)
-(CvtBoolToUint8 ...) -> (Copy ...)
+(CvtBoolToUint8 ...) => (Copy ...)
// Lowering shifts
// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
-(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
-(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
-(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
-(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
-
-(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLQ x y)
-(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
-(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
-(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SHLL x y)
-
-(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
-(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
-(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
-(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))
-
-(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRQ x y)
-(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRL x y)
-(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRW x y)
-(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) -> (SHRB x y)
+(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+
+(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
+(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))
+
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRB x y)
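// A rough Go sketch of what the ANDx/SBBxcarrymask pair above computes for the
// 64-bit case (lsh64 is an illustrative name, not part of the rules):
//
//	func lsh64(x, s uint64) uint64 {
//		var mask uint64
//		if s < 64 { // CMPQconst y [64] sets carry iff y < 64 (unsigned),
//			mask = ^uint64(0) // and SBBQcarrymask turns that carry into all ones
//		}
//		return (x << (s & 63)) & mask // SHLQ itself uses only the low 6 bits of the count
//	}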
// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
-(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
-(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
-(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
-(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) -> (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))
+(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
+(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
+(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
+(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))
-(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARQ x y)
-(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARL x y)
-(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARW x y)
-(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) -> (SARB x y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y)
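// A rough Go sketch of the 64-bit signed case (rsh64 is an illustrative name):
//
//	func rsh64(x int64, s uint64) int64 {
//		if s >= 64 { // SBBQcarrymask is 0 here, so NOTQ/ORQ force the count to all ones
//			s = 63 // and SARQ masks the count to 6 bits, i.e. a shift by 63
//		}
//		return x >> s // arithmetic shift; with s == 63 this is 0 or -1 depending on the sign of x
//	}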
// Lowering integer comparisons
-(Less(64|32|16|8) x y) -> (SETL (CMP(Q|L|W|B) x y))
-(Less(64|32|16|8)U x y) -> (SETB (CMP(Q|L|W|B) x y))
-(Leq(64|32|16|8) x y) -> (SETLE (CMP(Q|L|W|B) x y))
-(Leq(64|32|16|8)U x y) -> (SETBE (CMP(Q|L|W|B) x y))
-(Eq(Ptr|64|32|16|8|B) x y) -> (SETEQ (CMP(Q|Q|L|W|B|B) x y))
-(Neq(Ptr|64|32|16|8|B) x y) -> (SETNE (CMP(Q|Q|L|W|B|B) x y))
+(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y))
+(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8) x y) => (SETLE (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8)U x y) => (SETBE (CMP(Q|L|W|B) x y))
+(Eq(Ptr|64|32|16|8|B) x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
+(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))
// Lowering floating point comparisons
// Note Go assembler gets UCOMISx operand order wrong, but it is right here
// and the operands are reversed when generating assembly language.
-(Eq(32|64)F x y) -> (SETEQF (UCOMIS(S|D) x y))
-(Neq(32|64)F x y) -> (SETNEF (UCOMIS(S|D) x y))
+(Eq(32|64)F x y) => (SETEQF (UCOMIS(S|D) x y))
+(Neq(32|64)F x y) => (SETNEF (UCOMIS(S|D) x y))
// Use SETGF/SETGEF with reversed operands to dodge NaN case.
-(Less(32|64)F x y) -> (SETGF (UCOMIS(S|D) y x))
-(Leq(32|64)F x y) -> (SETGEF (UCOMIS(S|D) y x))
+(Less(32|64)F x y) => (SETGF (UCOMIS(S|D) y x))
+(Leq(32|64)F x y) => (SETGEF (UCOMIS(S|D) y x))
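// (On unordered operands UCOMISS/UCOMISD set ZF=PF=CF=1, so a below/below-or-equal
// test would wrongly read true; the above/above-or-equal tests that SETGF/SETGEF
// turn into read false, which is what Go requires when either operand is NaN.)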
// Lowering loads
-(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVQload ptr mem)
-(Load <t> ptr mem) && is32BitInt(t) -> (MOVLload ptr mem)
-(Load <t> ptr mem) && is16BitInt(t) -> (MOVWload ptr mem)
-(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) -> (MOVBload ptr mem)
-(Load <t> ptr mem) && is32BitFloat(t) -> (MOVSSload ptr mem)
-(Load <t> ptr mem) && is64BitFloat(t) -> (MOVSDload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
// Lowering stores
// These more-specific FP versions of Store pattern should come first.
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVSSstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 -> (MOVQstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 -> (MOVLstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 => (MOVQstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
// Lowering moves
-(Move [0] _ _ mem) -> mem
-(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem)
-(Move [4] dst src mem) -> (MOVLstore dst (MOVLload src mem) mem)
-(Move [8] dst src mem) -> (MOVQstore dst (MOVQload src mem) mem)
-(Move [16] dst src mem) && config.useSSE -> (MOVOstore dst (MOVOload src mem) mem)
-(Move [16] dst src mem) && !config.useSSE ->
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
+(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
+(Move [16] dst src mem) && config.useSSE => (MOVOstore dst (MOVOload src mem) mem)
+(Move [16] dst src mem) && !config.useSSE =>
(MOVQstore [8] dst (MOVQload [8] src mem)
(MOVQstore dst (MOVQload src mem) mem))
-(Move [32] dst src mem) ->
+(Move [32] dst src mem) =>
(Move [16]
(OffPtr <dst.Type> dst [16])
(OffPtr <src.Type> src [16])
(Move [16] dst src mem))
-(Move [48] dst src mem) && config.useSSE ->
+(Move [48] dst src mem) && config.useSSE =>
(Move [32]
(OffPtr <dst.Type> dst [16])
(OffPtr <src.Type> src [16])
(Move [16] dst src mem))
-(Move [64] dst src mem) && config.useSSE ->
+(Move [64] dst src mem) && config.useSSE =>
(Move [32]
(OffPtr <dst.Type> dst [32])
(OffPtr <src.Type> src [32])
(Move [32] dst src mem))
-(Move [3] dst src mem) ->
+(Move [3] dst src mem) =>
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [5] dst src mem) ->
+(Move [5] dst src mem) =>
(MOVBstore [4] dst (MOVBload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [6] dst src mem) ->
+(Move [6] dst src mem) =>
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [7] dst src mem) ->
+(Move [7] dst src mem) =>
(MOVLstore [3] dst (MOVLload [3] src mem)
(MOVLstore dst (MOVLload src mem) mem))
-(Move [9] dst src mem) ->
+(Move [9] dst src mem) =>
(MOVBstore [8] dst (MOVBload [8] src mem)
(MOVQstore dst (MOVQload src mem) mem))
-(Move [10] dst src mem) ->
+(Move [10] dst src mem) =>
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVQstore dst (MOVQload src mem) mem))
-(Move [12] dst src mem) ->
+(Move [12] dst src mem) =>
(MOVLstore [8] dst (MOVLload [8] src mem)
(MOVQstore dst (MOVQload src mem) mem))
-(Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 ->
- (MOVQstore [s-8] dst (MOVQload [s-8] src mem)
+(Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 =>
+ (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
(MOVQstore dst (MOVQload src mem) mem))
// Adjust moves to be a multiple of 16 bytes.
(Move [s] dst src mem)
- && s > 16 && s%16 != 0 && s%16 <= 8 ->
+ && s > 16 && s%16 != 0 && s%16 <= 8 =>
(Move [s-s%16]
(OffPtr <dst.Type> dst [s%16])
(OffPtr <src.Type> src [s%16])
(MOVQstore dst (MOVQload src mem) mem))
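// E.g. for s == 21: the MOVQstore copies bytes [0,8) and the residual Move [16]
// then copies bytes [5,21); the 3-byte overlap just rewrites the same source
// bytes (Move assumes dst and src do not overlap).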
(Move [s] dst src mem)
- && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE ->
+ && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE =>
(Move [s-s%16]
(OffPtr <dst.Type> dst [s%16])
(OffPtr <src.Type> src [s%16])
(MOVOstore dst (MOVOload src mem) mem))
(Move [s] dst src mem)
- && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE ->
+ && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE =>
(Move [s-s%16]
(OffPtr <dst.Type> dst [s%16])
(OffPtr <src.Type> src [s%16])
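		(MOVQstore [8] dst (MOVQload [8] src mem)
			(MOVQstore dst (MOVQload src mem) mem)))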
// Medium copying uses a duff device.
(Move [s] dst src mem)
&& s > 64 && s <= 16*64 && s%16 == 0
- && !config.noDuffDevice && logLargeCopy(v, s) ->
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
(DUFFCOPY [14*(64-s/16)] dst src mem)
// 14 and 64 are magic constants. 14 is the number of bytes to encode:
//	MOVUPS	(SI), X0
//	MOVUPS	X0, (DI)
//	ADDQ	$16, SI
//	ADDQ	$16, DI
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
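// E.g. for s == 128 there are s/16 == 8 blocks left to run, so the entry offset
// is 14*(64-8) == 784 bytes into duffcopy, leaving exactly the last 8 blocks
// (8*16 == 128 bytes) before the RET.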
// Large copying uses REP MOVSQ.
-(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) ->
+(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) =>
(REPMOVSQ dst src (MOVQconst [s/8]) mem)
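// The MOVQconst [s/8] becomes the RCX count of 8-byte words moved by REP MOVSQ.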
// Lowering Zero instructions
-(Zero [0] _ mem) -> mem
-(Zero [1] destptr mem) -> (MOVBstoreconst [0] destptr mem)
-(Zero [2] destptr mem) -> (MOVWstoreconst [0] destptr mem)
-(Zero [4] destptr mem) -> (MOVLstoreconst [0] destptr mem)
-(Zero [8] destptr mem) -> (MOVQstoreconst [0] destptr mem)
-
-(Zero [3] destptr mem) ->
- (MOVBstoreconst [makeValAndOff(0,2)] destptr
- (MOVWstoreconst [0] destptr mem))
-(Zero [5] destptr mem) ->
- (MOVBstoreconst [makeValAndOff(0,4)] destptr
- (MOVLstoreconst [0] destptr mem))
-(Zero [6] destptr mem) ->
- (MOVWstoreconst [makeValAndOff(0,4)] destptr
- (MOVLstoreconst [0] destptr mem))
-(Zero [7] destptr mem) ->
- (MOVLstoreconst [makeValAndOff(0,3)] destptr
- (MOVLstoreconst [0] destptr mem))
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff32(0,0)] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)
+(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)
+
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff32(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff32(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
// Strip off any fractional word zeroing.
-(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE ->
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE =>
(Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
- (MOVQstoreconst [0] destptr mem))
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
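// E.g. for s == 13: the MOVQstoreconst clears bytes [0,8) and the residual
// Zero [8] at destptr+5 clears bytes [5,13); re-clearing [5,8) is harmless.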
// Zero small numbers of words directly.
-(Zero [16] destptr mem) && !config.useSSE ->
- (MOVQstoreconst [makeValAndOff(0,8)] destptr
- (MOVQstoreconst [0] destptr mem))
-(Zero [24] destptr mem) && !config.useSSE ->
- (MOVQstoreconst [makeValAndOff(0,16)] destptr
- (MOVQstoreconst [makeValAndOff(0,8)] destptr
- (MOVQstoreconst [0] destptr mem)))
-(Zero [32] destptr mem) && !config.useSSE ->
- (MOVQstoreconst [makeValAndOff(0,24)] destptr
- (MOVQstoreconst [makeValAndOff(0,16)] destptr
- (MOVQstoreconst [makeValAndOff(0,8)] destptr
- (MOVQstoreconst [0] destptr mem))))
-
-(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE ->
- (MOVQstoreconst [makeValAndOff(0,s-8)] destptr
- (MOVQstoreconst [0] destptr mem))
+(Zero [16] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
+(Zero [24] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)))
+(Zero [32] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,24)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))))
+
+(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE =>
+ (MOVQstoreconst [makeValAndOff32(0,int32(s-8))] destptr
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
// Adjust zeros to be a multiple of 16 bytes.
-(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE ->
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
(MOVOstore destptr (MOVOconst [0]) mem))
-(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE ->
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
(Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
- (MOVQstoreconst [0] destptr mem))
+ (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
-(Zero [16] destptr mem) && config.useSSE ->
+(Zero [16] destptr mem) && config.useSSE =>
(MOVOstore destptr (MOVOconst [0]) mem)
-(Zero [32] destptr mem) && config.useSSE ->
+(Zero [32] destptr mem) && config.useSSE =>
(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
(MOVOstore destptr (MOVOconst [0]) mem))
-(Zero [48] destptr mem) && config.useSSE ->
+(Zero [48] destptr mem) && config.useSSE =>
(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
(MOVOstore destptr (MOVOconst [0]) mem)))
-(Zero [64] destptr mem) && config.useSSE ->
+(Zero [64] destptr mem) && config.useSSE =>
(MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0])
(MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0])
(MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0])
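				(MOVOstore destptr (MOVOconst [0]) mem))))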
// Medium zeroing uses a duff device.
(Zero [s] destptr mem)
- && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice ->
+ && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
(DUFFZERO [s] destptr (MOVOconst [0]) mem)
// Large zeroing uses REP STOSQ.
(Zero [s] destptr mem)
&& (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
- && s%8 == 0 ->
+ && s%8 == 0 =>
(REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
// Lowering constants
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int16ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int8ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int16ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int8ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int16ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int32ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int32ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int8ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int16ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int8ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
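// The int8/int16/int32ToAuxInt calls above are thin typed-aux shims defined in
// rewrite.go; roughly (a sketch, see rewrite.go for the real definitions):
//
//	func int8ToAuxInt(i int8) int64   { return int64(i) }
//	func int16ToAuxInt(i int16) int64 { return int64(i) }
//	func int32ToAuxInt(i int32) int64 { return int64(i) }
//	func int64ToAuxInt(i int64) int64 { return i }
//	func auxIntToInt64(i int64) int64 { return i }
//
// Each constant is now written with the AuxInt type declared for its op
// (CMPWconst takes int16, CMPLconst and CMPQconst take int32, CMPBconst int8).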
// match: (Move [0] _ _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_2
// match: (Move [1] dst src mem)
// result: (MOVBstore dst (MOVBload src mem) mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
dst := v_0
// match: (Move [2] dst src mem)
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
dst := v_0
// match: (Move [4] dst src mem)
// result: (MOVLstore dst (MOVLload src mem) mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
dst := v_0
// match: (Move [8] dst src mem)
// result: (MOVQstore dst (MOVQload src mem) mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
dst := v_0
// cond: config.useSSE
// result: (MOVOstore dst (MOVOload src mem) mem)
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
dst := v_0
// cond: !config.useSSE
// result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
dst := v_0
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
// match: (Move [32] dst src mem)
// result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
for {
- if v.AuxInt != 32 {
+ if auxIntToInt64(v.AuxInt) != 32 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpMove)
- v.AuxInt = 16
+ v.AuxInt = int64ToAuxInt(16)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = 16
+ v0.AuxInt = int64ToAuxInt(16)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = 16
+ v1.AuxInt = int64ToAuxInt(16)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
- v2.AuxInt = 16
+ v2.AuxInt = int64ToAuxInt(16)
v2.AddArg3(dst, src, mem)
v.AddArg3(v0, v1, v2)
return true
// cond: config.useSSE
// result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
for {
- if v.AuxInt != 48 {
+ if auxIntToInt64(v.AuxInt) != 48 {
break
}
dst := v_0
break
}
v.reset(OpMove)
- v.AuxInt = 32
+ v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = 16
+ v0.AuxInt = int64ToAuxInt(16)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = 16
+ v1.AuxInt = int64ToAuxInt(16)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
- v2.AuxInt = 16
+ v2.AuxInt = int64ToAuxInt(16)
v2.AddArg3(dst, src, mem)
v.AddArg3(v0, v1, v2)
return true
// cond: config.useSSE
// result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
for {
- if v.AuxInt != 64 {
+ if auxIntToInt64(v.AuxInt) != 64 {
break
}
dst := v_0
break
}
v.reset(OpMove)
- v.AuxInt = 32
+ v.AuxInt = int64ToAuxInt(32)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = 32
+ v1.AuxInt = int64ToAuxInt(32)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
- v2.AuxInt = 32
+ v2.AuxInt = int64ToAuxInt(32)
v2.AddArg3(dst, src, mem)
v.AddArg3(v0, v1, v2)
return true
// match: (Move [3] dst src mem)
// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpAMD64MOVBstore)
- v.AuxInt = 2
+ v.AuxInt = int32ToAuxInt(2)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
- v0.AuxInt = 2
+ v0.AuxInt = int32ToAuxInt(2)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
// match: (Move [5] dst src mem)
// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
for {
- if v.AuxInt != 5 {
+ if auxIntToInt64(v.AuxInt) != 5 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpAMD64MOVBstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
// match: (Move [6] dst src mem)
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpAMD64MOVWstore)
- v.AuxInt = 4
+ v.AuxInt = int32ToAuxInt(4)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
- v0.AuxInt = 4
+ v0.AuxInt = int32ToAuxInt(4)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
// match: (Move [7] dst src mem)
// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
for {
- if v.AuxInt != 7 {
+ if auxIntToInt64(v.AuxInt) != 7 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpAMD64MOVLstore)
- v.AuxInt = 3
+ v.AuxInt = int32ToAuxInt(3)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
- v0.AuxInt = 3
+ v0.AuxInt = int32ToAuxInt(3)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
// match: (Move [9] dst src mem)
// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
for {
- if v.AuxInt != 9 {
+ if auxIntToInt64(v.AuxInt) != 9 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpAMD64MOVBstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
// match: (Move [10] dst src mem)
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
for {
- if v.AuxInt != 10 {
+ if auxIntToInt64(v.AuxInt) != 10 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpAMD64MOVWstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
// match: (Move [12] dst src mem)
// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
for {
- if v.AuxInt != 12 {
+ if auxIntToInt64(v.AuxInt) != 12 {
break
}
dst := v_0
src := v_1
mem := v_2
v.reset(OpAMD64MOVLstore)
- v.AuxInt = 8
+ v.AuxInt = int32ToAuxInt(8)
v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
- v0.AuxInt = 8
+ v0.AuxInt = int32ToAuxInt(8)
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
}
// match: (Move [s] dst src mem)
// cond: s == 11 || s >= 13 && s <= 15
- // result: (MOVQstore [s-8] dst (MOVQload [s-8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpAMD64MOVQstore)
- v.AuxInt = s - 8
+ v.AuxInt = int32ToAuxInt(int32(s - 8))
v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v0.AuxInt = s - 8
+ v0.AuxInt = int32ToAuxInt(int32(s - 8))
v0.AddArg2(src, mem)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
// cond: s > 16 && s%16 != 0 && s%16 <= 8
// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpMove)
- v.AuxInt = s - s%16
+ v.AuxInt = int64ToAuxInt(s - s%16)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = s % 16
+ v0.AuxInt = int64ToAuxInt(s % 16)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = s % 16
+ v1.AuxInt = int64ToAuxInt(s % 16)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
// cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpMove)
- v.AuxInt = s - s%16
+ v.AuxInt = int64ToAuxInt(s - s%16)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = s % 16
+ v0.AuxInt = int64ToAuxInt(s % 16)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = s % 16
+ v1.AuxInt = int64ToAuxInt(s % 16)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
// cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpMove)
- v.AuxInt = s - s%16
+ v.AuxInt = int64ToAuxInt(s - s%16)
v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
- v0.AuxInt = s % 16
+ v0.AuxInt = int64ToAuxInt(s % 16)
v0.AddArg(dst)
v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
- v1.AuxInt = s % 16
+ v1.AuxInt = int64ToAuxInt(s % 16)
v1.AddArg(src)
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
- v2.AuxInt = 8
+ v2.AuxInt = int32ToAuxInt(8)
v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
- v3.AuxInt = 8
+ v3.AuxInt = int32ToAuxInt(8)
v3.AddArg2(src, mem)
v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
// cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
// result: (DUFFCOPY [14*(64-s/16)] dst src mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
break
}
v.reset(OpAMD64DUFFCOPY)
- v.AuxInt = 14 * (64 - s/16)
+ v.AuxInt = int64ToAuxInt(14 * (64 - s/16))
v.AddArg3(dst, src, mem)
return true
}
// cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
dst := v_0
src := v_1
mem := v_2
}
v.reset(OpAMD64REPMOVSQ)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
- v0.AuxInt = s / 8
+ v0.AuxInt = int64ToAuxInt(s / 8)
v.AddArg4(dst, src, v0, mem)
return true
}
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 16
+ v2.AuxInt = int16ToAuxInt(16)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 16
+ v2.AuxInt = int32ToAuxInt(16)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 16
+ v2.AuxInt = int32ToAuxInt(16)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 16
+ v2.AuxInt = int8ToAuxInt(16)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v3.AuxInt = 16
+ v3.AuxInt = int16ToAuxInt(16)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v3.AuxInt = 16
+ v3.AuxInt = int32ToAuxInt(16)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v3.AuxInt = 16
+ v3.AuxInt = int32ToAuxInt(16)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v3.AuxInt = 16
+ v3.AuxInt = int8ToAuxInt(16)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int16ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int32ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 32
+ v2.AuxInt = int8ToAuxInt(32)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v3.AuxInt = 32
+ v3.AuxInt = int16ToAuxInt(32)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v3.AuxInt = 32
+ v3.AuxInt = int32ToAuxInt(32)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v3.AuxInt = 32
+ v3.AuxInt = int8ToAuxInt(32)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int16ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int32ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int32ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 64
+ v2.AuxInt = int8ToAuxInt(64)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v3.AuxInt = 64
+ v3.AuxInt = int16ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v3.AuxInt = 64
+ v3.AuxInt = int32ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v3.AuxInt = 64
+ v3.AuxInt = int32ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v3.AuxInt = 64
+ v3.AuxInt = int8ToAuxInt(64)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v2.AuxInt = 8
+ v2.AuxInt = int16ToAuxInt(8)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v2.AuxInt = 8
+ v2.AuxInt = int32ToAuxInt(8)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v2.AuxInt = 8
+ v2.AuxInt = int32ToAuxInt(8)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v0.AddArg2(x, y)
v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v2.AuxInt = 8
+ v2.AuxInt = int8ToAuxInt(8)
v2.AddArg(y)
v1.AddArg(v2)
v.AddArg2(v0, v1)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
- v3.AuxInt = 8
+ v3.AuxInt = int16ToAuxInt(8)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
- v3.AuxInt = 8
+ v3.AuxInt = int32ToAuxInt(8)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
- v3.AuxInt = 8
+ v3.AuxInt = int32ToAuxInt(8)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
- v3.AuxInt = 8
+ v3.AuxInt = int8ToAuxInt(8)
v3.AddArg(y)
v2.AddArg(v3)
v1.AddArg(v2)
t := v.Type
x := v_0
v.reset(OpAMD64SARQconst)
- v.AuxInt = 63
+ v.AuxInt = int8ToAuxInt(63)
v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
v0.AddArg(x)
v.AddArg(v0)
y := v_1
v.reset(OpAMD64CMOVQCC)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v1.AddArg2(x, y)
v.AddArg3(x, v0, v1)
y := v_1
v.reset(OpAMD64CMOVQHI)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
- v0.AuxInt = 0
+ v0.AuxInt = int64ToAuxInt(0)
v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
v1.AddArg2(x, y)
v.AddArg3(x, v0, v1)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpAMD64MOVSDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
// result: (MOVSSstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpAMD64MOVSSstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 8
+ // cond: t.Size() == 8
// result: (MOVQstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 8) {
+ if !(t.Size() == 8) {
break
}
v.reset(OpAMD64MOVQstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 4
+ // cond: t.Size() == 4
// result: (MOVLstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 4) {
+ if !(t.Size() == 4) {
break
}
v.reset(OpAMD64MOVLstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 2
+ // cond: t.Size() == 2
// result: (MOVWstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 2) {
+ if !(t.Size() == 2) {
break
}
v.reset(OpAMD64MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(*types.Type).Size() == 1
+ // cond: t.Size() == 1
// result: (MOVBstore ptr val mem)
for {
- t := v.Aux
+ t := auxToType(v.Aux)
ptr := v_0
val := v_1
mem := v_2
- if !(t.(*types.Type).Size() == 1) {
+ if !(t.Size() == 1) {
break
}
v.reset(OpAMD64MOVBstore)
// match: (Zero [0] _ mem)
// result: mem
for {
- if v.AuxInt != 0 {
+ if auxIntToInt64(v.AuxInt) != 0 {
break
}
mem := v_1
return true
}
// match: (Zero [1] destptr mem)
- // result: (MOVBstoreconst [0] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff32(0,0)] destptr mem)
for {
- if v.AuxInt != 1 {
+ if auxIntToInt64(v.AuxInt) != 1 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVBstoreconst)
- v.AuxInt = 0
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v.AddArg2(destptr, mem)
return true
}
// match: (Zero [2] destptr mem)
- // result: (MOVWstoreconst [0] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem)
for {
- if v.AuxInt != 2 {
+ if auxIntToInt64(v.AuxInt) != 2 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVWstoreconst)
- v.AuxInt = 0
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v.AddArg2(destptr, mem)
return true
}
// match: (Zero [4] destptr mem)
- // result: (MOVLstoreconst [0] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem)
for {
- if v.AuxInt != 4 {
+ if auxIntToInt64(v.AuxInt) != 4 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVLstoreconst)
- v.AuxInt = 0
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v.AddArg2(destptr, mem)
return true
}
// match: (Zero [8] destptr mem)
- // result: (MOVQstoreconst [0] destptr mem)
+ // result: (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)
for {
- if v.AuxInt != 8 {
+ if auxIntToInt64(v.AuxInt) != 8 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = 0
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v.AddArg2(destptr, mem)
return true
}
// match: (Zero [3] destptr mem)
- // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [0] destptr mem))
+ // result: (MOVBstoreconst [makeValAndOff32(0,2)] destptr (MOVWstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- if v.AuxInt != 3 {
+ if auxIntToInt64(v.AuxInt) != 3 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVBstoreconst)
- v.AuxInt = makeValAndOff(0, 2)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 2))
v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [5] destptr mem)
- // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
+ // result: (MOVBstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- if v.AuxInt != 5 {
+ if auxIntToInt64(v.AuxInt) != 5 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVBstoreconst)
- v.AuxInt = makeValAndOff(0, 4)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [6] destptr mem)
- // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [0] destptr mem))
+ // result: (MOVWstoreconst [makeValAndOff32(0,4)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- if v.AuxInt != 6 {
+ if auxIntToInt64(v.AuxInt) != 6 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVWstoreconst)
- v.AuxInt = makeValAndOff(0, 4)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 4))
v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [7] destptr mem)
- // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [0] destptr mem))
+ // result: (MOVLstoreconst [makeValAndOff32(0,3)] destptr (MOVLstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- if v.AuxInt != 7 {
+ if auxIntToInt64(v.AuxInt) != 7 {
break
}
destptr := v_0
mem := v_1
v.reset(OpAMD64MOVLstoreconst)
- v.AuxInt = makeValAndOff(0, 3)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 3))
v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [s] destptr mem)
// cond: s%8 != 0 && s > 8 && !config.useSSE
- // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [0] destptr mem))
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
if !(s%8 != 0 && s > 8 && !config.useSSE) {
break
}
v.reset(OpZero)
- v.AuxInt = s - s%8
+ v.AuxInt = int64ToAuxInt(s - s%8)
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v0.AuxInt = s % 8
+ v0.AuxInt = int64ToAuxInt(s % 8)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v1.AddArg2(destptr, mem)
v.AddArg2(v0, v1)
return true
}
// match: (Zero [16] destptr mem)
// cond: !config.useSSE
- // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))
+ // result: (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
destptr := v_0
break
}
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = makeValAndOff(0, 8)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
}
// match: (Zero [24] destptr mem)
// cond: !config.useSSE
- // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem)))
+ // result: (MOVQstoreconst [makeValAndOff32(0,16)] destptr (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem)))
for {
- if v.AuxInt != 24 {
+ if auxIntToInt64(v.AuxInt) != 24 {
break
}
destptr := v_0
break
}
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = makeValAndOff(0, 16)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 16))
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v0.AuxInt = makeValAndOff(0, 8)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v1.AddArg2(destptr, mem)
v0.AddArg2(destptr, v1)
v.AddArg2(destptr, v0)
}
// match: (Zero [32] destptr mem)
// cond: !config.useSSE
- // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [0] destptr mem))))
+ // result: (MOVQstoreconst [makeValAndOff32(0,24)] destptr (MOVQstoreconst [makeValAndOff32(0,16)] destptr (MOVQstoreconst [makeValAndOff32(0,8)] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))))
for {
- if v.AuxInt != 32 {
+ if auxIntToInt64(v.AuxInt) != 32 {
break
}
destptr := v_0
break
}
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = makeValAndOff(0, 24)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 24))
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v0.AuxInt = makeValAndOff(0, 16)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 16))
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v1.AuxInt = makeValAndOff(0, 8)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 8))
v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v2.AuxInt = 0
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v2.AddArg2(destptr, mem)
v1.AddArg2(destptr, v2)
v0.AddArg2(destptr, v1)
}
// match: (Zero [s] destptr mem)
// cond: s > 8 && s < 16 && config.useSSE
- // result: (MOVQstoreconst [makeValAndOff(0,s-8)] destptr (MOVQstoreconst [0] destptr mem))
+ // result: (MOVQstoreconst [makeValAndOff32(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
if !(s > 8 && s < 16 && config.useSSE) {
break
}
v.reset(OpAMD64MOVQstoreconst)
- v.AuxInt = makeValAndOff(0, s-8)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, int32(s-8)))
v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v0.AuxInt = 0
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v0.AddArg2(destptr, mem)
v.AddArg2(destptr, v0)
return true
// cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstore destptr (MOVOconst [0]) mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
break
}
v.reset(OpZero)
- v.AuxInt = s - s%16
+ v.AuxInt = int64ToAuxInt(s - s%16)
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v0.AuxInt = s % 16
+ v0.AuxInt = int64ToAuxInt(s % 16)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v2.AuxInt = 0
+ v2.AuxInt = int128ToAuxInt(0)
v1.AddArg3(destptr, v2, mem)
v.AddArg2(v0, v1)
return true
}
// match: (Zero [s] destptr mem)
// cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
- // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [0] destptr mem))
+ // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVQstoreconst [makeValAndOff32(0,0)] destptr mem))
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
break
}
v.reset(OpZero)
- v.AuxInt = s - s%16
+ v.AuxInt = int64ToAuxInt(s - s%16)
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v0.AuxInt = s % 16
+ v0.AuxInt = int64ToAuxInt(s % 16)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
- v1.AuxInt = 0
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff32(0, 0))
v1.AddArg2(destptr, mem)
v.AddArg2(v0, v1)
return true
// cond: config.useSSE
// result: (MOVOstore destptr (MOVOconst [0]) mem)
for {
- if v.AuxInt != 16 {
+ if auxIntToInt64(v.AuxInt) != 16 {
break
}
destptr := v_0
}
v.reset(OpAMD64MOVOstore)
v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v0.AuxInt = 0
+ v0.AuxInt = int128ToAuxInt(0)
v.AddArg3(destptr, v0, mem)
return true
}
// cond: config.useSSE
// result: (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))
for {
- if v.AuxInt != 32 {
+ if auxIntToInt64(v.AuxInt) != 32 {
break
}
destptr := v_0
}
v.reset(OpAMD64MOVOstore)
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v0.AuxInt = 16
+ v0.AuxInt = int64ToAuxInt(16)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v1.AuxInt = 0
+ v1.AuxInt = int128ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v3 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v3.AuxInt = 0
+ v3.AuxInt = int128ToAuxInt(0)
v2.AddArg3(destptr, v3, mem)
v.AddArg3(v0, v1, v2)
return true
// cond: config.useSSE
// result: (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem)))
for {
- if v.AuxInt != 48 {
+ if auxIntToInt64(v.AuxInt) != 48 {
break
}
destptr := v_0
}
v.reset(OpAMD64MOVOstore)
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v0.AuxInt = 32
+ v0.AuxInt = int64ToAuxInt(32)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v1.AuxInt = 0
+ v1.AuxInt = int128ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v3.AuxInt = 16
+ v3.AuxInt = int64ToAuxInt(16)
v3.AddArg(destptr)
v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v4.AuxInt = 0
+ v4.AuxInt = int128ToAuxInt(0)
v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v6.AuxInt = 0
+ v6.AuxInt = int128ToAuxInt(0)
v5.AddArg3(destptr, v6, mem)
v2.AddArg3(v3, v4, v5)
v.AddArg3(v0, v1, v2)
// cond: config.useSSE
// result: (MOVOstore (OffPtr <destptr.Type> destptr [48]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [32]) (MOVOconst [0]) (MOVOstore (OffPtr <destptr.Type> destptr [16]) (MOVOconst [0]) (MOVOstore destptr (MOVOconst [0]) mem))))
for {
- if v.AuxInt != 64 {
+ if auxIntToInt64(v.AuxInt) != 64 {
break
}
destptr := v_0
}
v.reset(OpAMD64MOVOstore)
v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v0.AuxInt = 48
+ v0.AuxInt = int64ToAuxInt(48)
v0.AddArg(destptr)
v1 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v1.AuxInt = 0
+ v1.AuxInt = int128ToAuxInt(0)
v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v3 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v3.AuxInt = 32
+ v3.AuxInt = int64ToAuxInt(32)
v3.AddArg(destptr)
v4 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v4.AuxInt = 0
+ v4.AuxInt = int128ToAuxInt(0)
v5 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v6 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
- v6.AuxInt = 16
+ v6.AuxInt = int64ToAuxInt(16)
v6.AddArg(destptr)
v7 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v7.AuxInt = 0
+ v7.AuxInt = int128ToAuxInt(0)
v8 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
v9 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v9.AuxInt = 0
+ v9.AuxInt = int128ToAuxInt(0)
v8.AddArg3(destptr, v9, mem)
v5.AddArg3(v6, v7, v8)
v2.AddArg3(v3, v4, v5)
// cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
// result: (DUFFZERO [s] destptr (MOVOconst [0]) mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpAMD64DUFFZERO)
- v.AuxInt = s
+ v.AuxInt = int64ToAuxInt(s)
v0 := b.NewValue0(v.Pos, OpAMD64MOVOconst, types.TypeInt128)
- v0.AuxInt = 0
+ v0.AuxInt = int128ToAuxInt(0)
v.AddArg3(destptr, v0, mem)
return true
}
// cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
for {
- s := v.AuxInt
+ s := auxIntToInt64(v.AuxInt)
destptr := v_0
mem := v_1
if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
}
v.reset(OpAMD64REPSTOSQ)
v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
- v0.AuxInt = s / 8
+ v0.AuxInt = int64ToAuxInt(s / 8)
v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
- v1.AuxInt = 0
+ v1.AuxInt = int64ToAuxInt(0)
v.AddArg4(destptr, v0, v1, mem)
return true
}