(AtomicExchange64 ptr val mem) -> (XCHGQ val ptr mem)
// Atomic adds.
-(Select0 <t> (AtomicAdd32 ptr val mem)) -> (ADDL (Select0 <t> (XADDLlock val ptr mem)) val)
-(Select1 (AtomicAdd32 ptr val mem)) -> (Select1 (XADDLlock val ptr mem))
-(Select0 <t> (AtomicAdd64 ptr val mem)) -> (ADDQ (Select0 <t> (XADDQlock val ptr mem)) val)
-(Select1 (AtomicAdd64 ptr val mem)) -> (Select1 (XADDQlock val ptr mem))
+(AtomicAdd32 ptr val mem) -> (AddTupleFirst32 (XADDLlock val ptr mem) val)
+(AtomicAdd64 ptr val mem) -> (AddTupleFirst64 (XADDQlock val ptr mem) val)
+(Select0 <t> (AddTupleFirst32 tuple val)) -> (ADDL val (Select0 <t> tuple))
+(Select1 (AddTupleFirst32 tuple _)) -> (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 tuple val)) -> (ADDQ val (Select0 <t> tuple))
+(Select1 (AddTupleFirst64 tuple _)) -> (Select1 tuple)
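The new shape lowers each AtomicAdd to exactly one locked XADD: the AtomicAdd rule fires once, wrapping the XADDLlock/XADDQlock in an AddTupleFirst pseudo-op, and the Select0/Select1 rules then project out of that single shared tuple. The old rules instead materialized a fresh XADDLlock/XADDQlock inside each Select rewrite, relying on later passes to deduplicate the side-effecting op when both results were used. For orientation, here is a minimal sketch (not part of this change) of the Go code these rules lower; note that XADD returns the old value, which is why the Select0 rule re-adds val to recover the new one:

package main

import "sync/atomic"

var counter uint64

// atomic.AddUint64 returns the *new* value. The SSA backend models it as
// an AtomicAdd64 producing a (value, memory) tuple: Select0 is the value
// result, Select1 the memory state.
func next() uint64 {
	return atomic.AddUint64(&counter, 1)
}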
// Atomic compare and swap.
(AtomicCompareAndSwap32 ptr old new_ mem) -> (CMPXCHGLlock ptr old new_ mem)
// Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
{name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true},
{name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true},
+ {name: "AddTupleFirst32", argLength: 2}, // arg0=tuple <x,y>. Returns <x+arg1,y>.
+ {name: "AddTupleFirst64", argLength: 2}, // arg0=tuple <x,y>. Returns <x+arg1,y>.
// Compare and swap.
// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
return rewriteValueAMD64_OpAnd8(v, config)
case OpAndB:
return rewriteValueAMD64_OpAndB(v, config)
+ case OpAtomicAdd32:
+ return rewriteValueAMD64_OpAtomicAdd32(v, config)
+ case OpAtomicAdd64:
+ return rewriteValueAMD64_OpAtomicAdd64(v, config)
case OpAtomicAnd8:
return rewriteValueAMD64_OpAtomicAnd8(v, config)
case OpAtomicCompareAndSwap32:
return true
}
}
+func rewriteValueAMD64_OpAtomicAdd32(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AtomicAdd32 ptr val mem)
+ // cond:
+ // result: (AddTupleFirst32 (XADDLlock val ptr mem) val)
+ for {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64AddTupleFirst32)
+ v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(val)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd64(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (AtomicAdd64 ptr val mem)
+ // cond:
+ // result: (AddTupleFirst64 (XADDQlock val ptr mem) val)
+ for {
+ ptr := v.Args[0]
+ val := v.Args[1]
+ mem := v.Args[2]
+ v.reset(OpAMD64AddTupleFirst64)
+ v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
+ v0.AddArg(val)
+ v0.AddArg(ptr)
+ v0.AddArg(mem)
+ v.AddArg(v0)
+ v.AddArg(val)
+ return true
+ }
+}
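Both generated functions have the same shape: build the locked XADD with val before ptr (per the argument-order note in the ops file, so resultInArg0 can hold) and wrap it in the AddTupleFirst pseudo-op, re-attaching val for the later Select0 rewrite. A quick behavioral check of the semantics the lowering must preserve (again illustrative, not part of this change):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var c uint32
	// The value result (Select0 of the lowered add) is the new value.
	if got := atomic.AddUint32(&c, 5); got != 5 {
		panic(fmt.Sprintf("want 5, got %d", got))
	}
	// The memory result (Select1) orders this load after the add.
	fmt.Println(atomic.LoadUint32(&c)) // prints 5
}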
func rewriteValueAMD64_OpAtomicAnd8(v *Value, config *Config) bool {
b := v.Block
_ = b
func rewriteValueAMD64_OpSelect0(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Select0 <t> (AtomicAdd32 ptr val mem))
+ // match: (Select0 <t> (AddTupleFirst32 tuple val))
// cond:
- // result: (ADDL (Select0 <t> (XADDLlock val ptr mem)) val)
+ // result: (ADDL val (Select0 <t> tuple))
for {
t := v.Type
v_0 := v.Args[0]
- if v_0.Op != OpAtomicAdd32 {
+ if v_0.Op != OpAMD64AddTupleFirst32 {
break
}
- ptr := v_0.Args[0]
+ tuple := v_0.Args[0]
val := v_0.Args[1]
- mem := v_0.Args[2]
v.reset(OpAMD64ADDL)
+ v.AddArg(val)
v0 := b.NewValue0(v.Line, OpSelect0, t)
- v1 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
- v1.AddArg(val)
- v1.AddArg(ptr)
- v1.AddArg(mem)
- v0.AddArg(v1)
+ v0.AddArg(tuple)
v.AddArg(v0)
- v.AddArg(val)
return true
}
- // match: (Select0 <t> (AtomicAdd64 ptr val mem))
+ // match: (Select0 <t> (AddTupleFirst64 tuple val))
// cond:
- // result: (ADDQ (Select0 <t> (XADDQlock val ptr mem)) val)
+ // result: (ADDQ val (Select0 <t> tuple))
for {
t := v.Type
v_0 := v.Args[0]
- if v_0.Op != OpAtomicAdd64 {
+ if v_0.Op != OpAMD64AddTupleFirst64 {
break
}
- ptr := v_0.Args[0]
+ tuple := v_0.Args[0]
val := v_0.Args[1]
- mem := v_0.Args[2]
v.reset(OpAMD64ADDQ)
+ v.AddArg(val)
v0 := b.NewValue0(v.Line, OpSelect0, t)
- v1 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
- v1.AddArg(val)
- v1.AddArg(ptr)
- v1.AddArg(mem)
- v0.AddArg(v1)
+ v0.AddArg(tuple)
v.AddArg(v0)
- v.AddArg(val)
return true
}
return false
func rewriteValueAMD64_OpSelect1(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (Select1 (AtomicAdd32 ptr val mem))
+ // match: (Select1 (AddTupleFirst32 tuple _))
// cond:
- // result: (Select1 (XADDLlock val ptr mem))
+ // result: (Select1 tuple)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAtomicAdd32 {
+ if v_0.Op != OpAMD64AddTupleFirst32 {
break
}
- ptr := v_0.Args[0]
- val := v_0.Args[1]
- mem := v_0.Args[2]
+ tuple := v_0.Args[0]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64XADDLlock, MakeTuple(config.fe.TypeUInt32(), TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v.AddArg(tuple)
return true
}
- // match: (Select1 (AtomicAdd64 ptr val mem))
+ // match: (Select1 (AddTupleFirst64 tuple _))
// cond:
- // result: (Select1 (XADDQlock val ptr mem))
+ // result: (Select1 tuple)
for {
v_0 := v.Args[0]
- if v_0.Op != OpAtomicAdd64 {
+ if v_0.Op != OpAMD64AddTupleFirst64 {
break
}
- ptr := v_0.Args[0]
- val := v_0.Args[1]
- mem := v_0.Args[2]
+ tuple := v_0.Args[0]
v.reset(OpSelect1)
- v0 := b.NewValue0(v.Line, OpAMD64XADDQlock, MakeTuple(config.fe.TypeUInt64(), TypeMem))
- v0.AddArg(val)
- v0.AddArg(ptr)
- v0.AddArg(mem)
- v.AddArg(v0)
+ v.AddArg(tuple)
return true
}
return false