// *(arg1+auxint+aux) += arg0. arg2=mem.
// Returns a tuple of <old contents of *(arg1+auxint+aux), memory>.
// Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
- {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true},
- {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true},
+ {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true},
+ {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true},
{name: "AddTupleFirst32", argLength: 2}, // arg0=tuple <x,y>. Returns <x+arg1,y>.
{name: "AddTupleFirst64", argLength: 2}, // arg0=tuple <x,y>. Returns <x+arg1,y>.
// JEQ ...
// but we can't do that because memory-using ops can't generate flags yet
// (flagalloc wants to move flag-generating instructions around).
- {name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff"},
- {name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff"},
+ {name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true},
+ {name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true},
// Atomic memory updates.
- {name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff"}, // *(arg0+auxint+aux) &= arg1
- {name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff"}, // *(arg0+auxint+aux) |= arg1
+ {name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true}, // *(arg0+auxint+aux) &= arg1
+ {name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true}, // *(arg0+auxint+aux) |= arg1
}
var AMD64blocks = []blockData{
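Why every op above gains clobberFlags: on AMD64, LOCK XADD, LOCK CMPXCHG, LOCK ANDB, and LOCK ORB all rewrite EFLAGS as a side effect. If the SSA ops modeling them are not marked clobberFlags, flagalloc assumes a comparison's flags survive across the atomic op and will not rematerialize them, so a later branch tests whatever the atomic instruction left behind. A minimal sketch of the failure mode this fixes (names are illustrative, not taken from the issue):

	package main

	import "sync/atomic"

	var n uint32

	func get(buffer []byte, i int) byte {
		// The comparison materializes flags for i < len(buffer).
		if i < len(buffer) {
			// Compiled to LOCK XADDL, which overwrites those flags.
			atomic.AddUint32(&n, 1)
			// If the bounds check for buffer[i] reuses the earlier
			// comparison's flags, it branches on stale state.
			return buffer[i]
		}
		return 0
	}

	func main() {
		println(get([]byte("T"), 0))
	}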
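An aside on the semantics recorded in the comments above: LOCK XADD returns the old contents of the memory word, while sync/atomic's Add functions return the new value, so the compiler adds the delta back onto the tuple's first element; that is what the AddTupleFirst32/64 pseudo-ops express at the SSA level. A user-level illustration of the same relationship (ordinary Go, not compiler code):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		var x uint32 = 40
		after := atomic.AddUint32(&x, 2) // the new value: XADD result + delta
		before := after - 2              // the old value LOCK XADDL returned
		fmt.Println(before, after)       // 40 42
	}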
--- /dev/null
+++ b/test/fixedbugs/issue16985.go
+// run
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// issue 16985: intrinsified AMD64 atomic ops should clobber flags
+
+package main
+
+import "sync/atomic"
+
+var count uint32
+
+func main() {
+ buffer := []byte("T")
+ for i := 0; i < len(buffer); {
+ atomic.AddUint32(&count, 1)
+ _ = buffer[i]
+ i++
+ i++
+ }
+
+ for i := 0; i < len(buffer); {
+ atomic.CompareAndSwapUint32(&count, 0, 1)
+ _ = buffer[i]
+ i++
+ i++
+ }
+
+ for i := 0; i < len(buffer); {
+ atomic.SwapUint32(&count, 1)
+ _ = buffer[i]
+ i++
+ i++
+ }
+}
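The // run directive asks the test harness to compile and execute this file and expect a clean exit. Each loop pairs an intrinsified atomic with an index expression whose bounds check can end up reusing the loop condition's flags: atomic.AddUint32 lowers to XADDLlock and atomic.CompareAndSwapUint32 to CMPXCHGLlock on amd64, while atomic.SwapUint32 lowers to XCHGL, which does not write flags and is presumably exercised for coverage. The doubled i++ appears to be there to shape the loop into that flag-reuse pattern. Before the clobberFlags change, a program like this could branch on stale flags, surfacing, for example, as a spurious index-out-of-range panic. To inspect the result by hand, something like go tool compile -S issue16985.go shows whether the comparison is re-issued after the LOCK-prefixed instruction.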