		atomic.Store64(&x, 0)
	}
}
+
+func BenchmarkAtomicLoad(b *testing.B) {
+	var x uint32
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		_ = atomic.Load(&x)
+	}
+}
+
+func BenchmarkAtomicStore(b *testing.B) {
+	var x uint32
+	sink = &x
+	for i := 0; i < b.N; i++ {
+		atomic.Store(&x, 0)
+	}
+}
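These benchmarks publish &x through a package-level `sink` so the compiler cannot prove x is unobserved and delete the loop bodies; the `sink` declaration itself sits outside this hunk. A self-contained analogue of the pattern, using sync/atomic since runtime/internal/atomic is not importable outside the runtime tree:

```go
package bench_test

import (
	"sync/atomic"
	"testing"
)

// Assumed shape of the package-level sink the diff relies on:
// publishing &x through a global keeps x observable, so the compiler
// cannot treat the atomic ops in the loop as dead code.
var sink interface{}

func BenchmarkAtomicLoad(b *testing.B) {
	var x uint32
	sink = &x
	for i := 0; i < b.N; i++ {
		_ = atomic.LoadUint32(&x)
	}
}

func BenchmarkAtomicStore(b *testing.B) {
	var x uint32
	sink = &x
	for i := 0; i < b.N; i++ {
		atomic.StoreUint32(&x, 0)
	}
}
```

Running `go test -bench=Atomic` with different GOARM settings would then compare the native and kernel-helper paths.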
TEXT cas<>(SB),NOSPLIT,$0
	MOVW	$0xffff0fc0, R15 // R15 is hardware PC.
-TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT|NOFRAME,$0
+	MOVB	runtime·goarm(SB), R11
+	CMP	$7, R11
+	BLT	2(PC)
+	JMP	·armcas(SB)
+	JMP	·kernelcas<>(SB)
+
+TEXT runtime∕internal∕atomic·kernelcas<>(SB),NOSPLIT,$0
	MOVW	ptr+0(FP), R2
	// trigger potential paging fault here,
	// because we don't know how to traceback through __kuser_cmpxchg
	// even on single-core devices. The kernel helper takes care of all of
	// this for us.
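For context: Linux/ARM maps kernel "user helpers" at fixed high addresses, __kuser_cmpxchg at 0xffff0fc0 and __kuser_memory_barrier at 0xffff0fa0 (see the kernel's Documentation/arm/kernel_user_helpers.txt). The dispatch added to Cas behaves roughly like the Go sketch below; goarm, armcas, and kernelcas mirror the assembly symbols, and their bodies are plain sequential stand-ins that show control flow only, not real atomic code:

```go
package main

import "fmt"

// goarm stands in for runtime·goarm, which is set at build time from
// the GOARM environment variable (assumption for this sketch).
var goarm uint8 = 7

// armcas models the native LDREX/STREX path taken when GOARM >= 7.
// Plain Go here; the real version is atomic assembly (·armcas).
func armcas(ptr *uint32, old, new uint32) bool {
	if *ptr == old {
		*ptr = new
		return true
	}
	return false
}

// kernelcas models calling __kuser_cmpxchg at 0xffff0fc0. The Go
// source near cas<> notes that kernels before 2.6.24 could report a
// spurious conflict, so the real code double-checks and retries.
func kernelcas(ptr *uint32, old, new uint32) bool {
	for {
		if *ptr != old {
			return false // value really differs: CAS fails
		}
		// Stand-in for a successful __kernel_cmpxchg(old, new, ptr).
		*ptr = new
		return true
	}
}

// cas mirrors the GOARM dispatch in the hunk above.
func cas(ptr *uint32, old, new uint32) bool {
	if goarm >= 7 {
		return armcas(ptr, old, new)
	}
	return kernelcas(ptr, old, new)
}

func main() {
	x := uint32(1)
	fmt.Println(cas(&x, 1, 2), x) // true 2
}
```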
-TEXT publicationBarrier<>(SB),NOSPLIT,$0
+TEXT kernelPublicationBarrier<>(SB),NOSPLIT,$0
	// void __kuser_memory_barrier(void);
-	MOVW	$0xffff0fa0, R15 // R15 is hardware PC.
+	MOVW	$0xffff0fa0, R11
+	CALL	(R11)
+	RET
TEXT ·publicationBarrier(SB),NOSPLIT,$0
-	BL	publicationBarrier<>(SB)
-	RET
+	MOVB	·goarm(SB), R11
+	CMP	$7, R11
+	BLT	2(PC)
+	JMP	·armPublicationBarrier(SB)
+	JMP	kernelPublicationBarrier<>(SB) // extra layer so this function is a leaf and needs no SP adjustment on GOARM=7
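Two details worth calling out here. First, the old publicationBarrier<> jumped into the helper by writing 0xffff0fa0 straight to R15, so the helper returned via LR to the original caller; the new CALL (R11)/RET form in kernelPublicationBarrier<> clobbers LR, which is safe because the function is not marked NOFRAME, so the ARM assembler's implicit prologue for a non-leaf $0 frame spills LR to the stack. Second, ·publicationBarrier itself only ever JMPs, never BLs, so it stays a leaf and pays no SP adjustment on the common GOARM=7 path, which is what the comment on the final JMP is getting at. As a control-flow sketch in Go (names mirror the assembly symbols; the bodies are stand-ins, since DMB and the kernel helper are not expressible in portable Go):

```go
package main

// goarm stands in for runtime·goarm (assumption: set from GOARM at
// build time, as in the sketch for Cas above).
var goarm uint8 = 7

func armPublicationBarrier() {
	// Real version: a DMB instruction (ARMv7 data memory barrier).
}

func kernelPublicationBarrier() {
	// Real version: call __kuser_memory_barrier at 0xffff0fa0.
}

// publicationBarrier mirrors the GOARM dispatch in the hunk above.
func publicationBarrier() {
	if goarm >= 7 {
		armPublicationBarrier()
		return
	}
	kernelPublicationBarrier()
}

func main() {
	publicationBarrier()
}
```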
TEXT runtime·osyield(SB),NOSPLIT,$0
	MOVW	$SYS_sched_yield, R7