// defines the pointer map for the function's arguments.
// GO_ARGS should be the first instruction in a function that uses it.
// It can be omitted if there are no arguments at all.
-// GO_ARGS is inserted implicitly by the linker for any function
-// that also has a Go prototype and therefore is usually not necessary
-// to write explicitly.
+// GO_ARGS is inserted implicitly by the linker for any function whose
+// name starts with a middle-dot and that also has a Go prototype; it
+// is therefore usually not necessary to write explicitly.
// GO_ARGS expands to a FUNCDATA directive that records the function's
// argument pointer map as the go_args_stackmap symbol.
#define GO_ARGS FUNCDATA $FUNCDATA_ArgsPointerMaps, go_args_stackmap(SB)
// GO_RESULTS_INITIALIZED indicates that the assembly function
// return 1;
// }else
// return 0;
-TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
+TEXT ·Cas(SB), NOSPLIT, $0-13
// Cas: 32-bit compare-and-swap; ret (a bool at +12) reports whether
// the swap was performed (per the pseudo-code fragment above).
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
// NOTE(review): the LOCK'ed compare-exchange between these loads and
// SETEQ is not visible in this chunk; SETEQ materializes its ZF result.
SETEQ ret+12(FP)
RET
// The stubs below forward typed wrappers to the underlying
// implementation.  Each wrapper's frame size matches its target's
// exactly (e.g. $0-13 for the Cas family, $0-8 for Load/Store),
// so a tail JMP suffices.
-TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13
- JMP runtime∕internal∕atomic·Cas(SB)
+TEXT ·Casuintptr(SB), NOSPLIT, $0-13
+ JMP ·Cas(SB)
// CasRel: presumably release-ordered CAS; on 386 it is the same code
// as Cas — confirm against the Go prototype.
-TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-13
- JMP runtime∕internal∕atomic·Cas(SB)
+TEXT ·CasRel(SB), NOSPLIT, $0-13
+ JMP ·Cas(SB)
-TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8
- JMP runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduintptr(SB), NOSPLIT, $0-8
+ JMP ·Load(SB)
-TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-8
- JMP runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduint(SB), NOSPLIT, $0-8
+ JMP ·Load(SB)
-TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
- JMP runtime∕internal∕atomic·Store(SB)
+TEXT ·Storeuintptr(SB), NOSPLIT, $0-8
+ JMP ·Store(SB)
-TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
- JMP runtime∕internal∕atomic·Xadd(SB)
+TEXT ·Xadduintptr(SB), NOSPLIT, $0-12
+ JMP ·Xadd(SB)
// The int64 wrappers forward to the 64-bit implementations ($0-12
// load, $0-20 xadd frames).
-TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-12
- JMP runtime∕internal∕atomic·Load64(SB)
+TEXT ·Loadint64(SB), NOSPLIT, $0-12
+ JMP ·Load64(SB)
-TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-20
- JMP runtime∕internal∕atomic·Xadd64(SB)
+TEXT ·Xaddint64(SB), NOSPLIT, $0-20
+ JMP ·Xadd64(SB)
-// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
+// bool ·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
// if(*val == old){
// *val = new;
// return 1;
// } else {
// return 0;
// }
-TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-21
+TEXT ·Cas64(SB), NOSPLIT, $0-21
// Cas64: 64-bit compare-and-swap (see the pseudo-code comment above).
MOVL ptr+0(FP), BP
// Require 8-byte alignment: any of the low three address bits set
// means a misaligned 64-bit access.
TESTL $7, BP
// When aligned, skip the next instruction (the deliberate fault for
// misaligned access — NOTE(review): not visible in this chunk, nor is
// the CMPXCHG8B that performs the swap).
JZ 2(PC)
// return 1;
// }else
// return 0;
-TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-13
+TEXT ·Casp1(SB), NOSPLIT, $0-13
// Casp1: compare-and-swap of a pointer-sized value; same argument
// layout as Cas ($0-13).
MOVL ptr+0(FP), BX
MOVL old+4(FP), AX
MOVL new+8(FP), CX
// NOTE(review): the rest of the body (locked compare-exchange and
// result store) is not visible in this chunk.
// Atomically:
// *val += delta;
// return *val;
-TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
+TEXT ·Xadd(SB), NOSPLIT, $0-12
// Xadd: atomic fetch-add returning the new value (per the pseudo-code
// above).
MOVL ptr+0(FP), BX
MOVL delta+4(FP), AX
// Keep a copy of delta.  NOTE(review): the LOCK XADDL and the add
// that folds delta into the returned value are not visible in this
// chunk.
MOVL AX, CX
MOVL AX, ret+8(FP)
RET
-TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-20
+TEXT ·Xadd64(SB), NOSPLIT, $0-20
// no XADDQ so use CMPXCHG8B loop
MOVL ptr+0(FP), BP
// Require 8-byte alignment for the 64-bit operand.
TESTL $7, BP
// NOTE(review): the alignment-fault path and the CMPXCHG8B loop are
// not visible in this chunk; only the high-half result store remains.
MOVL CX, ret_hi+16(FP)
RET
-TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
+TEXT ·Xchg(SB), NOSPLIT, $0-12
// Xchg: atomically swap *ptr with new, returning the previous value.
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
// XCHG with a memory operand is implicitly LOCKed on x86; no explicit
// LOCK prefix is needed.
XCHGL AX, 0(BX)
MOVL AX, ret+8(FP)
RET
// Xchguintptr forwards to Xchg; the frames match ($0-12).
-TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
- JMP runtime∕internal∕atomic·Xchg(SB)
+TEXT ·Xchguintptr(SB), NOSPLIT, $0-12
+ JMP ·Xchg(SB)
-TEXT runtime∕internal∕atomic·Xchg64(SB),NOSPLIT,$0-20
+TEXT ·Xchg64(SB),NOSPLIT,$0-20
// no XCHGQ so use CMPXCHG8B loop
MOVL ptr+0(FP), BP
// Require 8-byte alignment.
TESTL $7, BP
// NOTE(review): the fault path and the CMPXCHG8B loop are not visible
// in this chunk; only the high-half result store remains.
MOVL DX, ret_hi+16(FP)
RET
-TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-8
+TEXT ·StorepNoWB(SB), NOSPLIT, $0-8
// Store a pointer-sized value using XCHG, which is implicitly LOCKed
// and so acts as a full barrier.  "NoWB" presumably means the store
// skips the GC write barrier — callers must guarantee that is safe;
// confirm against the Go prototype.
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
-TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
+TEXT ·Store(SB), NOSPLIT, $0-8
// Store: 32-bit store with full-barrier semantics.  XCHG to memory is
// implicitly LOCKed, making the store sequentially consistent.
MOVL ptr+0(FP), BX
MOVL val+4(FP), AX
XCHGL AX, 0(BX)
RET
// StoreRel (release-ordered store) forwards to Store; on 386 the
// full-barrier Store already provides release semantics.
-TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8
- JMP runtime∕internal∕atomic·Store(SB)
+TEXT ·StoreRel(SB), NOSPLIT, $0-8
+ JMP ·Store(SB)
// uint64 atomicload64(uint64 volatile* addr);
-TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
+TEXT ·Load64(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), AX
// Require 8-byte alignment for an atomic 64-bit load.
TESTL $7, AX
// When aligned, skip the (not visible) misaligned-access fault.
JZ 2(PC)
// NOTE(review): the MMX MOVQ load itself is not visible in this
// chunk; EMMS clears the MMX state before returning.
EMMS
RET
-// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 v);
-TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-12
+// void ·Store64(uint64 volatile* addr, uint64 v);
+TEXT ·Store64(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), AX
// Require 8-byte alignment.
TESTL $7, AX
// When aligned, skip the (not visible) misaligned-access fault.
JZ 2(PC)
// NOTE(review): the MMX store is not visible here; this XADDL on the
// stack looks like a locked read-modify-write used as a store
// barrier — confirm a LOCK prefix precedes it in the full source.
XADDL AX, (SP)
RET
-// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
-TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
+// void ·Or8(byte volatile*, byte);
+TEXT ·Or8(SB), NOSPLIT, $0-5
// Atomically OR val into the byte at *ptr.
MOVL ptr+0(FP), AX
MOVB val+4(FP), BX
// LOCK prefix makes the read-modify-write atomic.
LOCK
ORB BX, (AX)
RET
-// void runtime∕internal∕atomic·And8(byte volatile*, byte);
-TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
+// void ·And8(byte volatile*, byte);
+TEXT ·And8(SB), NOSPLIT, $0-5
// Atomically AND val into the byte at *ptr.
MOVL ptr+0(FP), AX
MOVB val+4(FP), BX
// LOCK prefix makes the read-modify-write atomic.
LOCK
ANDB BX, (AX)
RET
-TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-5
+TEXT ·Store8(SB), NOSPLIT, $0-5
// Store8: atomic byte store with full-barrier semantics via the
// implicitly-LOCKed XCHGB.
MOVL ptr+0(FP), BX
MOVB val+4(FP), AX
XCHGB AX, 0(BX)
// NOTE(review): the trailing RET is not visible in this chunk.
// }else
// return 0;
//
-// To implement runtime∕internal∕atomic·cas in sys_$GOOS_arm.s
+// To implement ·cas in sys_$GOOS_arm.s
// using the native instructions, use:
//
-// TEXT runtime∕internal∕atomic·cas(SB),NOSPLIT,$0
-// B runtime∕internal∕atomic·armcas(SB)
+// TEXT ·cas(SB),NOSPLIT,$0
+// B ·armcas(SB)
//
-TEXT runtime∕internal∕atomic·armcas(SB),NOSPLIT,$0-13
+TEXT ·armcas(SB),NOSPLIT,$0-13
// armcas: native ARM compare-and-swap (pseudo-code fragment above);
// same $0-13 frame as the cas entry point described in the comment.
MOVW ptr+0(FP), R1
MOVW old+4(FP), R2
MOVW new+8(FP), R3
// NOTE(review): the LDREX/STREX retry loop and the boolean result
// store are not visible in this chunk.
// stubs
// The stubs below forward typed wrappers to the underlying
// implementations; each wrapper's frame size matches its target's,
// so a tail branch (B) suffices.
-TEXT runtime∕internal∕atomic·Loadp(SB),NOSPLIT|NOFRAME,$0-8
- B runtime∕internal∕atomic·Load(SB)
+TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-8
+ B ·Load(SB)
// LoadAcq: presumably acquire-ordered load; same code as Load here —
// confirm against the Go prototype.
-TEXT runtime∕internal∕atomic·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8
- B runtime∕internal∕atomic·Load(SB)
+TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8
+ B ·Load(SB)
-TEXT runtime∕internal∕atomic·Casuintptr(SB),NOSPLIT,$0-13
- B runtime∕internal∕atomic·Cas(SB)
+TEXT ·Casuintptr(SB),NOSPLIT,$0-13
+ B ·Cas(SB)
-TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0-13
- B runtime∕internal∕atomic·Cas(SB)
+TEXT ·Casp1(SB),NOSPLIT,$0-13
+ B ·Cas(SB)
-TEXT runtime∕internal∕atomic·CasRel(SB),NOSPLIT,$0-13
- B runtime∕internal∕atomic·Cas(SB)
+TEXT ·CasRel(SB),NOSPLIT,$0-13
+ B ·Cas(SB)
-TEXT runtime∕internal∕atomic·Loaduintptr(SB),NOSPLIT,$0-8
- B runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
+ B ·Load(SB)
-TEXT runtime∕internal∕atomic·Loaduint(SB),NOSPLIT,$0-8
- B runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduint(SB),NOSPLIT,$0-8
+ B ·Load(SB)
-TEXT runtime∕internal∕atomic·Storeuintptr(SB),NOSPLIT,$0-8
- B runtime∕internal∕atomic·Store(SB)
+TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
+ B ·Store(SB)
-TEXT runtime∕internal∕atomic·StorepNoWB(SB),NOSPLIT,$0-8
- B runtime∕internal∕atomic·Store(SB)
+TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
+ B ·Store(SB)
-TEXT runtime∕internal∕atomic·StoreRel(SB),NOSPLIT,$0-8
- B runtime∕internal∕atomic·Store(SB)
+TEXT ·StoreRel(SB),NOSPLIT,$0-8
+ B ·Store(SB)
-TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-12
- B runtime∕internal∕atomic·Xadd(SB)
+TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
+ B ·Xadd(SB)
// The int64 wrappers forward to the 64-bit implementations.
-TEXT runtime∕internal∕atomic·Loadint64(SB),NOSPLIT,$0-12
- B runtime∕internal∕atomic·Load64(SB)
+TEXT ·Loadint64(SB),NOSPLIT,$0-12
+ B ·Load64(SB)
-TEXT runtime∕internal∕atomic·Xaddint64(SB),NOSPLIT,$0-20
- B runtime∕internal∕atomic·Xadd64(SB)
+TEXT ·Xaddint64(SB),NOSPLIT,$0-20
+ B ·Xadd64(SB)
// 64-bit atomics
// The native ARM implementations use LDREXD/STREXD, which are
// On older ARM, we use Go implementations which simulate 64-bit
// atomics with locks.
// armCas64: native 64-bit CAS; the <> suffix makes the symbol
// file-local (reachable only from this file's dispatchers).
-TEXT armCas64<>(SB),NOSPLIT,$0-21
+TEXT armCas64<>(SB),NOSPLIT,$0-21
MOVW addr+0(FP), R1
// make unaligned atomic access panic
AND.S $7, R1, R2
// NOTE(review): the panic branch and the LDREXD/STREXD loop are not
// visible in this chunk; only the boolean result store remains.
MOVBU R0, swapped+20(FP)
RET
// armXadd64: native 64-bit fetch-add (file-local helper).
-TEXT armXadd64<>(SB),NOSPLIT,$0-20
+TEXT armXadd64<>(SB),NOSPLIT,$0-20
MOVW addr+0(FP), R1
// make unaligned atomic access panic
AND.S $7, R1, R2
// NOTE(review): panic branch and LDREXD/STREXD loop not visible in
// this chunk; only the high-half result store remains.
MOVW R5, new_hi+16(FP)
RET
// armXchg64: native 64-bit exchange (file-local helper).
-TEXT armXchg64<>(SB),NOSPLIT,$0-20
+TEXT armXchg64<>(SB),NOSPLIT,$0-20
MOVW addr+0(FP), R1
// make unaligned atomic access panic
AND.S $7, R1, R2
// NOTE(review): panic branch and LDREXD/STREXD loop not visible in
// this chunk; only the old-value high-half store remains.
MOVW R5, old_hi+16(FP)
RET
// armLoad64: native atomic 64-bit load (file-local helper).
-TEXT armLoad64<>(SB),NOSPLIT,$0-12
+TEXT armLoad64<>(SB),NOSPLIT,$0-12
MOVW addr+0(FP), R1
// make unaligned atomic access panic
AND.S $7, R1, R2
// NOTE(review): panic branch and the LDREXD load itself are not
// visible in this chunk; only the high-half result store remains.
MOVW R3, val_hi+8(FP)
RET
// armStore64: native atomic 64-bit store (file-local helper).
-TEXT armStore64<>(SB),NOSPLIT,$0-12
+TEXT armStore64<>(SB),NOSPLIT,$0-12
MOVW addr+0(FP), R1
// make unaligned atomic access panic
AND.S $7, R1, R2
// NOTE(review): panic branch and STREXD loop not visible in this
// chunk; the trailing DMB is a full inner-shareable memory barrier.
DMB MB_ISH
RET
// Cas64 dispatches on runtime·goarm: ARMv7+ uses the native
// LDREXD/STREXD helper, older cores fall back to the lock-based Go
// implementation (see the "64-bit atomics" comment earlier).
-TEXT ·Cas64(SB),NOSPLIT,$0-21
+TEXT ·Cas64(SB),NOSPLIT,$0-21
MOVB runtime·goarm(SB), R11
CMP $7, R11
// goarm < 7: skip the native path and take the Go fallback.
BLT 2(PC)
JMP armCas64<>(SB)
JMP ·goCas64(SB)
// Xadd64: same goarm dispatch as Cas64.
-TEXT ·Xadd64(SB),NOSPLIT,$0-20
+TEXT ·Xadd64(SB),NOSPLIT,$0-20
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)
JMP armXadd64<>(SB)
JMP ·goXadd64(SB)
// Xchg64: same goarm dispatch as Cas64.
-TEXT ·Xchg64(SB),NOSPLIT,$0-20
+TEXT ·Xchg64(SB),NOSPLIT,$0-20
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)
JMP armXchg64<>(SB)
JMP ·goXchg64(SB)
// Load64: same goarm dispatch as Cas64.
-TEXT ·Load64(SB),NOSPLIT,$0-12
+TEXT ·Load64(SB),NOSPLIT,$0-12
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)
JMP armLoad64<>(SB)
JMP ·goLoad64(SB)
-TEXT ·Store64(SB),NOSPLIT,$0-12
+TEXT ·Store64(SB),NOSPLIT,$0-12
MOVB runtime·goarm(SB), R11
CMP $7, R11
BLT 2(PC)