From: Austin Clements
Date: Thu, 15 Oct 2020 20:11:10 +0000 (-0400)
Subject: runtime/internal/atomic: drop package prefixes
X-Git-Tag: go1.16beta1~705
X-Git-Url: http://www.git.cypherpunks.su/?a=commitdiff_plain;h=afba990169f41d9026c923da5235584db32cab67;p=gostls13.git

runtime/internal/atomic: drop package prefixes

This drops package prefixes from the assembly code on 386 and arm. In
addition to just being nicer, this allows the assembler to
automatically pick up the argument stack map from the Go signatures of
these functions. This doesn't matter right now because these functions
never call back out to Go, but prepares us for the next CL.

Change-Id: I90fed7d4dd63ad49274529c62804211b6390e2e9
Reviewed-on: https://go-review.googlesource.com/c/go/+/262777
Trust: Austin Clements
Run-TryBot: Austin Clements
TryBot-Result: Go Bot
Reviewed-by: Cherry Zhang
Reviewed-by: Michael Knyszek
---

diff --git a/src/runtime/funcdata.h b/src/runtime/funcdata.h
index 0fb50ddfba..cd76c06992 100644
--- a/src/runtime/funcdata.h
+++ b/src/runtime/funcdata.h
@@ -32,9 +32,9 @@
 // defines the pointer map for the function's arguments.
 // GO_ARGS should be the first instruction in a function that uses it.
 // It can be omitted if there are no arguments at all.
-// GO_ARGS is inserted implicitly by the linker for any function
-// that also has a Go prototype and therefore is usually not necessary
-// to write explicitly.
+// GO_ARGS is inserted implicitly by the linker for any function whose
+// name starts with a middle-dot and that also has a Go prototype; it
+// is therefore usually not necessary to write explicitly.
 #define GO_ARGS	FUNCDATA $FUNCDATA_ArgsPointerMaps, go_args_stackmap(SB)
 
 // GO_RESULTS_INITIALIZED indicates that the assembly function
diff --git a/src/runtime/internal/atomic/asm_386.s b/src/runtime/internal/atomic/asm_386.s
index 9b9dc14a60..357ca95625 100644
--- a/src/runtime/internal/atomic/asm_386.s
+++ b/src/runtime/internal/atomic/asm_386.s
@@ -11,7 +11,7 @@
 //		return 1;
 //	}else
 //		return 0;
-TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
+TEXT ·Cas(SB), NOSPLIT, $0-13
 	MOVL	ptr+0(FP), BX
 	MOVL	old+4(FP), AX
 	MOVL	new+8(FP), CX
@@ -20,32 +20,32 @@ TEXT runtime∕internal∕atomic·Cas(SB), NOSPLIT, $0-13
 	SETEQ	ret+12(FP)
 	RET
 
-TEXT runtime∕internal∕atomic·Casuintptr(SB), NOSPLIT, $0-13
-	JMP	runtime∕internal∕atomic·Cas(SB)
+TEXT ·Casuintptr(SB), NOSPLIT, $0-13
+	JMP	·Cas(SB)
 
-TEXT runtime∕internal∕atomic·CasRel(SB), NOSPLIT, $0-13
-	JMP	runtime∕internal∕atomic·Cas(SB)
+TEXT ·CasRel(SB), NOSPLIT, $0-13
+	JMP	·Cas(SB)
 
-TEXT runtime∕internal∕atomic·Loaduintptr(SB), NOSPLIT, $0-8
-	JMP	runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduintptr(SB), NOSPLIT, $0-8
+	JMP	·Load(SB)
 
-TEXT runtime∕internal∕atomic·Loaduint(SB), NOSPLIT, $0-8
-	JMP	runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduint(SB), NOSPLIT, $0-8
+	JMP	·Load(SB)
 
-TEXT runtime∕internal∕atomic·Storeuintptr(SB), NOSPLIT, $0-8
-	JMP	runtime∕internal∕atomic·Store(SB)
+TEXT ·Storeuintptr(SB), NOSPLIT, $0-8
+	JMP	·Store(SB)
 
-TEXT runtime∕internal∕atomic·Xadduintptr(SB), NOSPLIT, $0-12
-	JMP	runtime∕internal∕atomic·Xadd(SB)
+TEXT ·Xadduintptr(SB), NOSPLIT, $0-12
+	JMP	·Xadd(SB)
 
-TEXT runtime∕internal∕atomic·Loadint64(SB), NOSPLIT, $0-12
-	JMP	runtime∕internal∕atomic·Load64(SB)
+TEXT ·Loadint64(SB), NOSPLIT, $0-12
+	JMP	·Load64(SB)
 
-TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-20
-	JMP	runtime∕internal∕atomic·Xadd64(SB)
+TEXT ·Xaddint64(SB), NOSPLIT, $0-20
+	JMP	·Xadd64(SB)
 
-// bool runtime∕internal∕atomic·Cas64(uint64 *val, uint64 old, uint64 new)
+// bool ·Cas64(uint64 *val, uint64 old, uint64 new)
 // Atomically:
 //	if(*val == *old){
 //		*val = new;
@@ -53,7 +53,7 @@ TEXT runtime∕internal∕atomic·Xaddint64(SB), NOSPLIT, $0-20
 //	} else {
 //		return 0;
 //	}
-TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-21
+TEXT ·Cas64(SB), NOSPLIT, $0-21
 	MOVL	ptr+0(FP), BP
 	TESTL	$7, BP
 	JZ	2(PC)
@@ -74,7 +74,7 @@ TEXT runtime∕internal∕atomic·Cas64(SB), NOSPLIT, $0-21
 //		return 1;
 //	}else
 //		return 0;
-TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-13
+TEXT ·Casp1(SB), NOSPLIT, $0-13
 	MOVL	ptr+0(FP), BX
 	MOVL	old+4(FP), AX
 	MOVL	new+8(FP), CX
@@ -87,7 +87,7 @@ TEXT runtime∕internal∕atomic·Casp1(SB), NOSPLIT, $0-13
 // Atomically:
 //	*val += delta;
 //	return *val;
-TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
+TEXT ·Xadd(SB), NOSPLIT, $0-12
 	MOVL	ptr+0(FP), BX
 	MOVL	delta+4(FP), AX
 	MOVL	AX, CX
@@ -97,7 +97,7 @@ TEXT runtime∕internal∕atomic·Xadd(SB), NOSPLIT, $0-12
 	MOVL	AX, ret+8(FP)
 	RET
 
-TEXT runtime∕internal∕atomic·Xadd64(SB), NOSPLIT, $0-20
+TEXT ·Xadd64(SB), NOSPLIT, $0-20
 	// no XADDQ so use CMPXCHG8B loop
 	MOVL	ptr+0(FP), BP
 	TESTL	$7, BP
@@ -133,17 +133,17 @@ addloop:
 	MOVL	CX, ret_hi+16(FP)
 	RET
 
-TEXT runtime∕internal∕atomic·Xchg(SB), NOSPLIT, $0-12
+TEXT ·Xchg(SB), NOSPLIT, $0-12
 	MOVL	ptr+0(FP), BX
 	MOVL	new+4(FP), AX
 	XCHGL	AX, 0(BX)
 	MOVL	AX, ret+8(FP)
 	RET
 
-TEXT runtime∕internal∕atomic·Xchguintptr(SB), NOSPLIT, $0-12
-	JMP	runtime∕internal∕atomic·Xchg(SB)
+TEXT ·Xchguintptr(SB), NOSPLIT, $0-12
+	JMP	·Xchg(SB)
 
-TEXT runtime∕internal∕atomic·Xchg64(SB),NOSPLIT,$0-20
+TEXT ·Xchg64(SB),NOSPLIT,$0-20
 	// no XCHGQ so use CMPXCHG8B loop
 	MOVL	ptr+0(FP), BP
 	TESTL	$7, BP
@@ -171,23 +171,23 @@ swaploop:
 	MOVL	DX, ret_hi+16(FP)
 	RET
 
-TEXT runtime∕internal∕atomic·StorepNoWB(SB), NOSPLIT, $0-8
+TEXT ·StorepNoWB(SB), NOSPLIT, $0-8
 	MOVL	ptr+0(FP), BX
 	MOVL	val+4(FP), AX
 	XCHGL	AX, 0(BX)
 	RET
 
-TEXT runtime∕internal∕atomic·Store(SB), NOSPLIT, $0-8
+TEXT ·Store(SB), NOSPLIT, $0-8
 	MOVL	ptr+0(FP), BX
 	MOVL	val+4(FP), AX
 	XCHGL	AX, 0(BX)
 	RET
 
-TEXT runtime∕internal∕atomic·StoreRel(SB), NOSPLIT, $0-8
-	JMP	runtime∕internal∕atomic·Store(SB)
+TEXT ·StoreRel(SB), NOSPLIT, $0-8
+	JMP	·Store(SB)
 
 // uint64 atomicload64(uint64 volatile* addr);
-TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
+TEXT ·Load64(SB), NOSPLIT, $0-12
 	MOVL	ptr+0(FP), AX
 	TESTL	$7, AX
 	JZ	2(PC)
@@ -197,8 +197,8 @@ TEXT runtime∕internal∕atomic·Load64(SB), NOSPLIT, $0-12
 	EMMS
 	RET
 
-// void runtime∕internal∕atomic·Store64(uint64 volatile* addr, uint64 v);
-TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-12
+// void ·Store64(uint64 volatile* addr, uint64 v);
+TEXT ·Store64(SB), NOSPLIT, $0-12
 	MOVL	ptr+0(FP), AX
 	TESTL	$7, AX
 	JZ	2(PC)
@@ -214,23 +214,23 @@ TEXT runtime∕internal∕atomic·Store64(SB), NOSPLIT, $0-12
 	XADDL	AX, (SP)
 	RET
 
-// void runtime∕internal∕atomic·Or8(byte volatile*, byte);
-TEXT runtime∕internal∕atomic·Or8(SB), NOSPLIT, $0-5
+// void ·Or8(byte volatile*, byte);
+TEXT ·Or8(SB), NOSPLIT, $0-5
 	MOVL	ptr+0(FP), AX
 	MOVB	val+4(FP), BX
 	LOCK
 	ORB	BX, (AX)
 	RET
 
-// void runtime∕internal∕atomic·And8(byte volatile*, byte);
-TEXT runtime∕internal∕atomic·And8(SB), NOSPLIT, $0-5
+// void ·And8(byte volatile*, byte);
+TEXT ·And8(SB), NOSPLIT, $0-5
 	MOVL	ptr+0(FP), AX
 	MOVB	val+4(FP), BX
 	LOCK
 	ANDB	BX, (AX)
 	RET
 
-TEXT runtime∕internal∕atomic·Store8(SB), NOSPLIT, $0-5
+TEXT ·Store8(SB), NOSPLIT, $0-5
 	MOVL	ptr+0(FP), BX
 	MOVB	val+4(FP), AX
 	XCHGB	AX, 0(BX)
diff --git a/src/runtime/internal/atomic/asm_arm.s b/src/runtime/internal/atomic/asm_arm.s
index d4ef11560e..db1267423d 100644
--- a/src/runtime/internal/atomic/asm_arm.s
+++ b/src/runtime/internal/atomic/asm_arm.s
@@ -12,13 +12,13 @@
 // }else
 //	return 0;
 //
-// To implement runtime∕internal∕atomic·cas in sys_$GOOS_arm.s
+// To implement ·cas in sys_$GOOS_arm.s
 // using the native instructions, use:
 //
-//	TEXT runtime∕internal∕atomic·cas(SB),NOSPLIT,$0
-//		B	runtime∕internal∕atomic·armcas(SB)
+//	TEXT ·cas(SB),NOSPLIT,$0
+//		B	·armcas(SB)
 //
-TEXT runtime∕internal∕atomic·armcas(SB),NOSPLIT,$0-13
+TEXT ·armcas(SB),NOSPLIT,$0-13
 	MOVW	ptr+0(FP), R1
 	MOVW	old+4(FP), R2
 	MOVW	new+8(FP), R3
@@ -50,44 +50,44 @@ casfail:
 
 // stubs
 
-TEXT runtime∕internal∕atomic·Loadp(SB),NOSPLIT|NOFRAME,$0-8
-	B	runtime∕internal∕atomic·Load(SB)
+TEXT ·Loadp(SB),NOSPLIT|NOFRAME,$0-8
+	B	·Load(SB)
 
-TEXT runtime∕internal∕atomic·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8
-	B	runtime∕internal∕atomic·Load(SB)
+TEXT ·LoadAcq(SB),NOSPLIT|NOFRAME,$0-8
+	B	·Load(SB)
 
-TEXT runtime∕internal∕atomic·Casuintptr(SB),NOSPLIT,$0-13
-	B	runtime∕internal∕atomic·Cas(SB)
+TEXT ·Casuintptr(SB),NOSPLIT,$0-13
+	B	·Cas(SB)
 
-TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0-13
-	B	runtime∕internal∕atomic·Cas(SB)
+TEXT ·Casp1(SB),NOSPLIT,$0-13
+	B	·Cas(SB)
 
-TEXT runtime∕internal∕atomic·CasRel(SB),NOSPLIT,$0-13
-	B	runtime∕internal∕atomic·Cas(SB)
+TEXT ·CasRel(SB),NOSPLIT,$0-13
+	B	·Cas(SB)
 
-TEXT runtime∕internal∕atomic·Loaduintptr(SB),NOSPLIT,$0-8
-	B	runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduintptr(SB),NOSPLIT,$0-8
+	B	·Load(SB)
 
-TEXT runtime∕internal∕atomic·Loaduint(SB),NOSPLIT,$0-8
-	B	runtime∕internal∕atomic·Load(SB)
+TEXT ·Loaduint(SB),NOSPLIT,$0-8
+	B	·Load(SB)
 
-TEXT runtime∕internal∕atomic·Storeuintptr(SB),NOSPLIT,$0-8
-	B	runtime∕internal∕atomic·Store(SB)
+TEXT ·Storeuintptr(SB),NOSPLIT,$0-8
+	B	·Store(SB)
 
-TEXT runtime∕internal∕atomic·StorepNoWB(SB),NOSPLIT,$0-8
-	B	runtime∕internal∕atomic·Store(SB)
+TEXT ·StorepNoWB(SB),NOSPLIT,$0-8
+	B	·Store(SB)
 
-TEXT runtime∕internal∕atomic·StoreRel(SB),NOSPLIT,$0-8
-	B	runtime∕internal∕atomic·Store(SB)
+TEXT ·StoreRel(SB),NOSPLIT,$0-8
+	B	·Store(SB)
 
-TEXT runtime∕internal∕atomic·Xadduintptr(SB),NOSPLIT,$0-12
-	B	runtime∕internal∕atomic·Xadd(SB)
+TEXT ·Xadduintptr(SB),NOSPLIT,$0-12
+	B	·Xadd(SB)
 
-TEXT runtime∕internal∕atomic·Loadint64(SB),NOSPLIT,$0-12
-	B	runtime∕internal∕atomic·Load64(SB)
+TEXT ·Loadint64(SB),NOSPLIT,$0-12
+	B	·Load64(SB)
 
-TEXT runtime∕internal∕atomic·Xaddint64(SB),NOSPLIT,$0-20
-	B	runtime∕internal∕atomic·Xadd64(SB)
+TEXT ·Xaddint64(SB),NOSPLIT,$0-20
+	B	·Xadd64(SB)
 
 // 64-bit atomics
 // The native ARM implementations use LDREXD/STREXD, which are
@@ -95,7 +95,7 @@ TEXT runtime∕internal∕atomic·Xaddint64(SB),NOSPLIT,$0-20
 // On older ARM, we use Go implementations which simulate 64-bit
 // atomics with locks.
 
-TEXT armCas64<>(SB),NOSPLIT,$0-21
+TEXT armCas64<>(SB),NOSPLIT,$0-21
 	MOVW	addr+0(FP), R1
 	// make unaligned atomic access panic
 	AND.S	$7, R1, R2
@@ -128,7 +128,7 @@ cas64fail:
 	MOVBU	R0, swapped+20(FP)
 	RET
 
-TEXT armXadd64<>(SB),NOSPLIT,$0-20
+TEXT armXadd64<>(SB),NOSPLIT,$0-20
 	MOVW	addr+0(FP), R1
 	// make unaligned atomic access panic
 	AND.S	$7, R1, R2
@@ -154,7 +154,7 @@ add64loop:
 	MOVW	R5, new_hi+16(FP)
 	RET
 
-TEXT armXchg64<>(SB),NOSPLIT,$0-20
+TEXT armXchg64<>(SB),NOSPLIT,$0-20
 	MOVW	addr+0(FP), R1
 	// make unaligned atomic access panic
 	AND.S	$7, R1, R2
@@ -178,7 +178,7 @@ swap64loop:
 	MOVW	R5, old_hi+16(FP)
 	RET
 
-TEXT armLoad64<>(SB),NOSPLIT,$0-12
+TEXT armLoad64<>(SB),NOSPLIT,$0-12
 	MOVW	addr+0(FP), R1
 	// make unaligned atomic access panic
 	AND.S	$7, R1, R2
@@ -192,7 +192,7 @@ TEXT armLoad64<>(SB),NOSPLIT,$0-12
 	MOVW	R3, val_hi+8(FP)
 	RET
 
-TEXT armStore64<>(SB),NOSPLIT,$0-12
+TEXT armStore64<>(SB),NOSPLIT,$0-12
 	MOVW	addr+0(FP), R1
 	// make unaligned atomic access panic
 	AND.S	$7, R1, R2
@@ -213,35 +213,35 @@ store64loop:
 	DMB	MB_ISH
 	RET
 
-TEXT ·Cas64(SB),NOSPLIT,$0-21
+TEXT ·Cas64(SB),NOSPLIT,$0-21
 	MOVB	runtime·goarm(SB), R11
 	CMP	$7, R11
 	BLT	2(PC)
 	JMP	armCas64<>(SB)
 	JMP	·goCas64(SB)
 
-TEXT ·Xadd64(SB),NOSPLIT,$0-20
+TEXT ·Xadd64(SB),NOSPLIT,$0-20
 	MOVB	runtime·goarm(SB), R11
 	CMP	$7, R11
 	BLT	2(PC)
 	JMP	armXadd64<>(SB)
 	JMP	·goXadd64(SB)
 
-TEXT ·Xchg64(SB),NOSPLIT,$0-20
+TEXT ·Xchg64(SB),NOSPLIT,$0-20
 	MOVB	runtime·goarm(SB), R11
 	CMP	$7, R11
 	BLT	2(PC)
 	JMP	armXchg64<>(SB)
 	JMP	·goXchg64(SB)
 
-TEXT ·Load64(SB),NOSPLIT,$0-12
+TEXT ·Load64(SB),NOSPLIT,$0-12
 	MOVB	runtime·goarm(SB), R11
 	CMP	$7, R11
 	BLT	2(PC)
 	JMP	armLoad64<>(SB)
 	JMP	·goLoad64(SB)
 
-TEXT ·Store64(SB),NOSPLIT,$0-12
+TEXT ·Store64(SB),NOSPLIT,$0-12
 	MOVB	runtime·goarm(SB), R11
 	CMP	$7, R11
 	BLT	2(PC)
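
Illustration (not part of the commit): a minimal sketch of the mechanism the
commit message relies on, using a hypothetical package "demo" with
hypothetical files add.go and add_386.s. When an assembly symbol is spelled
with the unqualified middle-dot form and a Go prototype for it exists in the
same package, the linker inserts GO_ARGS implicitly (see the funcdata.h
comment above), so the function gets its argument stack map without writing
GO_ARGS by hand.

	// add.go: Go prototype only; the body lives in assembly.
	package demo

	// Add returns x + y. Implemented in add_386.s.
	func Add(x, y int32) int32

	// add_386.s: the leading middle-dot resolves to the current package,
	// which lets the toolchain pair this TEXT symbol with the Go
	// prototype above and derive its argument stack map.
	#include "textflag.h"

	TEXT ·Add(SB), NOSPLIT, $0-12
		MOVL	x+0(FP), AX
		ADDL	y+4(FP), AX
		MOVL	AX, ret+8(FP)
		RET

With the old fully qualified spelling (runtime∕internal∕atomic·Cas and the
like), that pairing did not happen automatically, which is why this CL's
rename also buys the stack-map pickup described in the commit message.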