// Call is okay if inlinable and we have the budget for the body.
case ir.OCALLFUNC:
n := n.(*ir.CallExpr)
- // Functions that call runtime.getcaller{pc,sp} can not be inlined
- // because getcaller{pc,sp} expect a pointer to the caller's first argument.
- //
- // runtime.throw is a "cheap call" like panic in normal code.
var cheap bool
if n.Fun.Op() == ir.ONAME {
name := n.Fun.(*ir.Name)
if name.Class == ir.PFUNC {
- switch fn := types.RuntimeSymName(name.Sym()); fn {
- case "getcallerpc", "getcallersp":
- v.reason = "call to " + fn
- return true
- case "throw":
- v.budget -= inlineExtraThrowCost
- break opSwitch
- case "panicrangestate":
- cheap = true
- }
- // Special case for internal/abi.NoEscape. It does just type
- // conversions to appease the escape analysis, and doesn't
- // generate code.
- if s := name.Sym(); s.Name == "NoEscape" && s.Pkg.Path == "internal/abi" {
- cheap = true
+ s := name.Sym()
+ fn := s.Name
+ switch s.Pkg.Path {
+ case "internal/abi":
+ switch fn {
+ case "NoEscape":
+ // Special case for internal/abi.NoEscape. It does just type
+ // conversions to appease the escape analysis, and doesn't
+ // generate code.
+ cheap = true
+ }
+ case "internal/runtime/sys":
+ switch fn {
+ case "GetCallerPC":
+ // Functions that call GetCallerPC cannot be inlined
+ // because users expect the PC of the logical caller,
+ // but GetCallerPC returns the PC of the physical caller.
+ v.reason = "call to " + fn
+ return true
+ }
+ case "runtime":
+ switch fn {
+ case "getcallersp":
+ // Functions that call getcallersp cannot be inlined
+ // because users expect the SP of the logical caller,
+ // but getcallersp returns the SP of the physical caller.
+ v.reason = "call to " + fn
+ return true
+ case "throw":
+ // runtime.throw is a "cheap call" like panic in normal code.
+ v.budget -= inlineExtraThrowCost
+ break opSwitch
+ case "panicrangestate":
+ cheap = true
+ }
}
}
// Special case for coverage counter updates; although
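The logical-versus-physical distinction in the comments above is the whole point of the restriction; a minimal sketch (ordinary user-level Go, not part of this CL) of the contract users rely on, since the public API reports logical frames even through inlining:

package main

import "runtime"

// callerLine reports the source line its caller will return to.
// runtime.Callers/CallersFrames reconstruct logical (pre-inlining)
// frames, which is what users expect; the raw GetCallerPC intrinsic
// sees only the physical frame, so any function built directly on it
// must never be inlined, which is what the check above enforces.
func callerLine() int {
	var pcs [1]uintptr
	runtime.Callers(2, pcs[:]) // skip runtime.Callers and callerLine itself
	frame, _ := runtime.CallersFrames(pcs[:]).Next()
	return frame.Line
}

func main() {
	println(callerLine()) // the line number of this call site
}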
// use of DX (the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
// use of DX (the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
// LoweredGetCallerSP returns the SP of the caller of the current function. arg0=mem.
{name: "LoweredGetCallerSP", argLength: 1, reg: gp01, rematerializeable: true},
// LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
- // I.e., if f calls g "calls" getcallerpc,
+ // I.e., if f calls g and g "calls" sys.GetCallerPC,
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
// Pseudo-ops
{name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
{name: "GetClosurePtr"}, // get closure pointer from dedicated register
- {name: "GetCallerPC"}, // for getcallerpc intrinsic
+ {name: "GetCallerPC"}, // for GetCallerPC intrinsic
{name: "GetCallerSP", argLength: 1}, // for getcallersp intrinsic. arg0=mem.
// Indexing operations
},
all...)
- add("runtime", "getcallerpc",
- func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
- return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
- },
- all...)
-
add("runtime", "getcallersp",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr, s.mem())
},
sys.ARM64, sys.PPC64, sys.RISCV64)
+ /******** internal/runtime/sys ********/
+ add("internal/runtime/sys", "GetCallerPC",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
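Once registered, a call to sys.GetCallerPC is substituted in the SSA builder rather than compiled as a call; a rough sketch of the pipeline (hedged; the exact lowering is architecture-specific):

// Source:
//	pc := sys.GetCallerPC()
//
// SSA after intrinsic substitution (conceptual):
//	v = GetCallerPC <uintptr>
//
// The per-architecture rules rewrite this to LoweredGetCallerPC, which
// code generation turns into a load of the frame's return address.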
brev_arch := []sys.ArchFamily{sys.AMD64, sys.I386, sys.ARM64, sys.ARM, sys.S390X}
if cfg.goppc64 >= 10 {
// Use only on Power10 as the new byte reverse instructions that Power10 provide
// make it worthwhile as an intrinsic
brev_arch = append(brev_arch, sys.PPC64)
}
- /******** internal/runtime/sys ********/
addF("internal/runtime/sys", "Bswap32",
func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
fn := sym.Name
if ssa.IntrinsicsDisable {
- if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
+ if pkg == "runtime" && (fn == "getcallersp" || fn == "getclosureptr") {
+ // These runtime functions don't have definitions, must be intrinsics.
+ } else if pkg == "internal/runtime/sys" && fn == "GetCallerPC" {
// These runtime functions don't have definitions, must be intrinsics.
} else {
return nil
{"386", "internal/runtime/math", "MulUintptr"}: struct{}{},
{"386", "internal/runtime/sys", "Bswap32"}: struct{}{},
{"386", "internal/runtime/sys", "Bswap64"}: struct{}{},
+ {"386", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"386", "internal/runtime/sys", "TrailingZeros32"}: struct{}{},
{"386", "internal/runtime/sys", "TrailingZeros64"}: struct{}{},
{"386", "internal/runtime/sys", "TrailingZeros8"}: struct{}{},
{"386", "math/bits", "TrailingZeros64"}: struct{}{},
{"386", "math/bits", "TrailingZeros8"}: struct{}{},
{"386", "runtime", "KeepAlive"}: struct{}{},
- {"386", "runtime", "getcallerpc"}: struct{}{},
{"386", "runtime", "getcallersp"}: struct{}{},
{"386", "runtime", "getclosureptr"}: struct{}{},
{"386", "runtime", "slicebytetostringtmp"}: struct{}{},
{"amd64", "internal/runtime/math", "MulUintptr"}: struct{}{},
{"amd64", "internal/runtime/sys", "Bswap32"}: struct{}{},
{"amd64", "internal/runtime/sys", "Bswap64"}: struct{}{},
+ {"amd64", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"amd64", "internal/runtime/sys", "Len64"}: struct{}{},
{"amd64", "internal/runtime/sys", "Len8"}: struct{}{},
{"amd64", "internal/runtime/sys", "OnesCount64"}: struct{}{},
{"amd64", "math/bits", "TrailingZeros64"}: struct{}{},
{"amd64", "math/bits", "TrailingZeros8"}: struct{}{},
{"amd64", "runtime", "KeepAlive"}: struct{}{},
- {"amd64", "runtime", "getcallerpc"}: struct{}{},
{"amd64", "runtime", "getcallersp"}: struct{}{},
{"amd64", "runtime", "getclosureptr"}: struct{}{},
{"amd64", "runtime", "slicebytetostringtmp"}: struct{}{},
{"amd64", "sync/atomic", "SwapUintptr"}: struct{}{},
{"arm", "internal/runtime/sys", "Bswap32"}: struct{}{},
{"arm", "internal/runtime/sys", "Bswap64"}: struct{}{},
+ {"arm", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"arm", "internal/runtime/sys", "Len64"}: struct{}{},
{"arm", "internal/runtime/sys", "Len8"}: struct{}{},
{"arm", "internal/runtime/sys", "TrailingZeros32"}: struct{}{},
{"arm", "math/bits", "TrailingZeros64"}: struct{}{},
{"arm", "math/bits", "TrailingZeros8"}: struct{}{},
{"arm", "runtime", "KeepAlive"}: struct{}{},
- {"arm", "runtime", "getcallerpc"}: struct{}{},
{"arm", "runtime", "getcallersp"}: struct{}{},
{"arm", "runtime", "getclosureptr"}: struct{}{},
{"arm", "runtime", "slicebytetostringtmp"}: struct{}{},
{"arm64", "internal/runtime/math", "MulUintptr"}: struct{}{},
{"arm64", "internal/runtime/sys", "Bswap32"}: struct{}{},
{"arm64", "internal/runtime/sys", "Bswap64"}: struct{}{},
+ {"arm64", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"arm64", "internal/runtime/sys", "Len64"}: struct{}{},
{"arm64", "internal/runtime/sys", "Len8"}: struct{}{},
{"arm64", "internal/runtime/sys", "OnesCount64"}: struct{}{},
{"arm64", "math/bits", "TrailingZeros64"}: struct{}{},
{"arm64", "math/bits", "TrailingZeros8"}: struct{}{},
{"arm64", "runtime", "KeepAlive"}: struct{}{},
- {"arm64", "runtime", "getcallerpc"}: struct{}{},
{"arm64", "runtime", "getcallersp"}: struct{}{},
{"arm64", "runtime", "getclosureptr"}: struct{}{},
{"arm64", "runtime", "publicationBarrier"}: struct{}{},
{"loong64", "internal/runtime/math", "Add64"}: struct{}{},
{"loong64", "internal/runtime/math", "Mul64"}: struct{}{},
{"loong64", "internal/runtime/math", "MulUintptr"}: struct{}{},
+ {"loong64", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"loong64", "math", "Abs"}: struct{}{},
{"loong64", "math", "Copysign"}: struct{}{},
{"loong64", "math", "sqrt"}: struct{}{},
{"loong64", "math/bits", "Sub"}: struct{}{},
{"loong64", "math/bits", "Sub64"}: struct{}{},
{"loong64", "runtime", "KeepAlive"}: struct{}{},
- {"loong64", "runtime", "getcallerpc"}: struct{}{},
{"loong64", "runtime", "getcallersp"}: struct{}{},
{"loong64", "runtime", "getclosureptr"}: struct{}{},
{"loong64", "runtime", "slicebytetostringtmp"}: struct{}{},
{"mips", "internal/runtime/atomic", "Xchg"}: struct{}{},
{"mips", "internal/runtime/atomic", "Xchgint32"}: struct{}{},
{"mips", "internal/runtime/atomic", "Xchguintptr"}: struct{}{},
+ {"mips", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"mips", "internal/runtime/sys", "Len64"}: struct{}{},
{"mips", "internal/runtime/sys", "Len8"}: struct{}{},
{"mips", "internal/runtime/sys", "TrailingZeros32"}: struct{}{},
{"mips", "math/bits", "TrailingZeros64"}: struct{}{},
{"mips", "math/bits", "TrailingZeros8"}: struct{}{},
{"mips", "runtime", "KeepAlive"}: struct{}{},
- {"mips", "runtime", "getcallerpc"}: struct{}{},
{"mips", "runtime", "getcallersp"}: struct{}{},
{"mips", "runtime", "getclosureptr"}: struct{}{},
{"mips", "runtime", "slicebytetostringtmp"}: struct{}{},
{"mips64", "internal/runtime/math", "Add64"}: struct{}{},
{"mips64", "internal/runtime/math", "Mul64"}: struct{}{},
{"mips64", "internal/runtime/math", "MulUintptr"}: struct{}{},
+ {"mips64", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"mips64", "math", "Abs"}: struct{}{},
{"mips64", "math", "sqrt"}: struct{}{},
{"mips64", "math/big", "mulWW"}: struct{}{},
{"mips64", "math/bits", "Sub"}: struct{}{},
{"mips64", "math/bits", "Sub64"}: struct{}{},
{"mips64", "runtime", "KeepAlive"}: struct{}{},
- {"mips64", "runtime", "getcallerpc"}: struct{}{},
{"mips64", "runtime", "getcallersp"}: struct{}{},
{"mips64", "runtime", "getclosureptr"}: struct{}{},
{"mips64", "runtime", "slicebytetostringtmp"}: struct{}{},
{"mips64le", "internal/runtime/math", "Add64"}: struct{}{},
{"mips64le", "internal/runtime/math", "Mul64"}: struct{}{},
{"mips64le", "internal/runtime/math", "MulUintptr"}: struct{}{},
+ {"mips64le", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"mips64le", "math", "Abs"}: struct{}{},
{"mips64le", "math", "sqrt"}: struct{}{},
{"mips64le", "math/big", "mulWW"}: struct{}{},
{"mips64le", "math/bits", "Sub"}: struct{}{},
{"mips64le", "math/bits", "Sub64"}: struct{}{},
{"mips64le", "runtime", "KeepAlive"}: struct{}{},
- {"mips64le", "runtime", "getcallerpc"}: struct{}{},
{"mips64le", "runtime", "getcallersp"}: struct{}{},
{"mips64le", "runtime", "getclosureptr"}: struct{}{},
{"mips64le", "runtime", "slicebytetostringtmp"}: struct{}{},
{"mipsle", "internal/runtime/atomic", "Xchg"}: struct{}{},
{"mipsle", "internal/runtime/atomic", "Xchgint32"}: struct{}{},
{"mipsle", "internal/runtime/atomic", "Xchguintptr"}: struct{}{},
+ {"mipsle", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"mipsle", "internal/runtime/sys", "Len64"}: struct{}{},
{"mipsle", "internal/runtime/sys", "Len8"}: struct{}{},
{"mipsle", "internal/runtime/sys", "TrailingZeros32"}: struct{}{},
{"mipsle", "math/bits", "TrailingZeros64"}: struct{}{},
{"mipsle", "math/bits", "TrailingZeros8"}: struct{}{},
{"mipsle", "runtime", "KeepAlive"}: struct{}{},
- {"mipsle", "runtime", "getcallerpc"}: struct{}{},
{"mipsle", "runtime", "getcallersp"}: struct{}{},
{"mipsle", "runtime", "getclosureptr"}: struct{}{},
{"mipsle", "runtime", "slicebytetostringtmp"}: struct{}{},
{"ppc64", "internal/runtime/math", "MulUintptr"}: struct{}{},
{"ppc64", "internal/runtime/sys", "Bswap32"}: struct{}{},
{"ppc64", "internal/runtime/sys", "Bswap64"}: struct{}{},
+ {"ppc64", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"ppc64", "internal/runtime/sys", "Len64"}: struct{}{},
{"ppc64", "internal/runtime/sys", "Len8"}: struct{}{},
{"ppc64", "internal/runtime/sys", "OnesCount64"}: struct{}{},
{"ppc64", "math/bits", "TrailingZeros32"}: struct{}{},
{"ppc64", "math/bits", "TrailingZeros64"}: struct{}{},
{"ppc64", "runtime", "KeepAlive"}: struct{}{},
- {"ppc64", "runtime", "getcallerpc"}: struct{}{},
{"ppc64", "runtime", "getcallersp"}: struct{}{},
{"ppc64", "runtime", "getclosureptr"}: struct{}{},
{"ppc64", "runtime", "publicationBarrier"}: struct{}{},
{"ppc64le", "internal/runtime/math", "MulUintptr"}: struct{}{},
{"ppc64le", "internal/runtime/sys", "Bswap32"}: struct{}{},
{"ppc64le", "internal/runtime/sys", "Bswap64"}: struct{}{},
+ {"ppc64le", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"ppc64le", "internal/runtime/sys", "Len64"}: struct{}{},
{"ppc64le", "internal/runtime/sys", "Len8"}: struct{}{},
{"ppc64le", "internal/runtime/sys", "OnesCount64"}: struct{}{},
{"ppc64le", "math/bits", "TrailingZeros32"}: struct{}{},
{"ppc64le", "math/bits", "TrailingZeros64"}: struct{}{},
{"ppc64le", "runtime", "KeepAlive"}: struct{}{},
- {"ppc64le", "runtime", "getcallerpc"}: struct{}{},
{"ppc64le", "runtime", "getcallersp"}: struct{}{},
{"ppc64le", "runtime", "getclosureptr"}: struct{}{},
{"ppc64le", "runtime", "publicationBarrier"}: struct{}{},
{"riscv64", "internal/runtime/math", "Add64"}: struct{}{},
{"riscv64", "internal/runtime/math", "Mul64"}: struct{}{},
{"riscv64", "internal/runtime/math", "MulUintptr"}: struct{}{},
+ {"riscv64", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"riscv64", "math", "Abs"}: struct{}{},
{"riscv64", "math", "Copysign"}: struct{}{},
{"riscv64", "math", "FMA"}: struct{}{},
{"riscv64", "math/bits", "Sub"}: struct{}{},
{"riscv64", "math/bits", "Sub64"}: struct{}{},
{"riscv64", "runtime", "KeepAlive"}: struct{}{},
- {"riscv64", "runtime", "getcallerpc"}: struct{}{},
{"riscv64", "runtime", "getcallersp"}: struct{}{},
{"riscv64", "runtime", "getclosureptr"}: struct{}{},
{"riscv64", "runtime", "publicationBarrier"}: struct{}{},
{"s390x", "internal/runtime/math", "Mul64"}: struct{}{},
{"s390x", "internal/runtime/sys", "Bswap32"}: struct{}{},
{"s390x", "internal/runtime/sys", "Bswap64"}: struct{}{},
+ {"s390x", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"s390x", "internal/runtime/sys", "Len64"}: struct{}{},
{"s390x", "internal/runtime/sys", "Len8"}: struct{}{},
{"s390x", "internal/runtime/sys", "OnesCount64"}: struct{}{},
{"s390x", "math/bits", "TrailingZeros64"}: struct{}{},
{"s390x", "math/bits", "TrailingZeros8"}: struct{}{},
{"s390x", "runtime", "KeepAlive"}: struct{}{},
- {"s390x", "runtime", "getcallerpc"}: struct{}{},
{"s390x", "runtime", "getcallersp"}: struct{}{},
{"s390x", "runtime", "getclosureptr"}: struct{}{},
{"s390x", "runtime", "slicebytetostringtmp"}: struct{}{},
{"s390x", "sync/atomic", "SwapUint32"}: struct{}{},
{"s390x", "sync/atomic", "SwapUint64"}: struct{}{},
{"s390x", "sync/atomic", "SwapUintptr"}: struct{}{},
+ {"wasm", "internal/runtime/sys", "GetCallerPC"}: struct{}{},
{"wasm", "internal/runtime/sys", "Len64"}: struct{}{},
{"wasm", "internal/runtime/sys", "Len8"}: struct{}{},
{"wasm", "internal/runtime/sys", "OnesCount64"}: struct{}{},
{"wasm", "math/bits", "TrailingZeros64"}: struct{}{},
{"wasm", "math/bits", "TrailingZeros8"}: struct{}{},
{"wasm", "runtime", "KeepAlive"}: struct{}{},
- {"wasm", "runtime", "getcallerpc"}: struct{}{},
{"wasm", "runtime", "getcallersp"}: struct{}{},
{"wasm", "runtime", "getclosureptr"}: struct{}{},
{"wasm", "runtime", "slicebytetostringtmp"}: struct{}{},
// dirs are the directories to look for *.go files in.
// TODO(bradfitz): just use all directories?
- dirs = []string{".", "ken", "chan", "interface", "syntax", "dwarf", "fixedbugs", "codegen", "runtime", "abi", "typeparam", "typeparam/mdempsky", "arenas"}
+ dirs = []string{".", "ken", "chan", "interface", "internal/runtime/sys", "syntax", "dwarf", "fixedbugs", "codegen", "runtime", "abi", "typeparam", "typeparam/mdempsky", "arenas"}
)
// Test is the main entrypoint that runs tests in the GOROOT/test directory.
--- /dev/null
+// Empty assembly file to allow empty function bodies for intrinsics.
//
// ARM64: Produce PRFM instruction with PLDL1STRM option
func PrefetchStreamed(addr uintptr) {}
+
+// GetCallerPC returns the program counter (PC) of its caller's caller.
+// getcallersp returns the stack pointer (SP) of its caller's caller.
+// Both are implemented as intrinsics on every platform.
+//
+// For example:
+//
+// func f(arg1, arg2, arg3 int) {
+// pc := GetCallerPC()
+// sp := getcallersp()
+// }
+//
+// These two lines find the PC and SP immediately following
+// the call to f (where f will return).
+//
+// The calls to GetCallerPC and getcallersp must be made in the
+// frame being asked about.
+//
+// The result of getcallersp is correct at the time of the return,
+// but it may be invalidated by any subsequent call to a function
+// that might relocate the stack in order to grow or shrink it.
+// A general rule is that the result of getcallersp should be used
+// immediately and can only be passed to nosplit functions.
+
+func GetCallerPC() uintptr
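The rules above shape every call site converted later in this CL: read both values at the top of the frame and hand them straight to the consumer; a condensed sketch as it would appear inside package runtime (record is a hypothetical nosplit helper):

//go:nosplit
func instrumented() {
	sp := getcallersp()     // caller's SP; stale once the stack moves
	pc := sys.GetCallerPC() // caller's resume PC; stable
	record(pc, sp)          // hypothetical; must itself be nosplit
}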
package runtime
import (
+ "internal/runtime/sys"
"unsafe"
)
// Public address sanitizer API.
func ASanRead(addr unsafe.Pointer, len int) {
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
doasanread(addr, uintptr(len), sp, pc)
}
func ASanWrite(addr unsafe.Pointer, len int) {
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
doasanwrite(addr, uintptr(len), sp, pc)
}
//go:nosplit
func asanread(addr unsafe.Pointer, sz uintptr) {
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
doasanread(addr, sz, sp, pc)
}
//go:nosplit
func asanwrite(addr unsafe.Pointer, sz uintptr) {
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
doasanwrite(addr, sz, sp, pc)
}
"internal/abi"
"internal/runtime/atomic"
"internal/runtime/math"
+ "internal/runtime/sys"
"unsafe"
)
//
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
- chansend(c, elem, true, getcallerpc())
+ chansend(c, elem, true, sys.GetCallerPC())
}
/*
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan))
racerelease(c.raceaddr())
}
// ... bar
// }
func selectnbsend(c *hchan, elem unsafe.Pointer) (selected bool) {
- return chansend(c, elem, false, getcallerpc())
+ return chansend(c, elem, false, sys.GetCallerPC())
}
// compiler implements
//go:linkname reflect_chansend reflect.chansend0
func reflect_chansend(c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
- return chansend(c, elem, !nb, getcallerpc())
+ return chansend(c, elem, !nb, sys.GetCallerPC())
}
//go:linkname reflect_chanrecv reflect.chanrecv
package runtime
-import "unsafe"
+import (
+ "internal/runtime/sys"
+ "unsafe"
+)
// A coro represents extra concurrency without extra parallelism,
// as would be needed for a coroutine implementation.
func newcoro(f func(*coro)) *coro {
c := new(coro)
c.f = f
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
gp := getg()
systemstack(func() {
mp := gp.m
import (
"internal/abi"
+ "internal/runtime/sys"
"unsafe"
)
//go:nosplit
func debugCallWrap(dispatch uintptr) {
var lockedExt uint32
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
gp := getg()
// Lock ourselves to the OS thread.
import (
"internal/abi"
"internal/bytealg"
+ "internal/runtime/sys"
)
// The Error interface identifies a run time error.
//
// It is called from the generated wrapper code.
func panicwrap() {
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
name := funcNameForPrint(funcname(findfunc(pc)))
// name is something like "main.(*T).F".
// We want to extract pkg ("main"), typ ("T"), and meth ("F").
//go:noinline
func TracebackSystemstack(stk []uintptr, i int) int {
if i == 0 {
- pc, sp := getcallerpc(), getcallersp()
+ pc, sp := sys.GetCallerPC(), getcallersp()
var u unwinder
u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
return tracebackPCs(&u, 0, stk)
package runtime
-import "unsafe"
+import (
+ "internal/runtime/sys"
+ "unsafe"
+)
const MaxArgs = maxArgs
func NewContextStub() *ContextStub {
var ctx context
- ctx.set_ip(getcallerpc())
+ ctx.set_ip(sys.GetCallerPC())
ctx.set_sp(getcallersp())
ctx.set_fp(getcallerfp())
return &ContextStub{ctx}
// be used as the second word of an interface value.
func convT(t *_type, v unsafe.Pointer) unsafe.Pointer {
if raceenabled {
- raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convT))
+ raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convT))
}
if msanenabled {
msanread(v, t.Size_)
func convTnoptr(t *_type, v unsafe.Pointer) unsafe.Pointer {
// TODO: maybe take size instead of type?
if raceenabled {
- raceReadObjectPC(t, v, getcallerpc(), abi.FuncPCABIInternal(convTnoptr))
+ raceReadObjectPC(t, v, sys.GetCallerPC(), abi.FuncPCABIInternal(convTnoptr))
}
if msanenabled {
msanread(v, t.Size_)
import (
"internal/abi"
"internal/goarch"
+ "internal/runtime/sys"
"unsafe"
)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
//go:linkname mapaccess2_fast32
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
}
if h == nil || h.count == 0 {
import (
"internal/abi"
"internal/goarch"
+ "internal/runtime/sys"
"unsafe"
)
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
//go:linkname mapaccess2_fast64
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
}
if h == nil || h.count == 0 {
import (
"internal/abi"
"internal/goarch"
+ "internal/runtime/sys"
"unsafe"
)
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
//go:linkname mapaccess2_faststr
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_faststr))
}
if h == nil || h.count == 0 {
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/math"
+ "internal/runtime/sys"
"unsafe"
)
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
//go:linkname mapaccess2
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
//go:linkname mapdelete
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
//go:linkname mapiterinit
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
}
func mapiternext(it *hiter) {
h := it.h
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
}
if h.flags&hashWriting != 0 {
// It is called by the compiler.
func mapclear(t *maptype, h *hmap) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapclear)
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
return 0
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
return 0
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
"internal/goarch"
"internal/runtime/atomic"
"internal/runtime/math"
+ "internal/runtime/sys"
"unsafe"
)
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapaccess1)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapaccess2)
racereadpc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapassign)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapdelete)
racewritepc(unsafe.Pointer(h), callerpc, pc)
raceReadObjectPC(t.Key, key, callerpc, pc)
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
}
func mapiternext(it *hiter) {
h := it.h
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
}
if h.flags&hashWriting != 0 {
// mapclear deletes all keys from a map.
func mapclear(t *maptype, h *hmap) {
if raceenabled && h != nil {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(mapclear)
racewritepc(unsafe.Pointer(h), callerpc, pc)
}
return 0
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
return 0
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
}
return h.count
"internal/abi"
"internal/goarch"
"internal/goexperiment"
+ "internal/runtime/sys"
"unsafe"
)
//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
if raceenabled {
- raceWriteObjectPC(typ, dst, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
- raceReadObjectPC(typ, src, getcallerpc(), abi.FuncPCABIInternal(reflect_typedmemmove))
+ raceWriteObjectPC(typ, dst, sys.GetCallerPC(), abi.FuncPCABIInternal(reflect_typedmemmove))
+ raceReadObjectPC(typ, src, sys.GetCallerPC(), abi.FuncPCABIInternal(reflect_typedmemmove))
}
if msanenabled {
msanwrite(dst, typ.Size_)
// assignment operations, it's not instrumented in the calling
// code and needs its own instrumentation.
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(slicecopy)
racewriterangepc(dstPtr, uintptr(n)*typ.Size_, callerpc, pc)
racereadrangepc(srcPtr, uintptr(n)*typ.Size_, callerpc, pc)
var nstk int
gp := getg()
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
systemstack(func() {
var u unwinder
u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
dst.AllocObjects = src.AllocObjects
dst.FreeObjects = src.FreeObjects
if raceenabled {
- racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
+ racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(MemProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
dst.Count = src.Count
dst.Cycles = src.Cycles
if raceenabled {
- racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
+ racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), sys.GetCallerPC(), abi.FuncPCABIInternal(BlockProfile))
}
if msanenabled {
msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
// Save current goroutine.
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
systemstack(func() {
saveg(pc, sp, ourg, &p[0], pcbuf)
})
// Save current goroutine.
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
systemstack(func() {
saveg(pc, sp, gp, &r[0], pcbuf)
})
if len(buf) > 0 {
gp := getg()
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
systemstack(func() {
g0 := getg()
// Force traceback=1 to override GOTRACEBACK setting,
package runtime
import (
+ "internal/runtime/sys"
"unsafe"
)
resetLibcall := true
if mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
resetLibcall := true
if mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
resetLibcall := true
if mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
resetLibcall := true
if mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
resetLibcall := true
if mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
resetLibcall := true
if mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
resetLibcall := true
if mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
package runtime
-import "unsafe"
+import (
+ "internal/runtime/sys"
+ "unsafe"
+)
type mts struct {
tv_sec int64
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
"internal/abi"
"internal/goarch"
"internal/runtime/atomic"
+ "internal/runtime/sys"
"unsafe"
)
if mp.profilehz != 0 && mp.libcallsp == 0 {
// leave pc/sp for cpu profiler
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
//
//go:yeswritebarrierrec
func goPanicIndex(x int, y int) {
- panicCheck1(getcallerpc(), "index out of range")
+ panicCheck1(sys.GetCallerPC(), "index out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
//go:yeswritebarrierrec
func goPanicIndexU(x uint, y int) {
- panicCheck1(getcallerpc(), "index out of range")
+ panicCheck1(sys.GetCallerPC(), "index out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}
//
//go:yeswritebarrierrec
func goPanicSliceAlen(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
//go:yeswritebarrierrec
func goPanicSliceAlenU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
//go:yeswritebarrierrec
func goPanicSliceAcap(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
//go:yeswritebarrierrec
func goPanicSliceAcapU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}
//
//go:yeswritebarrierrec
func goPanicSliceB(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
//go:yeswritebarrierrec
func goPanicSliceBU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}
// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}
// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}
// failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s)
func goPanicSliceConvert(x int, y int) {
- panicCheck1(getcallerpc(), "slice length too short to convert to array or pointer to array")
+ panicCheck1(sys.GetCallerPC(), "slice length too short to convert to array or pointer to array")
panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}
//go:yeswritebarrierrec
func panicshift() {
- panicCheck1(getcallerpc(), "negative shift amount")
+ panicCheck1(sys.GetCallerPC(), "negative shift amount")
panic(shiftError)
}
d.link = gp._defer
gp._defer = d
d.fn = fn
- d.pc = getcallerpc()
+ d.pc = sys.GetCallerPC()
// We must not be preempted between calling getcallersp and
// storing it to d.sp because getcallersp's result is a
// uintptr stack pointer.
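The worry is concrete: stack copying rewrites typed pointers into a moved stack but cannot see addresses stored as plain integers, so a preemption between the two statements would leave a stale value (hedged illustration):

//	sp := getcallersp() // plain uintptr; invisible to the stack copier
//	                    // a preemption here could copy and move the stack
//	d.sp = sp           // would then point into the old stack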
d := newdefer()
d.link = gp._defer
gp._defer = d
- d.pc = getcallerpc()
+ d.pc = sys.GetCallerPC()
// We must not be preempted between calling getcallersp and
// storing it to d.sp because getcallersp's result is a
// uintptr stack pointer.
func deferprocat(fn func(), frame any) {
head := frame.(*atomic.Pointer[_defer])
if raceenabled {
- racewritepc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferprocat))
+ racewritepc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferprocat))
}
d1 := newdefer()
d1.fn = fn
func deferconvert(d0 *_defer) {
head := d0.head
if raceenabled {
- racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert))
+ racereadpc(unsafe.Pointer(head), sys.GetCallerPC(), abi.FuncPCABIInternal(deferconvert))
}
tail := d0.link
d0.rangefunc = false
d.heap = false
d.rangefunc = false
d.sp = getcallersp()
- d.pc = getcallerpc()
+ d.pc = sys.GetCallerPC()
// The lines below implement:
// d.panic = nil
// d.fd = nil
var p _panic
p.deferreturn = true
- p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
+ p.start(sys.GetCallerPC(), unsafe.Pointer(getcallersp()))
for {
fn, ok := p.nextDefer()
if !ok {
var p _panic
p.goexit = true
- p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
+ p.start(sys.GetCallerPC(), unsafe.Pointer(getcallersp()))
for {
fn, ok := p.nextDefer()
if !ok {
runningPanicDefers.Add(1)
- p.start(getcallerpc(), unsafe.Pointer(getcallersp()))
+ p.start(sys.GetCallerPC(), unsafe.Pointer(getcallersp()))
for {
fn, ok := p.nextDefer()
if !ok {
// that have been recovered. Also, so that if p is from Goexit, we
// can restart its defer processing loop if a recovered panic tries
// to jump past it.
- p.startPC = getcallerpc()
+ p.startPC = sys.GetCallerPC()
p.startSP = unsafe.Pointer(getcallersp())
if p.deferreturn {
//
//go:nosplit
func fatalthrow(t throwType) {
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
sp := getcallersp()
gp := getg()
//
//go:nosplit
func fatalpanic(msgs *_panic) {
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
sp := getcallersp()
gp := getg()
var docrash bool
package runtime
+import (
+ "internal/runtime/sys"
+)
+
// Additional index/slice error paths for 32-bit platforms.
// Used when the high word of a 64-bit index is not zero.
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicExtendIndex(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "index out of range")
+ panicCheck1(sys.GetCallerPC(), "index out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsIndex})
}
func goPanicExtendIndexU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "index out of range")
+ panicCheck1(sys.GetCallerPC(), "index out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsIndex})
}
// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicExtendSliceAlen(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceAlen})
}
func goPanicExtendSliceAlenU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceAlen})
}
func goPanicExtendSliceAcap(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceAcap})
}
func goPanicExtendSliceAcapU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceAcap})
}
// failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicExtendSliceB(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSliceB})
}
func goPanicExtendSliceBU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSliceB})
}
// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicExtendSlice3Alen(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicExtendSlice3AlenU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicExtendSlice3Acap(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicExtendSlice3AcapU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3Acap})
}
// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicExtendSlice3B(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3B})
}
func goPanicExtendSlice3BU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3B})
}
// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicExtendSlice3C(hi int, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: true, y: y, code: boundsSlice3C})
}
func goPanicExtendSlice3CU(hi uint, lo uint, y int) {
- panicCheck1(getcallerpc(), "slice bounds out of range")
+ panicCheck1(sys.GetCallerPC(), "slice bounds out of range")
panic(boundsError{x: int64(hi)<<32 + int64(lo), signed: false, y: y, code: boundsSlice3C})
}
mexit(osStack)
}
-// The go:noinline is to guarantee the getcallerpc/getcallersp below are safe,
+// The go:noinline is to guarantee the sys.GetCallerPC/getcallersp below are safe,
// so that we can set up g0.sched to return to the call of mstart1 above.
//
//go:noinline
// And goexit0 does a gogo that needs to return from mstart1
// and let mstart0 exit the thread.
gp.sched.g = guintptr(unsafe.Pointer(gp))
- gp.sched.pc = getcallerpc()
+ gp.sched.pc = sys.GetCallerPC()
gp.sched.sp = getcallersp()
asminit()
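Concretely, the saved pair lets a later gogo resume mstart0 as if mstart1 had just returned; a sketch of the control flow the noinline guarantee protects:

// mstart0:
//	...
//	mstart1()      // g0.sched.pc/sp saved above point just past this call
//	mexit(osStack) // reached when goexit0 does gogo(&g0.sched)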
// the stack. This results in exceeding the nosplit stack requirements
// on some platforms.
fp := getcallerfp()
- reentersyscall(getcallerpc(), getcallersp(), fp)
+ reentersyscall(sys.GetCallerPC(), getcallersp(), fp)
}
func entersyscall_sysmon() {
gp.m.p.ptr().syscalltick++
// Leave SP around for GC and traceback.
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
sp := getcallersp()
bp := getcallerfp()
save(pc, sp, bp)
systemstack(entersyscallblock_handoff)
// Resave for traceback during blocked call.
- save(getcallerpc(), getcallersp(), getcallerfp())
+ save(sys.GetCallerPC(), getcallersp(), getcallerfp())
gp.m.locks--
}
// The compiler turns a go statement into a call to this.
func newproc(fn *funcval) {
gp := getg()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
systemstack(func() {
newg := newproc1(fn, gp, pc, false, waitReasonZero)
// func runtime·raceread(addr uintptr)
// Called from instrumented code.
// Defined as ABIInternal so as to avoid introducing a wrapper,
-// which would render runtime.getcallerpc ineffective.
+// which would render sys.GetCallerPC ineffective.
TEXT runtime·raceread<ABIInternal>(SB), NOSPLIT, $0-8
MOVQ AX, RARG1
MOVQ (SP), RARG2
// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
// Defined as ABIInternal so as to avoid introducing a wrapper,
-// which would render runtime.getcallerpc ineffective.
+// which would render sys.GetCallerPC ineffective.
TEXT runtime·racewrite<ABIInternal>(SB), NOSPLIT, $0-8
MOVQ AX, RARG1
MOVQ (SP), RARG2
// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
// Defined as ABIInternal so as to avoid introducing a wrapper,
-// which would render runtime.getcallerpc ineffective.
+// which would render sys.GetCallerPC ineffective.
TEXT runtime·racereadrange<ABIInternal>(SB), NOSPLIT, $0-16
MOVQ AX, RARG1
MOVQ BX, RARG2
// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
// Defined as ABIInternal so as to avoid introducing a wrapper,
-// which would render runtime.getcallerpc ineffective.
+// which would render sys.GetCallerPC ineffective.
TEXT runtime·racewriterange<ABIInternal>(SB), NOSPLIT, $0-16
MOVQ AX, RARG1
MOVQ BX, RARG2
import (
"internal/abi"
+ "internal/runtime/sys"
"unsafe"
)
)
func selectsetpc(pc *uintptr) {
- *pc = getcallerpc()
+ *pc = sys.GetCallerPC()
}
func sellock(scases []scase, lockorder []uint16) {
}
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(makeslicecopy)
racereadrangepc(from, copymem, callerpc, pc)
}
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
oldLen := newLen - num
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
}
if msanenabled {
size := uintptr(n) * width
if raceenabled {
- callerpc := getcallerpc()
+ callerpc := sys.GetCallerPC()
pc := abi.FuncPCABIInternal(slicecopy)
racereadrangepc(fromPtr, size, callerpc, pc)
racewriterangepc(toPtr, size, callerpc, pc)
"internal/abi"
"internal/bytealg"
"internal/goarch"
+ "internal/runtime/sys"
"unsafe"
)
if raceenabled {
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
- getcallerpc(),
+ sys.GetCallerPC(),
abi.FuncPCABIInternal(slicebytetostring))
}
if msanenabled {
if raceenabled && n > 0 {
racereadrangepc(unsafe.Pointer(ptr),
uintptr(n),
- getcallerpc(),
+ sys.GetCallerPC(),
abi.FuncPCABIInternal(slicebytetostringtmp))
}
if msanenabled && n > 0 {
if raceenabled && len(a) > 0 {
racereadrangepc(unsafe.Pointer(&a[0]),
uintptr(len(a))*unsafe.Sizeof(a[0]),
- getcallerpc(),
+ sys.GetCallerPC(),
abi.FuncPCABIInternal(slicerunetostring))
}
if msanenabled && len(a) > 0 {
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.
-func getcallerpc() uintptr
func getcallersp() uintptr
package runtime
-import "unsafe"
+import (
+ "internal/runtime/sys"
+ "unsafe"
+)
// Call fn with arg as its argument. Return what fn returns.
// fn is the raw pc value of the entry point of the desired function.
}
if mp != nil && mp.libcallsp == 0 {
mp.libcallg.set(gp)
- mp.libcallpc = getcallerpc()
+ mp.libcallpc = sys.GetCallerPC()
// sp must be the last, because once async cpu profiler finds
// all three values to be non-zero, it will use them
mp.libcallsp = getcallersp()
}
// tracebacktrap is like traceback but expects that the PC and SP were obtained
-// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
+// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or GetCallerPC/getcallersp.
// Because they are from a trap instead of from a saved pair,
// the initial PC must not be rewound to the previous instruction.
// (All the saved pairs record a PC that is a return address, so we
//go:linkname callers
func callers(skip int, pcbuf []uintptr) int {
sp := getcallersp()
- pc := getcallerpc()
+ pc := sys.GetCallerPC()
gp := getg()
var n int
systemstack(func() {
import (
"internal/runtime/math"
+ "internal/runtime/sys"
"unsafe"
)
// Keep this code in sync with cmd/compile/internal/walk/builtin.go:walkUnsafeSlice
func unsafeslice(et *_type, ptr unsafe.Pointer, len int) {
if len < 0 {
- panicunsafeslicelen1(getcallerpc())
+ panicunsafeslicelen1(sys.GetCallerPC())
}
if et.Size_ == 0 {
if ptr == nil && len > 0 {
- panicunsafeslicenilptr1(getcallerpc())
+ panicunsafeslicenilptr1(sys.GetCallerPC())
}
}
mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
if overflow || mem > -uintptr(ptr) {
if ptr == nil {
- panicunsafeslicenilptr1(getcallerpc())
+ panicunsafeslicenilptr1(sys.GetCallerPC())
}
- panicunsafeslicelen1(getcallerpc())
+ panicunsafeslicelen1(sys.GetCallerPC())
}
}
func unsafeslice64(et *_type, ptr unsafe.Pointer, len64 int64) {
len := int(len64)
if int64(len) != len64 {
- panicunsafeslicelen1(getcallerpc())
+ panicunsafeslicelen1(sys.GetCallerPC())
}
unsafeslice(et, ptr, len)
}
func panicunsafeslicelen() {
// This is called only from compiler-generated code, so we can get the
// source of the panic.
- panicunsafeslicelen1(getcallerpc())
+ panicunsafeslicelen1(sys.GetCallerPC())
}
//go:yeswritebarrierrec
func panicunsafeslicenilptr() {
// This is called only from compiler-generated code, so we can get the
// source of the panic.
- panicunsafeslicenilptr1(getcallerpc())
+ panicunsafeslicenilptr1(sys.GetCallerPC())
}
//go:yeswritebarrierrec
--- /dev/null
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+The internal/runtime/sys directory contains tests that specifically need to be
+compiled as if in the internal/runtime/sys package. For error-check tests,
+these require the additional flags -+ and -p=internal/runtime/sys.
--- /dev/null
+// errorcheck -0 -+ -p=internal/runtime/sys -m
+
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+// A function that calls sys.GetCallerPC
+// cannot be inlined, no matter how small it is.
+
+func GetCallerPC() uintptr
+
+func pc() uintptr {
+ return GetCallerPC() + 1
+}
+
+func cpc() uintptr { // ERROR "can inline cpc"
+ return pc() + 2
+}
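Under -m the compiler prints each inlining decision: cpc must be reported as inlinable (the ERROR line), while pc must not be, since its direct call to GetCallerPC disqualifies it; merely calling the non-inlinable pc does not in turn disqualify cpc.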
package runtime
-// A function that calls runtime.getcallerpc or runtime.getcallersp()
+// A function that calls runtime.getcallersp()
// cannot be inlined, no matter how small it is.
-func getcallerpc() uintptr
func getcallersp() uintptr
-func pc() uintptr {
- return getcallerpc() + 1
-}
-
-func cpc() uintptr { // ERROR "can inline cpc"
- return pc() + 2
-}
-
func sp() uintptr {
return getcallersp() + 3
}