m.vdsoSP should be set to the SP of the caller of nanotime1,
instead of the SP of nanotime1 itself, so that it matches
m.vdsoPC, which holds the caller's PC (the LR). Otherwise the
mismatched vdsoPC and vdsoSP would make the stack trace look
recursive (see the sketch after the trailers).
We already do it correctly on AMD64, 386, and RISCV64. This CL
fixes the rest.
Also incorporate CL 352509, skipping a flaky test.
Updates #47324, #50772.
Fixes #50780.
Change-Id: I98b6fcfbe9fc6bdd28b8fe2a1299b7c505371dd4
Reviewed-on: https://go-review.googlesource.com/c/go/+/337590
Trust: Cherry Mui <cherryyz@google.com>
Trust: Josh Bleecher Snyder <josharian@gmail.com>
Reviewed-by: Josh Bleecher Snyder <josharian@gmail.com>
(cherry picked from commit 217507eb035933bac6c990844f0d71d6000fd339)
Reviewed-on: https://go-review.googlesource.com/c/go/+/380716
Run-TryBot: Cherry Mui <cherryyz@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
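
For context, the pairing matters because of how the profiler recovers
a stack when a signal lands in VDSO code the unwinder cannot walk: it
restarts the traceback from the saved pair. The sketch below
approximates the fallback in sigprof (runtime/proc.go) and assumes its
surrounding variables (mp, gp, stk, n); it is an illustration, not a
verbatim quote of the source.

	// Sketch only: normal traceback failed; retry from the state
	// saved on entry to the VDSO wrapper.
	if n == 0 && mp != nil && mp.vdsoSP != 0 {
		// vdsoPC is the wrapper's LR (the caller's return address),
		// so vdsoSP must be the caller's SP for the two to describe
		// the same frame. If vdsoSP were the wrapper's own SP, the
		// unwinder would attribute the caller's PC to the wrapper's
		// frame and then unwind to the caller again, making the
		// caller appear twice, i.e. a recursive-looking trace.
		n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0,
			&stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
	}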
})
}
}
+
+func TestTimeVDSO(t *testing.T) {
+ // Test that time functions have the right stack trace. In particular,
+ // the trace shouldn't be recursive.
+
+ if runtime.GOOS == "android" {
+ // Flaky on Android, issue 48655. VDSO may not be enabled.
+ testenv.SkipFlaky(t, 48655)
+ }
+
+ p := testCPUProfile(t, stackContains, []string{"time.now"}, avoidFunctions(), func(dur time.Duration) {
+ t0 := time.Now()
+ for {
+ t := time.Now()
+ if t.Sub(t0) >= dur {
+ return
+ }
+ }
+ })
+
+ // Check for recursive time.now sample.
+ for _, sample := range p.Sample {
+ var seenNow bool
+ for _, loc := range sample.Location {
+ for _, line := range loc.Line {
+ if line.Function.Name == "time.now" {
+ if seenNow {
+ t.Fatalf("unexpected recursive time.now")
+ }
+ seenNow = true
+ }
+ }
+ }
+ }
+}
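
A note on the idiom in the hunks below: in Go's assembler,
$ret-K(FP) takes the address K bytes below the function's result
slot, which on these ports is exactly the SP the caller had at the
call site; K is the fixed frame overhead (4 on ARM, 8 on ARM64 and
MIPS64, FIXED_FRAME on PPC64). An ARM-flavored illustration, using
the same registers as the first hunk:

	// Illustration only: derive the caller's SP without touching
	// the hardware SP. FP is the assembler's argument-frame
	// pseudo-register, not a machine register.
	MOVW	$ret-4(FP), R2		// R2 = caller's SP at the call site
	MOVW	R2, m_vdsoSP(R5)	// pair it with m_vdsoPC (= LR)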
MOVW R1, 4(R13)
MOVW R2, 8(R13)
+ MOVW $ret-4(FP), R2 // caller's SP
MOVW LR, m_vdsoPC(R5)
- MOVW R13, m_vdsoSP(R5)
+ MOVW R2, m_vdsoSP(R5)
MOVW m_curg(R5), R0
MOVW R1, 4(R13)
MOVW R2, 8(R13)
+ MOVW $ret-4(FP), R2 // caller's SP
MOVW LR, m_vdsoPC(R5)
- MOVW R13, m_vdsoSP(R5)
+ MOVW R2, m_vdsoSP(R5)
MOVW m_curg(R5), R0
MOVD R2, 8(RSP)
MOVD R3, 16(RSP)
+ MOVD $ret-8(FP), R2 // caller's SP
MOVD LR, m_vdsoPC(R21)
- MOVD R20, m_vdsoSP(R21)
+ MOVD R2, m_vdsoSP(R21)
MOVD m_curg(R21), R0
CMP g, R0
MOVD R2, 8(RSP)
MOVD R3, 16(RSP)
+ MOVD $ret-8(FP), R2 // caller's SP
MOVD LR, m_vdsoPC(R21)
- MOVD R20, m_vdsoSP(R21)
+ MOVD R2, m_vdsoSP(R21)
MOVD m_curg(R21), R0
CMP g, R0
MOVV R2, 8(R29)
MOVV R3, 16(R29)
+ MOVV $ret-8(FP), R2 // caller's SP
MOVV R31, m_vdsoPC(R17)
- MOVV R29, m_vdsoSP(R17)
+ MOVV R2, m_vdsoSP(R17)
MOVV m_curg(R17), R4
MOVV g, R5
MOVV R2, 8(R29)
MOVV R3, 16(R29)
+ MOVV $ret-8(FP), R2 // caller's SP
MOVV R31, m_vdsoPC(R17)
- MOVV R29, m_vdsoSP(R17)
+ MOVV R2, m_vdsoSP(R17)
MOVV m_curg(R17), R4
MOVV g, R5
MOVD R5, 40(R1)
MOVD LR, R14
+ MOVD $ret-FIXED_FRAME(FP), R5 // caller's SP
MOVD R14, m_vdsoPC(R21)
- MOVD R15, m_vdsoSP(R21)
+ MOVD R5, m_vdsoSP(R21)
MOVD m_curg(R21), R6
CMP g, R6
MOVD R4, 32(R1)
MOVD R5, 40(R1)
- MOVD LR, R14 // R14 is unchanged by C code
+ MOVD LR, R14 // R14 is unchanged by C code
+ MOVD $ret-FIXED_FRAME(FP), R5 // caller's SP
MOVD R14, m_vdsoPC(R21)
- MOVD R15, m_vdsoSP(R21)
+ MOVD R5, m_vdsoSP(R21)
MOVD m_curg(R21), R6
CMP g, R6