"User" throws are throws due to some invariant broken by the application.
"System" throws are due to some invariant broken by the runtime,
environment, etc (i.e., not the fault of the application).
This CL sends "user" throws through the new fatal. Currently this
function is identical to throw, but it has a different name to clearly
differentiate the throw type in the stack trace and, hopefully, to make
it a bit clearer to users what the error means.
This CL changes a few categories of throw to fatal:
1. Concurrent map read/write.
2. Deadlock detection.
3. Unlock of unlocked sync.Mutex.
4. Inconsistent results from syscall.AllThreadsSyscall.
"Thread exhaustion" and "out of memory" (usually address space full)
throws are additional throws that are arguably the fault of user code,
but I've left off for now because there is no specific invariant that
they have broken to get into these states.
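
For illustration (not part of this CL; a hedged sketch of user code), a
program with racing map writes now reaches the new fatal. The
user-visible message is unchanged:

    // Hypothetical example: two goroutines writing the same map without
    // synchronization. This typically crashes with
    // "fatal error: concurrent map writes", and recover() cannot catch it.
    package main

    func main() {
        m := make(map[int]int)
        go func() {
            for i := 0; ; i++ {
                m[i] = i // unsynchronized writer
            }
        }()
        for i := 0; ; i++ {
            m[i] = i // racing writer on the main goroutine
        }
    }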
For #51485
Change-Id: I713276a6c290fd34a6563e6e9ef378669d74ae32
Reviewed-on: https://go-review.googlesource.com/c/go/+/390420
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Michael Pratt <mpratt@google.com>
details are printed before `throw` using `print` or `println` and the
messages are prefixed with "runtime:".
+For unrecoverable errors where user code is expected to be at fault for the
+failure (such as racing map writes), use `fatal`.
+
For runtime error debugging, it's useful to run with
`GOTRACEBACK=system` or `GOTRACEBACK=crash`.
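
As an illustrative sketch only (the check* helpers below are made-up
names, assumed to live in package runtime where both functions are
visible), the intended split is:

    // checkUserInvariant reports a failure caused by user code.
    func checkUserInvariant(racing bool) {
        if racing {
            fatal("concurrent map writes") // user code is at fault
        }
    }

    // checkRuntimeInvariant reports a failure caused by the runtime itself.
    func checkRuntimeInvariant(ok bool) {
        if !ok {
            throw("runtime invariant violated") // Go itself is at fault
        }
    }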
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
asanread(key, t.key.size)
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(key, uintptr(h.hash0))
done:
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
if t.indirectelem() {
return
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(key, uintptr(h.hash0))
}
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
}
if h.flags&hashWriting != 0 {
- throw("concurrent map iteration and map write")
+ fatal("concurrent map iteration and map write")
}
t := it.t
bucket := it.bucket
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags ^= hashWriting
}
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
return
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
}
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
return
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
}
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
- throw("concurrent map read and map write")
+ fatal("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
key := stringStructOf(&s)
hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
done:
elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
return elem
return
}
if h.flags&hashWriting != 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
key := stringStructOf(&ky)
}
if h.flags&hashWriting == 0 {
- throw("concurrent map writes")
+ fatal("concurrent map writes")
}
h.flags &^= hashWriting
}
if errno != 0 || r1 != args.r1 || r2 != args.r2 {
print("trap:", args.trap, ", a123456=[", args.a1, ",", args.a2, ",", args.a3, ",", args.a4, ",", args.a5, ",", args.a6, "]\n")
print("results: got {r1=", r1, ",r2=", r2, ",errno=", errno, "}, want {r1=", args.r1, ",r2=", args.r2, ",errno=0\n")
- throw("AllThreadsSyscall6 results differ between threads; runtime corrupted")
+ fatal("AllThreadsSyscall6 results differ between threads; runtime corrupted")
}
gp.m.needPerThreadSyscall.Store(0)
throw(s)
}
+//go:linkname sync_fatal sync.fatal
+func sync_fatal(s string) {
+ fatal(s)
+}
+
+// throw triggers a fatal error that dumps a stack trace and exits.
+//
+// throw should be used for runtime-internal fatal errors where Go itself,
+// rather than user code, may be at fault for the failure.
//go:nosplit
func throw(s string) {
// Everything throw does should be recursively nosplit so it
// can be called even when it's unsafe to grow the stack.
systemstack(func() {
print("fatal error: ", s, "\n")
})
- gp := getg()
- if gp.m.throwing == 0 {
- gp.m.throwing = 1
- }
+
+ fatalthrow()
+}
+
+// fatal triggers a fatal error that dumps a stack trace and exits.
+//
+// fatal is equivalent to throw, but is used when user code is expected to be
+// at fault for the failure, such as racing map writes.
+//go:nosplit
+func fatal(s string) {
+ // Everything fatal does should be recursively nosplit so it
+ // can be called even when it's unsafe to grow the stack.
+ systemstack(func() {
+ print("fatal error: ", s, "\n")
+ })
+
fatalthrow()
- *(*int)(nil) = 0 // not reached
}
// runningPanicDefers is non-zero while running deferred functions for panic.
pc := getcallerpc()
sp := getcallersp()
gp := getg()
- // Switch to the system stack to avoid any stack growth, which
- // may make things worse if the runtime is in a bad state.
+
+ if gp.m.throwing == 0 {
+ gp.m.throwing = 1
+ }
+
+ // Switch to the system stack to avoid any stack growth, which may make
+ // things worse if the runtime is in a bad state.
systemstack(func() {
startpanic_m()
if fn == nil {
_g_.m.throwing = -1 // do not dump full stacks
- throw("go of nil func value")
+ fatal("go of nil func value")
}
acquirem() // disable preemption because it can be holding p in a local var
})
if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
- throw("no goroutines (main called runtime.Goexit) - deadlock!")
+ fatal("no goroutines (main called runtime.Goexit) - deadlock!")
}
// Maybe jump time forward for playground.
getg().m.throwing = -1 // do not dump full stacks
unlock(&sched.lock) // unlock so that GODEBUG=scheddetail=1 doesn't hang
- throw("all goroutines are asleep - deadlock!")
+ fatal("all goroutines are asleep - deadlock!")
}
// forcegcperiod is the maximum time in nanoseconds between garbage
"unsafe"
)
-func throw(string) // provided by runtime
+// Provided by runtime via linkname.
+func throw(string)
+func fatal(string)
// A Mutex is a mutual exclusion lock.
// The zero value for a Mutex is an unlocked mutex.
func (m *Mutex) unlockSlow(new int32) {
if (new+mutexLocked)&mutexLocked == 0 {
- throw("sync: unlock of unlocked mutex")
+ fatal("sync: unlock of unlocked mutex")
}
if new&mutexStarving == 0 {
old := new
func (rw *RWMutex) rUnlockSlow(r int32) {
if r+1 == 0 || r+1 == -rwmutexMaxReaders {
race.Enable()
- throw("sync: RUnlock of unlocked RWMutex")
+ fatal("sync: RUnlock of unlocked RWMutex")
}
// A writer is pending.
if atomic.AddInt32(&rw.readerWait, -1) == 0 {
r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
if r >= rwmutexMaxReaders {
race.Enable()
- throw("sync: Unlock of unlocked RWMutex")
+ fatal("sync: Unlock of unlocked RWMutex")
}
// Unblock blocked readers, if any.
for i := 0; i < int(r); i++ {
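
For completeness, an illustrative user program (not part of this CL) that
now reaches fatal through the sync linkname:

    // Hypothetical example: unlocking a Mutex that was never locked crashes
    // with "fatal error: sync: unlock of unlocked mutex"; as with the old
    // throw, recover() cannot catch it.
    package main

    import "sync"

    func main() {
        var mu sync.Mutex
        mu.Unlock() // fatal error: sync: unlock of unlocked mutex
    }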