//go:nosplit
func cgocall_errno(fn, arg unsafe.Pointer) int32 {
if !iscgo && GOOS != "solaris" && GOOS != "windows" {
- gothrow("cgocall unavailable")
+ throw("cgocall unavailable")
}
if fn == nil {
- gothrow("cgocall nil")
+ throw("cgocall nil")
}
if raceenabled {
args.n = uint64(n)
cgocall(_cgo_malloc, unsafe.Pointer(&args))
if args.ret == nil {
- gothrow("C malloc failed")
+ throw("C malloc failed")
}
return args.ret
}
sp := gp.m.g0.sched.sp
switch GOARCH {
default:
- gothrow("cgocallbackg is unimplemented on arch")
+ throw("cgocallbackg is unimplemented on arch")
case "arm":
// On arm, stack frame is two words and there's a saved LR between
// SP and the stack frame and between the stack frame and the arguments.
sched := &mp.g0.sched
switch GOARCH {
default:
- gothrow("unwindm not implemented")
+ throw("unwindm not implemented")
case "386", "amd64":
sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp))
case "arm":
// called from assembly
func badcgocallback() {
- gothrow("misaligned stack in cgocallback")
+ throw("misaligned stack in cgocallback")
}
// called from (incomplete) assembly
func cgounimpl() {
- gothrow("cgo not implemented")
+ throw("cgo not implemented")
}
var racecgosync uint64 // represents possible synchronization in C code
// compiler checks this but be safe.
if elem.size >= 1<<16 {
- gothrow("makechan: invalid channel element type")
+ throw("makechan: invalid channel element type")
}
if hchanSize%maxAlign != 0 || elem.align > maxAlign {
- gothrow("makechan: bad alignment")
+ throw("makechan: bad alignment")
}
if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/uintptr(elem.size)) {
panic("makechan: size out of range")
return false
}
gopark(nil, nil, "chan send (nil chan)")
- gothrow("unreachable")
+ throw("unreachable")
}
if debugChan {
// someone woke us up.
if mysg != gp.waiting {
- gothrow("G waiting list is corrupted!")
+ throw("G waiting list is corrupted!")
}
gp.waiting = nil
if gp.param == nil {
if c.closed == 0 {
- gothrow("chansend: spurious wakeup")
+ throw("chansend: spurious wakeup")
}
panic("send on closed channel")
}
return
}
gopark(nil, nil, "chan receive (nil chan)")
- gothrow("unreachable")
+ throw("unreachable")
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
// someone woke us up
if mysg != gp.waiting {
- gothrow("G waiting list is corrupted!")
+ throw("G waiting list is corrupted!")
}
gp.waiting = nil
if mysg.releasetime > 0 {
lock(&c.lock)
if c.closed == 0 {
- gothrow("chanrecv: spurious wakeup")
+ throw("chanrecv: spurious wakeup")
}
return recvclosed(c, ep)
}
func gogetenv(key string) string {
env := environ()
if env == nil {
- gothrow("getenv before env init")
+ throw("getenv before env init")
}
for _, s := range environ() {
if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key {
func makemap(t *maptype, hint int64) *hmap {
if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
- gothrow("bad hmap size")
+ throw("bad hmap size")
}
if hint < 0 || int64(int32(hint)) != hint {
}
if !ismapkey(t.key) {
- gothrow("runtime.makemap: unsupported map key type")
+ throw("runtime.makemap: unsupported map key type")
}
// check compiler's and reflect's math
if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
- gothrow("key size wrong")
+ throw("key size wrong")
}
if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
- gothrow("value size wrong")
+ throw("value size wrong")
}
// invariants we depend on. We should probably check these at compile time
// somewhere, but for now we'll do it here.
if t.key.align > bucketCnt {
- gothrow("key align too big")
+ throw("key align too big")
}
if t.elem.align > bucketCnt {
- gothrow("value align too big")
+ throw("value align too big")
}
if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
- gothrow("key size not a multiple of key align")
+ throw("key size not a multiple of key align")
}
if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
- gothrow("value size not a multiple of value align")
+ throw("value size not a multiple of value align")
}
if bucketCnt < 8 {
- gothrow("bucketsize too small for proper alignment")
+ throw("bucketsize too small for proper alignment")
}
if dataOffset%uintptr(t.key.align) != 0 {
- gothrow("need padding in bucket (key)")
+ throw("need padding in bucket (key)")
}
if dataOffset%uintptr(t.elem.align) != 0 {
- gothrow("need padding in bucket (value)")
+ throw("need padding in bucket (value)")
}
// find size parameter which will hold the requested # of elements
}
if unsafe.Sizeof(hiter{})/ptrSize != 10 {
- gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
+ throw("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
}
it.t = t
it.h = h
func hashGrow(t *maptype, h *hmap) {
if h.oldbuckets != nil {
- gothrow("evacuation not done in time")
+ throw("evacuation not done in time")
}
oldbuckets := h.buckets
if checkgc {
continue
}
if top < minTopHash {
- gothrow("bad map state")
+ throw("bad map state")
}
k2 := k
if t.indirectkey {
for i := uintptr(0); i < uintptr(bv.n); i += bitsPerPointer {
switch bv.bytedata[i/8] >> (i % 8) & 3 {
default:
- gothrow("unexpected pointer bits")
+ throw("unexpected pointer bits")
case _BitsDead:
// BitsDead has already been processed in makeheapobjbv.
// We should only see it in stack maps, in which case we should continue processing.
switch status {
default:
print("runtime: unexpected G.status ", hex(status), "\n")
- gothrow("dumpgs in STW - bad status")
+ throw("dumpgs in STW - bad status")
case _Gdead:
// ok
case _Grunnable,
size := s.elemsize
n := (s.npages << _PageShift) / size
if n > uintptr(len(freemark)) {
- gothrow("freemark array doesn't have enough entries")
+ throw("freemark array doesn't have enough entries")
}
for l := s.freelist; l.ptr() != nil; l = l.ptr().next {
freemark[(uintptr(l)-p)/size] = true
n := nptr*_BitsPerPointer/8 + 1
p := sysAlloc(n, &memstats.other_sys)
if p == nil {
- gothrow("heapdump: out of memory")
+ throw("heapdump: out of memory")
}
tmpbuf = (*[1 << 30]byte)(p)[:n]
}
func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
if len(inter.mhdr) == 0 {
- gothrow("internal error - misuse of itab")
+ throw("internal error - misuse of itab")
}
// easy case
nextimethod:
}
if locked == 0 {
- gothrow("invalid itab locking")
+ throw("invalid itab locking")
}
m.link = hash[h]
atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
new := lfstackPack(node, node.pushcnt)
if node1, _ := lfstackUnpack(new); node1 != node {
println("runtime: lfstackpush invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n")
- gothrow("lfstackpush")
+ throw("lfstackpush")
}
for {
old := atomicload64(head)
gp := getg()
if gp.m.locks < 0 {
- gothrow("runtime·lock: lock count")
+ throw("runtime·lock: lock count")
}
gp.m.locks++
func unlock(l *mutex) {
v := xchg(key32(&l.key), mutex_unlocked)
if v == mutex_unlocked {
- gothrow("unlock of unlocked lock")
+ throw("unlock of unlocked lock")
}
if v == mutex_sleeping {
futexwakeup(key32(&l.key), 1)
gp := getg()
gp.m.locks--
if gp.m.locks < 0 {
- gothrow("runtime·unlock: lock count")
+ throw("runtime·unlock: lock count")
}
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
gp.stackguard0 = stackPreempt
old := xchg(key32(&n.key), 1)
if old != 0 {
print("notewakeup - double wakeup (", old, ")\n")
- gothrow("notewakeup - double wakeup")
+ throw("notewakeup - double wakeup")
}
futexwakeup(key32(&n.key), 1)
}
func notesleep(n *note) {
gp := getg()
if gp != gp.m.g0 {
- gothrow("notesleep not on g0")
+ throw("notesleep not on g0")
}
for atomicload(key32(&n.key)) == 0 {
gp.m.blocked = true
func notetsleep(n *note, ns int64) bool {
gp := getg()
if gp != gp.m.g0 && gp.m.gcing == 0 {
- gothrow("notetsleep not on g0")
+ throw("notetsleep not on g0")
}
return notetsleep_internal(n, ns)
func notetsleepg(n *note, ns int64) bool {
gp := getg()
if gp == gp.m.g0 {
- gothrow("notetsleepg on g0")
+ throw("notetsleepg on g0")
}
entersyscallblock(0)
func lock(l *mutex) {
gp := getg()
if gp.m.locks < 0 {
- gothrow("runtime·lock: lock count")
+ throw("runtime·lock: lock count")
}
gp.m.locks++
}
gp.m.locks--
if gp.m.locks < 0 {
- gothrow("runtime·unlock: lock count")
+ throw("runtime·unlock: lock count")
}
if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
gp.stackguard0 = stackPreempt
// Nothing was waiting. Done.
case v == locked:
// Two notewakeups! Not allowed.
- gothrow("notewakeup - double wakeup")
+ throw("notewakeup - double wakeup")
default:
// Must be the waiting m. Wake it up.
semawakeup((*m)(unsafe.Pointer(v)))
func notesleep(n *note) {
gp := getg()
if gp != gp.m.g0 {
- gothrow("notesleep not on g0")
+ throw("notesleep not on g0")
}
if gp.m.waitsema == 0 {
gp.m.waitsema = semacreate()
if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
// Must be locked (got wakeup).
if n.key != locked {
- gothrow("notesleep - waitm out of sync")
+ throw("notesleep - waitm out of sync")
}
return
}
if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
// Must be locked (got wakeup).
if n.key != locked {
- gothrow("notetsleep - waitm out of sync")
+ throw("notetsleep - waitm out of sync")
}
return true
}
// Grab it to avoid getting out of sync.
gp.m.blocked = true
if semasleep(-1) < 0 {
- gothrow("runtime: unable to acquire - semaphore out of sync")
+ throw("runtime: unable to acquire - semaphore out of sync")
}
gp.m.blocked = false
return true
default:
- gothrow("runtime: unexpected waitm - semaphore out of sync")
+ throw("runtime: unexpected waitm - semaphore out of sync")
}
}
}
func notetsleep(n *note, ns int64) bool {
gp := getg()
if gp != gp.m.g0 && gp.m.gcing == 0 {
- gothrow("notetsleep not on g0")
+ throw("notetsleep not on g0")
}
if gp.m.waitsema == 0 {
gp.m.waitsema = semacreate()
func notetsleepg(n *note, ns int64) bool {
gp := getg()
if gp == gp.m.g0 {
- gothrow("notetsleepg on g0")
+ throw("notetsleepg on g0")
}
if gp.m.waitsema == 0 {
gp.m.waitsema = semacreate()
size0 := size
if flags&flagNoScan == 0 && typ == nil {
- gothrow("malloc missing type")
+ throw("malloc missing type")
}
// This function must be atomic wrt GC, but for performance reasons
if debugMalloc {
mp := acquirem()
if mp.mallocing != 0 {
- gothrow("malloc deadlock")
+ throw("malloc deadlock")
}
mp.mallocing = 1
if mp.curg != nil {
if debugMalloc {
mp := acquirem()
if mp.mallocing == 0 {
- gothrow("bad malloc")
+ throw("bad malloc")
}
mp.mallocing = 0
if mp.curg != nil {
shift := (off % wordsPerBitmapByte) * gcBits
if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary {
println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask))
- gothrow("bad bits in markallocated")
+ throw("bad bits in markallocated")
}
var ti, te uintptr
masksize++ // unroll flag in the beginning
if masksize > maxGCMask && typ.gc[1] != 0 {
// write barriers have not been updated to deal with this case yet.
- gothrow("maxGCMask too small for now")
+ throw("maxGCMask too small for now")
// If the mask is too large, unroll the program directly
// into the GC bitmap. It's 7 times slower than copying
// from the pre-unrolled mask, but saves 1/16 of type size
if debugMalloc {
mp := acquirem()
if mp.mallocing == 0 {
- gothrow("bad malloc")
+ throw("bad malloc")
}
mp.mallocing = 0
if mp.curg != nil {
masksize++ // unroll flag in the beginning
if masksize > maxGCMask && typ.gc[1] != 0 {
// write barriers have not been updated to deal with this case yet.
- gothrow("maxGCMask too small for now")
+ throw("maxGCMask too small for now")
}
ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
// Check whether the program is already unrolled
}
if mp != acquirem() {
- gothrow("gogc: rescheduled")
+ throw("gogc: rescheduled")
}
clearpools()
e := (*eface)(unsafe.Pointer(&obj))
etyp := e._type
if etyp == nil {
- gothrow("runtime.SetFinalizer: first argument is nil")
+ throw("runtime.SetFinalizer: first argument is nil")
}
if etyp.kind&kindMask != kindPtr {
- gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
+ throw("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
}
ot := (*ptrtype)(unsafe.Pointer(etyp))
if ot.elem == nil {
- gothrow("nil elem type!")
+ throw("nil elem type!")
}
// find the containing object
uintptr(unsafe.Pointer(&noptrbss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) {
return
}
- gothrow("runtime.SetFinalizer: pointer not in allocated block")
+ throw("runtime.SetFinalizer: pointer not in allocated block")
}
if e.data != base {
// As an implementation detail we allow to set finalizers for an inner byte
// of an object if it could come from tiny alloc (see mallocgc for details).
if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
- gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block")
+ throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
}
}
}
if ftyp.kind&kindMask != kindFunc {
- gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
+ throw("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
}
ft := (*functype)(unsafe.Pointer(ftyp))
ins := *(*[]*_type)(unsafe.Pointer(&ft.in))
if ft.dotdotdot || len(ins) != 1 {
- gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
+ throw("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
}
fint := ins[0]
switch {
goto okarg
}
}
- gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
+ throw("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
okarg:
// compute size needed for return parameters
nret := uintptr(0)
systemstack(func() {
if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
- gothrow("runtime.SetFinalizer: finalizer already set")
+ throw("runtime.SetFinalizer: finalizer already set")
}
})
}
}
if f.fint == nil {
- gothrow("missing type in runfinq")
+ throw("missing type in runfinq")
}
switch f.fint.kind & kindMask {
case kindPtr:
*(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame))
}
default:
- gothrow("bad kind in runfinq")
+ throw("bad kind in runfinq")
}
reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))
if align != 0 {
if align&(align-1) != 0 {
- gothrow("persistentalloc: align is not a power of 2")
+ throw("persistentalloc: align is not a power of 2")
}
if align > _PageSize {
- gothrow("persistentalloc: align is too large")
+ throw("persistentalloc: align is too large")
}
} else {
align = 8
persistent.pos = sysAlloc(chunk, &memstats.other_sys)
if persistent.pos == nil {
unlock(&persistent.lock)
- gothrow("runtime: cannot allocate memory")
+ throw("runtime: cannot allocate memory")
}
persistent.end = add(persistent.pos, chunk)
}
initSizes()
if class_to_size[_TinySizeClass] != _TinySize {
- gothrow("bad TinySizeClass")
+ throw("bad TinySizeClass")
}
var p, bitmapSize, spansSize, pSize, limit uintptr
}
}
if p == 0 {
- gothrow("runtime: cannot reserve arena virtual address space")
+ throw("runtime: cannot reserve arena virtual address space")
}
}
if mheap_.arena_start&(_PageSize-1) != 0 {
println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
- gothrow("misrounded allocation in mallocinit")
+ throw("misrounded allocation in mallocinit")
}
// Initialize the rest of the allocator.
}
if uintptr(p)&(_PageSize-1) != 0 {
- gothrow("misrounded allocation in MHeap_SysAlloc")
+ throw("misrounded allocation in MHeap_SysAlloc")
}
return (unsafe.Pointer)(p)
}
}
if uintptr(p)&(_PageSize-1) != 0 {
- gothrow("misrounded allocation in MHeap_SysAlloc")
+ throw("misrounded allocation in MHeap_SysAlloc")
}
return (unsafe.Pointer)(p)
}
// print("largeAlloc size=", size, "\n")
if size+_PageSize < size {
- gothrow("out of memory")
+ throw("out of memory")
}
npages := size >> _PageShift
if size&_PageMask != 0 {
}
s := mHeap_Alloc(&mheap_, npages, 0, true, flag&_FlagNoZero == 0)
if s == nil {
- gothrow("out of memory")
+ throw("out of memory")
}
s.limit = uintptr(s.start)<<_PageShift + size
v := unsafe.Pointer(uintptr(s.start) << _PageShift)
// Return the current cached span to the central lists.
s := c.alloc[sizeclass]
if s.freelist.ptr() != nil {
- gothrow("refill on a nonempty span")
+ throw("refill on a nonempty span")
}
if s != &emptymspan {
s.incache = false
// Get a new cached span from the central lists.
s = mCentral_CacheSpan(&mheap_.central[sizeclass].mcentral)
if s == nil {
- gothrow("out of memory")
+ throw("out of memory")
}
if s.freelist.ptr() == nil {
println(s.ref, (s.npages<<_PageShift)/s.elemsize)
- gothrow("empty span")
+ throw("empty span")
}
c.alloc[sizeclass] = s
_g_.m.locks--
cap := int32((s.npages << _PageShift) / s.elemsize)
n := cap - int32(s.ref)
if n == 0 {
- gothrow("empty span")
+ throw("empty span")
}
if s.freelist.ptr() == nil {
- gothrow("freelist empty")
+ throw("freelist empty")
}
s.incache = true
return s
s.incache = false
if s.ref == 0 {
- gothrow("uncaching full span")
+ throw("uncaching full span")
}
cap := int32((s.npages << _PageShift) / s.elemsize)
// caller takes care of it.
func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
if s.incache {
- gothrow("freespan into cached span")
+ throw("freespan into cached span")
}
// Add the objects back to s's free list.
// preserve is set only when called from MCentral_CacheSpan above,
// the span must be in the empty list.
if s.next == nil {
- gothrow("can't preserve unlinked span")
+ throw("can't preserve unlinked span")
}
atomicstore(&s.sweepgen, mheap_.sweepgen)
return false
tail = gclinkptr(p)
}
if s.freelist.ptr() != nil {
- gothrow("freelist not empty")
+ throw("freelist not empty")
}
tail.ptr().next = 0
s.freelist = head
var memStats MemStats
if sizeof_C_MStats != unsafe.Sizeof(memStats) {
println(sizeof_C_MStats, unsafe.Sizeof(memStats))
- gothrow("MStats vs MemStatsType size mismatch")
+ throw("MStats vs MemStatsType size mismatch")
}
}
}
p := mmap(v, n, _PROT_READ|_PROT_WRITE, flags, -1, 0)
if uintptr(p) == _ENOMEM {
- gothrow("runtime: out of memory")
+ throw("runtime: out of memory")
}
if p != v {
print("runtime: address space conflict: map(", v, ") = ", p, "\n")
- gothrow("runtime: address space conflict")
+ throw("runtime: address space conflict")
}
return
}
p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if uintptr(p) == _ENOMEM {
- gothrow("runtime: out of memory")
+ throw("runtime: out of memory")
}
if p != v {
- gothrow("runtime: cannot map pages in arena address space")
+ throw("runtime: cannot map pages in arena address space")
}
}
xadd64(stat, int64(n))
p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
if uintptr(p) == _ENOMEM {
- gothrow("runtime: out of memory")
+ throw("runtime: out of memory")
}
if p != v {
- gothrow("runtime: cannot map pages in arena address space")
+ throw("runtime: cannot map pages in arena address space")
}
}
if !reserved {
p := mmap_fixed(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(p) == _ENOMEM {
- gothrow("runtime: out of memory")
+ throw("runtime: out of memory")
}
if p != v {
print("runtime: address space conflict: map(", v, ") = ", p, "\n")
- gothrow("runtime: address space conflict")
+ throw("runtime: address space conflict")
}
return
}
p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if uintptr(p) == _ENOMEM {
- gothrow("runtime: out of memory")
+ throw("runtime: out of memory")
}
if p != v {
- gothrow("runtime: cannot map pages in arena address space")
+ throw("runtime: cannot map pages in arena address space")
}
}
small &^= 4096 - 1
}
if small < 4096 {
- gothrow("runtime: failed to decommit pages")
+ throw("runtime: failed to decommit pages")
}
v = add(v, small)
n -= small
func sysUsed(v unsafe.Pointer, n uintptr) {
r := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
if r != uintptr(v) {
- gothrow("runtime: failed to commit pages")
+ throw("runtime: failed to commit pages")
}
// Commit failed. See SysUnused.
small &^= 4096 - 1
}
if small < 4096 {
- gothrow("runtime: failed to decommit pages")
+ throw("runtime: failed to decommit pages")
}
v = add(v, small)
n -= small
xadd64(stat, -int64(n))
r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
if r == 0 {
- gothrow("runtime: failed to release pages")
+ throw("runtime: failed to release pages")
}
}
xadd64(stat, int64(n))
p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
if p != uintptr(v) {
- gothrow("runtime: cannot map pages in arena address space")
+ throw("runtime: cannot map pages in arena address space")
}
}
func fixAlloc_Alloc(f *fixalloc) unsafe.Pointer {
if f.size == 0 {
print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n")
- gothrow("runtime: internal error")
+ throw("runtime: internal error")
}
if f.list != nil {
print(" s.start=", hex(s.start<<_PageShift), " s.limit=", hex(s.limit), " s.state=", s.state, "\n")
}
printunlock()
- gothrow("objectstart: bad pointer in unexpected span")
+ throw("objectstart: bad pointer in unexpected span")
}
return 0
}
}
if p == obj {
print("runtime: failed to find block beginning for ", hex(p), " s=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), "\n")
- gothrow("failed to find block beginning")
+ throw("failed to find block beginning")
}
obj = p
}
//go:nowritebarrier
func ismarked(mbits *markbits) bool {
if mbits.bits&bitBoundary != bitBoundary {
- gothrow("ismarked: bits should have boundary bit set")
+ throw("ismarked: bits should have boundary bit set")
}
return mbits.bits&bitMarked == bitMarked
}
//go:nowritebarrier
func ischeckmarked(mbits *markbits) bool {
if mbits.bits&bitBoundary != bitBoundary {
- gothrow("ischeckmarked: bits should have boundary bit set")
+ throw("ischeckmarked: bits should have boundary bit set")
}
return mbits.tbits == _BitsScalarMarked || mbits.tbits == _BitsPointerMarked
}
//go:nowritebarrier
func gcmarknewobject_m(obj uintptr) {
if gcphase != _GCmarktermination {
- gothrow("marking new object while not in mark termination phase")
+ throw("marking new object while not in mark termination phase")
}
if checkmark { // The world should be stopped so this should not happen.
- gothrow("gcmarknewobject called while doing checkmark")
+ throw("gcmarknewobject called while doing checkmark")
}
var mbits markbits
func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
// obj should be start of allocation, and so must be at least pointer-aligned.
if obj&(ptrSize-1) != 0 {
- gothrow("greyobject: obj not pointer-aligned")
+ throw("greyobject: obj not pointer-aligned")
}
if checkmark {
print(" *(obj+", i*ptrSize, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + uintptr(i)*ptrSize))), "\n")
}
}
- gothrow("checkmark found unmarked object")
+ throw("checkmark found unmarked object")
}
if ischeckmarked(mbits) {
return wbuf
docheckmark(mbits)
if !ischeckmarked(mbits) {
print("mbits xbits=", hex(mbits.xbits), " bits=", hex(mbits.bits), " tbits=", hex(mbits.tbits), " shift=", mbits.shift, "\n")
- gothrow("docheckmark and ischeckmarked disagree")
+ throw("docheckmark and ischeckmarked disagree")
}
} else {
// If marked we have nothing to do.
// Consult GC bitmap.
bits = uintptr(*(*byte)(ptrbitp))
if wordsPerBitmapByte != 2 {
- gothrow("alg doesn't work for wordsPerBitmapByte != 2")
+ throw("alg doesn't work for wordsPerBitmapByte != 2")
}
j := (uintptr(b) + i) / ptrSize & 1 // j indicates upper nibble or lower nibble
bits >>= gcBits * j
if bits&_BitsPointer != _BitsPointer {
print("gc checkmark=", checkmark, " b=", hex(b), " ptrmask=", ptrmask, " mbits.bitp=", mbits.bitp, " mbits.xbits=", hex(mbits.xbits), " bits=", hex(bits), "\n")
- gothrow("unexpected garbage collection bits")
+ throw("unexpected garbage collection bits")
}
obj := *(*uintptr)(unsafe.Pointer(b + i))
if gcphase == _GCscan {
if inheap(b) && ptrmask == nil {
// b is in heap, we are in GCscan so there should be a ptrmask.
- gothrow("scanblock: In GCscan phase and inheap is true.")
+ throw("scanblock: In GCscan phase and inheap is true.")
}
// GCscan only goes one level deep since mark wb not turned on.
putpartial(wbuf)
}
}
if gcphase == _GCscan {
- gothrow("scanblock: In GCscan phase but no b passed in.")
+ throw("scanblock: In GCscan phase but no b passed in.")
}
keepworking := b == 0
}
if wbuf.nobj <= 0 {
- gothrow("runtime:scanblock getfull returns empty buffer")
+ throw("runtime:scanblock getfull returns empty buffer")
}
}
if !checkmark && s.sweepgen != sg {
// sweepgen was updated (+2) during non-checkmark GC pass
print("sweep ", s.sweepgen, " ", sg, "\n")
- gothrow("gc: unswept span")
+ throw("gc: unswept span")
}
for sp := s.specials; sp != nil; sp = sp.next {
if sp.kind != _KindSpecialFinalizer {
default:
// the rest is scanning goroutine stacks
if uintptr(i-_RootCount) >= allglen {
- gothrow("markroot: bad index")
+ throw("markroot: bad index")
}
gp := allgs[i-_RootCount]
if b != nil && b.nobj != 0 {
_g_ := getg()
print("m", _g_.m.id, ": getempty: popped b=", b, " with non-zero b.nobj=", b.nobj, "\n")
- gothrow("getempty: workbuffer not empty, b->nobj not 0")
+ throw("getempty: workbuffer not empty, b->nobj not 0")
}
if b == nil {
b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
//go:nowritebarrier
func putempty(b *workbuf) {
if b.nobj != 0 {
- gothrow("putempty: b->nobj not 0")
+ throw("putempty: b->nobj not 0")
}
lfstackpush(&work.empty, &b.node)
}
//go:nowritebarrier
func putfull(b *workbuf) {
if b.nobj <= 0 {
- gothrow("putfull: b->nobj <= 0")
+ throw("putfull: b->nobj <= 0")
}
lfstackpush(&work.full, &b.node)
}
lfstackpush(&work.full, &b.node)
} else {
print("b=", b, " b.nobj=", b.nobj, " len(b.obj)=", len(b.obj), "\n")
- gothrow("putpartial: bad Workbuf b.nobj")
+ throw("putpartial: bad Workbuf b.nobj")
}
}
//go:nowritebarrier
func stackmapdata(stkmap *stackmap, n int32) bitvector {
if n < 0 || n >= stkmap.n {
- gothrow("stackmapdata: index out of range")
+ throw("stackmapdata: index out of range")
}
return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+31)/32*4))))}
}
stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
if stkmap == nil || stkmap.n <= 0 {
print("runtime: frame ", gofuncname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
- gothrow("missing stackmap")
+ throw("missing stackmap")
}
// Locals bitmap information, scan just the pointers in locals.
if pcdata < 0 || pcdata >= stkmap.n {
// don't know where we are
print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
- gothrow("scanframe: bad symbol table")
+ throw("scanframe: bad symbol table")
}
bv := stackmapdata(stkmap, pcdata)
size = (uintptr(bv.n) * ptrSize) / bitsPerPointer
stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
if stkmap == nil || stkmap.n <= 0 {
print("runtime: frame ", gofuncname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
- gothrow("missing stackmap")
+ throw("missing stackmap")
}
if pcdata < 0 || pcdata >= stkmap.n {
// don't know where we are
print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
- gothrow("scanframe: bad symbol table")
+ throw("scanframe: bad symbol table")
}
bv = stackmapdata(stkmap, pcdata)
}
if readgstatus(gp)&_Gscan == 0 {
print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
- gothrow("scanstack - bad status")
+ throw("scanstack - bad status")
}
switch readgstatus(gp) &^ _Gscan {
default:
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- gothrow("mark - bad status")
+ throw("mark - bad status")
case _Gdead:
return
case _Grunning:
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
- gothrow("scanstack: goroutine not stopped")
+ throw("scanstack: goroutine not stopped")
case _Grunnable, _Gsyscall, _Gwaiting:
// ok
}
if gp == getg() {
- gothrow("can't scan our own stack")
+ throw("can't scan our own stack")
}
mp := gp.m
if mp != nil && mp.helpgc != 0 {
- gothrow("can't scan gchelper stack")
+ throw("can't scan gchelper stack")
}
gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
//go:nowritebarrier
func shade(b uintptr) {
if !inheap(b) {
- gothrow("shade: passed an address not in the heap")
+ throw("shade: passed an address not in the heap")
}
wbuf := getpartialorempty()
func gcmarkwb_m(slot *uintptr, ptr uintptr) {
switch gcphase {
default:
- gothrow("gcphasework in bad gcphase")
+ throw("gcphasework in bad gcphase")
case _GCoff, _GCquiesce, _GCstw, _GCsweep, _GCscan:
// ok
func gcphasework(gp *g) {
switch gcphase {
default:
- gothrow("gcphasework in bad gcphase")
+ throw("gcphasework in bad gcphase")
case _GCoff, _GCquiesce, _GCstw, _GCsweep:
// No work.
case _GCscan:
unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize ||
bitsPerPointer != 2) {
- gothrow("finalizer out of sync")
+ throw("finalizer out of sync")
}
for i := range finptrmask {
finptrmask[i] = finalizer1[i%len(finalizer1)]
// (if GC is triggered on another goroutine).
_g_ := getg()
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
- gothrow("MSpan_EnsureSwept: m is not locked")
+ throw("MSpan_EnsureSwept: m is not locked")
}
sg := mheap_.sweepgen
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
if checkmark {
- gothrow("MSpan_Sweep: checkmark only runs in STW and after the sweep")
+ throw("MSpan_Sweep: checkmark only runs in STW and after the sweep")
}
// It's critical that we enter this function with preemption disabled,
// GC must not start while we are in the middle of this function.
_g_ := getg()
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
- gothrow("MSpan_Sweep: m is not locked")
+ throw("MSpan_Sweep: m is not locked")
}
sweepgen := mheap_.sweepgen
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
- gothrow("MSpan_Sweep: bad span state")
+ throw("MSpan_Sweep: bad span state")
}
arena_start := mheap_.arena_start
cl := s.sizeclass
if cl == 0 {
// Free large span.
if preserve {
- gothrow("can't preserve large span")
+ throw("can't preserve large span")
}
unmarkspan(p, s.npages<<_PageShift)
s.needzero = 1
// check for potential races.
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
- gothrow("MSpan_Sweep: bad span state after sweep")
+ throw("MSpan_Sweep: bad span state after sweep")
}
atomicstore(&s.sweepgen, sweepgen)
}
func gcinit() {
if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
- gothrow("runtime: size of Workbuf is suboptimal")
+ throw("runtime: size of Workbuf is suboptimal")
}
work.markfor = parforalloc(_MaxGcproc)
func clearcheckmarkbitsspan(s *mspan) {
if s.state != _MSpanInUse {
print("runtime:clearcheckmarkbitsspan: state=", s.state, "\n")
- gothrow("clearcheckmarkbitsspan: bad span state")
+ throw("clearcheckmarkbitsspan: bad span state")
}
arena_start := mheap_.arena_start
// updating top and bottom nibbles, all boundaries
for i := int32(0); i < n/2; i, bitp = i+1, addb(bitp, uintptrMask&-1) {
if *bitp&bitBoundary == 0 {
- gothrow("missing bitBoundary")
+ throw("missing bitBoundary")
}
b := (*bitp & bitPtrMask) >> 2
if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
}
if (*bitp>>gcBits)&bitBoundary == 0 {
- gothrow("missing bitBoundary")
+ throw("missing bitBoundary")
}
b = ((*bitp >> gcBits) & bitPtrMask) >> 2
if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
// updating bottom nibble for first word of each object
for i := int32(0); i < n; i, bitp = i+1, addb(bitp, -step) {
if *bitp&bitBoundary == 0 {
- gothrow("missing bitBoundary")
+ throw("missing bitBoundary")
}
b := (*bitp & bitPtrMask) >> 2
}
if checkmark {
- gothrow("gccheckmark_m, entered with checkmark already true")
+ throw("gccheckmark_m, entered with checkmark already true")
}
checkmark = true
for i := uintptr(0); i < local_allglen; i++ {
gp := allgs[i]
if !gp.gcworkdone {
- gothrow("scan missed a g")
+ throw("scan missed a g")
}
}
unlock(&allglock)
scanblock(0, 0, nil)
if work.full != 0 {
- gothrow("work.full != 0")
+ throw("work.full != 0")
}
if work.partial != 0 {
- gothrow("work.partial != 0")
+ throw("work.partial != 0")
}
gcphase = oldphase
updatememstats(&stats)
if heap1 != memstats.heap_alloc {
print("runtime: mstats skew: heap=", heap1, "/", memstats.heap_alloc, "\n")
- gothrow("mstats skew")
+ throw("mstats skew")
}
obj := memstats.nmalloc - memstats.nfree
p := *pauses
// Calling code in runtime/debug should make the slice large enough.
if cap(p) < len(memstats.pause_ns)+3 {
- gothrow("runtime: short slice passed to readGCStats")
+ throw("runtime: short slice passed to readGCStats")
}
// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
_g_ := getg()
if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
- gothrow("gchelperstart: bad m->helpgc")
+ throw("gchelperstart: bad m->helpgc")
}
if _g_ != _g_.m.g0 {
- gothrow("gchelper not running on g0 stack")
+ throw("gchelper not running on g0 stack")
}
}
for {
switch *prog {
default:
- gothrow("unrollgcprog: unknown instruction")
+ throw("unrollgcprog: unknown instruction")
case insData:
prog = addb(prog, 1)
prog1 = unrollgcprog1(&mask[0], prog, &pos, inplace, sparse)
}
if *prog1 != insArrayEnd {
- gothrow("unrollgcprog: array does not end with insArrayEnd")
+ throw("unrollgcprog: array does not end with insArrayEnd")
}
prog = (*byte)(add(unsafe.Pointer(prog1), 1))
prog = unrollgcprog1(&mask[0], prog, &pos, false, false)
if pos != size/ptrSize*bitsPerPointer {
print("unrollglobgcprog: bad program size, got ", pos, ", expect ", size/ptrSize*bitsPerPointer, "\n")
- gothrow("unrollglobgcprog: bad program size")
+ throw("unrollglobgcprog: bad program size")
}
if *prog != insEnd {
- gothrow("unrollglobgcprog: program does not end with insEnd")
+ throw("unrollglobgcprog: program does not end with insEnd")
}
if mask[masksize] != 0xa1 {
- gothrow("unrollglobgcprog: overflow")
+ throw("unrollglobgcprog: overflow")
}
return bitvector{int32(masksize * 8), &mask[0]}
}
prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
prog = unrollgcprog1(mask, prog, &pos, false, true)
if *prog != insEnd {
- gothrow("unrollgcprog: program does not end with insEnd")
+ throw("unrollgcprog: program does not end with insEnd")
}
if typ.size/ptrSize%2 != 0 {
// repeat the program
//go:nowritebarrier
func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
if uintptr(v)+size*n > mheap_.arena_used || uintptr(v) < mheap_.arena_start {
- gothrow("markspan: bad pointer")
+ throw("markspan: bad pointer")
}
// Find bits of the beginning of the span.
off := (uintptr(v) - uintptr(mheap_.arena_start)) / ptrSize
if off%wordsPerBitmapByte != 0 {
- gothrow("markspan: unaligned length")
+ throw("markspan: unaligned length")
}
b := mheap_.arena_start - off/wordsPerBitmapByte - 1
// Possible only on 64-bits (minimal size class is 8 bytes).
// Set memory to 0x11.
if (bitBoundary|bitsDead)<<gcBits|bitBoundary|bitsDead != 0x11 {
- gothrow("markspan: bad bits")
+ throw("markspan: bad bits")
}
if n%(wordsPerBitmapByte*ptrSize) != 0 {
- gothrow("markspan: unaligned length")
+ throw("markspan: unaligned length")
}
b = b - n/wordsPerBitmapByte + 1 // find first byte
if b%ptrSize != 0 {
- gothrow("markspan: unaligned pointer")
+ throw("markspan: unaligned pointer")
}
for i := uintptr(0); i < n; i, b = i+wordsPerBitmapByte*ptrSize, b+ptrSize {
*(*uintptr)(unsafe.Pointer(b)) = uintptrMask & 0x1111111111111111 // bitBoundary | bitsDead, repeated
//go:nowritebarrier
func unmarkspan(v, n uintptr) {
if v+n > mheap_.arena_used || v < mheap_.arena_start {
- gothrow("markspan: bad pointer")
+ throw("markspan: bad pointer")
}
off := (v - mheap_.arena_start) / ptrSize // word offset
if off%(ptrSize*wordsPerBitmapByte) != 0 {
- gothrow("markspan: unaligned pointer")
+ throw("markspan: unaligned pointer")
}
b := mheap_.arena_start - off/wordsPerBitmapByte - 1
n /= ptrSize
if n%(ptrSize*wordsPerBitmapByte) != 0 {
- gothrow("unmarkspan: unaligned length")
+ throw("unmarkspan: unaligned length")
}
// Okay to use non-atomic ops here, because we control
}
if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
- systemstack(func() { gothrow("bad pointer in write barrier") })
+ systemstack(func() { throw("bad pointer in write barrier") })
}
mp := acquirem()
sp := (*slice)(unsafe.Pointer(&new))
sp.array = (*byte)(sysAlloc(uintptr(n)*ptrSize, &memstats.other_sys))
if sp.array == nil {
- gothrow("runtime: cannot allocate memory")
+ throw("runtime: cannot allocate memory")
}
sp.len = uint(len(h_allspans))
sp.cap = uint(n)
func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
_g_ := getg()
if _g_ != _g_.m.g0 {
- gothrow("_mheap_alloc not on g0 stack")
+ throw("_mheap_alloc not on g0 stack")
}
lock(&h.lock)
func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
_g_ := getg()
if _g_ != _g_.m.g0 {
- gothrow("mheap_allocstack not on g0 stack")
+ throw("mheap_allocstack not on g0 stack")
}
lock(&h.lock)
s := mHeap_AllocSpanLocked(h, npage)
HaveSpan:
// Mark span in use.
if s.state != _MSpanFree {
- gothrow("MHeap_AllocLocked - MSpan not free")
+ throw("MHeap_AllocLocked - MSpan not free")
}
if s.npages < npage {
- gothrow("MHeap_AllocLocked - bad npages")
+ throw("MHeap_AllocLocked - bad npages")
}
mSpanList_Remove(s)
if s.next != nil || s.prev != nil {
- gothrow("still in list")
+ throw("still in list")
}
if s.npreleased > 0 {
sysUsed((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
//println("spanalloc", hex(s.start<<_PageShift))
if s.next != nil || s.prev != nil {
- gothrow("still in list")
+ throw("still in list")
}
return s
}
func mHeap_FreeStack(h *mheap, s *mspan) {
_g_ := getg()
if _g_ != _g_.m.g0 {
- gothrow("mheap_freestack not on g0 stack")
+ throw("mheap_freestack not on g0 stack")
}
s.needzero = 1
lock(&h.lock)
switch s.state {
case _MSpanStack:
if s.ref != 0 {
- gothrow("MHeap_FreeSpanLocked - invalid stack free")
+ throw("MHeap_FreeSpanLocked - invalid stack free")
}
case _MSpanInUse:
if s.ref != 0 || s.sweepgen != h.sweepgen {
print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
- gothrow("MHeap_FreeSpanLocked - invalid free")
+ throw("MHeap_FreeSpanLocked - invalid free")
}
default:
- gothrow("MHeap_FreeSpanLocked - invalid span state")
+ throw("MHeap_FreeSpanLocked - invalid span state")
}
if acctinuse {
func mSpanList_Insert(list *mspan, span *mspan) {
if span.next != nil || span.prev != nil {
println("failed MSpanList_Insert", span, span.next, span.prev)
- gothrow("MSpanList_Insert")
+ throw("MSpanList_Insert")
}
span.next = list.next
span.prev = list
func mSpanList_InsertBack(list *mspan, span *mspan) {
if span.next != nil || span.prev != nil {
println("failed MSpanList_InsertBack", span, span.next, span.prev)
- gothrow("MSpanList_InsertBack")
+ throw("MSpanList_InsertBack")
}
span.next = list
span.prev = list.prev
func addspecial(p unsafe.Pointer, s *special) bool {
span := mHeap_LookupMaybe(&mheap_, p)
if span == nil {
- gothrow("addspecial on invalid pointer")
+ throw("addspecial on invalid pointer")
}
// Ensure that the span is swept.
func removespecial(p unsafe.Pointer, kind uint8) *special {
span := mHeap_LookupMaybe(&mheap_, p)
if span == nil {
- gothrow("removespecial on invalid pointer")
+ throw("removespecial on invalid pointer")
}
// Ensure that the span is swept.
s.special.kind = _KindSpecialProfile
s.b = b
if !addspecial(p, &s.special) {
- gothrow("setprofilebucket: profile already set")
+ throw("setprofilebucket: profile already set")
}
}
unlock(&mheap_.speciallock)
return true
default:
- gothrow("bad special kind")
+ throw("bad special kind")
panic("not reached")
}
}
size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
switch typ {
default:
- gothrow("invalid profile bucket type")
+ throw("invalid profile bucket type")
case memProfile:
size += unsafe.Sizeof(memRecord{})
case blockProfile:
// mp returns the memRecord associated with the memProfile bucket b.
func (b *bucket) mp() *memRecord {
if b.typ != memProfile {
- gothrow("bad use of bucket.mp")
+ throw("bad use of bucket.mp")
}
data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
return (*memRecord)(data)
// bp returns the blockRecord associated with the blockProfile bucket b.
func (b *bucket) bp() *blockRecord {
if b.typ != blockProfile {
- gothrow("bad use of bucket.bp")
+ throw("bad use of bucket.bp")
}
data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
return (*blockRecord)(data)
if buckhash == nil {
buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
if buckhash == nil {
- gothrow("runtime: cannot allocate memory")
+ throw("runtime: cannot allocate memory")
}
}
func sizeToClass(size int32) int32 {
if size > _MaxSmallSize {
- gothrow("SizeToClass - invalid size")
+ throw("SizeToClass - invalid size")
}
if size > 1024-8 {
return int32(size_to_class128[(size-1024+127)>>7])
}
}
if align&(align-1) != 0 {
- gothrow("InitSizes - bug")
+ throw("InitSizes - bug")
}
// Make the allocnpages big enough that
}
if sizeclass != _NumSizeClasses {
print("sizeclass=", sizeclass, " NumSizeClasses=", _NumSizeClasses, "\n")
- gothrow("InitSizes - bad NumSizeClasses")
+ throw("InitSizes - bad NumSizeClasses")
}
// Initialize the size_to_class tables.
}
print("\n")
}
- gothrow("InitSizes failed")
+ throw("InitSizes failed")
}
// Returns size of the memory block that mallocgc will allocate if you ask for the size.
pd := pollcache.alloc()
lock(&pd.lock)
if pd.wg != 0 && pd.wg != pdReady {
- gothrow("netpollOpen: blocked write on free descriptor")
+ throw("netpollOpen: blocked write on free descriptor")
}
if pd.rg != 0 && pd.rg != pdReady {
- gothrow("netpollOpen: blocked read on free descriptor")
+ throw("netpollOpen: blocked read on free descriptor")
}
pd.fd = fd
pd.closing = false
//go:linkname net_runtime_pollClose net.runtime_pollClose
func net_runtime_pollClose(pd *pollDesc) {
if !pd.closing {
- gothrow("netpollClose: close w/o unblock")
+ throw("netpollClose: close w/o unblock")
}
if pd.wg != 0 && pd.wg != pdReady {
- gothrow("netpollClose: blocked write on closing descriptor")
+ throw("netpollClose: blocked write on closing descriptor")
}
if pd.rg != 0 && pd.rg != pdReady {
- gothrow("netpollClose: blocked read on closing descriptor")
+ throw("netpollClose: blocked read on closing descriptor")
}
netpollclose(uintptr(pd.fd))
pollcache.free(pd)
func net_runtime_pollUnblock(pd *pollDesc) {
lock(&pd.lock)
if pd.closing {
- gothrow("netpollUnblock: already closing")
+ throw("netpollUnblock: already closing")
}
pd.closing = true
pd.seq++
return true
}
if old != 0 {
- gothrow("netpollblock: double wait")
+ throw("netpollblock: double wait")
}
if casuintptr(gpp, 0, pdWait) {
break
// be careful to not lose concurrent READY notification
old := xchguintptr(gpp, 0)
if old > pdWait {
- gothrow("netpollblock: corrupted state")
+ throw("netpollblock: corrupted state")
}
return old == pdReady
}
var rg *g
if read {
if pd.rd <= 0 || pd.rt.f == nil {
- gothrow("netpolldeadlineimpl: inconsistent read deadline")
+ throw("netpolldeadlineimpl: inconsistent read deadline")
}
pd.rd = -1
atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
var wg *g
if write {
if pd.wd <= 0 || pd.wt.f == nil && !read {
- gothrow("netpolldeadlineimpl: inconsistent write deadline")
+ throw("netpolldeadlineimpl: inconsistent write deadline")
}
pd.wd = -1
atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
return
}
println("netpollinit: failed to create epoll descriptor", -epfd)
- gothrow("netpollinit: failed to create descriptor")
+ throw("netpollinit: failed to create descriptor")
}
func netpollopen(fd uintptr, pd *pollDesc) int32 {
}
func netpollarm(pd *pollDesc, mode int) {
- gothrow("unused")
+ throw("unused")
}
// polls for ready network connections
kq = kqueue()
if kq < 0 {
println("netpollinit: kqueue failed with", -kq)
- gothrow("netpollinit: kqueue failed")
+ throw("netpollinit: kqueue failed")
}
closeonexec(kq)
}
}
func netpollarm(pd *pollDesc, mode int) {
- gothrow("unused")
+ throw("unused")
}
// Polls for ready network connections.
}
print("netpollinit: failed to create port (", errno(), ")\n")
- gothrow("netpollinit: failed to create port")
+ throw("netpollinit: failed to create port")
}
func netpollopen(fd uintptr, pd *pollDesc) int32 {
if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
print("netpollupdate: failed to associate (", errno(), ")\n")
- gothrow("netpollupdate: failed to associate")
+ throw("netpollupdate: failed to associate")
}
pd.user = events
}
case 'w':
netpollupdate(pd, _POLLOUT, 0)
default:
- gothrow("netpollarm: bad mode")
+ throw("netpollarm: bad mode")
}
unlock(&pd.lock)
}
iocphandle = uintptr(stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX))
if iocphandle == 0 {
println("netpoll: failed to create iocp handle (errno=", getlasterror(), ")")
- gothrow("netpoll: failed to create iocp handle")
+ throw("netpoll: failed to create iocp handle")
}
}
}
func netpollarm(pd *pollDesc, mode int) {
- gothrow("unused")
+ throw("unused")
}
// Polls for completed network IO.
return nil
}
println("netpoll: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
- gothrow("netpoll: GetQueuedCompletionStatusEx failed")
+ throw("netpoll: GetQueuedCompletionStatusEx failed")
}
mp.blocked = false
for i = 0; i < n; i++ {
}
if op == nil {
println("netpoll: GetQueuedCompletionStatus failed (errno=", errno, ")")
- gothrow("netpoll: GetQueuedCompletionStatus failed")
+ throw("netpoll: GetQueuedCompletionStatus failed")
}
// dequeued failed IO packet, so report that
}
func handlecompletion(gpp **g, op *net_op, errno int32, qty uint32) {
if op == nil {
- gothrow("netpoll: GetQueuedCompletionStatus returned op == nil")
+ throw("netpoll: GetQueuedCompletionStatus returned op == nil")
}
mode := op.mode
if mode != 'r' && mode != 'w' {
println("netpoll: GetQueuedCompletionStatus returned invalid mode=", mode)
- gothrow("netpoll: GetQueuedCompletionStatus returned invalid mode")
+ throw("netpoll: GetQueuedCompletionStatus returned invalid mode")
}
op.errno = errno
op.qty = qty
if !iscgo {
if bsdthread_register() != 0 {
if gogetenv("DYLD_INSERT_LIBRARIES") != "" {
- gothrow("runtime: bsdthread_register error (unset DYLD_INSERT_LIBRARIES)")
+ throw("runtime: bsdthread_register error (unset DYLD_INSERT_LIBRARIES)")
}
- gothrow("runtime: bsdthread_register error")
+ throw("runtime: bsdthread_register error")
}
}
}
if errno < 0 {
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -errno, ")\n")
- gothrow("runtime.newosproc")
+ throw("runtime.newosproc")
}
}
func macherror(r int32, fn string) {
print("mach error ", fn, ": ", r, "\n")
- gothrow("mach error")
+ throw("mach error")
}
const _DebugMach = false
}
func setsigstack(i int32) {
- gothrow("setsigstack")
+ throw("setsigstack")
}
func getsig(i int32) uintptr {
}
func setsigstack(i int32) {
- gothrow("setsigstack")
+ throw("setsigstack")
}
func getsig(i int32) uintptr {
}
func setsigstack(i int32) {
- gothrow("setsigstack")
+ throw("setsigstack")
}
func getsig(i int32) uintptr {
if ret < 0 {
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -ret, ")\n")
- gothrow("newosproc")
+ throw("newosproc")
}
}
}
sa.sa_handler = fn
if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
- gothrow("rt_sigaction failure")
+ throw("rt_sigaction failure")
}
}
func setsigstack(i int32) {
var sa sigactiont
if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
- gothrow("rt_sigaction failure")
+ throw("rt_sigaction failure")
}
if sa.sa_handler == 0 || sa.sa_handler == _SIG_DFL || sa.sa_handler == _SIG_IGN || sa.sa_flags&_SA_ONSTACK != 0 {
return
}
sa.sa_flags |= _SA_ONSTACK
if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
- gothrow("rt_sigaction failure")
+ throw("rt_sigaction failure")
}
}
memclr(unsafe.Pointer(&sa), unsafe.Sizeof(sa))
if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
- gothrow("rt_sigaction read failure")
+ throw("rt_sigaction read failure")
}
if sa.sa_handler == funcPC(sigtramp) {
return funcPC(sighandler)
ret := nacl_thread_create(funcPC(mstart_nacl), stk, unsafe.Pointer(&tls[2]), nil)
if ret < 0 {
print("nacl_thread_create: error ", -ret, "\n")
- gothrow("newosproc")
+ throw("newosproc")
}
}
mu := nacl_mutex_create(0)
if mu < 0 {
print("nacl_mutex_create: error ", -mu, "\n")
- gothrow("semacreate")
+ throw("semacreate")
}
c := nacl_cond_create(0)
if c < 0 {
print("nacl_cond_create: error ", -cond, "\n")
- gothrow("semacreate")
+ throw("semacreate")
}
cond = uintptr(c)
_g_ := getg()
systemstack(func() {
_g_ := getg()
if nacl_mutex_lock(int32(_g_.m.waitsemalock)) < 0 {
- gothrow("semasleep")
+ throw("semasleep")
}
for _g_.m.waitsemacount == 0 {
if ns < 0 {
if nacl_cond_wait(int32(_g_.m.waitsema), int32(_g_.m.waitsemalock)) < 0 {
- gothrow("semasleep")
+ throw("semasleep")
}
} else {
var ts timespec
return
}
if r < 0 {
- gothrow("semasleep")
+ throw("semasleep")
}
}
}
func semawakeup(mp *m) {
systemstack(func() {
if nacl_mutex_lock(int32(mp.waitsemalock)) < 0 {
- gothrow("semawakeup")
+ throw("semawakeup")
}
if mp.waitsemacount != 0 {
- gothrow("semawakeup")
+ throw("semawakeup")
}
mp.waitsemacount = 1
nacl_cond_signal(int32(mp.waitsema))
ret := lwp_create(unsafe.Pointer(&uc), 0, unsafe.Pointer(&mp.procid))
if ret < 0 {
print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
- gothrow("runtime.newosproc")
+ throw("runtime.newosproc")
}
}
}
func setsigstack(i int32) {
- gothrow("setsigstack")
+ throw("setsigstack")
}
func getsig(i int32) uintptr {
if ret == -ENOTSUP {
print("runtime: is kern.rthreads disabled?\n")
}
- gothrow("runtime.newosproc")
+ throw("runtime.newosproc")
}
}
}
func setsigstack(i int32) {
- gothrow("setsigstack")
+ throw("setsigstack")
}
func getsig(i int32) uintptr {
}
pid := rfork(_RFPROC | _RFMEM | _RFNOWAIT)
if pid < 0 {
- gothrow("newosproc: rfork failed")
+ throw("newosproc: rfork failed")
}
if pid == 0 {
tstart_plan9(mp)
_STACK_SIZE_PARAM_IS_A_RESERVATION, 0)
if thandle == 0 {
println("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", getlasterror(), ")")
- gothrow("runtime.newosproc")
+ throw("runtime.newosproc")
}
}
}
}
systemstack(func() {
- gothrow("interrupt/system time is changing too fast")
+ throw("interrupt/system time is changing too fast")
})
return 0
}
)
if pthread_attr_init(&attr) != 0 {
- gothrow("pthread_attr_init")
+ throw("pthread_attr_init")
}
if pthread_attr_setstack(&attr, 0, 0x200000) != 0 {
- gothrow("pthread_attr_setstack")
+ throw("pthread_attr_setstack")
}
if pthread_attr_getstack(&attr, unsafe.Pointer(&mp.g0.stack.hi), &size) != 0 {
- gothrow("pthread_attr_getstack")
+ throw("pthread_attr_getstack")
}
mp.g0.stack.lo = mp.g0.stack.hi - uintptr(size)
if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
- gothrow("pthread_attr_setdetachstate")
+ throw("pthread_attr_setdetachstate")
}
// Disable signals during create, so that the new thread starts
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret != 0 {
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
- gothrow("newosproc")
+ throw("newosproc")
}
}
}
func setsigstack(i int32) {
- gothrow("setsigstack")
+ throw("setsigstack")
}
func getsig(i int32) uintptr {
asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&_g_.m.libcall))
sem = (*semt)(unsafe.Pointer(_g_.m.libcall.r1))
if sem_init(sem, 0, 0) != 0 {
- gothrow("sem_init")
+ throw("sem_init")
}
return uintptr(unsafe.Pointer(sem))
}
if *_m_.perrno == _ETIMEDOUT || *_m_.perrno == _EAGAIN || *_m_.perrno == _EINTR {
return -1
}
- gothrow("sem_reltimedwait_np")
+ throw("sem_reltimedwait_np")
}
return 0
}
if *_m_.perrno == _EINTR {
continue
}
- gothrow("sem_wait")
+ throw("sem_wait")
}
return 0
}
//go:nosplit
func semawakeup(mp *m) {
if sem_post((*semt)(unsafe.Pointer(mp.waitsema))) != 0 {
- gothrow("sem_post")
+ throw("sem_post")
}
}
//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
- gothrow("too many writes on closed pipe")
+ throw("too many writes on closed pipe")
}
func sigpanic() {
g := getg()
if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
+ throw("unexpected signal during runtime execution")
}
// Native Client only invokes the exception handler for memory faults.
//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
- gothrow("too many writes on closed pipe")
+ throw("too many writes on closed pipe")
}
func sigpanic() {
g := getg()
if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
+ throw("unexpected signal during runtime execution")
}
note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig)))
panicmem()
}
print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
+ throw("fault")
case _SIGTRAP:
if g.paniconfault {
panicmem()
}
- gothrow(note)
+ throw(note)
case _SIGINTDIV:
panicdivide()
case _SIGFLOAT:
//go:linkname os_sigpipe os.sigpipe
func os_sigpipe() {
- gothrow("too many writes on closed pipe")
+ throw("too many writes on closed pipe")
}
func sigpanic() {
g := getg()
if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
+ throw("unexpected signal during runtime execution")
}
switch uint32(g.sig) {
panicmem()
}
print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
+ throw("fault")
case _EXCEPTION_INT_DIVIDE_BY_ZERO:
panicdivide()
case _EXCEPTION_INT_OVERFLOW:
_EXCEPTION_FLT_UNDERFLOW:
panicfloat()
}
- gothrow("fault")
+ throw("fault")
}
}
func throwreturn() {
- gothrow("no return at end of a typed function - compiler is broken")
+ throw("no return at end of a typed function - compiler is broken")
}
func throwinit() {
- gothrow("recursive call during initialization - linker skew")
+ throw("recursive call during initialization - linker skew")
}
// Create a new deferred function fn with siz bytes of arguments.
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
if getg().m.curg != getg() {
// go code on the system stack can't defer
- gothrow("defer on system stack")
+ throw("defer on system stack")
}
// the arguments of fn are in a perilous state. The stack map
systemstack(func() {
d := newdefer(siz)
if d._panic != nil {
- gothrow("deferproc: d.panic != nil after newdefer")
+ throw("deferproc: d.panic != nil after newdefer")
}
d.fn = fn
d.pc = callerpc
}
if m[defersc] != int32(siz) {
print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
- gothrow("bad defer size class")
+ throw("bad defer size class")
}
}
}
// Windows otherwise runs out of stack space.
func freedeferpanic() {
// _panic must be cleared before d is unlinked from gp.
- gothrow("freedefer with d._panic != nil")
+ throw("freedefer with d._panic != nil")
}
func freedeferfn() {
// fn must be cleared before d is unlinked from gp.
- gothrow("freedefer with d.fn != nil")
+ throw("freedefer with d.fn != nil")
}
// Run a deferred function if there is one.
d.started = true
reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
if gp._defer != d {
- gothrow("bad defer entry in Goexit")
+ throw("bad defer entry in Goexit")
}
d._panic = nil
d.fn = nil
print("panic: ")
printany(e)
print("\n")
- gothrow("panic on system stack")
+ throw("panic on system stack")
}
// m.softfloat is set during software floating point.
if gp.m.softfloat != 0 {
gp.m.locks--
gp.m.softfloat = 0
- gothrow("panic during softfloat")
+ throw("panic during softfloat")
}
if gp.m.mallocing != 0 {
print("panic: ")
printany(e)
print("\n")
- gothrow("panic during malloc")
+ throw("panic during malloc")
}
if gp.m.gcing != 0 {
print("panic: ")
printany(e)
print("\n")
- gothrow("panic during gc")
+ throw("panic during gc")
}
if gp.m.locks != 0 {
print("panic: ")
printany(e)
print("\n")
- gothrow("panic holding locks")
+ throw("panic holding locks")
}
var p _panic
// reflectcall did not panic. Remove d.
if gp._defer != d {
- gothrow("bad defer entry in panic")
+ throw("bad defer entry in panic")
}
d._panic = nil
d.fn = nil
gp.sigcode0 = uintptr(sp)
gp.sigcode1 = pc
mcall(recovery)
- gothrow("recovery failed") // mcall should not return
+ throw("recovery failed") // mcall should not return
}
}
}
//go:nosplit
-func throw(s *byte) {
- gp := getg()
- if gp.m.throwing == 0 {
- gp.m.throwing = 1
- }
- startpanic()
- print("fatal error: ", gostringnocopy(s), "\n")
- dopanic(0)
- *(*int)(nil) = 0 // not reached
-}
-
-//go:nosplit
-func gothrow(s string) {
+func throw(s string) {
print("fatal error: ", s, "\n")
gp := getg()
if gp.m.throwing == 0 {
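The hunk above collapses the two entry points into one: the old throw took a *byte (a C string), while gothrow took a Go string; after the rename there is only throw(s string). A minimal caller-side sketch, using a hypothetical checkAlign helper (only the throw(s string) signature comes from this change):

// checkAlign is a hypothetical helper, shown only to illustrate the rename:
// a call that previously read gothrow("misaligned pointer") now calls throw
// with the same Go string, and no *byte conversion is needed anywhere.
func checkAlign(p uintptr) {
	if p%8 != 0 {
		throw("misaligned pointer")
	}
}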
// d's arguments need to be in the stack.
if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
- gothrow("bad recovery")
+ throw("bad recovery")
}
// Make the deferproc for this d return again,
func parforsetup(desc *parfor, nthr, n uint32, ctx unsafe.Pointer, wait bool, body func(*parfor, uint32)) {
if desc == nil || nthr == 0 || nthr > desc.nthrmax || body == nil {
print("desc=", desc, " nthr=", nthr, " count=", n, " body=", body, "\n")
- gothrow("parfor: invalid args")
+ throw("parfor: invalid args")
}
desc.body = *(*unsafe.Pointer)(unsafe.Pointer(&body))
end := uint32(uint64(n) * uint64(i+1) / uint64(nthr))
pos := &desc_thr_index(desc, i).pos
if uintptr(unsafe.Pointer(pos))&7 != 0 {
- gothrow("parforsetup: pos is not aligned")
+ throw("parforsetup: pos is not aligned")
}
*pos = uint64(begin) | uint64(end)<<32
}
tid := xadd(&desc.thrseq, 1) - 1
if tid >= desc.nthr {
print("tid=", tid, " nthr=", desc.nthr, "\n")
- gothrow("parfor: invalid tid")
+ throw("parfor: invalid tid")
}
// If single-threaded, just execute the for serially.
if begin < end {
// Has successfully stolen some work.
if idle {
- gothrow("parfor: should not be idle")
+ throw("parfor: should not be idle")
}
atomicstore64(mypos, uint64(begin)|uint64(end)<<32)
me.nsteal++
lockOSThread()
if g.m != &m0 {
- gothrow("runtime.main not on m0")
+ throw("runtime.main not on m0")
}
runtime_init() // must be before defer
if iscgo {
if _cgo_thread_start == nil {
- gothrow("_cgo_thread_start missing")
+ throw("_cgo_thread_start missing")
}
if _cgo_malloc == nil {
- gothrow("_cgo_malloc missing")
+ throw("_cgo_malloc missing")
}
if _cgo_free == nil {
- gothrow("_cgo_free missing")
+ throw("_cgo_free missing")
}
if GOOS != "windows" {
if _cgo_setenv == nil {
- gothrow("_cgo_setenv missing")
+ throw("_cgo_setenv missing")
}
if _cgo_unsetenv == nil {
- gothrow("_cgo_unsetenv missing")
+ throw("_cgo_unsetenv missing")
}
}
}
for {
lock(&forcegc.lock)
if forcegc.idle != 0 {
- gothrow("forcegc: phase error")
+ throw("forcegc: phase error")
}
atomicstore(&forcegc.idle, 1)
goparkunlock(&forcegc.lock, "force gc (idle)")
gp := mp.curg
status := readgstatus(gp)
if status != _Grunning && status != _Gscanrunning {
- gothrow("gopark: bad g status")
+ throw("gopark: bad g status")
}
mp.waitlock = lock
mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
s := c.sudogcache
if s != nil {
if s.elem != nil {
- gothrow("acquireSudog: found s.elem != nil in cache")
+ throw("acquireSudog: found s.elem != nil in cache")
}
c.sudogcache = s.next
s.next = nil
mp := acquirem()
p := new(sudog)
if p.elem != nil {
- gothrow("acquireSudog: found p.elem != nil after new")
+ throw("acquireSudog: found p.elem != nil after new")
}
releasem(mp)
return p
//go:nosplit
func releaseSudog(s *sudog) {
if s.elem != nil {
- gothrow("runtime: sudog with non-nil elem")
+ throw("runtime: sudog with non-nil elem")
}
if s.selectdone != nil {
- gothrow("runtime: sudog with non-nil selectdone")
+ throw("runtime: sudog with non-nil selectdone")
}
if s.next != nil {
- gothrow("runtime: sudog with non-nil next")
+ throw("runtime: sudog with non-nil next")
}
if s.prev != nil {
- gothrow("runtime: sudog with non-nil prev")
+ throw("runtime: sudog with non-nil prev")
}
if s.waitlink != nil {
- gothrow("runtime: sudog with non-nil waitlink")
+ throw("runtime: sudog with non-nil waitlink")
}
gp := getg()
if gp.param != nil {
- gothrow("runtime: releaseSudog with non-nil gp.param")
+ throw("runtime: releaseSudog with non-nil gp.param")
}
c := gomcache()
s.next = c.sudogcache
// called from assembly
func badmcall(fn func(*g)) {
- gothrow("runtime: mcall called on m->g0 stack")
+ throw("runtime: mcall called on m->g0 stack")
}
func badmcall2(fn func(*g)) {
- gothrow("runtime: mcall function returned")
+ throw("runtime: mcall function returned")
}
func badreflectcall() {
func allgadd(gp *g) {
if readgstatus(gp) == _Gidle {
- gothrow("allgadd: bad status Gidle")
+ throw("allgadd: bad status Gidle")
}
lock(&allglock)
procs = n
}
if procresize(int32(procs)) != nil {
- gothrow("unknown runnable goroutine during bootstrap")
+ throw("unknown runnable goroutine during bootstrap")
}
if buildVersion == "" {
// sched lock is held
if sched.mcount > sched.maxmcount {
print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
- gothrow("thread exhaustion")
+ throw("thread exhaustion")
}
}
_g_.m.locks++ // disable preemption because it can be holding p in a local var
if status&^_Gscan != _Gwaiting {
dumpgstatus(gp)
- gothrow("bad g->status in ready")
+ throw("bad g->status in ready")
}
// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
}
mp := mget()
if mp == nil {
- gothrow("gcprocs inconsistency")
+ throw("gcprocs inconsistency")
}
mp.helpgc = n
mp.mcache = allp[pos].mcache
func isscanstatus(status uint32) bool {
if status == _Gscan {
- gothrow("isscanstatus: Bad status Gscan")
+ throw("isscanstatus: Bad status Gscan")
}
return status&_Gscan == _Gscan
}
default:
print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
dumpgstatus(gp)
- gothrow("casfrom_Gscanstatus:top gp->status is not in scan state")
+ throw("casfrom_Gscanstatus:top gp->status is not in scan state")
case _Gscanrunnable,
_Gscanwaiting,
_Gscanrunning,
if !success {
print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
dumpgstatus(gp)
- gothrow("casfrom_Gscanstatus: gp->status is not in scan state")
+ throw("casfrom_Gscanstatus: gp->status is not in scan state")
}
}
}
}
print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
- gothrow("castogscanstatus")
+ throw("castogscanstatus")
panic("not reached")
}
if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
systemstack(func() {
print("casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
- gothrow("casgstatus: bad incoming values")
+ throw("casgstatus: bad incoming values")
})
}
for !cas(&gp.atomicstatus, oldval, newval) {
if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
systemstack(func() {
- gothrow("casgstatus: waiting for Gwaiting but is Grunnable")
+ throw("casgstatus: waiting for Gwaiting but is Grunnable")
})
}
// Help GC if needed.
for {
oldstatus := readgstatus(gp) &^ _Gscan
if oldstatus != _Gwaiting && oldstatus != _Grunnable {
- gothrow("copystack: bad status, not Gwaiting or Grunnable")
+ throw("copystack: bad status, not Gwaiting or Grunnable")
}
if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
return oldstatus
switch s := readgstatus(gp); s {
default:
dumpgstatus(gp)
- gothrow("stopg: gp->atomicstatus is not valid")
+ throw("stopg: gp->atomicstatus is not valid")
case _Gdead:
return false
switch s {
default:
dumpgstatus(gp)
- gothrow("restartg: unexpected status")
+ throw("restartg: unexpected status")
case _Gdead:
// ok
case _Gscanenqueue:
casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
if gp != getg().m.curg {
- gothrow("processing Gscanenqueue on wrong m")
+ throw("processing Gscanenqueue on wrong m")
}
dropg()
ready(gp)
func stopscanstart(gp *g) {
_g_ := getg()
if _g_ == gp {
- gothrow("GC not moved to G0")
+ throw("GC not moved to G0")
}
if stopg(gp) {
if !isscanstatus(readgstatus(gp)) {
dumpgstatus(gp)
- gothrow("GC not in scan state")
+ throw("GC not in scan state")
}
restartg(gp)
}
// If we hold a lock, then we won't be able to stop another M
// that is blocked trying to acquire the lock.
if _g_.m.locks > 0 {
- gothrow("stoptheworld: holding locks")
+ throw("stoptheworld: holding locks")
}
lock(&sched.lock)
}
}
if sched.stopwait != 0 {
- gothrow("stoptheworld: not stopped")
+ throw("stoptheworld: not stopped")
}
for i := 0; i < int(gomaxprocs); i++ {
p := allp[i]
if p.status != _Pgcstop {
- gothrow("stoptheworld: not stopped")
+ throw("stoptheworld: not stopped")
}
}
}
mp := p.m
p.m = nil
if mp.nextp != nil {
- gothrow("starttheworld: inconsistent mp->nextp")
+ throw("starttheworld: inconsistent mp->nextp")
}
mp.nextp = p
notewakeup(&mp.park)
_g_ := getg()
if _g_ != _g_.m.g0 {
- gothrow("bad runtime·mstart")
+ throw("bad runtime·mstart")
}
// Record top of stack for use by mcall.
if iscgo {
var ts cgothreadstart
if _cgo_thread_start == nil {
- gothrow("_cgo_thread_start missing")
+ throw("_cgo_thread_start missing")
}
ts.g = mp.g0
ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
_g_ := getg()
if _g_.m.locks != 0 {
- gothrow("stopm holding locks")
+ throw("stopm holding locks")
}
if _g_.m.p != nil {
- gothrow("stopm holding p")
+ throw("stopm holding p")
}
if _g_.m.spinning {
_g_.m.spinning = false
return
}
if mp.spinning {
- gothrow("startm: m is spinning")
+ throw("startm: m is spinning")
}
if mp.nextp != nil {
- gothrow("startm: m has p")
+ throw("startm: m has p")
}
mp.spinning = spinning
mp.nextp = _p_
_g_ := getg()
if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
- gothrow("stoplockedm: inconsistent locking")
+ throw("stoplockedm: inconsistent locking")
}
if _g_.m.p != nil {
// Schedule another M to run this p.
if status&^_Gscan != _Grunnable {
print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
dumpgstatus(_g_)
- gothrow("stoplockedm: not runnable")
+ throw("stoplockedm: not runnable")
}
acquirep(_g_.m.nextp)
_g_.m.nextp = nil
mp := gp.lockedm
if mp == _g_.m {
- gothrow("startlockedm: locked to me")
+ throw("startlockedm: locked to me")
}
if mp.nextp != nil {
- gothrow("startlockedm: m has p")
+ throw("startlockedm: m has p")
}
// directly handoff current P to the locked m
incidlelocked(-1)
_g_ := getg()
if sched.gcwaiting == 0 {
- gothrow("gcstopm: not waiting for gc")
+ throw("gcstopm: not waiting for gc")
}
if _g_.m.spinning {
_g_.m.spinning = false
// poll network
if xchg64(&sched.lastpoll, 0) != 0 {
if _g_.m.p != nil {
- gothrow("findrunnable: netpoll with p")
+ throw("findrunnable: netpoll with p")
}
if _g_.m.spinning {
- gothrow("findrunnable: netpoll with spinning")
+ throw("findrunnable: netpoll with spinning")
}
gp := netpoll(true) // block until new work is available
atomicstore64(&sched.lastpoll, uint64(nanotime()))
_g_.m.spinning = false
nmspinning = xadd(&sched.nmspinning, -1)
if nmspinning < 0 {
- gothrow("findrunnable: negative nmspinning")
+ throw("findrunnable: negative nmspinning")
}
} else {
nmspinning = atomicload(&sched.nmspinning)
_g_ := getg()
if _g_.m.locks != 0 {
- gothrow("schedule: holding locks")
+ throw("schedule: holding locks")
}
if _g_.m.lockedg != nil {
if gp == nil {
gp = runqget(_g_.m.p)
if gp != nil && _g_.m.spinning {
- gothrow("schedule: spinning with local work")
+ throw("schedule: spinning with local work")
}
}
if gp == nil {
status := readgstatus(gp)
if status&^_Gscan != _Grunning {
dumpgstatus(gp)
- gothrow("bad g status")
+ throw("bad g status")
}
casgstatus(gp, _Grunning, _Grunnable)
dropg()
if _g_.m.locked&^_LockExternal != 0 {
print("invalid m->locked = ", _g_.m.locked, "\n")
- gothrow("internal lockOSThread error")
+ throw("internal lockOSThread error")
}
_g_.m.locked = 0
gfput(_g_.m.p, gp)
if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
systemstack(func() {
print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
- gothrow("entersyscall")
+ throw("entersyscall")
})
}
sp3 := _g_.syscallsp
systemstack(func() {
print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
- gothrow("entersyscallblock")
+ throw("entersyscallblock")
})
}
casgstatus(_g_, _Grunning, _Gsyscall)
if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
systemstack(func() {
print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
- gothrow("entersyscallblock")
+ throw("entersyscallblock")
})
}
_g_.m.locks++ // see comment in entersyscall
if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
- gothrow("exitsyscall: syscall frame is no longer valid")
+ throw("exitsyscall: syscall frame is no longer valid")
}
_g_.waitsince = 0
if exitsyscallfast() {
if _g_.m.mcache == nil {
- gothrow("lost mcache")
+ throw("lost mcache")
}
// There's a cpu for us, so we can run.
_g_.m.p.syscalltick++
mcall(exitsyscall0)
if _g_.m.mcache == nil {
- gothrow("lost mcache")
+ throw("lost mcache")
}
// Scheduler returned, so we're allowed to run now.
if fn == nil {
_g_.m.throwing = -1 // do not dump full stacks
- gothrow("go of nil func value")
+ throw("go of nil func value")
}
_g_.m.locks++ // disable preemption because it can be holding p in a local var
siz := narg + nret
// 4*sizeof(uintreg): extra space added below
// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
if siz >= _StackMin-4*regSize-regSize {
- gothrow("newproc: function arguments too large for new goroutine")
+ throw("newproc: function arguments too large for new goroutine")
}
_p_ := _g_.m.p
allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
}
if newg.stack.hi == 0 {
- gothrow("newproc1: newg missing stack")
+ throw("newproc1: newg missing stack")
}
if readgstatus(newg) != _Gdead {
- gothrow("newproc1: new g is not Gdead")
+ throw("newproc1: new g is not Gdead")
}
sp := newg.stack.hi
// If local list is too long, transfer a batch to the global list.
func gfput(_p_ *p, gp *g) {
if readgstatus(gp) != _Gdead {
- gothrow("gfput: bad status (not Gdead)")
+ throw("gfput: bad status (not Gdead)")
}
stksize := gp.stack.hi - gp.stack.lo
}
func badunlockosthread() {
- gothrow("runtime: internal error: misuse of lockOSThread/unlockOSThread")
+ throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}
func gcount() int32 {
func procresize(new int32) *p {
old := gomaxprocs
if old < 0 || old > _MaxGomaxprocs || new <= 0 || new > _MaxGomaxprocs {
- gothrow("procresize: invalid arg")
+ throw("procresize: invalid arg")
}
// initialize new P's
if p.mcache == nil {
if old == 0 && i == 0 {
if getg().m.mcache == nil {
- gothrow("missing mcache?")
+ throw("missing mcache?")
}
p.mcache = getg().m.mcache // bootstrap
} else {
_g_ := getg()
if _g_.m.p != nil || _g_.m.mcache != nil {
- gothrow("acquirep: already in go")
+ throw("acquirep: already in go")
}
if _p_.m != nil || _p_.status != _Pidle {
id := int32(0)
if _p_.m != nil {
id = _p_.m.id
}
print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
- gothrow("acquirep: invalid p state")
+ throw("acquirep: invalid p state")
}
_g_.m.mcache = _p_.mcache
_g_.m.p = _p_
_g_ := getg()
if _g_.m.p == nil || _g_.m.mcache == nil {
- gothrow("releasep: invalid arg")
+ throw("releasep: invalid arg")
}
_p_ := _g_.m.p
if _p_.m != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
print("releasep: m=", _g_.m, " m->p=", _g_.m.p, " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
- gothrow("releasep: invalid p state")
+ throw("releasep: invalid p state")
}
_g_.m.p = nil
_g_.m.mcache = nil
}
if run < 0 {
print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
- gothrow("checkdead: inconsistent counts")
+ throw("checkdead: inconsistent counts")
}
grunning := 0
_Gsyscall:
unlock(&allglock)
print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
- gothrow("checkdead: runnable g")
+ throw("checkdead: runnable g")
}
}
unlock(&allglock)
if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
- gothrow("no goroutines (main called runtime.Goexit) - deadlock!")
+ throw("no goroutines (main called runtime.Goexit) - deadlock!")
}
// Maybe jump time forward for playground.
globrunqput(gp)
_p_ := pidleget()
if _p_ == nil {
- gothrow("checkdead: no p for timer")
+ throw("checkdead: no p for timer")
}
mp := mget()
if mp == nil {
}
getg().m.throwing = -1 // do not dump full stacks
- gothrow("all goroutines are asleep - deadlock!")
+ throw("all goroutines are asleep - deadlock!")
}
func sysmon() {
n := t - h
n = n / 2
if n != uint32(len(_p_.runq)/2) {
- gothrow("runqputslow: queue is not full")
+ throw("runqputslow: queue is not full")
}
for i := uint32(0); i < n; i++ {
batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
t := _p_.runqtail
if t-h+n >= uint32(len(_p_.runq)) {
- gothrow("runqsteal: runq overflow")
+ throw("runqsteal: runq overflow")
}
for i := uint32(0); i < n; i++ {
_p_.runq[(t+i)%uint32(len(_p_.runq))] = batch[i]
gs := make([]g, len(_p_.runq))
for i := 0; i < len(_p_.runq); i++ {
if runqget(_p_) != nil {
- gothrow("runq is not empty initially")
+ throw("runq is not empty initially")
}
for j := 0; j < i; j++ {
runqput(_p_, &gs[i])
for j := 0; j < i; j++ {
if runqget(_p_) != &gs[i] {
print("bad element at iter ", i, "/", j, "\n")
- gothrow("bad element")
+ throw("bad element")
}
}
if runqget(_p_) != nil {
- gothrow("runq is not empty afterwards")
+ throw("runq is not empty afterwards")
}
}
}
for j := 0; j < i; j++ {
if gs[j].sig != 1 {
print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
- gothrow("bad element")
+ throw("bad element")
}
}
if s != i/2 && s != i/2+1 {
print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
- gothrow("bad steal")
+ throw("bad steal")
}
}
}
// Because raceenabled is false, none of these functions should be called.
-func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func raceinit() uintptr { gothrow("race"); return 0 }
-func racefini() { gothrow("race") }
-func racemapshadow(addr unsafe.Pointer, size uintptr) { gothrow("race") }
-func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
-func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
-func raceacquire(addr unsafe.Pointer) { gothrow("race") }
-func raceacquireg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racerelease(addr unsafe.Pointer) { gothrow("race") }
-func racereleaseg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racereleasemerge(addr unsafe.Pointer) { gothrow("race") }
-func racereleasemergeg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racefingo() { gothrow("race") }
-func racemalloc(p unsafe.Pointer, sz uintptr) { gothrow("race") }
-func racegostart(pc uintptr) uintptr { gothrow("race"); return 0 }
-func racegoend() { gothrow("race") }
+func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceinit() uintptr { throw("race"); return 0 }
+func racefini() { throw("race") }
+func racemapshadow(addr unsafe.Pointer, size uintptr) { throw("race") }
+func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func raceacquire(addr unsafe.Pointer) { throw("race") }
+func raceacquireg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racerelease(addr unsafe.Pointer) { throw("race") }
+func racereleaseg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racereleasemerge(addr unsafe.Pointer) { throw("race") }
+func racereleasemergeg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racefingo() { throw("race") }
+func racemalloc(p unsafe.Pointer, sz uintptr) { throw("race") }
+func racegostart(pc uintptr) uintptr { throw("race"); return 0 }
+func racegoend() { throw("race") }
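These stubs let non-race builds compile and link: raceenabled is a compile-time constant in the runtime, and every call into a race hook is guarded by it, so the throw("race") bodies are never reached. A minimal sketch of that guard pattern, assuming a hypothetical writeSlot helper (it relies on the runtime-internal raceenabled constant and the racewritepc hook declared above, so it is not standalone code):

// writeSlot is a hypothetical illustration: because raceenabled is a
// constant, the branch is dead code in non-race builds, and the
// throw("race") stub behind racewritepc can never actually run.
func writeSlot(p *uint64, v uint64, callerpc, pc uintptr) {
	if raceenabled {
		racewritepc(unsafe.Pointer(p), callerpc, pc)
	}
	*p = v
}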
func raceinit() uintptr {
// cgo is required to initialize libc, which is used by race runtime
if !iscgo {
- gothrow("raceinit: race build must use cgo")
+ throw("raceinit: race build must use cgo")
}
var racectx uintptr
prefetcht2(uintptr(unsafe.Pointer(&z64)))
prefetchnta(uintptr(unsafe.Pointer(&z64)))
if cas64(&z64, x64, 1) {
- gothrow("cas64 failed")
+ throw("cas64 failed")
}
if x64 != 0 {
- gothrow("cas64 failed")
+ throw("cas64 failed")
}
x64 = 42
if !cas64(&z64, x64, 1) {
- gothrow("cas64 failed")
+ throw("cas64 failed")
}
if x64 != 42 || z64 != 1 {
- gothrow("cas64 failed")
+ throw("cas64 failed")
}
if atomicload64(&z64) != 1 {
- gothrow("load64 failed")
+ throw("load64 failed")
}
atomicstore64(&z64, (1<<40)+1)
if atomicload64(&z64) != (1<<40)+1 {
- gothrow("store64 failed")
+ throw("store64 failed")
}
if xadd64(&z64, (1<<40)+1) != (2<<40)+2 {
- gothrow("xadd64 failed")
+ throw("xadd64 failed")
}
if atomicload64(&z64) != (2<<40)+2 {
- gothrow("xadd64 failed")
+ throw("xadd64 failed")
}
if xchg64(&z64, (3<<40)+3) != (2<<40)+2 {
- gothrow("xchg64 failed")
+ throw("xchg64 failed")
}
if atomicload64(&z64) != (3<<40)+3 {
- gothrow("xchg64 failed")
+ throw("xchg64 failed")
}
}
var y1 y1t
if unsafe.Sizeof(a) != 1 {
- gothrow("bad a")
+ throw("bad a")
}
if unsafe.Sizeof(b) != 1 {
- gothrow("bad b")
+ throw("bad b")
}
if unsafe.Sizeof(c) != 2 {
- gothrow("bad c")
+ throw("bad c")
}
if unsafe.Sizeof(d) != 2 {
- gothrow("bad d")
+ throw("bad d")
}
if unsafe.Sizeof(e) != 4 {
- gothrow("bad e")
+ throw("bad e")
}
if unsafe.Sizeof(f) != 4 {
- gothrow("bad f")
+ throw("bad f")
}
if unsafe.Sizeof(g) != 8 {
- gothrow("bad g")
+ throw("bad g")
}
if unsafe.Sizeof(h) != 8 {
- gothrow("bad h")
+ throw("bad h")
}
if unsafe.Sizeof(i) != 4 {
- gothrow("bad i")
+ throw("bad i")
}
if unsafe.Sizeof(j) != 8 {
- gothrow("bad j")
+ throw("bad j")
}
if unsafe.Sizeof(k) != ptrSize {
- gothrow("bad k")
+ throw("bad k")
}
if unsafe.Sizeof(l) != ptrSize {
- gothrow("bad l")
+ throw("bad l")
}
if unsafe.Sizeof(x1) != 1 {
- gothrow("bad unsafe.Sizeof x1")
+ throw("bad unsafe.Sizeof x1")
}
if unsafe.Offsetof(y1.y) != 1 {
- gothrow("bad offsetof y1.y")
+ throw("bad offsetof y1.y")
}
if unsafe.Sizeof(y1) != 2 {
- gothrow("bad unsafe.Sizeof y1")
+ throw("bad unsafe.Sizeof y1")
}
if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
- gothrow("bad timediv")
+ throw("bad timediv")
}
var z uint32
z = 1
if !cas(&z, 1, 2) {
- gothrow("cas1")
+ throw("cas1")
}
if z != 2 {
- gothrow("cas2")
+ throw("cas2")
}
z = 4
if cas(&z, 5, 6) {
- gothrow("cas3")
+ throw("cas3")
}
if z != 4 {
- gothrow("cas4")
+ throw("cas4")
}
z = 0xffffffff
if !cas(&z, 0xffffffff, 0xfffffffe) {
- gothrow("cas5")
+ throw("cas5")
}
if z != 0xfffffffe {
- gothrow("cas6")
+ throw("cas6")
}
k = unsafe.Pointer(uintptr(0xfedcb123))
if ptrSize == 8 {
k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
}
if casp(&k, nil, nil) {
- gothrow("casp1")
+ throw("casp1")
}
k1 = add(k, 1)
if !casp(&k, k, k1) {
- gothrow("casp2")
+ throw("casp2")
}
if k != k1 {
- gothrow("casp3")
+ throw("casp3")
}
m = [4]byte{1, 1, 1, 1}
atomicor8(&m[1], 0xf0)
if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
- gothrow("atomicor8")
+ throw("atomicor8")
}
*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
if j == j {
- gothrow("float64nan")
+ throw("float64nan")
}
if !(j != j) {
- gothrow("float64nan1")
+ throw("float64nan1")
}
*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
if j == j1 {
- gothrow("float64nan2")
+ throw("float64nan2")
}
if !(j != j1) {
- gothrow("float64nan3")
+ throw("float64nan3")
}
*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
if i == i {
- gothrow("float32nan")
+ throw("float32nan")
}
if i == i {
- gothrow("float32nan1")
+ throw("float32nan1")
}
*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
if i == i1 {
- gothrow("float32nan2")
+ throw("float32nan2")
}
if i == i1 {
- gothrow("float32nan3")
+ throw("float32nan3")
}
testAtomic64()
if _FixedStack != round2(_FixedStack) {
- gothrow("FixedStack is not power-of-2")
+ throw("FixedStack is not power-of-2")
}
}
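check() validates _FixedStack with round2, and the stackalloc/stackfree hunks further down reject any size where n&(n-1) != 0; both are power-of-two tests. A tiny standalone illustration of the bit trick (not runtime code):

// isPowerOfTwo mirrors the n&(n-1) check used by stackalloc and stackfree:
// a power of two has exactly one bit set, so clearing its lowest set bit
// leaves zero.
func isPowerOfTwo(n uintptr) bool {
	return n != 0 && n&(n-1) == 0
}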
func newselect(sel *_select, selsize int64, size int32) {
if selsize != int64(selectsize(uintptr(size))) {
print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
- gothrow("bad select size")
+ throw("bad select size")
}
sel.tcase = uint16(size)
sel.ncase = 0
func selectsendImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
i := sel.ncase
if i >= sel.tcase {
- gothrow("selectsend: too many cases")
+ throw("selectsend: too many cases")
}
sel.ncase = i + 1
cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
func selectrecvImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
i := sel.ncase
if i >= sel.tcase {
- gothrow("selectrecv: too many cases")
+ throw("selectrecv: too many cases")
}
sel.ncase = i + 1
cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
func selectdefaultImpl(sel *_select, callerpc uintptr, so uintptr) {
i := sel.ncase
if i >= sel.tcase {
- gothrow("selectdefault: too many cases")
+ throw("selectdefault: too many cases")
}
sel.ncase = i + 1
cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
for i := 0; i+1 < int(sel.ncase); i++ {
if lockorder[i].sortkey() > lockorder[i+1].sortkey() {
print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
- gothrow("select: broken sort")
+ throw("select: broken sort")
}
}
*/
c = cas._chan
if c.dataqsiz > 0 {
- gothrow("selectgo: shouldn't happen")
+ throw("selectgo: shouldn't happen")
}
if debugSelect {
func semacquire(addr *uint32, profile bool) {
gp := getg()
if gp != gp.m.curg {
- gothrow("semacquire not on the G stack")
+ throw("semacquire not on the G stack")
}
// Easy case.
func syncsemcheck(sz uintptr) {
if sz != unsafe.Sizeof(syncSema{}) {
print("runtime: bad syncSema size - sync=", sz, " runtime=", unsafe.Sizeof(syncSema{}), "\n")
- gothrow("bad syncSema size")
+ throw("bad syncSema size")
}
}
// sigtable should describe what to do for all the possible signals.
if len(sigtable) != _NSIG {
print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
- gothrow("initsig")
+ throw("initsig")
}
// First call: basic setup.
func sigpanic() {
g := getg()
if !canpanic(g) {
- gothrow("unexpected signal during runtime execution")
+ throw("unexpected signal during runtime execution")
}
switch g.sig {
panicmem()
}
print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
+ throw("fault")
case _SIGSEGV:
if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 || g.paniconfault {
panicmem()
}
print("unexpected fault address ", hex(g.sigcode1), "\n")
- gothrow("fault")
+ throw("fault")
case _SIGFPE:
switch g.sigcode0 {
case _FPE_INTDIV:
if g.sig >= uint32(len(sigtable)) {
// can't happen: we looked up g.sig in sigtable to decide to call sigpanic
- gothrow("unexpected signal value")
+ throw("unexpected signal value")
}
panic(errorString(sigtable[g.sig].name))
}
for {
switch atomicload(&sig.state) {
default:
- gothrow("sigsend: inconsistent state")
+ throw("sigsend: inconsistent state")
case sigIdle:
if cas(&sig.state, sigIdle, sigSending) {
break Send
for {
switch atomicload(&sig.state) {
default:
- gothrow("signal_recv: inconsistent state")
+ throw("signal_recv: inconsistent state")
case sigIdle:
if cas(&sig.state, sigIdle, sigReceiving) {
notetsleepg(&sig.note, -1)
var fptrace = 0
func fabort() {
- gothrow("unsupported floating point instruction")
+ throw("unsupported floating point instruction")
}
func fputf(reg uint32, val uint32) {
func stackinit() {
if _StackCacheSize&_PageMask != 0 {
- gothrow("cache size must be a multiple of page size")
+ throw("cache size must be a multiple of page size")
}
for i := range stackpool {
mSpanList_Init(&stackpool[i])
// no free stacks. Allocate another span worth.
s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
if s == nil {
- gothrow("out of memory")
+ throw("out of memory")
}
if s.ref != 0 {
- gothrow("bad ref")
+ throw("bad ref")
}
if s.freelist.ptr() != nil {
- gothrow("bad freelist")
+ throw("bad freelist")
}
for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
x := gclinkptr(uintptr(s.start)<<_PageShift + i)
}
x := s.freelist
if x.ptr() == nil {
- gothrow("span has no free stacks")
+ throw("span has no free stacks")
}
s.freelist = x.ptr().next
s.ref++
func stackpoolfree(x gclinkptr, order uint8) {
s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
if s.state != _MSpanStack {
- gothrow("freeing stack not in a stack span")
+ throw("freeing stack not in a stack span")
}
if s.freelist.ptr() == nil {
// s will now have a free stack
// Doing so would cause a deadlock (issue 1547).
thisg := getg()
if thisg != thisg.m.g0 {
- gothrow("stackalloc not on scheduler stack")
+ throw("stackalloc not on scheduler stack")
}
if n&(n-1) != 0 {
- gothrow("stack size not a power of 2")
+ throw("stack size not a power of 2")
}
if stackDebug >= 1 {
print("stackalloc ", n, "\n")
if debug.efence != 0 || stackFromSystem != 0 {
v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
if v == nil {
- gothrow("out of memory (stackalloc)")
+ throw("out of memory (stackalloc)")
}
return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
} else {
s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
if s == nil {
- gothrow("out of memory")
+ throw("out of memory")
}
v = (unsafe.Pointer)(s.start << _PageShift)
}
n := stk.hi - stk.lo
v := (unsafe.Pointer)(stk.lo)
if n&(n-1) != 0 {
- gothrow("stack not a power of 2")
+ throw("stack not a power of 2")
}
if stackDebug >= 1 {
println("stackfree", v, n)
s := mHeap_Lookup(&mheap_, v)
if s.state != _MSpanStack {
println(hex(s.start<<_PageShift), v)
- gothrow("bad span state")
+ throw("bad span state")
}
mHeap_FreeStack(&mheap_, s)
}
}
switch ptrbits(&bv, i) {
default:
- gothrow("unexpected pointer bits")
+ throw("unexpected pointer bits")
case _BitsDead:
if debug.gcdead != 0 {
*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
// Live analysis wrong?
getg().m.traceback = 2
print("runtime: bad pointer in frame ", gofuncname(f), " at ", add(scanp, i*ptrSize), ": ", p, "\n")
- gothrow("invalid stack pointer")
+ throw("invalid stack pointer")
}
if minp <= up && up < maxp {
if stackDebug >= 3 {
stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
if stackmap == nil || stackmap.n <= 0 {
print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
- gothrow("missing stackmap")
+ throw("missing stackmap")
}
// Locals bitmap information, scan just the pointers in locals.
if pcdata < 0 || pcdata >= stackmap.n {
// don't know where we are
print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
- gothrow("bad symbol table")
+ throw("bad symbol table")
}
bv = stackmapdata(stackmap, pcdata)
size = (uintptr(bv.n) * ptrSize) / _BitsPerPointer
stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
if stackmap == nil || stackmap.n <= 0 {
print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
- gothrow("missing stackmap")
+ throw("missing stackmap")
}
if pcdata < 0 || pcdata >= stackmap.n {
// don't know where we are
print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
- gothrow("bad symbol table")
+ throw("bad symbol table")
}
bv = stackmapdata(stackmap, pcdata)
}
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
- gothrow("stack growth not allowed in system call")
+ throw("stack growth not allowed in system call")
}
old := gp.stack
if old.lo == 0 {
- gothrow("nil stackbase")
+ throw("nil stackbase")
}
used := old.hi - gp.sched.sp
thisg := getg()
// TODO: double check all gp. shouldn't be getg().
if thisg.m.morebuf.g.stackguard0 == stackFork {
- gothrow("stack growth after fork")
+ throw("stack growth after fork")
}
if thisg.m.morebuf.g != thisg.m.curg {
print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
morebuf := thisg.m.morebuf
traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g)
- gothrow("runtime: wrong goroutine in newstack")
+ throw("runtime: wrong goroutine in newstack")
}
if thisg.m.curg.throwsplit {
gp := thisg.m.curg
print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
- gothrow("runtime: stack split at bad time")
+ throw("runtime: stack split at bad time")
}
// The goroutine must be executing in order to call newstack,
rewindmorestack(&gp.sched)
if gp.stack.lo == 0 {
- gothrow("missing stack in newstack")
+ throw("missing stack in newstack")
}
sp := gp.sched.sp
if thechar == '6' || thechar == '8' {
if sp < gp.stack.lo {
print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
- gothrow("runtime: split stack overflow")
+ throw("runtime: split stack overflow")
}
if gp.sched.ctxt != nil {
if gp.stackguard0 == stackPreempt {
if gp == thisg.m.g0 {
- gothrow("runtime: preempt g0")
+ throw("runtime: preempt g0")
}
if thisg.m.p == nil && thisg.m.locks == 0 {
- gothrow("runtime: g is running but p is not")
+ throw("runtime: g is running but p is not")
}
if gp.preemptscan {
for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
newsize := oldsize * 2
if uintptr(newsize) > maxstacksize {
print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
- gothrow("stack overflow")
+ throw("stack overflow")
}
casgstatus(gp, _Gwaiting, _Gcopystack)
return
}
if gp.stack.lo == 0 {
- gothrow("missing stack in shrinkstack")
+ throw("missing stack in shrinkstack")
}
oldsize := gp.stack.hi - gp.stack.lo
//go:nosplit
func morestackc() {
systemstack(func() {
- gothrow("attempt to execute C code on Go stack")
+ throw("attempt to execute C code on Go stack")
})
}
useStack(32)
panic("test panic")
}
+
+func BenchmarkStackCopy(b *testing.B) {
+ c := make(chan bool)
+ for i := 0; i < b.N; i++ {
+ go func() {
+ count(1000000)
+ c <- true
+ }()
+ <-c
+ }
+}
+
+func count(n int) int {
+ if n == 0 {
+ return 0
+ }
+ return 1 + count(n-1)
+}
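Assuming the benchmark above lives in the runtime package's stack tests, it can be run on its own with the standard test tool, for example:

	go test -run=NONE -bench=StackCopy runtime

Each iteration starts a goroutine that recurses a million calls deep, so the time per iteration is dominated by growing (and, with copying stacks, copying) that goroutine's stack.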
continue
}
if l+n < l {
- gothrow("string concatenation too long")
+ throw("string concatenation too long")
}
l += n
count++
// rawruneslice allocates a new rune slice. The rune slice is not zeroed.
func rawruneslice(size int) (b []rune) {
if uintptr(size) > _MaxMem/4 {
- gothrow("out of memory")
+ throw("out of memory")
}
mem := goroundupsize(uintptr(size) * 4)
p := mallocgc(mem, nil, flagNoScan|flagNoZero)
func systemstack(fn func())
func badsystemstack() {
- gothrow("systemstack called from unexpected goroutine")
+ throw("systemstack called from unexpected goroutine")
}
// memclr clears n bytes starting at ptr.
pcln32 := (*[2]uint32)(unsafe.Pointer(&pclntab))
if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != _PCQuantum || pcln[7] != ptrSize {
println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
- gothrow("invalid function symbol table\n")
+ throw("invalid function symbol table\n")
}
// pclntable is all bytes of pclntab symbol.
for j := 0; j <= i; j++ {
print("\t", hex(ftab[j].entry), " ", gofuncname((*_func)(unsafe.Pointer(&pclntable[ftab[j].funcoff]))), "\n")
}
- gothrow("invalid runtime symbol table")
+ throw("invalid runtime symbol table")
}
}
}
}
- gothrow("findfunc: binary search failed")
+ throw("findfunc: binary search failed")
return nil
}
print("\tvalue=", val, " until pc=", hex(pc), "\n")
}
- gothrow("invalid runtime symbol table")
+ throw("invalid runtime symbol table")
return -1
}
// and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
if buf.lr != 0 {
- gothrow("invalid use of gostartcall")
+ throw("invalid use of gostartcall")
}
buf.lr = buf.pc
buf.pc = uintptr(fn)
}
print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
- gothrow("runtime: misuse of rewindmorestack")
+ throw("runtime: misuse of rewindmorestack")
}
// and then did an immediate Gosave.
func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
if buf.lr != 0 {
- gothrow("invalid use of gostartcall")
+ throw("invalid use of gostartcall")
}
buf.lr = buf.pc
buf.pc = uintptr(fn)
}
}
print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
- gothrow("runtime: misuse of rewindmorestack")
+ throw("runtime: misuse of rewindmorestack")
}
return
}
print("runtime: pc=", pc, " ", hex(pc[0]), " ", hex(pc[1]), " ", hex(pc[2]), " ", hex(pc[3]), " ", hex(pc[4]), "\n")
- gothrow("runtime: misuse of rewindmorestack")
+ throw("runtime: misuse of rewindmorestack")
}
}
}
if n >= cb_max {
- gothrow("too many callback functions")
+ throw("too many callback functions")
}
c := new(wincallbackcontext)
f := findfunc(frame.pc)
if f == nil {
print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
- gothrow("unknown pc")
+ throw("unknown pc")
}
frame.fn = f
frame.argp = uintptr(deferArgs(d))
// duplicating the code and all its subtlety.
func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
if goexitPC == 0 {
- gothrow("gentraceback before goexitPC initialization")
+ throw("gentraceback before goexitPC initialization")
}
g := getg()
if g == gp && g == g.m.curg {
// accepts an sp for the current goroutine (typically obtained by
// calling getcallersp) must not run on that goroutine's stack but
// instead on the g0 stack.
- gothrow("gentraceback cannot trace user goroutine on its own stack")
+ throw("gentraceback cannot trace user goroutine on its own stack")
}
gotraceback := gotraceback(nil)
if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
if f == nil {
if callback != nil {
print("runtime: unknown pc ", hex(frame.pc), "\n")
- gothrow("unknown pc")
+ throw("unknown pc")
}
return 0
}
// to avoid that confusion.
// See golang.org/issue/8153.
if callback != nil {
- gothrow("traceback_arm: found jmpdefer when tracing with callback")
+ throw("traceback_arm: found jmpdefer when tracing with callback")
}
frame.lr = 0
} else {
// get everything, so crash loudly.
if callback != nil {
print("runtime: unexpected return pc for ", gofuncname(f), " called from ", hex(frame.lr), "\n")
- gothrow("unknown caller pc")
+ throw("unknown caller pc")
}
}
}
for _defer = gp._defer; _defer != nil; _defer = _defer.link {
print("\tdefer ", _defer, " sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
}
- gothrow("traceback has leftover defers")
+ throw("traceback has leftover defers")
}
return n
fn := *(**[2]uintptr)(unsafe.Pointer(arg0))
if fn[0] != f.entry {
print("runtime: confused by ", gofuncname(f), "\n")
- gothrow("reflect mismatch")
+ throw("reflect mismatch")
}
bv := (*bitvector)(unsafe.Pointer(fn[1]))
frame.arglen = uintptr(bv.n / 2 * ptrSize)