for (tries = 0; tries < 20; tries++) {
err = pthread_create(thread, attr, pfn, arg);
+ if (err == 0) {
+ pthread_detach(*thread);
+ return 0;
+ }
if (err != EAGAIN) {
return err;
}
}
}
+func exitThread(wait *uint32) {
+ // We should never reach exitThread on Solaris because we let
+ // libc clean up threads.
+ // (mexit only calls exitThread when osStack is false, and on
+ // Solaris mstart always forces osStack to true.)
+ throw("exitThread")
+}
+
var urandom_dev = []byte("/dev/urandom\x00")
//go:nosplit
}
}
+//go:noescape
+func exitThread(wait *uint32)
+
//go:nosplit
func semacreate(mp *m) {
if mp.waitsema != 0 {
_UC_SIGMASK = 0x01
_UC_CPU = 0x04
+ // From <sys/lwp.h>
+ _LWP_DETACHED = 0x00000040
+
_EAGAIN = 35
)
lwp_mcontext_init(&uc.uc_mcontext, stk, mp, mp.g0, funcPC(netbsdMstart))
- ret := lwp_create(unsafe.Pointer(&uc), 0, unsafe.Pointer(&mp.procid))
+ ret := lwp_create(unsafe.Pointer(&uc), _LWP_DETACHED, unsafe.Pointer(&mp.procid))
sigprocmask(_SIG_SETMASK, &oset, nil)
if ret < 0 {
print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
}
}
+func exitThread(wait *uint32) {
+ // We should never reach exitThread on Plan 9 because we let
+ // the OS clean up threads.
+ // NOTE(review): the Plan 9 thread trampoline (tstart_plan9)
+ // calls exits itself after mstart returns.
+ throw("exitThread")
+}
+
//go:nosplit
func semacreate(mp *m) {
}
print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", getlasterror(), ")\n")
throw("runtime.newosproc")
}
+
+ // Close thandle to avoid leaking the thread object if it exits.
+ stdcall1(_CloseHandle, thandle)
}
// Used by the C library build mode. On Linux this function would allocate a
newosproc(mp, stk)
}
+func exitThread(wait *uint32) {
+ // We should never reach exitThread on Windows because we let
+ // the OS clean up threads.
+ // (mexit sets osStack on Windows and simply returns from
+ // mstart, letting the OS reclaim the thread and its stack.)
+ throw("exitThread")
+}
+
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
func mpreinit(mp *m) {
func mstart() {
_g_ := getg()
- if _g_.stack.lo == 0 {
+ osStack := _g_.stack.lo == 0
+ if osStack {
// Initialize stack bounds from system stack.
// Cgo may have left stack size in stack.hi.
size := _g_.stack.hi
// both Go and C functions with stack growth prologues.
_g_.stackguard0 = _g_.stack.lo + _StackGuard
_g_.stackguard1 = _g_.stackguard0
- mstart1()
+ mstart1(0)
+
+ // Exit this thread.
+ if GOOS == "windows" || GOOS == "solaris" {
+ // Windows and Solaris always system-allocate the
+ // stack, but put it in _g_.stack before mstart, so
+ // the logic above hasn't set osStack yet.
+ osStack = true
+ }
+ mexit(osStack)
}
-func mstart1() {
+func mstart1(dummy int32) {
_g_ := getg()
if _g_ != _g_.m.g0 {
throw("bad runtime·mstart")
}
- // Record top of stack for use by mcall.
- // Once we call schedule we're never coming back,
- // so other calls can reuse this stack space.
- gosave(&_g_.m.g0.sched)
- _g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
+ // Record the caller for use as the top of stack in mcall and
+ // for terminating the thread.
+ // We're never coming back to mstart1 after we call schedule,
+ // so other calls can reuse the current frame.
+ save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
asminit()
minit()
initsig(false)
}
+// mexit tears down and exits the current thread.
+//
+// Don't call this directly to exit the thread, since it must run at
+// the top of the thread stack. Instead, use gogo(&_g_.m.g0.sched) to
+// unwind the stack to the point that exits the thread.
+//
+// It is entered with m.p != nil, so write barriers are allowed. It
+// will release the P before exiting.
+//
+//go:yeswritebarrierrec
+func mexit(osStack bool) {
+ g := getg()
+ m := g.m
+
+ if m == &m0 {
+ // This is the main thread. Just wedge it.
+ //
+ // On Linux, exiting the main thread puts the process
+ // into a non-waitable zombie state. On Plan 9,
+ // exiting the main thread unblocks wait even though
+ // other threads are still running. On Solaris we can
+ // neither exitThread nor return from mstart. Other
+ // bad things probably happen on other platforms.
+ //
+ // We could try to clean up this M more before wedging
+ // it, but that complicates signal handling.
+ handoffp(releasep())
+ lock(&sched.lock)
+ sched.nmfreed++
+ checkdead()
+ unlock(&sched.lock)
+ notesleep(&m.park)
+ throw("locked m0 woke up")
+ }
+
+ // Block signals before unminit so no signal handler runs on
+ // this M while it is being torn down.
+ sigblock()
+ unminit()
+
+ // Free the gsignal stack.
+ if m.gsignal != nil {
+ stackfree(m.gsignal.stack)
+ }
+
+ // Remove m from allm.
+ lock(&sched.lock)
+ for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
+ if *pprev == m {
+ *pprev = m.alllink
+ goto found
+ }
+ }
+ throw("m not found in allm")
+found:
+ if !osStack {
+ // Delay reaping m until it's done with the stack.
+ //
+ // If this is using an OS stack, the OS will free it
+ // so there's no need for reaping.
+ atomic.Store(&m.freeWait, 1)
+ // Put m on the free list, though it will not be reaped until
+ // freeWait is 0. Note that the free list must not be linked
+ // through alllink because some functions walk allm without
+ // locking, so may be using alllink.
+ m.freelink = sched.freem
+ sched.freem = m
+ }
+ unlock(&sched.lock)
+
+ // Release the P.
+ handoffp(releasep())
+ // After this point we must not have write barriers.
+
+ // Invoke the deadlock detector. This must happen after
+ // handoffp because it may have started a new M to take our
+ // P's work.
+ lock(&sched.lock)
+ sched.nmfreed++
+ checkdead()
+ unlock(&sched.lock)
+
+ if osStack {
+ // Return from mstart and let the system thread
+ // library free the g0 stack and terminate the thread.
+ return
+ }
+
+ // mstart is the thread's entry point, so there's nothing to
+ // return to. Exit the thread directly. exitThread will clear
+ // m.freeWait when it's done with the stack and the m can be
+ // reaped.
+ exitThread(&m.freeWait)
+}
+
// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
if _g_.m.p == 0 {
acquirep(_p_) // temporarily borrow p for mallocs in this function
}
+
+ // Release the free M list. We need to do this somewhere and
+ // this may free up a stack we can use.
+ if sched.freem != nil {
+ lock(&sched.lock)
+ var newList *m
+ for freem := sched.freem; freem != nil; {
+ if freem.freeWait != 0 {
+ next := freem.freelink
+ freem.freelink = newList
+ newList = freem
+ freem = next
+ continue
+ }
+ stackfree(freem.g0.stack)
+ freem = freem.freelink
+ }
+ sched.freem = newList
+ unlock(&sched.lock)
+ }
+
mp := new(m)
mp.mstartfn = fn
mcommoninit(mp)
}
func mcount() int32 {
- return int32(sched.mnext)
+ // mnext counts every M ever created; nmfreed counts those that
+ // have since exited, so the difference is the live M count.
+ return int32(sched.mnext - sched.nmfreed)
}
var prof struct {
// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
+// sched.lock must be held.
func checkdead() {
// For -buildmode=c-shared or -buildmode=c-archive it's OK if
// there are no running goroutines. The calling program is
// a word that is completely ignored by the GC than to have one for which
// only a few updates are ignored.
//
-// Gs, Ms, and Ps are always reachable via true pointers in the
-// allgs, allm, and allp lists or (during allocation before they reach those lists)
+// Gs and Ps are always reachable via true pointers in the
+// allgs and allp lists or (during allocation before they reach those lists)
// from stack variables.
+//
+// Ms are always reachable via true pointers either from allm or
+// freem. Unlike Gs and Ps we do free Ms, so it's important that
+// nothing ever hold an muintptr across a safe point.
// A guintptr holds a goroutine pointer, but typed as a uintptr
// to bypass write barriers. It is used in the Gobuf goroutine state
//go:nosplit
func (pp *puintptr) set(p *p) { *pp = puintptr(unsafe.Pointer(p)) }
+// muintptr is a *m that is not tracked by the garbage collector.
+//
+// Because we do free Ms, there are some additional constraints on
+// muintptrs:
+//
+// 1. Never hold an muintptr locally across a safe point.
+//
+// 2. Any muintptr in the heap must be owned by the M itself so it can
+// ensure it is not in use when the last true *m is released.
type muintptr uintptr
//go:nosplit
inwb bool // m is executing a write barrier
newSigstack bool // minit on C thread called sigaltstack
printlock int8
- incgo bool // m is executing a cgo call
+ incgo bool // m is executing a cgo call
+ freeWait uint32 // if == 0, safe to free g0 and delete m (atomic)
fastrand [2]uint32
needextram bool
traceback uint8
startingtrace bool
syscalltick uint32
thread uintptr // thread handle
+ freelink *m // on sched.freem
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
lock mutex
+ // When increasing nmidle, nmidlelocked, nmsys, or nmfreed, be
+ // sure to call checkdead().
+
midle muintptr // idle m's waiting for work
nmidle int32 // number of idle m's waiting for work
nmidlelocked int32 // number of locked m's waiting for work
mnext int64 // number of m's that have been created and next M ID
maxmcount int32 // maximum number of m's allowed (or die)
nmsys int32 // number of system m's not counted for deadlock
+ nmfreed int64 // cumulative number of freed m's
ngsys uint32 // number of system goroutines; updated atomically
deferlock mutex
deferpool [5]*_defer
+ // freem is the list of m's waiting to be freed when their
+ // m.freeWait is cleared to 0. Linked through m.freelink.
+ freem *m
+
gcwaiting uint32 // gc is waiting to run
stopwait int32
stopnote note
//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
-func exit1(code int32)
func asminit()
func setg(gg *g)
func breakpoint()
func open(name *byte, mode, perm int32) int32
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
+
+// exitThread terminates the current thread, writing *wait = 0 when
+// the stack is safe to reclaim. It does not return.
+//
+//go:noescape
+func exitThread(wait *uint32)
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
-TEXT runtime·exit1(SB),NOSPLIT,$16-0
+TEXT exit1<>(SB),NOSPLIT,$16-0
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVL $0, 0(SP)
MOVL $0xf1, 0xf1 // crash
RET
+GLOBL exitStack<>(SB),RODATA,$(4*4)
+DATA exitStack<>+0x00(SB)/4, $0
+DATA exitStack<>+0x04(SB)/4, $0
+DATA exitStack<>+0x08(SB)/4, $0
+DATA exitStack<>+0x0c(SB)/4, $0
+
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVL wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // __bsdthread_terminate takes 4 arguments, which it expects
+ // on the stack. They should all be 0, so switch over to a
+ // fake stack of 0s. It won't write to the stack.
+ // (exitStack living in RODATA is fine for that reason.)
+ MOVL $exitStack<>(SB), SP
+ MOVL $361, AX // __bsdthread_terminate
+ INT $0x80
+ MOVL $0xf1, 0xf1 // crash
+ JMP 0(PC)
+
TEXT runtime·open(SB),NOSPLIT,$0
MOVL $5, AX
INT $0x80
MOVL BX, m_procid(DX) // m->procid = thread port (for debuggers)
CALL runtime·stackcheck(SB) // smashes AX
CALL CX // fn()
- CALL runtime·exit1(SB)
+ CALL exit1<>(SB)
RET
// func bsdthread_register() int32
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
-TEXT runtime·exit1(SB),NOSPLIT,$0
+TEXT exit1<>(SB),NOSPLIT,$0
+ // Because of exitThread below, this must not use the stack.
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVL $0, DI
MOVL $0xf1, 0xf1 // crash
RET
-
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVQ wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // exit1<> does not use the stack, so jumping to it is safe.
+ JMP exit1<>(SB)
TEXT runtime·open(SB),NOSPLIT,$0
MOVQ name+0(FP), DI // arg 1 pathname
MOVQ CX, g_m(AX)
CALL runtime·stackcheck(SB) // smashes AX, CX
CALL DX // fn
- CALL runtime·exit1(SB)
+ CALL exit1<>(SB)
RET
// func bsdthread_register() int32
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
-TEXT runtime·exit1(SB),NOSPLIT,$0
+TEXT exit1<>(SB),NOSPLIT,$0
+ // Because of exitThread below, this must not use the stack.
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVW $0, R0
MOVW $1003, R1
MOVW R0, (R1) // fail hard
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVW wait+0(FP), R0
+ // We're done using the stack.
+ // Store the value from R2 with STREX status in R1: the two
+ // registers must be distinct (Rd == Rt is UNPREDICTABLE), and
+ // reusing R1 for both would clobber the zero on a retry.
+ MOVW $0, R2
+storeloop:
+ LDREX (R0), R4 // loads R4
+ STREX R2, (R0), R1 // stores R2
+ CMP $0, R1
+ BNE storeloop
+ JMP exit1<>(SB)
+
TEXT runtime·raise(SB),NOSPLIT,$0
// Ideally we'd send the signal to the current thread,
// not the whole process, but that's too hard on OS X.
EOR R12, R12
WORD $0xeee1ca10 // fmxr fpscr, ip
BL (R2) // fn
- BL runtime·exit1(SB)
+ BL exit1<>(SB)
RET
// int32 bsdthread_register(void)
// Exit this OS thread (like pthread_exit, which eventually
// calls __bsdthread_terminate).
-TEXT runtime·exit1(SB),NOSPLIT,$0
+TEXT exit1<>(SB),NOSPLIT,$0
+ // Because of exitThread below, this must not use the stack.
// __bsdthread_terminate takes 4 word-size arguments.
// Set them all to 0. (None are an exit status.)
MOVW $0, R0
MOVD $1003, R1
MOVD R0, (R1) // fail hard
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVD wait+0(FP), R0
+ // We're done using the stack.
+ MOVW $0, R1
+ STLRW R1, (R0) // store-release: publish the zero before exiting
+ JMP exit1<>(SB)
+
TEXT runtime·raise(SB),NOSPLIT,$0
// Ideally we'd send the signal to the current thread,
// not the whole process, but that's too hard on OS X.
MOVL $0xf1, 0xf1 // crash
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-8
- MOVL code+0(FP), DI // arg 1 exit status
- MOVL $431, AX
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVQ wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // extexit(EXTEXIT_LWP, 0, NULL) exits only this LWP.
+ MOVL $0x10000, DI // arg 1 how - EXTEXIT_LWP
+ MOVL $0, SI // arg 2 status
+ MOVL $0, DX // arg 3 addr
+ MOVL $494, AX // extexit
SYSCALL
MOVL $0xf1, 0xf1 // crash
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
MOVL $0xf1, 0xf1 // crash
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-4
- MOVL $431, AX
+GLOBL exitStack<>(SB),RODATA,$8
+DATA exitStack<>+0x00(SB)/4, $0
+DATA exitStack<>+0x04(SB)/4, $0
+
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVL wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // thr_exit takes a single pointer argument, which it expects
+ // on the stack. We want to pass 0, so switch over to a fake
+ // stack of 0s. It won't write to the stack.
+ // (exitStack living in RODATA is fine for that reason.)
+ MOVL $exitStack<>(SB), SP
+ MOVL $431, AX // thr_exit
INT $0x80
- JAE 2(PC)
MOVL $0xf1, 0xf1 // crash
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVL $5, AX
MOVL $0xf1, 0xf1 // crash
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-8
- MOVL code+0(FP), DI // arg 1 exit status
- MOVL $431, AX
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVQ wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // NOTE(review): state is NULL — *wait was already cleared above.
+ MOVL $0, DI // arg 1 long *state
+ MOVL $431, AX // thr_exit
SYSCALL
MOVL $0xf1, 0xf1 // crash
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
MOVW.CS R8, (R8)
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-8
- MOVW code+0(FP), R0 // arg 1 exit status
- MOVW $SYS_thr_exit, R7
- SWI $0
- MOVW.CS $0, R8 // crash on syscall failure
- MOVW.CS R8, (R8)
- RET
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVW wait+0(FP), R0
+ // We're done using the stack.
+ // Store the value from R2 with STREX status in R1: the two
+ // registers must be distinct (Rd == Rt is UNPREDICTABLE), and
+ // reusing R1 for both would clobber the zero on a retry.
+ MOVW $0, R2
+storeloop:
+ LDREX (R0), R4 // loads R4
+ STREX R2, (R0), R1 // stores R2
+ CMP $0, R1
+ BNE storeloop
+ MOVW $0, R0 // arg 1 long *state
+ MOVW $SYS_thr_exit, R7
+ SWI $0
+ MOVW.CS $0, R8 // crash on syscall failure
+ MOVW.CS R8, (R8)
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVW name+0(FP), R0 // arg 1 name
INT $3 // not reached
RET
-TEXT runtime·exit1(SB),NOSPLIT,$0
+TEXT exit1<>(SB),NOSPLIT,$0
+ // NOTE(review): exit1 is now file-local; the clone trampoline's
+ // CALL exit1<>(SB) is its remaining caller in this file.
MOVL $SYS_exit, AX
MOVL code+0(FP), BX
INVOKE_SYSCALL
INT $3 // not reached
RET
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVL wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ MOVL $1, AX // exit (just this thread)
+ MOVL $0, BX // exit code
+ // NOTE(review): raw INT $0x80 rather than INVOKE_SYSCALL — the
+ // vdso entry path could touch the stack; confirm.
+ INT $0x80 // no stack; must not use CALL
+ // We may not even have a stack any more.
+ INT $3
+ JMP 0(PC)
+
TEXT runtime·open(SB),NOSPLIT,$0
MOVL $SYS_open, AX
MOVL name+0(FP), BX
nog:
CALL SI // fn()
- CALL runtime·exit1(SB)
+ CALL exit1<>(SB)
MOVL $0x1234, 0x1005
TEXT runtime·sigaltstack(SB),NOSPLIT,$-8
SYSCALL
RET
-TEXT runtime·exit1(SB),NOSPLIT,$0-4
- MOVL code+0(FP), DI
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVQ wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // SYS_exit terminates only this thread (cf. SYS_exit_group).
+ MOVL $0, DI // exit code
MOVL $SYS_exit, AX
SYSCALL
- RET
+ // We may not even have a stack any more.
+ INT $3
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0-20
MOVQ name+0(FP), DI
MOVW $1002, R1
MOVW R0, (R1) // fail hard
-TEXT runtime·exit1(SB),NOSPLIT,$-4
+TEXT exit1<>(SB),NOSPLIT,$-4
MOVW code+0(FP), R0
MOVW $SYS_exit, R7
SWI $0
MOVW $1003, R1
MOVW R0, (R1) // fail hard
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$-4-4
+ MOVW wait+0(FP), R0
+ // We're done using the stack.
+ // Alas, there's no reliable way to make this write atomic
+ // without potentially using the stack. So it goes.
+ // NOTE(review): plain store rather than LDREX/STREX — older ARM
+ // (pre-v6) lacks those instructions; confirm minimum GOARM.
+ MOVW $0, R1
+ MOVW R1, (R0)
+ MOVW $0, R0 // exit code
+ MOVW $SYS_exit, R7
+ SWI $0
+ MOVW $1234, R0
+ MOVW $1004, R1
+ MOVW R0, (R1) // fail hard
+ JMP 0(PC)
+
TEXT runtime·gettid(SB),NOSPLIT,$0-4
MOVW $SYS_gettid, R7
SWI $0
SUB $16, R13 // restore the stack pointer to avoid memory corruption
MOVW $0, R0
MOVW R0, 4(R13)
- BL runtime·exit1(SB)
+ BL exit1<>(SB)
MOVW $1234, R0
MOVW $1005, R1
SVC
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-8-4
- MOVW code+0(FP), R0
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$-8-8
+ MOVD wait+0(FP), R0
+ // We're done using the stack.
+ MOVW $0, R1
+ STLRW R1, (R0) // store-release: publish the zero before exiting
+ MOVW $0, R0 // exit code
MOVD $SYS_exit, R8
SVC
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8-20
MOVD $AT_FDCWD, R0
SYSCALL
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-8-4
- MOVW code+0(FP), R4
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$-8-8
+ MOVV wait+0(FP), R1
+ // We're done using the stack.
+ MOVW $0, R2
+ SYNC
+ MOVW R2, (R1)
+ SYNC // flush the store to *wait before the thread exits
+ MOVW $0, R4 // exit code
MOVV $SYS_exit, R2
SYSCALL
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8-20
MOVV name+0(FP), R4
UNDEF
RET
-TEXT runtime·exit1(SB),NOSPLIT,$0-4
- MOVW code+0(FP), R4
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVW wait+0(FP), R1
+ // We're done using the stack.
+ MOVW $0, R2
+ SYNC
+ MOVW R2, (R1)
+ SYNC // flush the store to *wait before the thread exits
+ MOVW $0, R4 // exit code
MOVW $SYS_exit, R2
SYSCALL
UNDEF
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0-16
MOVW name+0(FP), R4
SYSCALL $SYS_exit_group
RET
-TEXT runtime·exit1(SB),NOSPLIT|NOFRAME,$0-4
- MOVW code+0(FP), R3
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
+ MOVD wait+0(FP), R1
+ // We're done using the stack.
+ MOVW $0, R2
+ SYNC // order the store to *wait before the exit syscall
+ MOVW R2, (R1)
+ MOVW $0, R3 // exit code
SYSCALL $SYS_exit
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
MOVD name+0(FP), R3
SYSCALL
RET
-TEXT runtime·exit1(SB),NOSPLIT|NOFRAME,$0-4
- MOVW code+0(FP), R2
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
+ MOVD wait+0(FP), R1
+ // We're done using the stack.
+ MOVW $0, R2
+ // NOTE(review): no barrier here, unlike mips/ppc64 — presumably
+ // because s390x stores are strongly ordered; confirm.
+ MOVW R2, (R1)
+ MOVW $0, R2 // exit code
MOVW $SYS_exit, R1
SYSCALL
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
MOVD name+0(FP), R2
NACL_SYSCALL(SYS_exit)
JMP 0(PC)
-TEXT runtime·exit1(SB),NOSPLIT,$4
- MOVL code+0(FP), AX
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$4-4
+ MOVL wait+0(FP), AX
+ // SYS_thread_exit will clear *wait when the stack is free.
+ // (So unlike other OSes we must not clear it ourselves — the
+ // stack is still in use here.)
MOVL AX, 0(SP)
NACL_SYSCALL(SYS_thread_exit)
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$12
MOVL name+0(FP), AX
NACL_SYSCALL(SYS_exit)
RET
-TEXT runtime·exit1(SB),NOSPLIT,$0
- MOVL code+0(FP), DI
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVL wait+0(FP), DI
+ // SYS_thread_exit will clear *wait when the stack is free.
+ // (So unlike other OSes we must not clear it ourselves.)
NACL_SYSCALL(SYS_thread_exit)
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0
MOVL name+0(FP), DI
NACL_SYSCALL(SYS_exit)
RET
-TEXT runtime·exit1(SB),NOSPLIT,$0
- MOVW code+0(FP), R0
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$4-4
+ MOVW wait+0(FP), R0
+ // SYS_thread_exit will clear *wait when the stack is free.
+ // (So unlike other OSes we must not clear it ourselves.)
NACL_SYSCALL(SYS_thread_exit)
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$0
MOVW name+0(FP), R0
MOVL $0xf1, 0xf1 // crash
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-4
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVL wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // _lwp_exit does not return on success; fall through to crash.
MOVL $310, AX // sys__lwp_exit
INT $0x80
- JAE 2(PC)
MOVL $0xf1, 0xf1 // crash
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVL $5, AX
// Call fn
CALL SI
- CALL runtime·exit1(SB)
+ // fn should never return
MOVL $0x1234, 0x1005
RET
MOVL $0xf1, 0xf1 // crash
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-8
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVQ wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // _lwp_exit does not return on success; fall through to crash.
MOVL $310, AX // sys__lwp_exit
SYSCALL
MOVL $0xf1, 0xf1 // crash
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
MOVW.CS R8, (R8)
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-4
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVW wait+0(FP), R0
+ // We're done using the stack.
+ // Store the value from R2 with STREX status in R1: the two
+ // registers must be distinct (Rd == Rt is UNPREDICTABLE), and
+ // reusing R1 for both would clobber the zero on a retry.
+ MOVW $0, R2
+storeloop:
+ LDREX (R0), R4 // loads R4
+ STREX R2, (R0), R1 // stores R2
+ CMP $0, R1
+ BNE storeloop
SWI $0xa00136 // sys__lwp_exit
MOVW $1, R8 // crash
MOVW R8, (R8)
- RET
-
+ JMP 0(PC)
+
TEXT runtime·open(SB),NOSPLIT,$-8
MOVW name+0(FP), R0
MOVW mode+4(FP), R1
MOVL $0xf1, 0xf1 // crash
RET
-TEXT runtime·exit1(SB),NOSPLIT,$8
- MOVL $0, 0(SP)
- MOVL $0, 4(SP) // arg 1 - notdead
+GLOBL exitStack<>(SB),RODATA,$8
+DATA exitStack<>+0x00(SB)/4, $0
+DATA exitStack<>+0x04(SB)/4, $0
+
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVL wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // sys___threxit takes 1 argument, which it expects on the stack.
+ MOVL $exitStack<>(SB), SP
MOVL $302, AX // sys___threxit
INT $0x80
- JAE 2(PC)
MOVL $0xf1, 0xf1 // crash
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVL $5, AX
// Call fn.
CALL SI
- CALL runtime·exit1(SB)
+ // fn should never return.
MOVL $0x1234, 0x1005
RET
MOVL $0xf1, 0xf1 // crash
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-8
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-8
+ MOVQ wait+0(FP), AX
+ // We're done using the stack.
+ MOVL $0, (AX)
+ // NOTE(review): notdead is NULL — *wait was already cleared above.
MOVQ $0, DI // arg 1 - notdead
MOVL $302, AX // sys___threxit
SYSCALL
MOVL $0xf1, 0xf1 // crash
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-8
MOVQ name+0(FP), DI // arg 1 pathname
MOVW.CS R8, (R8)
RET
-TEXT runtime·exit1(SB),NOSPLIT,$-4
+// func exitThread(wait *uint32)
+TEXT runtime·exitThread(SB),NOSPLIT,$0-4
+ MOVW wait+0(FP), R0
+ // We're done using the stack.
+ // Store the value from R2 with STREX status in R1: the two
+ // registers must be distinct (Rd == Rt is UNPREDICTABLE), and
+ // reusing R1 for both would clobber the zero on a retry.
+ MOVW $0, R2
+storeloop:
+ LDREX (R0), R4 // loads R4
+ STREX R2, (R0), R1 // stores R2
+ CMP $0, R1
+ BNE storeloop
MOVW $0, R0 // arg 1 - notdead
MOVW $302, R12 // sys___threxit
SWI $0
MOVW.CS $1, R8 // crash on syscall failure
MOVW.CS R8, (R8)
- RET
+ JMP 0(PC)
TEXT runtime·open(SB),NOSPLIT,$-4
MOVW name+0(FP), R0 // arg 1 - path
// Call fn.
BL (R6)
- BL runtime·exit1(SB)
+ // fn should never return.
MOVW $2, R8 // crash if reached
MOVW R8, (R8)
RET
MOVL AX, ret+4(FP)
RET
-TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
+TEXT runtime·tstart_plan9(SB),NOSPLIT,$4
MOVL newm+0(FP), CX
MOVL m_g0(CX), DX
CALL runtime·stackcheck(SB) // smashes AX, CX
CALL runtime·mstart(SB)
- MOVL $0x1234, 0x1234 // not reached
- RET
+ // Exit the thread.
+ MOVL $0, 0(SP)
+ CALL runtime·exits(SB)
+ JMP 0(PC)
// void sigtramp(void *ureg, int8 *note)
TEXT runtime·sigtramp(SB),NOSPLIT,$0
MOVL AX, ret+8(FP)
RET
-TEXT runtime·tstart_plan9(SB),NOSPLIT,$0
+TEXT runtime·tstart_plan9(SB),NOSPLIT,$8
MOVQ newm+0(FP), CX
MOVQ m_g0(CX), DX
CALL runtime·stackcheck(SB) // smashes AX, CX
CALL runtime·mstart(SB)
- MOVQ $0x1234, 0x1234 // not reached
- RET
+ // Exit the thread.
+ MOVQ $0, 0(SP)
+ CALL runtime·exits(SB)
+ JMP 0(PC)
// This is needed by asm_amd64.s
TEXT runtime·settls(SB),NOSPLIT,$0
RET
//func tstart_plan9(newm *m)
-TEXT runtime·tstart_plan9(SB),NOSPLIT,$0-4
+TEXT runtime·tstart_plan9(SB),NOSPLIT,$4-4
MOVW newm+0(FP), R1
MOVW m_g0(R1), g
BL runtime·mstart(SB)
- MOVW $0x1234, R0
- MOVW R0, 0(R0) // not reached
- RET
+ // Exit the thread.
+ MOVW $0, R0
+ MOVW R0, 4(R13)
+ CALL runtime·exits(SB)
+ JMP 0(PC)
//func sigtramp(ureg, note unsafe.Pointer)
TEXT runtime·sigtramp(SB),NOSPLIT,$0-8