"unsafe"
)
-/*
- * defined constants
- */
+// defined constants
const (
// G status
//
_Pdead
)
+// Mutual exclusion locks. In the uncontended case,
+// as fast as spin locks (just a few user-level instructions),
+// but on the contention path they sleep in the kernel.
+// A zeroed Mutex is unlocked (no need to initialize each lock).
type mutex struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
key uintptr
}
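// Illustrative sketch only (not part of this file): because the zero
// value is unlocked, a mutex needs no initialization before its first
// use with the runtime-internal lock/unlock helpers.
//
//	var l mutex // zeroed, therefore unlocked
//	lock(&l)    // uncontended: a few user-level atomic instructions
//	// ... critical section ...
//	unlock(&l)  // contended path: wakes an M sleeping in the kernel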
+// sleep and wakeup on one-time events.
+// before any calls to notesleep or notewakeup,
+// must call noteclear to initialize the Note.
+// then, exactly one thread can call notesleep
+// and exactly one thread can call notewakeup (once).
+// once notewakeup has been called, the notesleep
+// will return. future notesleep will return immediately.
+// subsequent noteclear must be called only after
+// previous notesleep has returned, e.g. it's disallowed
+// to call noteclear straight after notewakeup.
+//
+// notetsleep is like notesleep but wakes up after
+// a given number of nanoseconds even if the event
+// has not yet happened. if a goroutine uses notetsleep to
+// wake up early, it must wait to call noteclear until it
+// can be sure that no other goroutine is calling
+// notewakeup.
+//
+// notesleep/notetsleep are generally called on g0,
+// notetsleepg is similar to notetsleep but is called on user g.
type note struct {
// Futex-based impl treats it as uint32 key,
// while sema-based impl as M* waitm.
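// Illustrative sketch only (not part of this file): the protocol described
// above, written out with the runtime-internal helpers. notetsleepg is used
// here so the sleeping side can be an ordinary user g; a negative ns means
// no timeout.
//
//	var done note
//	noteclear(&done)       // initialize before any sleep or wakeup
//	// ... hand &done to the thread that will call notewakeup(&done) exactly once ...
//	notetsleepg(&done, -1) // returns once notewakeup has been called
//	// only after the sleep returns is it safe to noteclear and reuse the note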
waittraceskip int
startingtrace bool
syscalltick uint32
- //#ifdef GOOS_windows
- thread uintptr // thread handle
+ thread uintptr // thread handle
+
// these are here because they are too large to be on the stack
// of low-level NOSPLIT functions.
libcall libcall
libcallsp uintptr
libcallg guintptr
syscall libcall // stores syscall parameters on windows
- //#endif
+
mOS
}
totaltime int64 // ∫gomaxprocs dt up to procresizetime
}
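// Illustrative sketch only: the integral above is maintained piecewise
// whenever gomaxprocs changes, roughly as procresize does it in proc.go
// (paraphrased, not a verbatim quote; old is the previous gomaxprocs value):
//
//	now := nanotime()
//	if sched.procresizetime != 0 {
//		sched.totaltime += int64(old) * (now - sched.procresizetime)
//	}
//	sched.procresizetime = now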
-// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
+// The m.locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
-// The upper bits of m->locked record the nesting depth of calls to lockOSThread
+// The upper bits of m.locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
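// Illustrative sketch only, assuming the _LockExternal (1) and
// _LockInternal (2) constants that accompany this comment:
//
//	// for some mp *m:
//	external := mp.locked&_LockExternal != 0 // any LockOSThread call active?
//	depth := mp.locked / _LockInternal       // nesting depth of lockOSThread calls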
idle uint32
}
-/*
- * known to compiler
- */
-const (
- _Structrnd = sys.RegSize
-)
-
// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
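// Illustrative sketch only: on Linux the slice is filled in while walking
// the auxiliary vector, roughly as sysauxv in os_linux.go does it
// (paraphrased, not a verbatim quote):
//
//	case _AT_RANDOM:
//		// the kernel provides 16 bytes of random data
//		startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]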
}
}
-/*
- * deferred subroutine calls
- */
+// deferred subroutine calls
type _defer struct {
siz int32 // size in bytes of the arguments to the deferred call
started bool // whether the deferred call has started running
link *_defer // next _defer on the owning goroutine's list (gp._defer is the head)
}
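// Illustrative sketch only: _defer records form a per-goroutine stack,
// newest first, threaded through link. Roughly what newdefer/deferproc
// do (paraphrased, not a verbatim quote):
//
//	d := newdefer(siz)
//	d.link = gp._defer // push on top of the current G's defer stack
//	gp._defer = d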
-/*
- * panics
- */
+// panics
type _panic struct {
argp unsafe.Pointer // pointer to arguments of deferred call run during panic; cannot move - known to liblink
arg interface{} // argument to panic
aborted bool // the panic was aborted
}
-/*
- * stack traces
- */
-
+// stack traces
type stkframe struct {
fn *_func // function being run
pc uintptr // program counter within fn
_TraceJumpStack // if traceback is on a systemstack, resume trace at g that called into it
)
-const (
- // The maximum number of frames we print for a traceback
- _TracebackMaxFrames = 100
-)
+// The maximum number of frames we print for a traceback
+const _TracebackMaxFrames = 100
var (
emptystring string
islibrary bool // -buildmode=c-shared
isarchive bool // -buildmode=c-archive
)
-
-/*
- * mutual exclusion locks. in the uncontended case,
- * as fast as spin locks (just a few user-level instructions),
- * but on the contention path they sleep in the kernel.
- * a zeroed Mutex is unlocked (no need to initialize each lock).
- */
-
-/*
- * sleep and wakeup on one-time events.
- * before any calls to notesleep or notewakeup,
- * must call noteclear to initialize the Note.
- * then, exactly one thread can call notesleep
- * and exactly one thread can call notewakeup (once).
- * once notewakeup has been called, the notesleep
- * will return. future notesleep will return immediately.
- * subsequent noteclear must be called only after
- * previous notesleep has returned, e.g. it's disallowed
- * to call noteclear straight after notewakeup.
- *
- * notetsleep is like notesleep but wakes up after
- * a given number of nanoseconds even if the event
- * has not yet happened. if a goroutine uses notetsleep to
- * wake up early, it must wait to call noteclear until it
- * can be sure that no other goroutine is calling
- * notewakeup.
- *
- * notesleep/notetsleep are generally called on g0,
- * notetsleepg is similar to notetsleep but is called on user g.
- */
-// bool runtime·notetsleep(Note*, int64); // false - timeout
-// bool runtime·notetsleepg(Note*, int64); // false - timeout
-
-/*
- * Lock-free stack.
- * Initialize uint64 head to 0, compare with 0 to test for emptiness.
- * The stack does not keep pointers to nodes,
- * so they can be garbage collected if there are no other pointers to nodes.
- */
-
-// for mmap, we only pass the lower 32 bits of file offset to the
-// assembly routine; the higher bits (if required), should be provided
-// by the assembly routine as 0.