runtime: remove mcache field from m

Having an mcache field in both m and p is confusing, so remove it from m.
Always use the mcache field from p. Use the new variable mcache0 during
bootstrap.
Change-Id: If2cba9f8bb131d911d512b61fd883a86cf62cc98
Reviewed-on: https://go-review.googlesource.com/c/go/+/205239
Run-TryBot: Ian Lance Taylor <iant@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
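
The shape of the change, distilled into one place (a sketch for
illustration only: getCurrentMCache is a hypothetical helper, not a
function added by this CL, which instead inlines this logic at each
call site):

	// getCurrentMCache returns the mcache the current M should use.
	// After this change the mcache hangs off the P, so any code that
	// needs one must be running with a P -- except during bootstrap,
	// before procresize runs, when the global mcache0 set up by
	// mallocinit stands in for the P that does not exist yet.
	func getCurrentMCache(mp *m) *mcache {
		if mp.p != 0 {
			// Common case: read the mcache through the attached P.
			return mp.p.ptr().mcache
		}
		// No P: only legal while bootstrapping. procresize gives
		// mcache0 to the P with ID 0 and then clears it.
		if mcache0 == nil {
			throw("no mcache available")
		}
		return mcache0
	}

The underlying design point: an mcache is per-P state (wirep and
releasep hand it off together with the P), so the copy of the pointer
on the M was redundant and could only fall out of sync; the one window
where no P exists is now covered by a single, explicitly named global.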

src/runtime/malloc.go, in mallocinit:

// Initialize the heap.
mheap_.init()
- _g_ := getg()
- _g_.m.mcache = allocmcache()
+ mcache0 = allocmcache()
// Create initial arena growth hints.
if sys.PtrSize == 8 {

In mallocgc:

shouldhelpgc := false
dataSize := size
- c := gomcache()
+ var c *mcache
+ if mp.p != 0 {
+ c = mp.p.ptr().mcache
+ } else {
+ // We will be called without a P while bootstrapping,
+ // in which case we use mcache0, which is set in mallocinit.
+ // mcache0 is cleared when bootstrapping is complete,
+ // by procresize.
+ c = mcache0
+ if c == nil {
+ throw("malloc called with no P")
+ }
+ }
var x unsafe.Pointer
noscan := typ == nil || typ.ptrdata == 0
if size <= maxSmallSize {
}
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
- mp.mcache.next_sample = nextSample()
+ mp.p.ptr().mcache.next_sample = nextSample()
mProf_Malloc(x, size)
}

src/runtime/mgcsweep.go, in (*mspan).sweep:

size := s.elemsize
res := false
- c := _g_.m.mcache
+ c := _g_.m.p.ptr().mcache
freeToHeap := false
// The allocBits indicate which unmarked objects don't need to be
// which may only be done with the heap locked.

src/runtime/mheap.go, in allocSpan:

// Transfer stats from mcache to global.
- memstats.heap_scan += uint64(gp.m.mcache.local_scan)
- gp.m.mcache.local_scan = 0
- memstats.tinyallocs += uint64(gp.m.mcache.local_tinyallocs)
- gp.m.mcache.local_tinyallocs = 0
+ var c *mcache
+ if gp.m.p != 0 {
+ c = gp.m.p.ptr().mcache
+ } else {
+ // This case occurs while bootstrapping.
+ // See the similar code in mallocgc.
+ c = mcache0
+ if c == nil {
+ throw("mheap.allocSpan called with no P")
+ }
+ }
+ memstats.heap_scan += uint64(c.local_scan)
+ c.local_scan = 0
+ memstats.tinyallocs += uint64(c.local_tinyallocs)
+ c.local_tinyallocs = 0
// Do some additional accounting if it's a large allocation.
if spanclass.sizeclass() == 0 {
// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan) {
systemstack(func() {
- mp := getg().m
+ c := getg().m.p.ptr().mcache
lock(&h.lock)
- memstats.heap_scan += uint64(mp.mcache.local_scan)
- mp.mcache.local_scan = 0
- memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
- mp.mcache.local_tinyallocs = 0
+ memstats.heap_scan += uint64(c.local_scan)
+ c.local_scan = 0
+ memstats.tinyallocs += uint64(c.local_tinyallocs)
+ c.local_tinyallocs = 0
if msanenabled {
// Tell msan that this entire span is no longer in use.
base := unsafe.Pointer(s.base())

src/runtime/proc.go:

var (
m0 m
g0 g
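+ // Bootstrap mcache, set up by mallocinit. procresize hands it to
+ // the P with ID 0 and then clears it once real Ps exist.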
+ mcache0 *mcache
raceprocctx0 uintptr
)

In reentersyscall:

_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
_g_.sysblocktraced = true
- _g_.m.mcache = nil
pp := _g_.m.p.ptr()
pp.m = 0
_g_.m.oldp.set(pp)

In exitsyscall:

oldp := _g_.m.oldp.ptr()
_g_.m.oldp = 0
if exitsyscallfast(oldp) {
- if _g_.m.mcache == nil {
- throw("lost mcache")
- }
if trace.enabled {
if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
systemstack(traceGoStart)
// Call the scheduler.
mcall(exitsyscall0)
- if _g_.m.mcache == nil {
- throw("lost mcache")
- }
-
// Scheduler returned, so we're allowed to run now.
// Delete the syscallsp information that we left for
// the garbage collector during the system call.

In procresize:

pp.wbBuf.reset()
if pp.mcache == nil {
if id == 0 {
- if getg().m.mcache == nil {
+ if mcache0 == nil {
throw("missing mcache?")
}
- pp.mcache = getg().m.mcache // bootstrap
+ // Use the bootstrap mcache0. Only one P will get
+ // mcache0: the one with ID 0.
+ pp.mcache = mcache0
} else {
pp.mcache = allocmcache()
}
_g_.m.p.ptr().m = 0
}
_g_.m.p = 0
- _g_.m.mcache = nil
p := allp[0]
p.m = 0
p.status = _Pidle
}
}
+ // g.m.p is now set, so we no longer need mcache0 for bootstrapping.
+ mcache0 = nil
+
// release resources from unused P's
for i := nprocs; i < old; i++ {
p := allp[i]
func wirep(_p_ *p) {
_g_ := getg()
- if _g_.m.p != 0 || _g_.m.mcache != nil {
+ if _g_.m.p != 0 {
throw("wirep: already in go")
}
if _p_.m != 0 || _p_.status != _Pidle {
print("wirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
throw("wirep: invalid p state")
}
- _g_.m.mcache = _p_.mcache
_g_.m.p.set(_p_)
_p_.m.set(_g_.m)
_p_.status = _Prunning
func releasep() *p {
_g_ := getg()
- if _g_.m.p == 0 || _g_.m.mcache == nil {
+ if _g_.m.p == 0 {
throw("releasep: invalid arg")
}
_p_ := _g_.m.p.ptr()
- if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
- print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
+ if _p_.m.ptr() != _g_.m || _p_.status != _Prunning {
+ print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", hex(_p_.m), " p->status=", _p_.status, "\n")
throw("releasep: invalid p state")
}
if trace.enabled {
traceProcStop(_g_.m.p.ptr())
}
_g_.m.p = 0
- _g_.m.mcache = nil
_p_.m = 0
_p_.status = _Pidle
return _p_
}
}

src/runtime/runtime1.go:

-//go:nosplit
-func gomcache() *mcache {
- return getg().m.mcache
-}
-
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
modules := activeModules()

src/runtime/runtime2.go, in the m struct:

park note
alllink *m // on allm
schedlink muintptr
- mcache *mcache
lockedg guintptr
createstack [32]uintptr // stack that created this thread.
lockedExt uint32 // tracking for external LockOSThread

src/runtime/stack.go, in stackalloc:

n2 >>= 1
}
var x gclinkptr
- c := thisg.m.mcache
- if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" {
- // c == nil can happen in the guts of exitsyscall or
- // procresize. Just get a stack from the global pool.
+ if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
+ // thisg.m.p == 0 can happen in the guts of exitsyscall
+ // or procresize. Just get a stack from the global pool.
// Also don't touch stackcache during gc
// as it's flushed concurrently.
lock(&stackpool[order].item.mu)
x = stackpoolalloc(order)
unlock(&stackpool[order].item.mu)
} else {
+ c := thisg.m.p.ptr().mcache
x = c.stackcache[order].list
if x.ptr() == nil {
stackcacherefill(c, order)

In stackfree:

n2 >>= 1
}
x := gclinkptr(v)
- c := gp.m.mcache
- if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" {
+ if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
lock(&stackpool[order].item.mu)
stackpoolfree(x, order)
unlock(&stackpool[order].item.mu)
} else {
+ c := gp.m.p.ptr().mcache
if c.stackcache[order].size >= _StackCacheSize {
stackcacherelease(c, order)
}