atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
}
- // release resources from unused P's
- for i := nprocs; i < old; i++ {
- p := allp[i]
- if trace.enabled && p == getg().m.p.ptr() {
- // moving to p[0], pretend that we were descheduled
- // and then scheduled again to keep the trace sane.
- traceGoSched()
- traceProcStop(p)
- }
- p.destroy()
- // can't free P itself because it can be referenced by an M in syscall
- }
-
- // Trim allp.
- if int32(len(allp)) != nprocs {
- lock(&allpLock)
- allp = allp[:nprocs]
- unlock(&allpLock)
- }
-
_g_ := getg()
if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
// continue to use the current P
_g_.m.p.ptr().status = _Prunning
_g_.m.p.ptr().mcache.prepareForSweep()
} else {
- // release the current P and acquire allp[0]
+ // release the current P and acquire allp[0].
+ //
+ // We must do this before destroying our current P
+ // because p.destroy itself has write barriers, so we
+ // need to do that from a valid P.
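+ //
+ // (Write barriers are buffered in the per-P wbBuf, so
+ // they cannot run without a P.)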
if _g_.m.p != 0 {
+ if trace.enabled {
+ // Pretend that we were descheduled
+ // and then scheduled again to keep
+ // the trace sane.
+ traceGoSched()
+ traceProcStop(_g_.m.p.ptr())
+ }
_g_.m.p.ptr().m = 0
}
_g_.m.p = 0
_g_.m.mcache = nil
p := allp[0]
p.m = 0
p.status = _Pidle
acquirep(p)
if trace.enabled {
traceGoStart()
}
}
+
+ // release resources from unused P's
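+ // By this point we always have a valid P (either our old
+ // one or allp[0], acquired above), so the write barriers
+ // in p.destroy are safe.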
+ for i := nprocs; i < old; i++ {
+ p := allp[i]
+ p.destroy()
+ // can't free P itself because it can be referenced by an M in syscall
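+ // (For example, an M exiting a syscall may still read this
+ // P's status through its m.p reference.)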
+ }
+
+ // Trim allp.
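+ // allpLock is needed because concurrent readers, such as
+ // sysmon's retake, iterate allp without stopping the world.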
+ if int32(len(allp)) != nprocs {
+ lock(&allpLock)
+ allp = allp[:nprocs]
+ unlock(&allpLock)
+ }
+
var runnablePs *p
for i := nprocs - 1; i >= 0; i-- {
p := allp[i]