bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
if(noptr)
bits |= bitNoPointers<<shift;
- if(runtime·gomaxprocs == 1) {
+ if(runtime·singleproc) {
*b = bits;
break;
} else {
- // gomaxprocs > 1: use atomic op
+ // more than one goroutine is potentially running: use atomic op
if(runtime·casp((void**)b, (void*)obits, (void*)bits))
break;
}
for(;;) {
obits = *b;
bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
- if(runtime·gomaxprocs == 1) {
+ if(runtime·singleproc) {
*b = bits;
break;
} else {
- // gomaxprocs > 1: use atomic op
+ // more than one goroutine is potentially running: use atomic op
if(runtime·casp((void**)b, (void*)obits, (void*)bits))
break;
}
for(;;) {
obits = *b;
bits = obits | (bitSpecial<<shift);
- if(runtime·gomaxprocs == 1) {
+ if(runtime·singleproc) {
*b = bits;
break;
} else {
- // gomaxprocs > 1: use atomic op
+ // more than one goroutine is potentially running: use atomic op
if(runtime·casp((void**)b, (void*)obits, (void*)bits))
break;
}
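
All three loops above apply the same pattern to different bit updates: snapshot the bitmap word, compute the new value, then either store it with a plain write (safe when only one proc can be running) or publish it with compare-and-swap and retry on contention. A minimal self-contained sketch of that pattern, with C11 atomics standing in for runtime·casp (the names singleproc, bitmap_word, and setbits are illustrative, not the runtime's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

// Illustrative stand-ins for runtime state; not the real declarations.
static bool singleproc;                // true iff only one proc can run
static _Atomic uintptr_t bitmap_word;  // one word of the GC bitmap

// Set the given bits in the shared bitmap word, retrying the
// read-modify-write until it lands, as the loops above do.
static void
setbits(uintptr_t mask, uintptr_t shift)
{
	uintptr_t obits, bits;

	for(;;) {
		obits = atomic_load(&bitmap_word);
		bits = obits | (mask<<shift);
		if(singleproc) {
			// Only one proc: a plain store cannot race.
			atomic_store(&bitmap_word, bits);
			break;
		} else {
			// More than one goroutine is potentially running:
			// use an atomic op.  On failure the loop retries
			// with a fresh snapshot of the word.
			if(atomic_compare_exchange_weak(&bitmap_word, &obits, bits))
				break;
		}
	}
}

With gomaxprocs at 1 the plain store avoids a bus-locked instruction on every bitmap update, which appears to be the point of introducing the flag.
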
Sched runtime·sched;
int32 runtime·gomaxprocs;
+bool runtime·singleproc;
// An m that is waiting for notewakeup(&m->havenextg). This may
// only be accessed while the scheduler lock is held. This is used to
runtime·gomaxprocs = n;
}
setmcpumax(runtime·gomaxprocs);
+ runtime·singleproc = runtime·gomaxprocs == 1;
runtime·sched.predawn = 1;
m->nomemprof--;
runtime·notesleep(&runtime·sched.stopped);
schedlock();
}
+ runtime·singleproc = runtime·gomaxprocs == 1;
schedunlock();
}
if(n > maxgomaxprocs)
n = maxgomaxprocs;
runtime·gomaxprocs = n;
+ if(runtime·gomaxprocs > 1)
+ runtime·singleproc = false;
if(runtime·gcwaiting != 0) {
if(atomic_mcpumax(runtime·sched.atomic) != 1)
runtime·throw("invalid mcpumax during gc");
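
The scheduler hunks maintain the flag with a deliberate asymmetry: runtime·singleproc is raised to true only where no other goroutine can be running (at initialization, and after the world has been stopped), but it is dropped to false immediately when gomaxprocs grows past one, since a stale true would let a racing proc skip a CAS it now needs. A condensed sketch of that discipline (placeholder names, not the runtime's functions):

#include <stdbool.h>
#include <stdint.h>

static bool singleproc;     // illustrative stand-in for runtime·singleproc
static int32_t gomaxprocs;  // illustrative stand-in for runtime·gomaxprocs

// Raising gomaxprocs: clear the fast-path flag right away, before any
// new proc can start, so nobody skips an atomic op that now matters.
static void
grow_procs(int32_t n)
{
	gomaxprocs = n;
	if(gomaxprocs > 1)
		singleproc = false;
}

// Returning to one proc: the flag may only be set true once the world
// is stopped, i.e. when no other proc can observe it mid-update.
static void
world_stopped(void)
{
	singleproc = gomaxprocs == 1;
}
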
G* runtime·allg;
M* runtime·allm;
extern int32 runtime·gomaxprocs;
+extern bool runtime·singleproc;
extern uint32 runtime·panicking;
extern int32 runtime·gcwaiting; // gc is waiting to run
int8* runtime·goos;