Updates #22460.
Change-Id: I3c8e90fd6bcda7e28911036591873d63665aaca7
Reviewed-on: https://go-review.googlesource.com/92696
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Cherry Zhang <cherryyz@google.com>
Debug['l'] = 1 - Debug['l']
}
- // The buffered write barrier is only implemented on amd64
- // right now.
- if objabi.GOARCH != "amd64" {
+ switch objabi.GOARCH {
+ case "amd64", "386":
+ default:
+ // Other architectures don't support the buffered
+ // write barrier yet.
Debug_eagerwb = 1
}
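
For orientation: Debug_eagerwb forces the compiler to keep emitting the
eager write barrier, which makes a runtime call to do GC marking work on
every pointer write; the buffered barrier this CL enables for 386 instead
records the write into a per-P buffer and batches the marking work. A toy
Go model of the distinction (shade and the log slice are stand-ins for the
runtime's real machinery, not code from this CL):

	package main

	var log []uintptr // stand-in for the per-P write barrier buffer

	func shade(p uintptr) {} // stand-in for immediate GC marking work

	// eagerWrite models the eager barrier: marking work on every write.
	func eagerWrite(dst *uintptr, src uintptr) {
		shade(src)  // the hybrid barrier shades the new pointer...
		shade(*dst) // ...and the one being overwritten
		*dst = src
	}

	// bufferedWrite models the buffered barrier: the two pointers are
	// only logged; shading is deferred until the buffer fills (see the
	// gcWriteBarrier assembly at the end of this CL).
	func bufferedWrite(dst *uintptr, src uintptr) {
		log = append(log, src, *dst)
		*dst = src
	}

	func main() {
		var x uintptr
		eagerWrite(&x, 1)
		bufferedWrite(&x, 2)
	}
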
	// runtime calls clobber R12 on nacl
opcodeTable[OpARMCALLudiv].reg.clobbers |= 1 << 12 // R12
+
+	// Returns clobber BP on nacl/386, so the write
+	// barrier does too.
+ opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 5 // BP
+ }
+
+ if ctxt.Flag_shared {
+ // LoweredWB is secretly a CALL and CALLs on 386 in
+ // shared mode get rewritten by obj6.go to go through
+ // the GOT, which clobbers BX.
+ opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 3 // BX
}
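
Both clobber updates flip one bit in a register mask: the 386 SSA backend
numbers its registers AX, CX, DX, BX, SP, BP, SI, DI as bits 0-7 and X0-X7
as bits 8-15, so 1<<5 is BP and 1<<3 is BX. A small sketch (mine, not part
of the CL) that reproduces every mask literal this change uses, including
the 128 (DI), 1 (AX), and 65280 (X0-X7) in the generated opcode table
further down:

	package main

	import "fmt"

	func main() {
		// Bit i stands for regs[i]; order per the comments in this CL.
		regs := []string{"AX", "CX", "DX", "BX", "SP", "BP", "SI", "DI",
			"X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7"}
		bit := make(map[string]uint32)
		for i, r := range regs {
			bit[r] = 1 << uint(i)
		}
		fmt.Println(bit["BP"], bit["BX"]) // 32 8: the nacl and -shared clobbers
		fmt.Println(bit["DI"], bit["AX"]) // 128 1: LoweredWB's fixed inputs
		var xmm uint32
		for _, r := range regs[8:] {
			xmm |= bit[r]
		}
		fmt.Println(xmm) // 65280: LoweredWB's clobbers, callerSave &^ gp
	}
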
// cutoff is compared with product of numblocks and numvalues,
(If cond yes no) -> (NE (TESTB cond cond) yes no)
+// Write barrier.
+(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem)
+
// ***************************
// Above: lowering rules
// Below: optimizations
// arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary, but may clobber others.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), ax}, clobbers: callerSave &^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
// MOVLconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
Op386LoweredGetCallerPC
Op386LoweredGetCallerSP
Op386LoweredNilCheck
+ Op386LoweredWB
Op386MOVLconvert
Op386FlagEQ
Op386FlagLT_ULT
},
},
},
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 1}, // AX
+ },
+ clobbers: 65280, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
{
name: "MOVLconvert",
argLen: 2,
return rewriteValue386_OpTrunc32to16_0(v)
case OpTrunc32to8:
return rewriteValue386_OpTrunc32to8_0(v)
+ case OpWB:
+ return rewriteValue386_OpWB_0(v)
case OpXor16:
return rewriteValue386_OpXor16_0(v)
case OpXor32:
return true
}
}
+func rewriteValue386_OpWB_0(v *Value) bool {
+ // match: (WB {fn} destptr srcptr mem)
+ // cond:
+ // result: (LoweredWB {fn} destptr srcptr mem)
+ for {
+ fn := v.Aux
+ _ = v.Args[2]
+ destptr := v.Args[0]
+ srcptr := v.Args[1]
+ mem := v.Args[2]
+ v.reset(Op386LoweredWB)
+ v.Aux = fn
+ v.AddArg(destptr)
+ v.AddArg(srcptr)
+ v.AddArg(mem)
+ return true
+ }
+}
func rewriteValue386_OpXor16_0(v *Value) bool {
// match: (Xor16 x y)
// cond:
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.Op386LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+
case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
s.Call(v)
case ssa.Op386NEGL,
runtime/asm_386.s: [386] float64touint32: function float64touint32 missing Go declaration
runtime/asm_386.s: [386] stackcheck: function stackcheck missing Go declaration
+
+runtime/asm_ARCHSUFF.s: [GOARCH] gcWriteBarrier: function gcWriteBarrier missing Go declaration
MOVL 4(SP), AX
MOVL AX, ret+8(FP)
RET
+
+// gcWriteBarrier performs a heap pointer write and informs the GC.
+//
+// gcWriteBarrier does NOT follow the Go ABI. It takes two arguments:
+// - DI is the destination of the write
+// - AX is the value being written at DI
+// It clobbers FLAGS. It does not clobber any general-purpose registers,
+// but may clobber others (e.g., SSE registers).
+TEXT runtime·gcWriteBarrier(SB),NOSPLIT,$28
+ // Save the registers clobbered by the fast path. This is slightly
+ // faster than having the caller spill these.
+ MOVL CX, 20(SP)
+ MOVL BX, 24(SP)
+	// TODO: Consider passing g.m.p in as an argument so the p
+	// lookup can be shared across a sequence of write barriers.
+ get_tls(BX)
+ MOVL g(BX), BX
+ MOVL g_m(BX), BX
+ MOVL m_p(BX), BX
+ MOVL (p_wbBuf+wbBuf_next)(BX), CX
+ // Increment wbBuf.next position.
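+	// Each record is two 4-byte words: the new value,
+	// then the slot's old contents (hence the 8 below).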
+ LEAL 8(CX), CX
+ MOVL CX, (p_wbBuf+wbBuf_next)(BX)
+ CMPL CX, (p_wbBuf+wbBuf_end)(BX)
+ // Record the write.
+ MOVL AX, -8(CX) // Record value
+ MOVL (DI), BX // TODO: This turns bad writes into bad reads.
+ MOVL BX, -4(CX) // Record *slot
+ // Is the buffer full? (flags set in CMPL above)
+ JEQ flush
+ret:
+ MOVL 20(SP), CX
+ MOVL 24(SP), BX
+ // Do the write.
+ MOVL AX, (DI)
+ RET
+
+flush:
+ // Save all general purpose registers since these could be
+ // clobbered by wbBufFlush and were not saved by the caller.
+ MOVL DI, 0(SP) // Also first argument to wbBufFlush
+ MOVL AX, 4(SP) // Also second argument to wbBufFlush
+ // BX already saved
+ // CX already saved
+ MOVL DX, 8(SP)
+ MOVL BP, 12(SP)
+ MOVL SI, 16(SP)
+ // DI already saved
+
+ // This takes arguments DI and AX
+ CALL runtime·wbBufFlush(SB)
+
+ MOVL 0(SP), DI
+ MOVL 4(SP), AX
+ MOVL 8(SP), DX
+ MOVL 12(SP), BP
+ MOVL 16(SP), SI
+ JMP ret
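
The barrier above, rendered as ordinary Go for reference. A sketch only:
next and end are indices here rather than addresses, the buffer size is
arbitrary, and flush merely stands in for runtime.wbBufFlush, which hands
the records to the garbage collector:

	package main

	// wbBuf mirrors the runtime structure the assembly reaches through
	// the p_wbBuf+wbBuf_next and p_wbBuf+wbBuf_end offsets.
	type wbBuf struct {
		next int // next free slot; advances two words per record
		end  int // next == end means the record just filled the buffer
		buf  [512]uintptr
	}

	func gcWriteBarrier(b *wbBuf, dst *uintptr, val uintptr) {
		// Claim a record and test for fullness up front, like the
		// LEAL/CMPL pair; the branch is only taken after the record
		// has been written.
		b.next += 2
		full := b.next == b.end
		// Record the new value and the slot's old contents, like the
		// MOVL AX, -8(CX) / MOVL BX, -4(CX) pair.
		b.buf[b.next-2] = val
		b.buf[b.next-1] = *dst
		if full {
			flush(b, dst, val)
		}
		// Finally, perform the write itself: MOVL AX, (DI).
		*dst = val
	}

	// flush stands in for runtime.wbBufFlush(dst, src), which passes
	// the buffered pointers to the GC and resets the buffer.
	func flush(b *wbBuf, dst *uintptr, val uintptr) {
		b.next = 0
	}

	func main() {
		var x uintptr
		b := &wbBuf{end: 512}
		gcWriteBarrier(b, &x, 42)
	}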