if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
gc.Warnl(v.Pos, "generated nil check")
}
- case ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
+ case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
case ssa.OpARM64LDAR,
+ ssa.OpARM64LDARB,
ssa.OpARM64LDARW:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
},
sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.MIPS64, sys.PPC64)
+ addF("runtime/internal/atomic", "Load8",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[TUINT8], types.TypeMem), args[0], s.mem())
+ s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[TUINT8], v)
+ },
+ sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS64, sys.PPC64)
addF("runtime/internal/atomic", "Load64",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
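For context, a minimal usage sketch of the Load8 intrinsic registered above (hypothetical names, not part of this change; runtime/internal/atomic is only importable from inside the runtime). On the architectures wired up above, the compiler replaces the call with the AtomicLoad8 op built here, taking the loaded byte from Select0 and threading the memory state through Select1, so no actual call is emitted.

    package runtime

    import "runtime/internal/atomic"

    // shutdownFlag is a hypothetical byte-sized flag written by another goroutine.
    var shutdownFlag uint8

    func shutdownRequested() bool {
    	// Intrinsified to AtomicLoad8 on amd64, arm64, mips64, ppc64 and s390x;
    	// on other GOARCHes this stays a call to the runtime/internal/atomic implementation.
    	return atomic.Load8(&shutdownFlag) != 0
    }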
p.To.Name = obj.NAME_EXTERN
p.To.Sym = gc.BoundsCheckFunc[v.AuxInt]
s.UseArgs(16) // space used in callee args area by assembly stubs
- case ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
+ case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
as := mips.AMOVV
- if v.Op == ssa.OpMIPS64LoweredAtomicLoad32 {
+ switch v.Op {
+ case ssa.OpMIPS64LoweredAtomicLoad8:
+ as = mips.AMOVB
+ case ssa.OpMIPS64LoweredAtomicLoad32:
as = mips.AMOVW
}
s.Prog(mips.ASYNC)
pisync := s.Prog(ppc64.AISYNC)
pisync.To.Type = obj.TYPE_NONE
- case ssa.OpPPC64LoweredAtomicLoad32,
+ case ssa.OpPPC64LoweredAtomicLoad8,
+ ssa.OpPPC64LoweredAtomicLoad32,
ssa.OpPPC64LoweredAtomicLoad64,
ssa.OpPPC64LoweredAtomicLoadPtr:
// SYNC
- // MOVD/MOVW (Rarg0), Rout
+ // MOVBZ/MOVD/MOVWZ (Rarg0), Rout
// CMP Rout,Rout
// BNE 1(PC)
// ISYNC
ld := ppc64.AMOVD
cmp := ppc64.ACMP
- if v.Op == ssa.OpPPC64LoweredAtomicLoad32 {
- ld = ppc64.AMOVW
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicLoad8:
+ ld = ppc64.AMOVBZ
+ case ssa.OpPPC64LoweredAtomicLoad32:
+ ld = ppc64.AMOVWZ
cmp = ppc64.ACMPW
}
arg0 := v.Args[0].Reg()
clear.To.Type = obj.TYPE_MEM
clear.To.Reg = v.Args[0].Reg()
}
- case ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
+ case ssa.OpS390XMOVBZatomicload, ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
p := s.Prog(v.Op.Asm())
p.From.Type = obj.TYPE_MEM
p.From.Reg = v.Args[0].Reg()
for _, b := range f.Blocks {
for _, v := range b.Values {
switch v.Op {
- case OpLoad, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32:
+ case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32:
loadAddr.add(v.Args[0].ID)
case OpMove:
loadAddr.add(v.Args[1].ID)
(If cond yes no) -> (NE (TESTB cond cond) yes no)
// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here.
+(AtomicLoad8 ptr mem) -> (MOVBatomicload ptr mem)
(AtomicLoad32 ptr mem) -> (MOVLatomicload ptr mem)
(AtomicLoad64 ptr mem) -> (MOVQatomicload ptr mem)
(AtomicLoadPtr ptr mem) && config.PtrSize == 8 -> (MOVQatomicload ptr mem)
((ADD|SUB|AND|OR|XOR|BTC|BTR|BTS)Qmodify [off] {sym} ptr x mem)
// Merge ADDQconst and LEAQ into atomic loads.
-(MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
- (MOVQatomicload [off1+off2] {sym} ptr mem)
-(MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
- (MOVLatomicload [off1+off2] {sym} ptr mem)
-(MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVQatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
-(MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
- (MOVLatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
+ (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
+(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) ->
+ (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
// Merge ADDQconst and LEAQ into atomic stores.
(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(off1+off2) ->
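To illustrate what the MOV(Q|L|B)atomicload folding above buys, a hedged sketch with hypothetical names (not taken from this change): when the byte's address is a constant offset from a base pointer, the offset is absorbed into the atomic load's AuxInt, so the final code is a single byte load with a memory operand instead of an address computation followed by the load.

    package runtime

    import "runtime/internal/atomic"

    // gate is a hypothetical struct; state sits at offset 3.
    type gate struct {
    	pad   [3]byte
    	state uint8
    }

    func gateState(g *gate) uint8 {
    	// &g.state lowers to an ADDQconst/LEAQ off g; the rules above fold the
    	// constant into MOVBatomicload, yielding a MOVB 3(reg)-style instruction on amd64.
    	return atomic.Load8(&g.state)
    }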
// Atomic loads. These are just normal loads but return <value,memory> tuples
// so they can be properly ordered with other loads.
// load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBatomicload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
{name: "MOVLatomicload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
{name: "MOVQatomicload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
// atomic intrinsics
// Note: these ops do not accept offset.
+(AtomicLoad8 ptr mem) -> (LDARB ptr mem)
(AtomicLoad32 ptr mem) -> (LDARW ptr mem)
(AtomicLoad64 ptr mem) -> (LDAR ptr mem)
(AtomicLoadPtr ptr mem) -> (LDAR ptr mem)
// load from arg0. arg1=mem. auxint must be zero.
// returns <value,memory> so they can be properly ordered with other loads.
{name: "LDAR", argLength: 2, reg: gpload, asm: "LDAR", faultOnNilArg0: true},
+ {name: "LDARB", argLength: 2, reg: gpload, asm: "LDARB", faultOnNilArg0: true},
{name: "LDARW", argLength: 2, reg: gpload, asm: "LDARW", faultOnNilArg0: true},
// atomic stores.
(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem)
// atomic intrinsics
+(AtomicLoad8 ptr mem) -> (LoweredAtomicLoad8 ptr mem)
(AtomicLoad32 ptr mem) -> (LoweredAtomicLoad32 ptr mem)
(AtomicLoad64 ptr mem) -> (LoweredAtomicLoad64 ptr mem)
(AtomicLoadPtr ptr mem) -> (LoweredAtomicLoad64 ptr mem)
// atomic loads.
// load from arg0. arg1=mem.
// returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
{name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
{name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
// atomic intrinsics
-(AtomicLoad(32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(32|64|Ptr) [1] ptr mem)
+(AtomicLoad(8|32|64|Ptr) ptr mem) -> (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
(AtomicLoadAcq32 ptr mem) -> (LoweredAtomicLoad32 [0] ptr mem)
(AtomicStore(32|64) ptr val mem) -> (LoweredAtomicStore(32|64) [1] ptr val mem)
{name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
{name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, typ: "UInt8", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
{name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
{name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
{name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
(Round x) -> (FIDBR [1] x)
// Atomic loads.
+(AtomicLoad8 ptr mem) -> (MOVBZatomicload ptr mem)
(AtomicLoad32 ptr mem) -> (MOVWZatomicload ptr mem)
(AtomicLoad64 ptr mem) -> (MOVDatomicload ptr mem)
(AtomicLoadPtr ptr mem) -> (MOVDatomicload ptr mem)
// Atomic loads. These are just normal loads but return <value,memory> tuples
// so they can be properly ordered with other loads.
// load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBZatomicload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
{name: "MOVWZatomicload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
{name: "MOVDatomicload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
// Atomic loads return a new memory so that the loads are properly ordered
// with respect to other loads and stores.
// TODO: use for sync/atomic at some point.
+ {name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
{name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
OpAMD64FlagLT_UGT
OpAMD64FlagGT_UGT
OpAMD64FlagGT_ULT
+ OpAMD64MOVBatomicload
OpAMD64MOVLatomicload
OpAMD64MOVQatomicload
OpAMD64XCHGL
OpARM64FlagGT_ULT
OpARM64InvertFlags
OpARM64LDAR
+ OpARM64LDARB
OpARM64LDARW
OpARM64STLR
OpARM64STLRW
OpMIPS64DUFFZERO
OpMIPS64LoweredZero
OpMIPS64LoweredMove
+ OpMIPS64LoweredAtomicLoad8
OpMIPS64LoweredAtomicLoad32
OpMIPS64LoweredAtomicLoad64
OpMIPS64LoweredAtomicStore32
OpPPC64LoweredMove
OpPPC64LoweredAtomicStore32
OpPPC64LoweredAtomicStore64
+ OpPPC64LoweredAtomicLoad8
OpPPC64LoweredAtomicLoad32
OpPPC64LoweredAtomicLoad64
OpPPC64LoweredAtomicLoadPtr
OpS390XFlagLT
OpS390XFlagGT
OpS390XFlagOV
+ OpS390XMOVBZatomicload
OpS390XMOVWZatomicload
OpS390XMOVDatomicload
OpS390XMOVWatomicstore
OpCvt64Fto64U
OpSelect0
OpSelect1
+ OpAtomicLoad8
OpAtomicLoad32
OpAtomicLoad64
OpAtomicLoadPtr
argLen: 0,
reg: regInfo{},
},
+ {
+ name: "MOVBatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R14 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
{
name: "MOVLatomicload",
auxType: auxSymOff,
},
},
},
+ {
+ name: "LDARB",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
{
name: "LDARW",
argLen: 2,
clobbers: 6, // R1 R2
},
},
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
{
name: "LoweredAtomicLoad32",
argLen: 2,
},
},
},
+ {
+ name: "LoweredAtomicLoad8",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
{
name: "LoweredAtomicLoad32",
auxType: auxInt64,
argLen: 0,
reg: regInfo{},
},
+ {
+ name: "MOVBZatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
{
name: "MOVWZatomicload",
auxType: auxSymOff,
zeroWidth: true,
generic: true,
},
+ {
+ name: "AtomicLoad8",
+ argLen: 2,
+ generic: true,
+ },
{
name: "AtomicLoad32",
argLen: 2,
return rewriteValueAMD64_OpAMD64MOVBQSXload_0(v)
case OpAMD64MOVBQZX:
return rewriteValueAMD64_OpAMD64MOVBQZX_0(v)
+ case OpAMD64MOVBatomicload:
+ return rewriteValueAMD64_OpAMD64MOVBatomicload_0(v)
case OpAMD64MOVBload:
return rewriteValueAMD64_OpAMD64MOVBload_0(v)
case OpAMD64MOVBloadidx1:
return rewriteValueAMD64_OpAtomicLoad32_0(v)
case OpAtomicLoad64:
return rewriteValueAMD64_OpAtomicLoad64_0(v)
+ case OpAtomicLoad8:
+ return rewriteValueAMD64_OpAtomicLoad8_0(v)
case OpAtomicLoadPtr:
return rewriteValueAMD64_OpAtomicLoadPtr_0(v)
case OpAtomicOr8:
}
return false
}
+func rewriteValueAMD64_OpAMD64MOVBatomicload_0(v *Value) bool {
+ // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(off1+off2)
+ // result: (MOVBatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := v_0.AuxInt
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + off2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = off1 + off2
+ v.Aux = sym
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2)
+ // result: (MOVBatomicload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := v.AuxInt
+ sym1 := v.Aux
+ mem := v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := v_0.AuxInt
+ sym2 := v_0.Aux
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = off1 + off2
+ v.Aux = mergeSym(sym1, sym2)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool {
// match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
return true
}
}
+func rewriteValueAMD64_OpAtomicLoad8_0(v *Value) bool {
+ // match: (AtomicLoad8 ptr mem)
+ // cond:
+ // result: (MOVBatomicload ptr mem)
+ for {
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpAMD64MOVBatomicload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
func rewriteValueAMD64_OpAtomicLoadPtr_0(v *Value) bool {
b := v.Block
config := b.Func.Config
return rewriteValueARM64_OpAtomicLoad32_0(v)
case OpAtomicLoad64:
return rewriteValueARM64_OpAtomicLoad64_0(v)
+ case OpAtomicLoad8:
+ return rewriteValueARM64_OpAtomicLoad8_0(v)
case OpAtomicLoadPtr:
return rewriteValueARM64_OpAtomicLoadPtr_0(v)
case OpAtomicOr8:
return true
}
}
+func rewriteValueARM64_OpAtomicLoad8_0(v *Value) bool {
+ // match: (AtomicLoad8 ptr mem)
+ // cond:
+ // result: (LDARB ptr mem)
+ for {
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpARM64LDARB)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
func rewriteValueARM64_OpAtomicLoadPtr_0(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
return rewriteValueMIPS64_OpAtomicLoad32_0(v)
case OpAtomicLoad64:
return rewriteValueMIPS64_OpAtomicLoad64_0(v)
+ case OpAtomicLoad8:
+ return rewriteValueMIPS64_OpAtomicLoad8_0(v)
case OpAtomicLoadPtr:
return rewriteValueMIPS64_OpAtomicLoadPtr_0(v)
case OpAtomicStore32:
return true
}
}
+func rewriteValueMIPS64_OpAtomicLoad8_0(v *Value) bool {
+ // match: (AtomicLoad8 ptr mem)
+ // cond:
+ // result: (LoweredAtomicLoad8 ptr mem)
+ for {
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpMIPS64LoweredAtomicLoad8)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
func rewriteValueMIPS64_OpAtomicLoadPtr_0(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
return rewriteValuePPC64_OpAtomicLoad32_0(v)
case OpAtomicLoad64:
return rewriteValuePPC64_OpAtomicLoad64_0(v)
+ case OpAtomicLoad8:
+ return rewriteValuePPC64_OpAtomicLoad8_0(v)
case OpAtomicLoadAcq32:
return rewriteValuePPC64_OpAtomicLoadAcq32_0(v)
case OpAtomicLoadPtr:
return true
}
}
+func rewriteValuePPC64_OpAtomicLoad8_0(v *Value) bool {
+ // match: (AtomicLoad8 ptr mem)
+ // cond:
+ // result: (LoweredAtomicLoad8 [1] ptr mem)
+ for {
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpPPC64LoweredAtomicLoad8)
+ v.AuxInt = 1
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
func rewriteValuePPC64_OpAtomicLoadAcq32_0(v *Value) bool {
// match: (AtomicLoadAcq32 ptr mem)
// cond:
return rewriteValueS390X_OpAtomicLoad32_0(v)
case OpAtomicLoad64:
return rewriteValueS390X_OpAtomicLoad64_0(v)
+ case OpAtomicLoad8:
+ return rewriteValueS390X_OpAtomicLoad8_0(v)
case OpAtomicLoadPtr:
return rewriteValueS390X_OpAtomicLoadPtr_0(v)
case OpAtomicStore32:
return true
}
}
+func rewriteValueS390X_OpAtomicLoad8_0(v *Value) bool {
+ // match: (AtomicLoad8 ptr mem)
+ // cond:
+ // result: (MOVBZatomicload ptr mem)
+ for {
+ mem := v.Args[1]
+ ptr := v.Args[0]
+ v.reset(OpS390XMOVBZatomicload)
+ v.AddArg(ptr)
+ v.AddArg(mem)
+ return true
+ }
+}
func rewriteValueS390X_OpAtomicLoadPtr_0(v *Value) bool {
// match: (AtomicLoadPtr ptr mem)
// cond:
MOVW R1, ret+4(FP)
RET
+TEXT ·Load8(SB),NOSPLIT,$0-5
+ MOVW ptr+0(FP), R1
+ SYNC
+ MOVB 0(R1), R1
+ SYNC
+ MOVB R1, ret+4(FP)
+ RET
+
TEXT ·Xadd(SB),NOSPLIT,$0-12
MOVW ptr+0(FP), R2
MOVW delta+4(FP), R3
//go:noescape
func Load64(ptr *uint64) uint64
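+// An aligned byte load is already atomic on this port, so Load8 only needs a plain load of *ptr.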
+//go:nosplit
+//go:noinline
+func Load8(ptr *uint8) uint8 {
+ return *ptr
+}
+
//go:noescape
func And8(ptr *uint8, val uint8)
//go:noescape
func Xchguintptr(ptr *uintptr, new uintptr) uintptr
+//go:nosplit
+//go:noinline
+func Load8(ptr *uint8) uint8 {
+ return *ptr
+}
+
//go:noescape
func And8(ptr *uint8, val uint8)
// NO go:noescape annotation; *addr escapes if result escapes (#31525)
func Loadp(addr unsafe.Pointer) unsafe.Pointer
+//go:noescape
+func Load8(addr *uint8) uint8
+
//go:noescape
func LoadAcq(addr *uint32) uint32
//go:noescape
func Load(ptr *uint32) uint32
+//go:noescape
+func Load8(ptr *uint8) uint8
+
//go:noescape
func Load64(ptr *uint64) uint64
MOVW R0, ret+8(FP)
RET
+// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* addr)
+TEXT ·Load8(SB),NOSPLIT,$0-9
+ MOVD ptr+0(FP), R0
+ LDARB (R0), R0
+ MOVB R0, ret+8(FP)
+ RET
+
// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* addr)
TEXT ·Load64(SB),NOSPLIT,$0-16
MOVD ptr+0(FP), R0
//go:noescape
func Load(ptr *uint32) uint32
+//go:noescape
+func Load8(ptr *uint8) uint8
+
//go:noescape
func Load64(ptr *uint64) uint64
MOVW R1, ret+8(FP)
RET
+// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* ptr)
+TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-9
+ MOVV ptr+0(FP), R1
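+ // SYNC on either side keeps the byte load ordered with the surrounding memory operations.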
+ SYNC
+ MOVBU 0(R1), R1
+ SYNC
+ MOVB R1, ret+8(FP)
+ RET
+
// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* ptr)
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$0-16
MOVV ptr+0(FP), R1
//go:noescape
func Load(ptr *uint32) uint32
+//go:noescape
+func Load8(ptr *uint8) uint8
+
// NO go:noescape annotation; *ptr escapes if result escapes (#31525)
func Loadp(ptr unsafe.Pointer) unsafe.Pointer
//go:noescape
func Load(ptr *uint32) uint32
+//go:noescape
+func Load8(ptr *uint8) uint8
+
//go:noescape
func Load64(ptr *uint64) uint64
MOVW R3, ret+8(FP)
RET
+// uint8 runtime∕internal∕atomic·Load8(uint8 volatile* ptr)
+TEXT ·Load8(SB),NOSPLIT|NOFRAME,$-8-9
+ MOVD ptr+0(FP), R3
+ SYNC
+ MOVBZ 0(R3), R3
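+ // CMP/BC/ISYNC is the usual Power load-acquire idiom: the dependent branch plus ISYNC keeps later loads from being performed before this one completes.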
+ CMP R3, R3, CR7
+ BC 4, 30, 1(PC) // bne- cr7,0x4
+ ISYNC
+ MOVB R3, ret+8(FP)
+ RET
+
// uint64 runtime∕internal∕atomic·Load64(uint64 volatile* ptr)
TEXT ·Load64(SB),NOSPLIT|NOFRAME,$-8-16
MOVD ptr+0(FP), R3
return *(*unsafe.Pointer)(ptr)
}
+//go:nosplit
+//go:noinline
+func Load8(ptr *uint8) uint8 {
+ return *ptr
+}
+
//go:nosplit
//go:noinline
func Load64(ptr *uint64) uint64 {
return *ptr
}
+//go:nosplit
+//go:noinline
+func Load8(ptr *uint8) uint8 {
+ return *ptr
+}
+
//go:nosplit
//go:noinline
func Load64(ptr *uint64) uint64 {
native_barrier2:
DMB MB_ISH
RET
+
+TEXT ·Load8(SB),NOSPLIT,$0-5
+ MOVW addr+0(FP), R0
+ MOVB (R0), R1
+
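+ // goarm >= 7: issue DMB directly; otherwise go through the memory_barrier helper.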
+ MOVB runtime·goarm(SB), R11
+ CMP $7, R11
+ BGE native_barrier
+ BL memory_barrier<>(SB)
+ B end
+native_barrier:
+ DMB MB_ISH
+end:
+ MOVB R1, ret+4(FP)
+ RET
+
BLT 2(PC)
DMB MB_ISH
RET
+
+TEXT ·Load8(SB),NOSPLIT|NOFRAME,$0-5
+ MOVW addr+0(FP), R0
+ MOVB (R0), R1
+
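+ // DMB exists only on ARMv7 and newer; older cores skip the barrier instruction.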
+ MOVB runtime·goarm(SB), R11
+ CMP $7, R11
+ BLT 2(PC)
+ DMB MB_ISH
+
+ MOVB R1, ret+4(FP)
+ RET