p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.OpAMD64LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ mov := x86.AMOVQ
+ if gc.Widthptr == 4 {
+ mov = x86.AMOVL
+ }
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
case ssa.OpARMLoweredGetClosurePtr:
// Closure pointer is R7 (arm.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARMLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpARMFlagEQ,
ssa.OpARMFlagLT_ULT,
ssa.OpARMFlagLT_UGT,
case ssa.OpARM64LoweredGetClosurePtr:
// Closure pointer is R26 (arm64.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpARM64FlagEQ,
ssa.OpARM64FlagLT_ULT,
ssa.OpARM64FlagLT_UGT,
if s.IsBlank() {
return
}
- if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc") {
- // runtime.getg(), getclosureptr(), and getcallerpc() are
- // not real functions and so do not get funcsyms.
+ if compiling_runtime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") {
+ // runtime.getg(), getclosureptr(), getcallerpc(), and
+ // getcallersp() are not real functions and so do not
+ // get funcsyms.
return
}
if _, existed := s.Pkg.LookupOK(funcsymname(s)); !existed {
return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
}, sys.AMD64, sys.I386)
+ add("runtime", "getcallersp",
+ func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
/******** runtime/internal/sys ********/
addF("runtime/internal/sys", "Ctz32",
func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
case ssa.OpMIPSLoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPSLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpClobber:
// TODO: implement for clobberdead experiment. Nop is ok for now.
default:
case ssa.OpMIPS64LoweredGetClosurePtr:
// Closure pointer is R22 (mips.REGCTXT).
gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPS64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpClobber:
// TODO: implement for clobberdead experiment. Nop is ok for now.
default:
// Closure pointer is R11 (already)
gc.CheckLoweredGetClosurePtr(v)
+ case ssa.OpPPC64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
case ssa.OpPPC64LoweredRound32F, ssa.OpPPC64LoweredRound64F:
// input is already rounded
p.From.Reg = s390x.REGG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
+ case ssa.OpS390XLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
s.Call(v)
case ssa.OpS390XFLOGR, ssa.OpS390XNEG, ssa.OpS390XNEGW,
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerPC) -> (LoweredGetCallerPC)
+(GetCallerSP) -> (LoweredGetCallerSP)
(Addr {sym} base) -> (LEAL {sym} base)
// block rewrites
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
//arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
(GetCallerPC) -> (LoweredGetCallerPC)
+(GetCallerSP) -> (LoweredGetCallerSP)
(Addr {sym} base) && config.PtrSize == 8 -> (LEAQ {sym} base)
(Addr {sym} base) && config.PtrSize == 4 -> (LEAL {sym} base)
// the result should be the PC within f that g will return to.
// See runtime/stubs.go for a more detailed discussion.
{name: "LoweredGetCallerPC", reg: gp01},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
//arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
+(GetCallerSP) -> (LoweredGetCallerSP)
(Convert x mem) -> (MOVWconvert x mem)
// Absorb pseudo-ops into blocks.
// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
+(GetCallerSP) -> (LoweredGetCallerSP)
(Convert x mem) -> (MOVDconvert x mem)
// Absorb pseudo-ops into blocks.
// use of R26 (arm64.REGCTXT, the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
// MOVDconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// use of R7 (arm.REGCTXT, the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R7")}}},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
// MOVWconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
+(GetCallerSP) -> (LoweredGetCallerSP)
(Convert x mem) -> (MOVWconvert x mem)
(If cond yes no) -> (NE cond yes no)
// pseudo-ops
(GetClosurePtr) -> (LoweredGetClosurePtr)
+(GetCallerSP) -> (LoweredGetCallerSP)
(Convert x mem) -> (MOVVconvert x mem)
(If cond yes no) -> (NE cond yes no)
// use of R22 (mips.REGCTXT, the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
// MOVDconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// use of R22 (mips.REGCTXT, the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
// MOVWconvert converts between pointers and integers.
// We have a special op for this so as to not confuse GC
// (particularly stack maps). It takes a memory arg so it
// Miscellaneous
(Convert <t> x mem) -> (MOVDconvert <t> x mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
+(GetCallerSP) -> (LoweredGetCallerSP)
(IsNonNil ptr) -> (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) -> (LessThan (CMPU idx len))
(IsSliceInBounds idx len) -> (LessEqual (CMPU idx len))
// use of the closure pointer.
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
//arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
// Round ops to block fused-multiply-add extraction.
(NilCheck ptr mem) -> (LoweredNilCheck ptr mem)
(GetG mem) -> (LoweredGetG mem)
(GetClosurePtr) -> (LoweredGetClosurePtr)
+(GetCallerSP) -> (LoweredGetCallerSP)
(Addr {sym} base) -> (MOVDaddr {sym} base)
(ITab (Load ptr mem)) -> (MOVDload ptr mem)
// use of R12 (the closure pointer)
{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}},
// arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
// Round ops to block fused-multiply-add extraction.
{name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true},
{name: "GetG", argLength: 1}, // runtime.getg() (read g pointer). arg0=mem
{name: "GetClosurePtr"}, // get closure pointer from dedicated register
{name: "GetCallerPC"}, // for getcallerpc intrinsic
+ {name: "GetCallerSP"}, // for getcallersp intrinsic
// Indexing operations
{name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
Op386LoweredGetG
Op386LoweredGetClosurePtr
Op386LoweredGetCallerPC
+ Op386LoweredGetCallerSP
Op386LoweredNilCheck
Op386MOVLconvert
Op386FlagEQ
OpAMD64LoweredGetG
OpAMD64LoweredGetClosurePtr
OpAMD64LoweredGetCallerPC
+ OpAMD64LoweredGetCallerSP
OpAMD64LoweredNilCheck
OpAMD64MOVQconvert
OpAMD64MOVLconvert
OpARMLoweredZero
OpARMLoweredMove
OpARMLoweredGetClosurePtr
+ OpARMLoweredGetCallerSP
OpARMMOVWconvert
OpARMFlagEQ
OpARMFlagLT_ULT
OpARM64DUFFCOPY
OpARM64LoweredMove
OpARM64LoweredGetClosurePtr
+ OpARM64LoweredGetCallerSP
OpARM64MOVDconvert
OpARM64FlagEQ
OpARM64FlagLT_ULT
OpMIPSFPFlagTrue
OpMIPSFPFlagFalse
OpMIPSLoweredGetClosurePtr
+ OpMIPSLoweredGetCallerSP
OpMIPSMOVWconvert
OpMIPS64ADDV
OpMIPS64FPFlagTrue
OpMIPS64FPFlagFalse
OpMIPS64LoweredGetClosurePtr
+ OpMIPS64LoweredGetCallerSP
OpMIPS64MOVVconvert
OpPPC64ADD
OpPPC64GreaterEqual
OpPPC64FGreaterEqual
OpPPC64LoweredGetClosurePtr
+ OpPPC64LoweredGetCallerSP
OpPPC64LoweredNilCheck
OpPPC64LoweredRound32F
OpPPC64LoweredRound64F
OpS390XInvertFlags
OpS390XLoweredGetG
OpS390XLoweredGetClosurePtr
+ OpS390XLoweredGetCallerSP
OpS390XLoweredNilCheck
OpS390XLoweredRound32F
OpS390XLoweredRound64F
OpGetG
OpGetClosurePtr
OpGetCallerPC
+ OpGetCallerSP
OpPtrIndex
OpOffPtr
OpSliceMake
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
{
name: "LoweredNilCheck",
argLen: 2,
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65519}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
{
name: "LoweredNilCheck",
argLen: 2,
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
{
name: "MOVWconvert",
argLen: 2,
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
{
name: "MOVDconvert",
argLen: 2,
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
{
name: "MOVWconvert",
argLen: 2,
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
{
name: "MOVVconvert",
argLen: 2,
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
{
name: "LoweredNilCheck",
argLen: 2,
},
},
},
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
{
name: "LoweredNilCheck",
argLen: 2,
argLen: 0,
generic: true,
},
+ {
+ name: "GetCallerSP",
+ argLen: 0,
+ generic: true,
+ },
{
name: "PtrIndex",
argLen: 2,
return rewriteValue386_OpGeq8U_0(v)
case OpGetCallerPC:
return rewriteValue386_OpGetCallerPC_0(v)
+ case OpGetCallerSP:
+ return rewriteValue386_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValue386_OpGetClosurePtr_0(v)
case OpGetG:
return true
}
}
+// rewriteValue386_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// 386-specific LoweredGetCallerSP op. The rule is unconditional (no cond, no
+// args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the 386 .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValue386_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(Op386LoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValue386_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
return rewriteValueAMD64_OpGeq8U_0(v)
case OpGetCallerPC:
return rewriteValueAMD64_OpGetCallerPC_0(v)
+ case OpGetCallerSP:
+ return rewriteValueAMD64_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValueAMD64_OpGetClosurePtr_0(v)
case OpGetG:
return true
}
}
+// rewriteValueAMD64_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// AMD64-specific LoweredGetCallerSP op. The rule is unconditional (no cond,
+// no args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the AMD64 .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValueAMD64_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(OpAMD64LoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValueAMD64_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
return rewriteValueARM_OpGeq8_0(v)
case OpGeq8U:
return rewriteValueARM_OpGeq8U_0(v)
+ case OpGetCallerSP:
+ return rewriteValueARM_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValueARM_OpGetClosurePtr_0(v)
case OpGreater16:
return true
}
}
+// rewriteValueARM_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// ARM-specific LoweredGetCallerSP op. The rule is unconditional (no cond, no
+// args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the ARM .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValueARM_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(OpARMLoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValueARM_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
return rewriteValueARM64_OpGeq8_0(v)
case OpGeq8U:
return rewriteValueARM64_OpGeq8U_0(v)
+ case OpGetCallerSP:
+ return rewriteValueARM64_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValueARM64_OpGetClosurePtr_0(v)
case OpGreater16:
return true
}
}
+// rewriteValueARM64_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// ARM64-specific LoweredGetCallerSP op. The rule is unconditional (no cond,
+// no args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the ARM64 .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValueARM64_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(OpARM64LoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValueARM64_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
return rewriteValueMIPS_OpGeq8_0(v)
case OpGeq8U:
return rewriteValueMIPS_OpGeq8U_0(v)
+ case OpGetCallerSP:
+ return rewriteValueMIPS_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValueMIPS_OpGetClosurePtr_0(v)
case OpGreater16:
return true
}
}
+// rewriteValueMIPS_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// MIPS-specific LoweredGetCallerSP op. The rule is unconditional (no cond, no
+// args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the MIPS .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValueMIPS_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(OpMIPSLoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValueMIPS_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
return rewriteValueMIPS64_OpGeq8_0(v)
case OpGeq8U:
return rewriteValueMIPS64_OpGeq8U_0(v)
+ case OpGetCallerSP:
+ return rewriteValueMIPS64_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValueMIPS64_OpGetClosurePtr_0(v)
case OpGreater16:
return true
}
}
+// rewriteValueMIPS64_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// MIPS64-specific LoweredGetCallerSP op. The rule is unconditional (no cond,
+// no args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the MIPS64 .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValueMIPS64_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(OpMIPS64LoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValueMIPS64_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
return rewriteValuePPC64_OpGeq8_0(v)
case OpGeq8U:
return rewriteValuePPC64_OpGeq8U_0(v)
+ case OpGetCallerSP:
+ return rewriteValuePPC64_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValuePPC64_OpGetClosurePtr_0(v)
case OpGreater16:
return true
}
}
+// rewriteValuePPC64_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// PPC64-specific LoweredGetCallerSP op. The rule is unconditional (no cond,
+// no args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the PPC64 .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValuePPC64_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(OpPPC64LoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValuePPC64_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
return rewriteValueS390X_OpGeq8_0(v)
case OpGeq8U:
return rewriteValueS390X_OpGeq8U_0(v)
+ case OpGetCallerSP:
+ return rewriteValueS390X_OpGetCallerSP_0(v)
case OpGetClosurePtr:
return rewriteValueS390X_OpGetClosurePtr_0(v)
case OpGetG:
return true
}
}
+// rewriteValueS390X_OpGetCallerSP_0 lowers the generic GetCallerSP op to the
+// S390X-specific LoweredGetCallerSP op. The rule is unconditional (no cond,
+// no args), so it always fires and always reports true.
+// NOTE(review): this looks machine-generated from the S390X .rules file —
+// regenerate via the rulegen tool rather than hand-editing.
+func rewriteValueS390X_OpGetCallerSP_0(v *Value) bool {
+	// match: (GetCallerSP)
+	// cond:
+	// result: (LoweredGetCallerSP)
+	for {
+		v.reset(OpS390XLoweredGetCallerSP)
+		return true
+	}
+}
func rewriteValueS390X_OpGetClosurePtr_0(v *Value) bool {
// match: (GetClosurePtr)
// cond:
p.To.Type = obj.TYPE_REG
p.To.Reg = v.Reg()
+ case ssa.Op386LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -gc.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
s.Call(v)
case ssa.Op386NEGL,
package runtime
-import (
- "runtime/internal/sys"
- "unsafe"
-)
+import "unsafe"
// Should be a built-in for unsafe.Pointer?
//go:nosplit
//go:noescape
func getcallerpc() uintptr
-//go:nosplit
-func getcallersp(argp unsafe.Pointer) uintptr {
- return uintptr(argp) - sys.MinFrameSize
-}
+//go:noescape
+func getcallersp(argp unsafe.Pointer) uintptr // implemented as an intrinsic on all platforms
// getclosureptr returns the pointer to the current closure.
// getclosureptr can only be used in an assignment statement