}

func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
- var mov, add, sub obj.As
+ var mov, add obj.As
if c.ctxt.Arch.Family == sys.MIPS64 {
add = AADDV
mov = AMOVV
- sub = ASUBVU
} else {
add = AADDU
mov = AMOVW
- sub = ASUBU
}
// MOV g_stackguard(g), R1
p.Reg = REG_R1
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R1
- } else if framesize <= objabi.StackBig {
- // large stack: SP-framesize < stackguard-StackSmall
- // ADD $-(framesize-StackSmall), SP, R2
- // SGTU R2, stackguard, R1
- p = obj.Appendp(p, c.newprog)
-
- p.As = add
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = -(int64(framesize) - objabi.StackSmall)
- p.Reg = REGSP
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
-
- p = obj.Appendp(p, c.newprog)
- p.As = ASGTU
- p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R2
- p.Reg = REG_R1
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R1
} else {
- // Such a large stack we need to protect against wraparound.
- // If SP is close to zero:
- // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
- // The +StackGuard on both sides is required to keep the left side positive:
- // SP is allowed to be slightly below stackguard. See stack.h.
- //
- // Preemption sets stackguard to StackPreempt, a very large value.
- // That breaks the math above, so we have to check for that explicitly.
- // // stackguard is R1
- // MOV $StackPreempt, R2
- // BEQ R1, R2, label-of-call-to-morestack
- // ADD $StackGuard, SP, R2
- // SUB R1, R2
- // MOV $(framesize+(StackGuard-StackSmall)), R1
- // SGTU R2, R1, R1
- p = obj.Appendp(p, c.newprog)
+ // large stack: SP-framesize < stackguard-StackSmall
+ offset := int64(framesize) - objabi.StackSmall
+ if framesize > objabi.StackBig {
+ // The frame is large enough that we must protect against
+ // underflow: the runtime guarantees SP > objabi.StackBig,
+ // but framesize may exceed it, so SP-framesize can
+ // underflow, causing a direct comparison with the
+ // stack guard to incorrectly succeed. We explicitly
+ // guard against underflow first.
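+ // For example, on mips64 with framesize = 1<<20 and
+ // SP = 8192, SP-framesize wraps around to a value near
+ // 1<<64, which compares greater than any stack guard
+ // address and would wrongly pass the check below.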
+ //
+ // SGTU $(framesize-StackSmall), SP, R2
+ // BNE R2, label-of-call-to-morestack
+
+ p = obj.Appendp(p, c.newprog)
+ p.As = ASGTU
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = offset
+ p.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R2
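+
+ // R2 = 1 if framesize-StackSmall > SP (unsigned), i.e. if
+ // SP-(framesize-StackSmall) would underflow.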
- p.As = mov
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = objabi.StackPreempt
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
+ p = obj.Appendp(p, c.newprog)
+ q = p
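+ // q records this branch so its target can be set to the
+ // morestack call once that call is emitted.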
+ p.As = ABNE
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R2
+ p.To.Type = obj.TYPE_BRANCH
+ p.Mark |= BRANCH
+ }
+
+ // Check against the stack guard. We've ensured this won't underflow.
+ // ADD $-(framesize-StackSmall), SP, R2
+ // SGTU R2, stackguard, R1
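+ // R1 != 0 afterwards means the frame fits; R1 == 0 means
+ // we must call morestack.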
p = obj.Appendp(p, c.newprog)
- q = p
- p.As = ABEQ
- p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.Reg = REG_R2
- p.To.Type = obj.TYPE_BRANCH
- p.Mark |= BRANCH
- p = obj.Appendp(p, c.newprog)
p.As = add
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(objabi.StackGuard)
+ p.From.Offset = -offset
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R2
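+ // R2 = SP - (framesize - StackSmall); guaranteed not to
+ // underflow, see above.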
- p = obj.Appendp(p, c.newprog)
- p.As = sub
- p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R1
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R2
-
- p = obj.Appendp(p, c.newprog)
- p.As = mov
- p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(framesize) + int64(objabi.StackGuard) - objabi.StackSmall
- p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R1
-
p = obj.Appendp(p, c.newprog)
p.As = ASGTU
p.From.Type = obj.TYPE_REG