"cmd/compile/internal/types"
"cmd/internal/obj"
"cmd/internal/obj/ppc64"
+ "internal/abi"
"internal/buildcfg"
"math"
"strings"
// AuxInt encodes how many buffer entries we need.
p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
- case ssa.OpPPC64LoweredPanicBoundsA, ssa.OpPPC64LoweredPanicBoundsB, ssa.OpPPC64LoweredPanicBoundsC:
- p := s.Prog(obj.ACALL)
+ case ssa.OpPPC64LoweredPanicBoundsRR, ssa.OpPPC64LoweredPanicBoundsRC, ssa.OpPPC64LoweredPanicBoundsCR, ssa.OpPPC64LoweredPanicBoundsCC:
+ // Compute the constant we put in the PCData entry for this call.
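+ // The encoded value records the report code, signedness, and, for each
+ // of x and y, either the register holding the value (as an index relative
+ // to R3) or the constant value itself.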
+ code, signed := ssa.BoundsKind(v.AuxInt).Code()
+ xIsReg := false
+ yIsReg := false
+ xVal := 0
+ yVal := 0
+ switch v.Op {
+ case ssa.OpPPC64LoweredPanicBoundsRR:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ yIsReg = true
+ yVal = int(v.Args[1].Reg() - ppc64.REG_R3)
+ case ssa.OpPPC64LoweredPanicBoundsRC:
+ xIsReg = true
+ xVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ c := v.Aux.(ssa.PanicBoundsC).C
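+ // Constants in [0, abi.BoundsMaxConst] can be encoded directly in the
+ // PCData value; anything else is materialized into a register first.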
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ if yVal == xVal {
+ yVal = 1
+ }
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(yVal)
+ }
+ case ssa.OpPPC64LoweredPanicBoundsCR:
+ yIsReg = true
+ yVal = int(v.Args[0].Reg() - ppc64.REG_R3)
+ c := v.Aux.(ssa.PanicBoundsC).C
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ if xVal == yVal {
+ xVal = 1
+ }
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(xVal)
+ }
+ case ssa.OpPPC64LoweredPanicBoundsCC:
+ c := v.Aux.(ssa.PanicBoundsCC).Cx
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ xVal = int(c)
+ } else {
+ // Move constant to a register
+ xIsReg = true
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(xVal)
+ }
+ c = v.Aux.(ssa.PanicBoundsCC).Cy
+ if c >= 0 && c <= abi.BoundsMaxConst {
+ yVal = int(c)
+ } else {
+ // Move constant to a register
+ yIsReg = true
+ yVal = 1
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = c
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R3 + int16(yVal)
+ }
+ }
+ c := abi.BoundsEncode(code, signed, xIsReg, yIsReg, xVal, yVal)
+
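+ // Attach the encoded value to the call as a PCDATA entry. The runtime's
+ // panicBounds handler looks it up by PC and, together with the registers
+ // it saves, reconstructs the failing index and bound.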
+ p := s.Prog(obj.APCDATA)
+ p.From.SetConst(abi.PCDATA_PanicBounds)
+ p.To.SetConst(int64(c))
+ p = s.Prog(obj.ACALL)
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_EXTERN
- p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
- s.UseArgs(16) // space used in callee args area by assembly stubs
+ p.To.Sym = ir.Syms.PanicBounds
case ssa.OpPPC64LoweredNilCheck:
if buildcfg.GOOS == "aix" {
// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
-(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+(PanicBounds ...) => (LoweredPanicBoundsRR ...)
+(LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+(LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+(LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+(LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
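+// For example, a bounds failure whose y operand is the constant 10 rewrites
+// in two steps:
+//   (PanicBounds [kind] x (MOVDconst [10]) mem)
+//   => (LoweredPanicBoundsRR [kind] x (MOVDconst [10]) mem)
+//   => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:10}} mem)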
// Optimizations
// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
callerSave = regMask(gp | fp | gr | xer)
- r3 = buildReg("R3")
- r4 = buildReg("R4")
- r5 = buildReg("R5")
- r6 = buildReg("R6")
+ first8 = buildReg("R3 R4 R5 R6 R7 R8 R9 R10")
)
ops := []opData{
{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
{name: "LoweredWB", argLength: 1, reg: regInfo{clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31"), outputs: []regMask{buildReg("R29")}}, clobberFlags: true, aux: "Int64"},
{name: "LoweredPubBarrier", argLength: 1, asm: "LWSYNC", hasSideEffects: true}, // Do data barrier. arg0=memory
- // There are three of these functions so that they can have three different register inputs.
- // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
- // default registers to match so we don't need to copy registers around unnecessarily.
- {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
- {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // LoweredPanicBoundsRR takes x and y, two values that caused a bounds check to fail.
+ // The RC and CR versions are used when one of the arguments is a constant. CC is used
+ // when both are constant (normally both 0, since prove derives that a bounds failure
+ // at index 0 implies the length must also have been 0).
+ // AuxInt contains a report code (see PanicBounds in genericOps.go).
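+ // Here x is generally the offending index (or low bound) and y is the length,
+ // capacity, or high bound it was compared against.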
+ {name: "LoweredPanicBoundsRR", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{first8, first8}}, typ: "Mem", call: true}, // arg0=x, arg1=y, arg2=mem, returns memory.
+ {name: "LoweredPanicBoundsRC", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first8}}, typ: "Mem", call: true}, // arg0=x, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCR", argLength: 2, aux: "PanicBoundsC", reg: regInfo{inputs: []regMask{first8}}, typ: "Mem", call: true}, // arg0=y, arg1=mem, returns memory.
+ {name: "LoweredPanicBoundsCC", argLength: 1, aux: "PanicBoundsCC", reg: regInfo{}, typ: "Mem", call: true}, // arg0=mem, returns memory.
// (InvertFlags (CMP a b)) == (CMP b a)
// So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
OpPPC64LoweredAtomicOr32
OpPPC64LoweredWB
OpPPC64LoweredPubBarrier
- OpPPC64LoweredPanicBoundsA
- OpPPC64LoweredPanicBoundsB
- OpPPC64LoweredPanicBoundsC
+ OpPPC64LoweredPanicBoundsRR
+ OpPPC64LoweredPanicBoundsRC
+ OpPPC64LoweredPanicBoundsCR
+ OpPPC64LoweredPanicBoundsCC
OpPPC64InvertFlags
OpPPC64FlagEQ
OpPPC64FlagLT
reg: regInfo{},
},
{
- name: "LoweredPanicBoundsA",
+ name: "LoweredPanicBoundsRR",
auxType: auxInt64,
argLen: 3,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 32}, // R5
- {1, 64}, // R6
+ {0, 2040}, // R3 R4 R5 R6 R7 R8 R9 R10
+ {1, 2040}, // R3 R4 R5 R6 R7 R8 R9 R10
},
},
},
{
- name: "LoweredPanicBoundsB",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsRC",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 16}, // R4
- {1, 32}, // R5
+ {0, 2040}, // R3 R4 R5 R6 R7 R8 R9 R10
},
},
},
{
- name: "LoweredPanicBoundsC",
- auxType: auxInt64,
- argLen: 3,
+ name: "LoweredPanicBoundsCR",
+ auxType: auxPanicBoundsC,
+ argLen: 2,
call: true,
reg: regInfo{
inputs: []inputInfo{
- {0, 8}, // R3
- {1, 16}, // R4
+ {0, 2040}, // R3 R4 R5 R6 R7 R8 R9 R10
},
},
},
+ {
+ name: "LoweredPanicBoundsCC",
+ auxType: auxPanicBoundsCC,
+ argLen: 1,
+ call: true,
+ reg: regInfo{},
+ },
{
name: "InvertFlags",
argLen: 1,
return rewriteValuePPC64_OpPPC64LessEqual(v)
case OpPPC64LessThan:
return rewriteValuePPC64_OpPPC64LessThan(v)
+ case OpPPC64LoweredPanicBoundsCR:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsCR(v)
+ case OpPPC64LoweredPanicBoundsRC:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsRC(v)
+ case OpPPC64LoweredPanicBoundsRR:
+ return rewriteValuePPC64_OpPPC64LoweredPanicBoundsRR(v)
case OpPPC64MFVSRD:
return rewriteValuePPC64_OpPPC64MFVSRD(v)
case OpPPC64MOVBZload:
case OpPPC64XORconst:
return rewriteValuePPC64_OpPPC64XORconst(v)
case OpPanicBounds:
- return rewriteValuePPC64_OpPanicBounds(v)
+ v.Op = OpPPC64LoweredPanicBoundsRR
+ return true
case OpPopCount16:
return rewriteValuePPC64_OpPopCount16(v)
case OpPopCount32:
return true
}
}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsCR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpPPC64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: p.C, Cy: c})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsRC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ p := auxToPanicBoundsC(v.Aux)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ mem := v_1
+ v.reset(OpPPC64LoweredPanicBoundsCC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCCToAux(PanicBoundsCC{Cx: c, Cy: p.C})
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64LoweredPanicBoundsRR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem)
+ // result: (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpPPC64LoweredPanicBoundsRC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem)
+ // result: (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredPanicBoundsCR)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.Aux = panicBoundsCToAux(PanicBoundsC{C: c})
+ v.AddArg2(y, mem)
+ return true
+ }
+ return false
+}
func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
}
return false
}
-func rewriteValuePPC64_OpPanicBounds(v *Value) bool {
- v_2 := v.Args[2]
- v_1 := v.Args[1]
- v_0 := v.Args[0]
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 0
- // result: (LoweredPanicBoundsA [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 0) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsA)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 1
- // result: (LoweredPanicBoundsB [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 1) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsB)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- // match: (PanicBounds [kind] x y mem)
- // cond: boundsABI(kind) == 2
- // result: (LoweredPanicBoundsC [kind] x y mem)
- for {
- kind := auxIntToInt64(v.AuxInt)
- x := v_0
- y := v_1
- mem := v_2
- if !(boundsABI(kind) == 2) {
- break
- }
- v.reset(OpPPC64LoweredPanicBoundsC)
- v.AuxInt = int64ToAuxInt(kind)
- v.AddArg3(x, y, mem)
- return true
- }
- return false
-}
func rewriteValuePPC64_OpPopCount16(v *Value) bool {
v_0 := v.Args[0]
b := v.Block
TW $31, R0, R0
RET
#endif
-// Note: these functions use a special calling convention to save generated code space.
-// Arguments are passed in registers, but the space for those arguments are allocated
-// in the caller's stack frame. These stubs write the args into that stack space and
-// then tail call to the corresponding runtime handler.
-// The tail call makes these stubs disappear in backtraces.
-TEXT runtime·panicIndex<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndex<ABIInternal>(SB)
-TEXT runtime·panicIndexU<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicIndexU<ABIInternal>(SB)
-TEXT runtime·panicSliceAlen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAlen<ABIInternal>(SB)
-TEXT runtime·panicSliceAlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAlenU<ABIInternal>(SB)
-TEXT runtime·panicSliceAcap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAcap<ABIInternal>(SB)
-TEXT runtime·panicSliceAcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSliceAcapU<ABIInternal>(SB)
-TEXT runtime·panicSliceB<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceB<ABIInternal>(SB)
-TEXT runtime·panicSliceBU<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSliceBU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Alen<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3Alen<ABIInternal>(SB)
-TEXT runtime·panicSlice3AlenU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3AlenU<ABIInternal>(SB)
-TEXT runtime·panicSlice3Acap<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3Acap<ABIInternal>(SB)
-TEXT runtime·panicSlice3AcapU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSlice3AcapU<ABIInternal>(SB)
-TEXT runtime·panicSlice3B<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSlice3B<ABIInternal>(SB)
-TEXT runtime·panicSlice3BU<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R4, R3
- MOVD R5, R4
- JMP runtime·goPanicSlice3BU<ABIInternal>(SB)
-TEXT runtime·panicSlice3C<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3C<ABIInternal>(SB)
-TEXT runtime·panicSlice3CU<ABIInternal>(SB),NOSPLIT,$0-16
- JMP runtime·goPanicSlice3CU<ABIInternal>(SB)
-TEXT runtime·panicSliceConvert<ABIInternal>(SB),NOSPLIT,$0-16
- MOVD R5, R3
- MOVD R6, R4
- JMP runtime·goPanicSliceConvert<ABIInternal>(SB)
+
+TEXT runtime·panicBounds<ABIInternal>(SB),NOSPLIT,$104-0
+ // Note: frame size is 16 bytes larger than necessary
+ // in order to pacify vet, which doesn't understand the
+ // ppc64 frame layout.
+ NO_LOCAL_POINTERS
+ // Save all 8 int registers that could have an index in them.
+ // They may be pointers, but if so they are dead.
+ // Skip R0 aka ZERO, R1 aka SP, R2 aka SB
+ MOVD R3, 48(R1)
+ MOVD R4, 56(R1)
+ MOVD R5, 64(R1)
+ MOVD R6, 72(R1)
+ MOVD R7, 80(R1)
+ MOVD R8, 88(R1)
+ MOVD R9, 96(R1)
+ MOVD R10, 104(R1)
+ // Note: we only save 8 registers to stay under the nosplit stack limit.
+ // Also, R11 is clobbered in dynamic linking situations
+
+ MOVD LR, R3 // PC immediately after call to panicBounds
+ ADD $48, R1, R4 // pointer to save area
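+ // Under ABIInternal, R3 and R4 are the first two integer argument
+ // registers, so they carry the pc and save-area arguments to panicBounds64.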
+ CALL runtime·panicBounds64<ABIInternal>(SB)
+ RET
// These functions are used when internal linking cgo with external
// objects compiled with the -Os on gcc. They reduce prologue/epilogue