}
func TestARMEndToEnd(t *testing.T) {
- defer func(old int) { buildcfg.GOARM = old }(buildcfg.GOARM)
+ defer func(old int) { buildcfg.GOARM.Version = old }(buildcfg.GOARM.Version)
for _, goarm := range []int{5, 6, 7} {
t.Logf("GOARM=%d", goarm)
- buildcfg.GOARM = goarm
+ buildcfg.GOARM.Version = goarm
testEndToEnd(t, "arm", "arm")
if goarm == 6 {
testEndToEnd(t, "arm", "armv6")
arch.LinkArch = &arm.Linkarm
arch.REGSP = arm.REGSP
arch.MAXWIDTH = (1 << 32) - 1
- arch.SoftFloat = buildcfg.GOARM == 5
+ arch.SoftFloat = buildcfg.GOARM.SoftFloat
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
case ssa.OpARMANDconst, ssa.OpARMBICconst:
// try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
// BFC is only available on ARMv7, and its result and source are in the same register
- if buildcfg.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
+ if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() {
var val uint32
if v.Op == ssa.OpARMANDconst {
val = ^uint32(v.AuxInt)
default:
}
}
- if buildcfg.GOARM >= 6 {
+ if buildcfg.GOARM.Version >= 6 {
// generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
return
// count trailing zero for ARMv5 and ARMv6
// 32 - CLZ(x&-x - 1)
-(Ctz32 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz32 <t> x) && buildcfg.GOARM.Version<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
-(Ctz16 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz16 <t> x) && buildcfg.GOARM.Version<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
-(Ctz8 <t> x) && buildcfg.GOARM<=6 =>
+(Ctz8 <t> x) && buildcfg.GOARM.Version<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
// count trailing zero for ARMv7
-(Ctz32 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <t> x))
-(Ctz16 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
-(Ctz8 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+(Ctz32 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <t> x))
+(Ctz16 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) && buildcfg.GOARM.Version==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
// bit length
(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
// t5 = x right rotate 8 bits -- (d, a, b, c )
// result = t4 ^ t5 -- (d, c, b, a )
// using shifted ops this can be done in 4 instructions.
-(Bswap32 <t> x) && buildcfg.GOARM==5 =>
+(Bswap32 <t> x) && buildcfg.GOARM.Version==5 =>
(XOR <t>
(SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8])
(SRRconst <t> x [8]))
// byte swap for ARMv6 and above
-(Bswap32 x) && buildcfg.GOARM>=6 => (REV x)
+(Bswap32 x) && buildcfg.GOARM.Version>=6 => (REV x)
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x)
(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x)
(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x)
-(ADDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
-(SUBconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
-(ANDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
-(BICconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
+(SUBconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
+(ANDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d])
(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
// UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by
// ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL.
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM>=6 => (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM.Version>=6 => (REV16 x)
// use indexed loads and stores
(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem)
(BIC x x) => (MOVWconst [0])
(ADD (MUL x y) a) => (MULA x y a)
-(SUB a (MUL x y)) && buildcfg.GOARM == 7 => (MULS x y a)
-(RSB (MUL x y) a) && buildcfg.GOARM == 7 => (MULS x y a)
+(SUB a (MUL x y)) && buildcfg.GOARM.Version == 7 => (MULS x y a)
+(RSB (MUL x y) a) && buildcfg.GOARM.Version == 7 => (MULS x y a)
-(NEGF (MULF x y)) && buildcfg.GOARM >= 6 => (NMULF x y)
-(NEGD (MULD x y)) && buildcfg.GOARM >= 6 => (NMULD x y)
-(MULF (NEGF x) y) && buildcfg.GOARM >= 6 => (NMULF x y)
-(MULD (NEGD x) y) && buildcfg.GOARM >= 6 => (NMULD x y)
+(NEGF (MULF x y)) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(NEGD (MULD x y)) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
+(MULF (NEGF x) y) && buildcfg.GOARM.Version >= 6 => (NMULF x y)
+(MULD (NEGD x) y) && buildcfg.GOARM.Version >= 6 => (NMULD x y)
(NMULF (NEGF x) y) => (MULF x y)
(NMULD (NEGD x) y) => (MULD x y)
// the result will overwrite the addend, since they are in the same register
-(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
-(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
-(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
-(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
-(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
-(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
-(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
-(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
+(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
+(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y)
+(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y)
+(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y)
+(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y)
(AND x (MVN y)) => (BIC x y)
(CMPD x (MOVDconst [0])) => (CMPD0 x)
// bit extraction
-(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
-(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
+(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
+(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
// comparison simplification
((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ADDD a (MULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAD a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULAD)
break
}
// match: (ADDD a (NMULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSD a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULSD)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (ADDF a (MULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAF a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULAF)
break
}
// match: (ADDF a (NMULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSF a x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMMULSF)
return true
}
// match: (ADDconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
// result: (SUBconst [-c] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
break
}
v.reset(OpARMSUBconst)
return true
}
// match: (ADDshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV16 x)
for {
if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
break
}
x := v_0_0.Args[0]
- if x != v_1 || !(buildcfg.GOARM >= 6) {
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV16)
return true
}
// match: (ANDconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
// result: (BICconst [int32(^uint32(c))] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
break
}
v.reset(OpARMBICconst)
return true
}
// match: (BICconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
// result: (ANDconst [int32(^uint32(c))] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
break
}
v.reset(OpARMANDconst)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MULD (NEGD x) y)
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULD x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
}
x := v_0.Args[0]
y := v_1
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMNMULD)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (MULF (NEGF x) y)
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULF x y)
for {
for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
}
x := v_0.Args[0]
y := v_1
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
continue
}
v.reset(OpARMNMULF)
func rewriteValueARM_OpARMNEGD(v *Value) bool {
v_0 := v.Args[0]
// match: (NEGD (MULD x y))
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULD x y)
for {
if v_0.Op != OpARMMULD {
}
y := v_0.Args[1]
x := v_0.Args[0]
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMNMULD)
func rewriteValueARM_OpARMNEGF(v *Value) bool {
v_0 := v.Args[0]
// match: (NEGF (MULF x y))
- // cond: buildcfg.GOARM >= 6
+ // cond: buildcfg.GOARM.Version >= 6
// result: (NMULF x y)
for {
if v_0.Op != OpARMMULF {
}
y := v_0.Args[1]
x := v_0.Args[0]
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMNMULF)
return true
}
// match: (ORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV16 x)
for {
if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
break
}
x := v_0_0.Args[0]
- if x != v_1 || !(buildcfg.GOARM >= 6) {
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV16)
return true
}
// match: (RSB (MUL x y) a)
- // cond: buildcfg.GOARM == 7
+ // cond: buildcfg.GOARM.Version == 7
// result: (MULS x y a)
for {
if v_0.Op != OpARMMUL {
y := v_0.Args[1]
x := v_0.Args[0]
a := v_1
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMMULS)
return true
}
// match: (SRAconst (SLLconst x [c]) [d])
- // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
// result: (BFX [(d-c)|(32-d)<<8] x)
for {
d := auxIntToInt32(v.AuxInt)
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
break
}
v.reset(OpARMBFX)
return true
}
// match: (SRLconst (SLLconst x [c]) [d])
- // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31
// result: (BFXU [(d-c)|(32-d)<<8] x)
for {
d := auxIntToInt32(v.AuxInt)
}
c := auxIntToInt32(v_0.AuxInt)
x := v_0.Args[0]
- if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
break
}
v.reset(OpARMBFXU)
return true
}
// match: (SUB a (MUL x y))
- // cond: buildcfg.GOARM == 7
+ // cond: buildcfg.GOARM.Version == 7
// result: (MULS x y a)
for {
a := v_0
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMMULS)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SUBD a (MULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSD a x y)
for {
a := v_0
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULSD)
return true
}
// match: (SUBD a (NMULD x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAD a x y)
for {
a := v_0
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULAD)
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SUBF a (MULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULSF a x y)
for {
a := v_0
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULSF)
return true
}
// match: (SUBF a (NMULF x y))
- // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6
// result: (MULAF a x y)
for {
a := v_0
}
y := v_1.Args[1]
x := v_1.Args[0]
- if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMMULAF)
return true
}
// match: (SUBconst [c] x)
- // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
// result: (ADDconst [-c] x)
for {
c := auxIntToInt32(v.AuxInt)
x := v_0
- if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
break
}
v.reset(OpARMADDconst)
return true
}
// match: (XORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV16 x)
for {
if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
break
}
x := v_0_0.Args[0]
- if x != v_1 || !(buildcfg.GOARM >= 6) {
+ if x != v_1 || !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV16)
v_0 := v.Args[0]
b := v.Block
// match: (Bswap32 <t> x)
- // cond: buildcfg.GOARM==5
+ // cond: buildcfg.GOARM.Version==5
// result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8]))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 5) {
+ if !(buildcfg.GOARM.Version == 5) {
break
}
v.reset(OpARMXOR)
return true
}
// match: (Bswap32 x)
- // cond: buildcfg.GOARM>=6
+ // cond: buildcfg.GOARM.Version>=6
// result: (REV x)
for {
x := v_0
- if !(buildcfg.GOARM >= 6) {
+ if !(buildcfg.GOARM.Version >= 6) {
break
}
v.reset(OpARMREV)
b := v.Block
typ := &b.Func.Config.Types
// match: (Ctz16 <t> x)
- // cond: buildcfg.GOARM<=6
+ // cond: buildcfg.GOARM.Version<=6
// result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM <= 6) {
+ if !(buildcfg.GOARM.Version <= 6) {
break
}
v.reset(OpARMRSBconst)
return true
}
// match: (Ctz16 <t> x)
- // cond: buildcfg.GOARM==7
+ // cond: buildcfg.GOARM.Version==7
// result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMCLZ)
v_0 := v.Args[0]
b := v.Block
// match: (Ctz32 <t> x)
- // cond: buildcfg.GOARM<=6
+ // cond: buildcfg.GOARM.Version<=6
// result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM <= 6) {
+ if !(buildcfg.GOARM.Version <= 6) {
break
}
v.reset(OpARMRSBconst)
return true
}
// match: (Ctz32 <t> x)
- // cond: buildcfg.GOARM==7
+ // cond: buildcfg.GOARM.Version==7
// result: (CLZ <t> (RBIT <t> x))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMCLZ)
b := v.Block
typ := &b.Func.Config.Types
// match: (Ctz8 <t> x)
- // cond: buildcfg.GOARM<=6
+ // cond: buildcfg.GOARM.Version<=6
// result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM <= 6) {
+ if !(buildcfg.GOARM.Version <= 6) {
break
}
v.reset(OpARMRSBconst)
return true
}
// match: (Ctz8 <t> x)
- // cond: buildcfg.GOARM==7
+ // cond: buildcfg.GOARM.Version==7
// result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
for {
t := v.Type
x := v_0
- if !(buildcfg.GOARM == 7) {
+ if !(buildcfg.GOARM.Version == 7) {
break
}
v.reset(OpARMCLZ)
// GOARM
// For GOARCH=arm, the ARM architecture for which to compile.
// Valid values are 5, 6, 7.
+// The value can be followed by an option specifying how to implement floating point instructions.
+// Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7).
// GO386
// For GOARCH=386, how to implement floating point instructions.
// Valid values are sse2 (default), softfloat.
GOARM
For GOARCH=arm, the ARM architecture for which to compile.
Valid values are 5, 6, 7.
+ The value can be followed by an option specifying how to implement floating point instructions.
+ Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7).
GO386
For GOARCH=386, how to implement floating point instructions.
Valid values are sse2 (default), softfloat.
if immrot(^uint32(c.instoffset)) != 0 {
return C_NCON
}
- if uint32(c.instoffset) <= 0xffff && buildcfg.GOARM == 7 {
+ if uint32(c.instoffset) <= 0xffff && buildcfg.GOARM.Version == 7 {
return C_SCON
}
if x, y := immrot2a(uint32(c.instoffset)); x != 0 && y != 0 {
}
func (c *ctxt5) chipzero5(e float64) int {
- // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
- if buildcfg.GOARM < 7 || math.Float64bits(e) != 0 {
+ // We use GOARM.Version=7 and !GOARM.SoftFloat to gate the use of VFPv3 vmov (imm) instructions.
+ if buildcfg.GOARM.Version < 7 || buildcfg.GOARM.SoftFloat || math.Float64bits(e) != 0 {
return -1
}
return 0
}
func (c *ctxt5) chipfloat5(e float64) int {
- // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
- if buildcfg.GOARM < 7 {
+ // We use GOARM.Version=7 and !GOARM.SoftFloat to gate the use of VFPv3 vmov (imm) instructions.
+ if buildcfg.GOARM.Version < 7 || buildcfg.GOARM.SoftFloat {
return -1
}
ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
}
- if buildcfg.GOARM < 7 {
+ if buildcfg.GOARM.Version < 7 {
// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
if progedit_tlsfallback == nil {
progedit_tlsfallback = ctxt.Lookup("runtime.read_tls_fallback")
archVariants = map[string][]string{
"386": {"GO386", "sse2", "softfloat"},
"amd64": {"GOAMD64", "v1", "v2", "v3", "v4"},
- "arm": {"GOARM", "5", "6", "7"},
+ "arm": {"GOARM", "5", "6", "7", "7,softfloat"},
"arm64": {},
"loong64": {},
"mips": {"GOMIPS", "hardfloat", "softfloat"},
sb := ctxt.loader.MakeSymbolUpdater(goarm)
sb.SetType(sym.SDATA)
sb.SetSize(0)
- sb.AddUint8(uint8(buildcfg.GOARM))
+ sb.AddUint8(uint8(buildcfg.GOARM.Version))
+
+ goarmsoftfp := ctxt.loader.LookupOrCreateSym("runtime.goarmsoftfp", 0)
+ sb2 := ctxt.loader.MakeSymbolUpdater(goarmsoftfp)
+ sb2.SetType(sym.SDATA)
+ sb2.SetSize(0)
+ if buildcfg.GOARM.SoftFloat {
+ sb2.AddUint8(1)
+ } else {
+ sb2.AddUint8(0)
+ }
}
// Set runtime.disableMemoryProfiling bool if
return int(defaultGOAMD64[len("v")] - '0')
}
-func goarm() int {
+type goarmFeatures struct {
+ Version int
+ SoftFloat bool
+}
+
+func (g goarmFeatures) String() string {
+ armStr := strconv.Itoa(g.Version)
+ if g.SoftFloat {
+ armStr += ",softfloat"
+ } else {
+ armStr += ",hardfloat"
+ }
+ return armStr
+}
+
+func goarm() (g goarmFeatures) {
+ const (
+ softFloatOpt = ",softfloat"
+ hardFloatOpt = ",hardfloat"
+ )
def := defaultGOARM
if GOOS == "android" && GOARCH == "arm" {
// Android arm devices always support GOARM=7.
def = "7"
}
- switch v := envOr("GOARM", def); v {
+ v := envOr("GOARM", def)
+
+ floatSpecified := false
+ if strings.HasSuffix(v, softFloatOpt) {
+ g.SoftFloat = true
+ floatSpecified = true
+ v = v[:len(v)-len(softFloatOpt)]
+ }
+ if strings.HasSuffix(v, hardFloatOpt) {
+ floatSpecified = true
+ v = v[:len(v)-len(hardFloatOpt)]
+ }
+
+ switch v {
case "5":
- return 5
+ g.Version = 5
case "6":
- return 6
+ g.Version = 6
case "7":
- return 7
+ g.Version = 7
+ default:
+ Error = fmt.Errorf("invalid GOARM: must start with 5, 6, or 7, and may optionally end in either %q or %q", hardFloatOpt, softFloatOpt)
+ g.Version = int(def[0] - '0')
}
- Error = fmt.Errorf("invalid GOARM: must be 5, 6, 7")
- return int(def[0] - '0')
+
+ // 5 defaults to softfloat. 6 and 7 default to hardfloat.
+ if !floatSpecified && g.Version == 5 {
+ g.SoftFloat = true
+ }
+ return
}
func gomips() string {
case "amd64":
return "GOAMD64", fmt.Sprintf("v%d", GOAMD64)
case "arm":
- return "GOARM", strconv.Itoa(GOARM)
+ return "GOARM", GOARM.String()
case "mips", "mipsle":
return "GOMIPS", GOMIPS
case "mips64", "mips64le":
return list
case "arm":
var list []string
- for i := 5; i <= GOARM; i++ {
+ for i := 5; i <= GOARM.Version; i++ {
list = append(list, fmt.Sprintf("%s.%d", GOARCH, i))
}
return list
. "math/rand"
"os"
"runtime"
+ "strings"
"sync"
"testing"
"testing/iotest"
func hasSlowFloatingPoint() bool {
switch runtime.GOARCH {
case "arm":
- return os.Getenv("GOARM") == "5"
+ return os.Getenv("GOARM") == "5" || strings.HasSuffix(os.Getenv("GOARM"), ",softfloat")
case "mips", "mipsle", "mips64", "mips64le":
// Be conservative and assume that all mips boards
// have emulated floating point.
MOVW g, 32(R13)
MOVW R11, 36(R13)
- // Skip floating point registers on GOARM < 6.
- MOVB runtime·goarm(SB), R11
- CMP $6, R11
- BLT skipfpsave
+ // Skip floating point registers on goarmsoftfp != 0.
+ MOVB runtime·goarmsoftfp(SB), R11
+ CMP $0, R11
+ BNE skipfpsave
MOVD F8, (40+8*0)(R13)
MOVD F9, (40+8*1)(R13)
MOVD F10, (40+8*2)(R13)
BL runtime·newosproc0(SB)
rr:
// Restore callee-save registers and return.
- MOVB runtime·goarm(SB), R11
- CMP $6, R11
- BLT skipfprest
+ MOVB runtime·goarmsoftfp(SB), R11
+ CMP $0, R11
+ BNE skipfprest
MOVD (40+8*0)(R13), F8
MOVD (40+8*1)(R13), F9
MOVD (40+8*2)(R13), F10
RET
TEXT runtime·asminit(SB),NOSPLIT,$0-0
- // disable runfast (flush-to-zero) mode of vfp if runtime.goarm > 5
- MOVB runtime·goarm(SB), R11
- CMP $5, R11
- BLE 4(PC)
+ // disable runfast (flush-to-zero) mode of vfp if runtime.goarmsoftfp == 0
+ MOVB runtime·goarmsoftfp(SB), R11
+ CMP $0, R11
+ BNE 4(PC)
WORD $0xeef1ba10 // vmrs r11, fpscr
BIC $(1<<24), R11
WORD $0xeee1ba10 // vmsr fpscr, r11
// starting at 4(R13).
MOVW.W R14, -4(R13)
- // Skip floating point registers on GOARM < 6.
- MOVB runtime·goarm(SB), R11
- CMP $6, R11
- BLT skipfpsave
+ // Skip floating point registers if goarmsoftfp != 0.
+ MOVB runtime·goarmsoftfp(SB), R11
+ CMP $0, R11
+ BNE skipfpsave
MOVD F8, (13*4+8*1)(R13)
MOVD F9, (13*4+8*2)(R13)
MOVD F10, (13*4+8*3)(R13)
// We set up the arguments to cgocallback when saving registers above.
BL runtime·cgocallback(SB)
- MOVB runtime·goarm(SB), R11
- CMP $6, R11
- BLT skipfprest
+ MOVB runtime·goarmsoftfp(SB), R11
+ CMP $0, R11
+ BNE skipfprest
MOVD (13*4+8*1)(R13), F8
MOVD (13*4+8*2)(R13), F9
MOVD (13*4+8*3)(R13), F10
p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR
l.save()
- p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp") // test goarm, and skip FP registers if goarm=5.
+ p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp != 0.
lfp.save()
label("nofp:")
p("CALL ·asyncPreempt2(SB)")
- p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp2") // test goarm, and skip FP registers if goarm=5.
+ p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp != 0.
lfp.restore()
label("nofp2:")
l.restore()
)
func checkgoarm() {
- if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 {
+ if cpu.HWCap&_HWCAP_VFP == 0 && goarmsoftfp == 0 {
print("runtime: this CPU has no floating point hardware, so it cannot run\n")
- print("this GOARM=", goarm, " binary. Recompile using GOARM=5.\n")
+ print("a binary compiled for hard floating point. Recompile adding ,softfloat\n")
+ print("to GOARM.\n")
exit(1)
}
- if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 {
+ if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 && goarmsoftfp == 0 {
print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n")
- print("this GOARM=", goarm, " binary. Recompile using GOARM=5 or GOARM=6.\n")
+ print("a binary compiled for VFPv3 hard floating point. Recompile adding ,softfloat\n")
+ print("to GOARM or changing GOARM to 6.\n")
exit(1)
}
if GOOS == "android" {
return
}
- if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 {
+ if cpu.HWCap&_HWCAP_VFP == 0 && goarmsoftfp == 0 {
print("runtime: this CPU has no floating point hardware, so it cannot run\n")
- print("this GOARM=", goarm, " binary. Recompile using GOARM=5.\n")
+ print("a binary compiled for hard floating point. Recompile adding ,softfloat\n")
+ print("to GOARM.\n")
exit(1)
}
- if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 {
+ if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 && goarmsoftfp == 0 {
print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n")
- print("this GOARM=", goarm, " binary. Recompile using GOARM=5 or GOARM=6.\n")
+ print("a binary compiled for VFPv3 hard floating point. Recompile adding ,softfloat\n")
+ print("to GOARM or changing GOARM to 6.\n")
exit(1)
}
}
MOVW R12, 48(R13)
MOVW CPSR, R0
MOVW R0, 52(R13)
- MOVB ·goarm(SB), R0
- CMP $6, R0
- BLT nofp
+ MOVB ·goarmsoftfp(SB), R0
+ CMP $0, R0
+ BNE nofp
MOVW FPCR, R0
MOVW R0, 56(R13)
MOVD F0, 60(R13)
MOVD F15, 180(R13)
nofp:
CALL ·asyncPreempt2(SB)
- MOVB ·goarm(SB), R0
- CMP $6, R0
- BLT nofp2
+ MOVB ·goarmsoftfp(SB), R0
+ CMP $0, R0
+ BNE nofp2
MOVD 180(R13), F15
MOVD 172(R13), F14
MOVD 164(R13), F13
processorVersionInfo uint32
isIntel bool
- goarm uint8 // set by cmd/link on arm systems
+ // set by cmd/link on arm systems
+ goarm uint8
+ goarmsoftfp uint8
)
// Set by the linker so the runtime can determine the buildmode.