... and 0-31 for 32-bit shifts.
Generally update the docs for ppc64 shift instructions to be
clearer about what they actually do.
This issue is causing problems for the subsequent CL. The shift
amount was <0 and caused the assembler to report an invalid instruction.
Change-Id: I8c708a15e7f71931835e6e543d8db3c716186e52
Reviewed-on: https://go-review.googlesource.com/c/go/+/232858
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Lynn Boger <laboger@linux.vnet.ibm.com>
(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SRAWconst (SignExt8to32 x) [c])
(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 => (SRWconst (ZeroExt8to32 x) [c])
-(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SLDconst x [c])
-(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SRADconst x [c])
-(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 => (SRDconst x [c])
-(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SLWconst x [c])
-(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SRAWconst x [c])
-(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 => (SRWconst x [c])
-(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SLWconst x [c])
-(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SRAWconst (SignExt16to32 x) [c])
-(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 => (SRWconst (ZeroExt16to32 x) [c])
-(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SLWconst x [c])
-(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SRAWconst (SignExt8to32 x) [c])
-(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 => (SRWconst (ZeroExt8to32 x) [c])
+(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SLDconst x [c&63])
+(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SRADconst x [c&63])
+(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 => (SRDconst x [c&63])
+(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SLWconst x [c&31])
+(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SRAWconst x [c&31])
+(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 => (SRWconst x [c&31])
+(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SLWconst x [c&31])
+(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SRAWconst (SignExt16to32 x) [c&15])
+(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 => (SRWconst (ZeroExt16to32 x) [c&15])
+(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SLWconst x [c&7])
+(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SRAWconst (SignExt8to32 x) [c&7])
+(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 => (SRWconst (ZeroExt8to32 x) [c&7])
// Lower bounded shifts first. No need to check shift value.
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
(MaskIfNotCarry (FlagCarrySet)) => (MOVDconst [0])
(MaskIfNotCarry (FlagCarryClear)) => (MOVDconst [-1])
-(S(RAD|RAW|RD|RW|LD|LW) x (MOVDconst [c])) => (S(RAD|RAW|RD|RW|LD|LW)const [c] x)
+(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
+(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
(Addr {sym} base) => (MOVDaddr {sym} [0] base)
(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
{name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"}, // arg0*arg1 - arg2
{name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2
- {name: "SRAD", argLength: 2, reg: gp21, asm: "SRAD"}, // arg0 >>a arg1, 64 bits (all sign if arg1 & 64 != 0)
- {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >>a arg1, 32 bits (all sign if arg1 & 32 != 0)
- {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // arg0 >> arg1, 64 bits (0 if arg1 & 64 != 0)
- {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // arg0 >> arg1, 32 bits (0 if arg1 & 32 != 0)
- {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << arg1, 64 bits (0 if arg1 & 64 != 0)
- {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << arg1, 32 bits (0 if arg1 & 32 != 0)
+ {name: "SRAD", argLength: 2, reg: gp21, asm: "SRAD"}, // signed arg0 >> (arg1&127), 64 bit width (note: 127, not 63!)
+ {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // signed arg0 >> (arg1&63), 32 bit width
+ {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // unsigned arg0 >> (arg1&127), 64 bit width
+ {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // unsigned arg0 >> (arg1&63), 32 bit width
+ {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << (arg1&127), 64 bit width
+ {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << (arg1&63), 32 bit width
{name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64
{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
{name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + auxint
{name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"}, // carry - 1 (if carry then 0 else -1)
- {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // arg0 >>a aux, 64 bits
- {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // arg0 >>a aux, 32 bits
- {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // arg0 >> aux, 64 bits
- {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // arg0 >> aux, 32 bits
- {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << aux, 64 bits
- {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << aux, 32 bits
+ {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 32, 32 bit width
{name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits
{name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
typ := &b.Func.Config.Types
// match: (Lsh16x32 x (MOVDconst [c]))
// cond: uint32(c) < 16
- // result: (SLWconst x [c])
+ // result: (SLWconst x [c&31])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 31)
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Lsh32x32 x (MOVDconst [c]))
// cond: uint32(c) < 32
- // result: (SLWconst x [c])
+ // result: (SLWconst x [c&31])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 31)
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Lsh64x32 x (MOVDconst [c]))
// cond: uint32(c) < 64
- // result: (SLDconst x [c])
+ // result: (SLDconst x [c&63])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SLDconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 63)
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Lsh8x32 x (MOVDconst [c]))
// cond: uint32(c) < 8
- // result: (SLWconst x [c])
+ // result: (SLWconst x [c&7])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SLWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 7)
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SLD x (MOVDconst [c]))
- // result: (SLDconst [c] x)
+ // result: (SLDconst [c&63 | (c>>6&1*63)] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SLDconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SLW x (MOVDconst [c]))
- // result: (SLWconst [c] x)
+ // result: (SLWconst [c&31 | (c>>5&1*31)] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SLWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRAD x (MOVDconst [c]))
- // result: (SRADconst [c] x)
+ // result: (SRADconst [c&63 | (c>>6&1*63)] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRADconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRAW x (MOVDconst [c]))
- // result: (SRAWconst [c] x)
+ // result: (SRAWconst [c&31 | (c>>5&1*31)] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRAWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRD x (MOVDconst [c]))
- // result: (SRDconst [c] x)
+ // result: (SRDconst [c&63 | (c>>6&1*63)] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRDconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
v.AddArg(x)
return true
}
v_1 := v.Args[1]
v_0 := v.Args[0]
// match: (SRW x (MOVDconst [c]))
- // result: (SRWconst [c] x)
+ // result: (SRWconst [c&31 | (c>>5&1*31)] x)
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
c := auxIntToInt64(v_1.AuxInt)
v.reset(OpPPC64SRWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Rsh16Ux32 x (MOVDconst [c]))
// cond: uint32(c) < 16
- // result: (SRWconst (ZeroExt16to32 x) [c])
+ // result: (SRWconst (ZeroExt16to32 x) [c&15])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 15)
v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
typ := &b.Func.Config.Types
// match: (Rsh16x32 x (MOVDconst [c]))
// cond: uint32(c) < 16
- // result: (SRAWconst (SignExt16to32 x) [c])
+ // result: (SRAWconst (SignExt16to32 x) [c&15])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 15)
v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)
typ := &b.Func.Config.Types
// match: (Rsh32Ux32 x (MOVDconst [c]))
// cond: uint32(c) < 32
- // result: (SRWconst x [c])
+ // result: (SRWconst x [c&31])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 31)
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Rsh32x32 x (MOVDconst [c]))
// cond: uint32(c) < 32
- // result: (SRAWconst x [c])
+ // result: (SRAWconst x [c&31])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 31)
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Rsh64Ux32 x (MOVDconst [c]))
// cond: uint32(c) < 64
- // result: (SRDconst x [c])
+ // result: (SRDconst x [c&63])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRDconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 63)
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Rsh64x32 x (MOVDconst [c]))
// cond: uint32(c) < 64
- // result: (SRADconst x [c])
+ // result: (SRADconst x [c&63])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRADconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 63)
v.AddArg(x)
return true
}
typ := &b.Func.Config.Types
// match: (Rsh8Ux32 x (MOVDconst [c]))
// cond: uint32(c) < 8
- // result: (SRWconst (ZeroExt8to32 x) [c])
+ // result: (SRWconst (ZeroExt8to32 x) [c&7])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 7)
v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
v0.AddArg(x)
v.AddArg(v0)
typ := &b.Func.Config.Types
// match: (Rsh8x32 x (MOVDconst [c]))
// cond: uint32(c) < 8
- // result: (SRAWconst (SignExt8to32 x) [c])
+ // result: (SRAWconst (SignExt8to32 x) [c&7])
for {
x := v_0
if v_1.Op != OpPPC64MOVDconst {
break
}
v.reset(OpPPC64SRAWconst)
- v.AuxInt = int64ToAuxInt(c)
+ v.AuxInt = int64ToAuxInt(c & 7)
v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
v0.AddArg(x)
v.AddArg(v0)