op = x86.AMOVL
}
sym := n.Sym.Linksym()
- size := n.Type.Size()
+ size := n.Type.MustSize()
for i := int64(0); i < size; i += int64(gc.Widthptr) {
p := pp.Prog(op)
p.From.Type = obj.TYPE_CONST
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
// Avoid partial register write
- if !t.IsFloat() && t.Size() <= 2 {
- if t.Size() == 1 {
+ if !t.IsFloat() && t.MustSize() <= 2 {
+ if t.MustSize() == 1 {
return x86.AMOVBLZX
} else {
return x86.AMOVWLZX
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
- width := t.Size()
+ width := t.MustSize()
if t.IsFloat() {
switch width {
case 4:
// so use movups, which has 2 byte opcode.
return x86.AMOVUPS
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
// Avoids partial register write
return x86.AMOVL
case 16:
return x86.AMOVUPS // int128s are in SSE registers
default:
- panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
+ panic(fmt.Sprintf("bad int register width %d:%s", t.MustSize(), t))
}
}
}
// IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency
- if v.Type.Size() == 1 {
+ if v.Type.MustSize() == 1 {
m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH
func zeroAuto(pp *gc.Progs, n *gc.Node) {
// Note: this code must not clobber any registers.
sym := n.Sym.Linksym()
- size := n.Type.Size()
+ size := n.Type.MustSize()
p := pp.Prog(arm.AMOVW)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 0
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return arm.AMOVF
case 8:
return arm.AMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
if t.IsSigned() {
return arm.AMOVB
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return arm.AMOVF
case 8:
return arm.AMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
return arm.AMOVB
case 2:
}
as := arm.AMOVW
if v.Type.IsFloat() {
- switch v.Type.Size() {
+ switch v.Type.MustSize() {
case 4:
as = arm.AMOVF
case 8:
if a.Op == ssa.OpLoadReg {
t := a.Type
switch {
- case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
- v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
- v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
- v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ case v.Op == ssa.OpARMMOVBreg && t.MustSize() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVBUreg && t.MustSize() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARMMOVHreg && t.MustSize() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVHUreg && t.MustSize() == 2 && !t.IsSigned():
// arg is a proper-typed load, already zero/sign-extended, don't extend again
if v.Reg() == v.Args[0].Reg() {
return
func zeroAuto(pp *gc.Progs, n *gc.Node) {
// Note: this code must not clobber any registers.
sym := n.Sym.Linksym()
- size := n.Type.Size()
+ size := n.Type.MustSize()
for i := int64(0); i < size; i += 8 {
p := pp.Prog(arm64.AMOVD)
p.From.Type = obj.TYPE_REG
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return arm64.AFMOVS
case 8:
return arm64.AFMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
if t.IsSigned() {
return arm64.AMOVB
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return arm64.AFMOVS
case 8:
return arm64.AFMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
return arm64.AMOVB
case 2:
}
as := arm64.AMOVD
if v.Type.IsFloat() {
- switch v.Type.Size() {
+ switch v.Type.MustSize() {
case 4:
as = arm64.AFMOVS
case 8:
if a.Op == ssa.OpLoadReg {
t := a.Type
switch {
- case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
- v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
- v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
- v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
- v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
- v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ case v.Op == ssa.OpARM64MOVBreg && t.MustSize() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVBUreg && t.MustSize() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHreg && t.MustSize() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHUreg && t.MustSize() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWreg && t.MustSize() == 4 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWUreg && t.MustSize() == 4 && !t.IsSigned():
// arg is a proper-typed load, already zero/sign-extended, don't extend again
if v.Reg() == v.Args[0].Reg() {
return
}
var varSize int64
for _, n := range lv.vars {
- varSize += n.Type.Size()
+ varSize += n.Type.MustSize()
}
if len(lv.livevars) > 1000 || varSize > 10000 {
// Be careful to avoid doing too much work.
case TARRAY:
for i := int64(0); i < t.NumElem(); i++ {
- clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem())
+ clobberWalk(b, v, offset+i*t.Elem().MustSize(), t.Elem())
}
case TSTRUCT:
}
func floatForComplex(t *types.Type) *types.Type {
- if t.Size() == 8 {
+ if t.MustSize() == 8 {
return types.Types[TFLOAT32]
} else {
return types.Types[TFLOAT64]
switch u := n.Val().U.(type) {
case *Mpint:
i := u.Int64()
- switch n.Type.Size() {
+ switch n.Type.MustSize() {
case 1:
return s.constInt8(n.Type, int8(i))
case 2:
case 8:
return s.constInt64(n.Type, i)
default:
- s.Fatalf("bad integer size %d", n.Type.Size())
+ s.Fatalf("bad integer size %d", n.Type.MustSize())
return nil
}
case string:
return s.constNil(t)
}
case *Mpflt:
- switch n.Type.Size() {
+ switch n.Type.MustSize() {
case 4:
return s.constFloat32(n.Type, u.Float32())
case 8:
return s.constFloat64(n.Type, u.Float64())
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad float size %d", n.Type.MustSize())
return nil
}
case *Mpcplx:
r := &u.Real
i := &u.Imag
- switch n.Type.Size() {
+ switch n.Type.MustSize() {
case 8:
pt := types.Types[TFLOAT32]
return s.newValue2(ssa.OpComplexMake, n.Type,
s.constFloat64(pt, r.Float64()),
s.constFloat64(pt, i.Float64()))
default:
- s.Fatalf("bad float size %d", n.Type.Size())
+ s.Fatalf("bad float size %d", n.Type.MustSize())
return nil
}
}
if ft.IsInteger() && tt.IsInteger() {
var op ssa.Op
- if tt.Size() == ft.Size() {
+ if tt.MustSize() == ft.MustSize() {
op = ssa.OpCopy
- } else if tt.Size() < ft.Size() {
+ } else if tt.MustSize() < ft.MustSize() {
// truncation
- switch 10*ft.Size() + tt.Size() {
+ switch 10*ft.MustSize() + tt.MustSize() {
case 21:
op = ssa.OpTrunc16to8
case 41:
}
} else if ft.IsSigned() {
// sign extension
- switch 10*ft.Size() + tt.Size() {
+ switch 10*ft.MustSize() + tt.MustSize() {
case 12:
op = ssa.OpSignExt8to16
case 14:
}
} else {
// zero extension
- switch 10*ft.Size() + tt.Size() {
+ switch 10*ft.MustSize() + tt.MustSize() {
case 12:
op = ssa.OpZeroExt8to16
case 14:
}
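// The 10*ft+tt switch keys above pack both operand widths into one small
// integer: e.g. 21 selects the 2-byte-to-1-byte truncation (OpTrunc16to8),
// while 12 selects the 1-byte-to-2-byte extension, signed or unsigned
// depending on ft.IsSigned().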
if thearch.LinkArch.Family == sys.MIPS {
- if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
+ if ft.MustSize() == 4 && ft.IsInteger() && !ft.IsSigned() {
// tt is float32 or float64, and ft is also unsigned
- if tt.Size() == 4 {
+ if tt.MustSize() == 4 {
return s.uint32Tofloat32(n, x, ft, tt)
}
- if tt.Size() == 8 {
+ if tt.MustSize() == 8 {
return s.uint32Tofloat64(n, x, ft, tt)
}
- } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
+ } else if tt.MustSize() == 4 && tt.IsInteger() && !tt.IsSigned() {
// ft is float32 or float64, and tt is unsigned integer
- if ft.Size() == 4 {
+ if ft.MustSize() == 4 {
return s.float32ToUint32(n, x, ft, tt)
}
- if ft.Size() == 8 {
+ if ft.MustSize() == 8 {
return s.float64ToUint32(n, x, ft, tt)
}
}
// Tricky 64-bit unsigned cases.
if ft.IsInteger() {
// tt is float32 or float64, and ft is also unsigned
- if tt.Size() == 4 {
+ if tt.MustSize() == 4 {
return s.uint64Tofloat32(n, x, ft, tt)
}
- if tt.Size() == 8 {
+ if tt.MustSize() == 8 {
return s.uint64Tofloat64(n, x, ft, tt)
}
s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
}
// ft is float32 or float64, and tt is unsigned integer
- if ft.Size() == 4 {
+ if ft.MustSize() == 4 {
return s.float32ToUint64(n, x, ft, tt)
}
- if ft.Size() == 8 {
+ if ft.MustSize() == 8 {
return s.float64ToUint64(n, x, ft, tt)
}
s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
if ft.IsComplex() && tt.IsComplex() {
var op ssa.Op
- if ft.Size() == tt.Size() {
- switch ft.Size() {
+ if ft.MustSize() == tt.MustSize() {
+ switch ft.MustSize() {
case 8:
op = ssa.OpRound32F
case 16:
default:
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
}
- } else if ft.Size() == 8 && tt.Size() == 16 {
+ } else if ft.MustSize() == 8 && tt.MustSize() == 16 {
op = ssa.OpCvt32Fto64F
- } else if ft.Size() == 16 && tt.Size() == 8 {
+ } else if ft.MustSize() == 16 && tt.MustSize() == 8 {
op = ssa.OpCvt64Fto32F
} else {
s.Fatalf("weird complex conversion %v -> %v", ft, tt)
if arg.store {
s.storeType(et, addr, arg.v, 0)
} else {
- store := s.newValue3I(ssa.OpMove, ssa.TypeMem, et.Size(), addr, arg.v, s.mem())
+ store := s.newValue3I(ssa.OpMove, ssa.TypeMem, et.MustSize(), addr, arg.v, s.mem())
store.Aux = et
s.vars[&memVar] = store
}
// Treat as a mem->mem move.
var store *ssa.Value
if right == nil {
- store = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.Size(), addr, s.mem())
+ store = s.newValue2I(ssa.OpZero, ssa.TypeMem, t.MustSize(), addr, s.mem())
} else {
- store = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.Size(), addr, right, s.mem())
+ store = s.newValue3I(ssa.OpMove, ssa.TypeMem, t.MustSize(), addr, right, s.mem())
}
store.Aux = t
s.vars[&memVar] = store
func (s *state) zeroVal(t *types.Type) *ssa.Value {
switch {
case t.IsInteger():
- switch t.Size() {
+ switch t.MustSize() {
case 1:
return s.constInt8(t, 0)
case 2:
s.Fatalf("bad sized integer type %v", t)
}
case t.IsFloat():
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return s.constFloat32(t, 0)
case 8:
s.Fatalf("bad sized float type %v", t)
}
case t.IsComplex():
- switch t.Size() {
+ switch t.MustSize() {
case 8:
z := s.constFloat32(types.Types[TFLOAT32], 0)
return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
off := Ctxt.FixedFrameSize()
for _, arg := range args {
t := arg.Type
- off = Rnd(off, t.Alignment())
+ off = Rnd(off, t.MustAlignment())
ptr := s.constOffPtrSP(t.PtrTo(), off)
- size := t.Size()
+ size := t.MustSize()
s.vars[&memVar] = s.newValue3A(ssa.OpStore, ssa.TypeMem, t, ptr, arg, s.mem())
off += size
}
// Load results
res := make([]*ssa.Value, len(results))
for i, t := range results {
- off = Rnd(off, t.Alignment())
+ off = Rnd(off, t.MustAlignment())
ptr := s.constOffPtrSP(types.NewPtr(t), off)
res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
- off += t.Size()
+ off += t.MustSize()
}
off = Rnd(off, int64(Widthptr))
}
} else {
p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
- store := s.newValue3I(ssa.OpMove, ssa.TypeMem, n.Type.Size(), addr, p, s.mem())
+ store := s.newValue3I(ssa.OpMove, ssa.TypeMem, n.Type.MustSize(), addr, p, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
if tmp == nil {
s.vars[valVar] = s.zeroVal(n.Type)
} else {
- store := s.newValue2I(ssa.OpZero, ssa.TypeMem, n.Type.Size(), addr, s.mem())
+ store := s.newValue2I(ssa.OpZero, ssa.TypeMem, n.Type.MustSize(), addr, s.mem())
store.Aux = n.Type
s.vars[&memVar] = store
}
if n.Class() != PAUTO {
v.Fatalf("zero of variable which isn't PAUTO %v", n)
}
- if n.Type.Size()%int64(Widthptr) != 0 {
+ if n.Type.MustSize()%int64(Widthptr) != 0 {
v.Fatalf("zero of variable not a multiple of ptr size %v", n)
}
thearch.ZeroAuto(s.pp, n)
if n.Class() != PAUTO {
Fatalf("needzero class %d", n.Class())
}
- if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
- Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
+ if n.Type.MustSize()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.MustSize() == 0 {
+ Fatalf("var %L has size %d offset %d", n, n.Type.MustSize(), n.Xoffset)
}
- if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
+ if lo != hi && n.Xoffset+n.Type.MustSize() >= lo-int64(2*Widthreg) {
// Merge with range we already have.
lo = n.Xoffset
continue
// Set new range.
lo = n.Xoffset
- hi = lo + n.Type.Size()
+ hi = lo + n.Type.MustSize()
}
// Zero final range.
// extendIndex extends v to a full int width.
// panic using the given function if v does not fit in an int (only on 32-bit archs).
func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
- size := v.Type.Size()
+ size := v.Type.MustSize()
if size == s.config.PtrSize {
return v
}
// where v should be spilled.
func AutoVar(v *ssa.Value) (*Node, int64) {
loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
- if v.Type.Size() > loc.Type.Size() {
+ if v.Type.MustSize() > loc.Type.MustSize() {
v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
}
return loc.N.(*Node), loc.Off
func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
n := name.N.(*Node)
- s := name.Type.Size() / 2
+ s := name.Type.MustSize() / 2
var t *types.Type
if s == 8 {
t = types.Types[TFLOAT64]
func zeroAuto(pp *gc.Progs, n *gc.Node) {
// Note: this code must not clobber any registers.
sym := n.Sym.Linksym()
- size := n.Type.Size()
+ size := n.Type.MustSize()
for i := int64(0); i < size; i += 4 {
p := pp.Prog(mips.AMOVW)
p.From.Type = obj.TYPE_REG
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type, r int16) obj.As {
if isFPreg(r) {
- if t.Size() == 4 { // float32 or int32
+ if t.MustSize() == 4 { // float32 or int32
return mips.AMOVF
} else { // float64 or int64
return mips.AMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
if t.IsSigned() {
return mips.AMOVB
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type, r int16) obj.As {
if isFPreg(r) {
- if t.Size() == 4 { // float32 or int32
+ if t.MustSize() == 4 { // float32 or int32
return mips.AMOVF
} else { // float64 or int64
return mips.AMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
return mips.AMOVB
case 2:
as := mips.AMOVW
if isFPreg(x) && isFPreg(y) {
as = mips.AMOVF
- if t.Size() == 8 {
+ if t.MustSize() == 8 {
as = mips.AMOVD
}
}
if a.Op == ssa.OpLoadReg {
t := a.Type
switch {
- case v.Op == ssa.OpMIPSMOVBreg && t.Size() == 1 && t.IsSigned(),
- v.Op == ssa.OpMIPSMOVBUreg && t.Size() == 1 && !t.IsSigned(),
- v.Op == ssa.OpMIPSMOVHreg && t.Size() == 2 && t.IsSigned(),
- v.Op == ssa.OpMIPSMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ case v.Op == ssa.OpMIPSMOVBreg && t.MustSize() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVBUreg && t.MustSize() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHreg && t.MustSize() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHUreg && t.MustSize() == 2 && !t.IsSigned():
// arg is a proper-typed load, already zero/sign-extended, don't extend again
if v.Reg() == v.Args[0].Reg() {
return
func zeroAuto(pp *gc.Progs, n *gc.Node) {
// Note: this code must not clobber any registers.
sym := n.Sym.Linksym()
- size := n.Type.Size()
+ size := n.Type.MustSize()
for i := int64(0); i < size; i += 8 {
p := pp.Prog(mips.AMOVV)
p.From.Type = obj.TYPE_REG
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type, r int16) obj.As {
if isFPreg(r) {
- if t.Size() == 4 { // float32 or int32
+ if t.MustSize() == 4 { // float32 or int32
return mips.AMOVF
} else { // float64 or int64
return mips.AMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
if t.IsSigned() {
return mips.AMOVB
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type, r int16) obj.As {
if isFPreg(r) {
- if t.Size() == 4 { // float32 or int32
+ if t.MustSize() == 4 { // float32 or int32
return mips.AMOVF
} else { // float64 or int64
return mips.AMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
return mips.AMOVB
case 2:
if a.Op == ssa.OpLoadReg {
t := a.Type
switch {
- case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(),
- v.Op == ssa.OpMIPS64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
- v.Op == ssa.OpMIPS64MOVHreg && t.Size() == 2 && t.IsSigned(),
- v.Op == ssa.OpMIPS64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
- v.Op == ssa.OpMIPS64MOVWreg && t.Size() == 4 && t.IsSigned(),
- v.Op == ssa.OpMIPS64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ case v.Op == ssa.OpMIPS64MOVBreg && t.MustSize() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVBUreg && t.MustSize() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHreg && t.MustSize() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHUreg && t.MustSize() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWreg && t.MustSize() == 4 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWUreg && t.MustSize() == 4 && !t.IsSigned():
// arg is a proper-typed load, already zero/sign-extended, don't extend again
if v.Reg() == v.Args[0].Reg() {
return
func zeroAuto(pp *gc.Progs, n *gc.Node) {
// Note: this code must not clobber any registers.
sym := n.Sym.Linksym()
- size := n.Type.Size()
+ size := n.Type.MustSize()
for i := int64(0); i < size; i += 8 {
p := pp.Prog(ppc64.AMOVD)
p.From.Type = obj.TYPE_REG
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return ppc64.AFMOVS
case 8:
return ppc64.AFMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
if t.IsSigned() {
return ppc64.AMOVB
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return ppc64.AFMOVS
case 8:
return ppc64.AFMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
return ppc64.AMOVB
case 2:
// Note: this code must not clobber any registers.
p := pp.Prog(s390x.ACLEAR)
p.From.Type = obj.TYPE_CONST
- p.From.Offset = n.Type.Size()
+ p.From.Offset = n.Type.MustSize()
p.To.Type = obj.TYPE_MEM
p.To.Name = obj.NAME_AUTO
p.To.Reg = s390x.REGSP
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return s390x.AFMOVS
case 8:
return s390x.AFMOVD
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
if t.IsSigned() {
return s390x.AMOVB
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
- width := t.Size()
+ width := t.MustSize()
if t.IsFloat() {
switch width {
case 4:
if t.IsFloat() {
return s390x.AFMOVD
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
if t.IsSigned() {
return s390x.AMOVB
if v.Op == OpStore || v.Op == OpZero {
var sz int64
if v.Op == OpStore {
- sz = v.Aux.(Type).Size()
+ sz = v.Aux.(Type).MustSize()
} else { // OpZero
sz = v.AuxInt
}
v.SetArgs1(v.Args[2])
} else {
// zero addr mem
- typesz := v.Args[0].Type.ElemType().Size()
+ typesz := v.Args[0].Type.ElemType().MustSize()
if sz != typesz {
f.Fatalf("mismatched zero/store sizes: %d and %d [%s]",
sz, typesz, v.LongString())
for _, name := range f.Names {
t := name.Type
switch {
- case t.IsInteger() && t.Size() > f.Config.RegSize:
+ case t.IsInteger() && t.MustSize() > f.Config.RegSize:
var elemType Type
if t.IsSigned() {
elemType = f.Config.Types.Int32
delete(f.NamedValues, name)
case t.IsComplex():
var elemType Type
- if t.Size() == 16 {
+ if t.MustSize() == 16 {
elemType = f.Config.Types.Float64
} else {
elemType = f.Config.Types.Float32
delete(f.NamedValues, name)
case t.IsFloat():
// floats are never decomposed, even ones bigger than RegSize
- case t.Size() > f.Config.RegSize:
+ case t.MustSize() > f.Config.RegSize:
f.Fatalf("undecomposed named type %v %v", name, t)
default:
newNames = append(newNames, name)
func decomposeBuiltInPhi(v *Value) {
switch {
- case v.Type.IsInteger() && v.Type.Size() > v.Block.Func.Config.RegSize:
+ case v.Type.IsInteger() && v.Type.MustSize() > v.Block.Func.Config.RegSize:
decomposeInt64Phi(v)
case v.Type.IsComplex():
decomposeComplexPhi(v)
decomposeInterfacePhi(v)
case v.Type.IsFloat():
// floats are never decomposed, even ones bigger than RegSize
- case v.Type.Size() > v.Block.Func.Config.RegSize:
+ case v.Type.MustSize() > v.Block.Func.Config.RegSize:
v.Fatalf("undecomposed type %s", v.Type)
}
}
func decomposeComplexPhi(v *Value) {
types := &v.Block.Func.Config.Types
var partType Type
- switch z := v.Type.Size(); z {
+ switch z := v.Type.MustSize(); z {
case 8:
partType = types.Float32
case 16:
LocalSlot{s.N, dummyTypes.Int, s.Off + 16}
}
func (d DummyFrontend) SplitComplex(s LocalSlot) (LocalSlot, LocalSlot) {
- if s.Type.Size() == 16 {
+ if s.Type.MustSize() == 16 {
return LocalSlot{s.N, dummyTypes.Float64, s.Off}, LocalSlot{s.N, dummyTypes.Float64, s.Off + 8}
}
return LocalSlot{s.N, dummyTypes.Float32, s.Off}, LocalSlot{s.N, dummyTypes.Float32, s.Off + 4}
// Lowering stores
// These more-specific FP versions of Store pattern should come first.
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 -> (MOVLstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 -> (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
// Lowering moves
(Move [0] _ _ mem) -> mem
( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c -> (ROLLconst [c] x)
-(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
-( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
-(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.Size() == 2 -> (ROLWconst x [c])
+(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.MustSize() == 2 -> (ROLWconst x [c])
+( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.MustSize() == 2 -> (ROLWconst x [c])
+(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == 16-c && t.MustSize() == 2 -> (ROLWconst x [c])
-(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
-( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
-(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.Size() == 1 -> (ROLBconst x [c])
+(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.MustSize() == 1 -> (ROLBconst x [c])
+( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.MustSize() == 1 -> (ROLBconst x [c])
+(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == 8-c && t.MustSize() == 1 -> (ROLBconst x [c])
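// The t.MustSize() guards above keep the rotate rewrites honest: combining
// x<<c with the 16-bit shift x>>(16-c) (ADD, OR, and XOR coincide because the
// two shifted halves occupy disjoint bits) is a rotate of the low 16 bits by c,
// so the result type must be exactly 2 bytes wide; the ROLB rules are the
// 1-byte analogue.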
(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
(ROLWconst [c] (ROLWconst [d] x)) -> (ROLWconst [(c+d)&15] x)
// things like (ANDLconst [0x100] x) which were formerly
// (ANDBconst [0] x). Probably doesn't happen very often.
// If we cared, we might do:
-// (ANDLconst <t> [c] x) && t.Size()==1 && int8(x)==0 -> (MOVLconst [0])
+// (ANDLconst <t> [c] x) && t.MustSize()==1 && int8(x)==0 -> (MOVLconst [0])
// Convert constant subtracts to constant adds
(SUBLconst [c] x) -> (ADDLconst [int64(int32(-c))] x)
// Lowering stores
// These more-specific FP versions of Store pattern should come first.
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (MOVSSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 -> (MOVQstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 -> (MOVLstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 -> (MOVQstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 -> (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
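// Store is lowered purely by the size recorded in the {t} aux; the FP patterns
// are listed first so that an 8-byte or 4-byte float value becomes MOVSDstore
// or MOVSSstore instead of falling through to the integer MOVQstore/MOVLstore
// of the same width.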
// Lowering moves
(Move [0] _ _ mem) -> mem
( ORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
(XORL (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c -> (ROLLconst x [c])
-(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
-( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
-(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 -> (ROLWconst x [c])
+(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.MustSize() == 2 -> (ROLWconst x [c])
+( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.MustSize() == 2 -> (ROLWconst x [c])
+(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.MustSize() == 2 -> (ROLWconst x [c])
-(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
-( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
-(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 -> (ROLBconst x [c])
+(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.MustSize() == 1 -> (ROLBconst x [c])
+( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.MustSize() == 1 -> (ROLBconst x [c])
+(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.MustSize() == 1 -> (ROLBconst x [c])
(ROLQconst [c] (ROLQconst [d] x)) -> (ROLQconst [(c+d)&63] x)
(ROLLconst [c] (ROLLconst [d] x)) -> (ROLLconst [(c+d)&31] x)
(ORL (SHLL x (ANDQconst y [15]))
(ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))
(SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
- && v.Type.Size() == 2
+ && v.Type.MustSize() == 2
-> (ROLW x y)
(ORL (SHLL x (ANDLconst y [15]))
(ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))
(SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
- && v.Type.Size() == 2
+ && v.Type.MustSize() == 2
-> (ROLW x y)
(ORL (SHRW x (ANDQconst y [15]))
(SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
- && v.Type.Size() == 2
+ && v.Type.MustSize() == 2
-> (RORW x y)
(ORL (SHRW x (ANDLconst y [15]))
(SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
- && v.Type.Size() == 2
+ && v.Type.MustSize() == 2
-> (RORW x y)
(ORL (SHLL x (ANDQconst y [ 7]))
(ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))
(SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
- && v.Type.Size() == 1
+ && v.Type.MustSize() == 1
-> (ROLB x y)
(ORL (SHLL x (ANDLconst y [ 7]))
(ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))
(SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
- && v.Type.Size() == 1
+ && v.Type.MustSize() == 1
-> (ROLB x y)
(ORL (SHRB x (ANDQconst y [ 7]))
(SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
- && v.Type.Size() == 1
+ && v.Type.MustSize() == 1
-> (RORB x y)
(ORL (SHRB x (ANDLconst y [ 7]))
(SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
- && v.Type.Size() == 1
+ && v.Type.MustSize() == 1
-> (RORB x y)
// rotate left negative = rotate right
// things like (ANDLconst [0x100] x) which were formerly
// (ANDBconst [0] x). Probably doesn't happen very often.
// If we cared, we might do:
-// (ANDLconst <t> [c] x) && t.Size()==1 && int8(x)==0 -> (MOVLconst [0])
+// (ANDLconst <t> [c] x) && t.MustSize()==1 && int8(x)==0 -> (MOVLconst [0])
// Convert constant subtracts to constant adds
(SUBQconst [c] x) && c != -(1<<31) -> (ADDQconst [-c] x)
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
// zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [2] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [2] ptr mem) ->
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore ptr (MOVWconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem))
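// The MustAlignment guards pick the widest store the known alignment permits:
// a 4-byte-aligned zero uses a single MOVWstore, a 2-byte-aligned one a pair
// of MOVHstores, and anything less aligned falls back to byte stores.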
(Zero [4] ptr mem) ->
// 4 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%4 == 0 && s > 4 && s <= 512
- && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice ->
+ && t.(Type).MustAlignment()%4 == 0 && !config.noDuffDevice ->
(DUFFZERO [4 * (128 - int64(s/4))] ptr (MOVWconst [0]) mem)
// Large zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0 ->
- (LoweredZero [t.(Type).Alignment()]
+ && (s > 512 || config.noDuffDevice) || t.(Type).MustAlignment()%4 != 0 ->
+ (LoweredZero [t.(Type).MustAlignment()]
ptr
- (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).MustAlignment(), config)])
(MOVWconst [0])
mem)
// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
-(Move [2] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [2] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore dst (MOVHUload src mem) mem)
(Move [2] dst src mem) ->
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [2] dst (MOVHUload [2] src mem)
(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) ->
// 8 and 128 are magic constants, see runtime/mkduff.go
(Move [s] {t} dst src mem)
&& s%4 == 0 && s > 4 && s <= 512
- && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice ->
+ && t.(Type).MustAlignment()%4 == 0 && !config.noDuffDevice ->
(DUFFCOPY [8 * (128 - int64(s/4))] dst src mem)
// Large move uses a loop
(Move [s] {t} dst src mem)
- && (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0 ->
- (LoweredMove [t.(Type).Alignment()]
+ && (s > 512 || config.noDuffDevice) || t.(Type).MustAlignment()%4 != 0 ->
+ (LoweredMove [t.(Type).MustAlignment()]
dst
src
- (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <src.Type> src [s-moveSize(t.(Type).MustAlignment(), config)])
mem)
// calls
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
// zeroing
(Zero [0] _ mem) -> mem
( ORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
(XORshiftRL [c] (SLLconst x [64-c]) x) -> (RORconst [ c] x)
-(ADDshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
-( ORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
-(XORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.Size() == 4 -> (RORWconst [32-c] x)
-(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [ c] x)
-( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [ c] x)
-(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 -> (RORWconst [ c] x)
+(ADDshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.MustSize() == 4 -> (RORWconst [32-c] x)
+( ORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.MustSize() == 4 -> (RORWconst [32-c] x)
+(XORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x) && c < 32 && t.MustSize() == 4 -> (RORWconst [32-c] x)
+(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.MustSize() == 4 -> (RORWconst [ c] x)
+( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.MustSize() == 4 -> (RORWconst [ c] x)
+(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.MustSize() == 4 -> (RORWconst [ c] x)
// Generic rules rewrite certain AND to a pair of shifts.
// However, on ARM64 the bitmask can fit into an instruction.
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
// zero instructions
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVWconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [2] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [2] ptr mem) ->
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore ptr (MOVWconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] ptr mem) ->
(MOVBstore [2] ptr (MOVWconst [0])
(MOVBstore [1] ptr (MOVWconst [0])
(MOVBstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [6] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [6] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [4] ptr (MOVWconst [0])
(MOVHstore [2] ptr (MOVWconst [0])
(MOVHstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [8] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore [0] ptr (MOVWconst [0]) mem))
-(Zero [12] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [12] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
(MOVWstore [0] ptr (MOVWconst [0]) mem)))
-(Zero [16] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [16] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [12] ptr (MOVWconst [0])
(MOVWstore [8] ptr (MOVWconst [0])
(MOVWstore [4] ptr (MOVWconst [0])
// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 16 || t.(Type).Alignment()%4 != 0) ->
- (LoweredZero [t.(Type).Alignment()]
+ && (s > 16 || t.(Type).MustAlignment()%4 != 0) ->
+ (LoweredZero [t.(Type).MustAlignment()]
ptr
- (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).MustAlignment(), config)])
mem)
// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBUload src mem) mem)
-(Move [2] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [2] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore dst (MOVHUload src mem) mem)
(Move [2] dst src mem) ->
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem))
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [2] dst (MOVHUload [2] src mem)
(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) ->
(MOVBstore [2] dst (MOVBUload [2] src mem)
(MOVBstore [1] dst (MOVBUload [1] src mem)
(MOVBstore dst (MOVBUload src mem) mem)))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [8] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [6] dst (MOVHload [6] src mem)
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))))
-(Move [6] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [6] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [12] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [16] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [12] dst (MOVWload [12] src mem)
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
- && (s > 16 || t.(Type).Alignment()%4 != 0) ->
- (LoweredMove [t.(Type).Alignment()]
+ && (s > 16 || t.(Type).MustAlignment()%4 != 0) ->
+ (LoweredMove [t.(Type).MustAlignment()]
dst
src
- (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDconst <src.Type> src [s-moveSize(t.(Type).MustAlignment(), config)])
mem)
// calls
(Load <t> ptr mem) && is64BitFloat(t) -> (MOVDload ptr mem)
// stores
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVVstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && !is64BitFloat(val.Type) -> (MOVVstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (MOVDstore ptr val mem)
// zeroing
(Zero [0] _ mem) -> mem
(Zero [1] ptr mem) -> (MOVBstore ptr (MOVVconst [0]) mem)
-(Zero [2] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [2] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) ->
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem))
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [4] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore ptr (MOVVconst [0]) mem)
-(Zero [4] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [4] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [2] ptr (MOVVconst [0])
(MOVHstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] ptr mem) ->
(MOVBstore [2] ptr (MOVVconst [0])
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem))))
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%8 == 0 ->
+(Zero [8] {t} ptr mem) && t.(Type).MustAlignment()%8 == 0 ->
(MOVVstore ptr (MOVVconst [0]) mem)
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [8] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [4] ptr (MOVVconst [0])
(MOVWstore [0] ptr (MOVVconst [0]) mem))
-(Zero [8] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [8] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [6] ptr (MOVVconst [0])
(MOVHstore [4] ptr (MOVVconst [0])
(MOVHstore [2] ptr (MOVVconst [0])
(MOVBstore [2] ptr (MOVVconst [0])
(MOVBstore [1] ptr (MOVVconst [0])
(MOVBstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [6] {t} ptr mem) && t.(Type).Alignment()%2 == 0 ->
+(Zero [6] {t} ptr mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [4] ptr (MOVVconst [0])
(MOVHstore [2] ptr (MOVVconst [0])
(MOVHstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [12] {t} ptr mem) && t.(Type).Alignment()%4 == 0 ->
+(Zero [12] {t} ptr mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [8] ptr (MOVVconst [0])
(MOVWstore [4] ptr (MOVVconst [0])
(MOVWstore [0] ptr (MOVVconst [0]) mem)))
-(Zero [16] {t} ptr mem) && t.(Type).Alignment()%8 == 0 ->
+(Zero [16] {t} ptr mem) && t.(Type).MustAlignment()%8 == 0 ->
(MOVVstore [8] ptr (MOVVconst [0])
(MOVVstore [0] ptr (MOVVconst [0]) mem))
-(Zero [24] {t} ptr mem) && t.(Type).Alignment()%8 == 0 ->
+(Zero [24] {t} ptr mem) && t.(Type).MustAlignment()%8 == 0 ->
(MOVVstore [16] ptr (MOVVconst [0])
(MOVVstore [8] ptr (MOVVconst [0])
(MOVVstore [0] ptr (MOVVconst [0]) mem)))
// 8, and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
&& s%8 == 0 && s > 24 && s <= 8*128
- && t.(Type).Alignment()%8 == 0 && !config.noDuffDevice ->
+ && t.(Type).MustAlignment()%8 == 0 && !config.noDuffDevice ->
(DUFFZERO [8 * (128 - int64(s/8))] ptr mem)
// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
- && (s > 8*128 || config.noDuffDevice) || t.(Type).Alignment()%8 != 0 ->
- (LoweredZero [t.(Type).Alignment()]
+ && (s > 8*128 || config.noDuffDevice) || t.(Type).MustAlignment()%8 != 0 ->
+ (LoweredZero [t.(Type).MustAlignment()]
ptr
- (ADDVconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDVconst <ptr.Type> ptr [s-moveSize(t.(Type).MustAlignment(), config)])
mem)
// moves
(Move [0] _ _ mem) -> mem
(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem)
-(Move [2] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [2] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) ->
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [4] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore dst (MOVWload src mem) mem)
-(Move [4] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [4] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) ->
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem))))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%8 == 0 ->
+(Move [8] {t} dst src mem) && t.(Type).MustAlignment()%8 == 0 ->
(MOVVstore dst (MOVVload src mem) mem)
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem))
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [8] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [6] dst (MOVHload [6] src mem)
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVBstore [2] dst (MOVBload [2] src mem)
(MOVBstore [1] dst (MOVBload [1] src mem)
(MOVBstore dst (MOVBload src mem) mem)))
-(Move [6] {t} dst src mem) && t.(Type).Alignment()%2 == 0 ->
+(Move [6] {t} dst src mem) && t.(Type).MustAlignment()%2 == 0 ->
(MOVHstore [4] dst (MOVHload [4] src mem)
(MOVHstore [2] dst (MOVHload [2] src mem)
(MOVHstore dst (MOVHload src mem) mem)))
-(Move [12] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [12] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVWstore [8] dst (MOVWload [8] src mem)
(MOVWstore [4] dst (MOVWload [4] src mem)
(MOVWstore dst (MOVWload src mem) mem)))
-(Move [16] {t} dst src mem) && t.(Type).Alignment()%8 == 0 ->
+(Move [16] {t} dst src mem) && t.(Type).MustAlignment()%8 == 0 ->
(MOVVstore [8] dst (MOVVload [8] src mem)
(MOVVstore dst (MOVVload src mem) mem))
-(Move [24] {t} dst src mem) && t.(Type).Alignment()%8 == 0 ->
+(Move [24] {t} dst src mem) && t.(Type).MustAlignment()%8 == 0 ->
(MOVVstore [16] dst (MOVVload [16] src mem)
(MOVVstore [8] dst (MOVVload [8] src mem)
(MOVVstore dst (MOVVload src mem) mem)))
// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
- && s > 24 || t.(Type).Alignment()%8 != 0 ->
- (LoweredMove [t.(Type).Alignment()]
+ && s > 24 || t.(Type).MustAlignment()%8 != 0 ->
+ (LoweredMove [t.(Type).MustAlignment()]
dst
src
- (ADDVconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)])
+ (ADDVconst <src.Type> src [s-moveSize(t.(Type).MustAlignment(), config)])
mem)
// calls
(Load <t> ptr mem) && is32BitFloat(t) -> (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is32BitFloat(val.Type) -> (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) -> x -- type is wrong
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && (is64BitInt(val.Type) || isPtr(val.Type)) -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitInt(val.Type) -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
// Using Zero instead of LoweredZero allows the
// target address to be folded where possible.
(Move [4] dst src mem) ->
(MOVWstore dst (MOVWZload src mem) mem)
// MOVD for load and store must have offsets that are multiple of 4
-(Move [8] {t} dst src mem) && t.(Type).Alignment()%4 == 0 ->
+(Move [8] {t} dst src mem) && t.(Type).MustAlignment()%4 == 0 ->
(MOVDstore dst (MOVDload src mem) mem)
(Move [8] dst src mem) ->
(MOVWstore [4] dst (MOVWZload [4] src mem)
// Lowering stores
// These more-specific FP versions of Store pattern should come first.
-(Store {t} ptr val mem) && t.(Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 && is32BitFloat(val.Type) -> (FMOVSstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 8 -> (MOVDstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 4 -> (MOVWstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 2 -> (MOVHstore ptr val mem)
-(Store {t} ptr val mem) && t.(Type).Size() == 1 -> (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 8 -> (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 4 -> (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 2 -> (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.(Type).MustSize() == 1 -> (MOVBstore ptr val mem)
// Lowering moves
(ComplexReal (ComplexMake real _ )) -> real
(ComplexImag (ComplexMake _ imag )) -> imag
-(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 ->
+(Load <t> ptr mem) && t.IsComplex() && t.MustSize() == 8 ->
(ComplexMake
(Load <types.Float32> ptr mem)
(Load <types.Float32>
(OffPtr <types.Float32Ptr> [4] ptr)
mem)
)
-(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 8 ->
+(Store {t} dst (ComplexMake real imag) mem) && t.(Type).MustSize() == 8 ->
(Store {types.Float32}
(OffPtr <types.Float32Ptr> [4] dst)
imag
(Store {types.Float32} dst real mem))
-(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 ->
+(Load <t> ptr mem) && t.IsComplex() && t.MustSize() == 16 ->
(ComplexMake
(Load <types.Float64> ptr mem)
(Load <types.Float64>
(OffPtr <types.Float64Ptr> [8] ptr)
mem)
)
-(Store {t} dst (ComplexMake real imag) mem) && t.(Type).Size() == 16 ->
+(Store {t} dst (ComplexMake real imag) mem) && t.(Type).MustSize() == 16 ->
(Store {types.Float64}
(OffPtr <types.Float64Ptr> [8] dst)
imag
(Load <types.UInt32> ptr mem)
(Load <types.UInt32> (OffPtr <types.UInt32Ptr> [4] ptr) mem))
-(Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && !config.BigEndian ->
+(Store {t} dst (Int64Make hi lo) mem) && t.(Type).MustSize() == 8 && !config.BigEndian ->
(Store {hi.Type}
(OffPtr <hi.Type.PtrTo()> [4] dst)
hi
(Store {lo.Type} dst lo mem))
-(Store {t} dst (Int64Make hi lo) mem) && t.(Type).Size() == 8 && config.BigEndian ->
+(Store {t} dst (Int64Make hi lo) mem) && t.(Type).MustSize() == 8 && config.BigEndian ->
(Store {lo.Type}
(OffPtr <lo.Type.PtrTo()> [4] dst)
lo
(NeqSlice x y) -> (NeqPtr (SlicePtr x) (SlicePtr y))
// Load of store of same address, with compatibly typed value and same size
-(Load <t1> p1 (Store {t2} p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.Size() == t2.(Type).Size() -> x
+(Load <t1> p1 (Store {t2} p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.MustSize() == t2.(Type).MustSize() -> x
// Collapse OffPtr
(OffPtr (OffPtr p [b]) [a]) -> (OffPtr p [a+b])
// indexing operations
// Note: bounds check has already been done
-(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <types.Int> idx (Const32 <types.Int> [t.ElemType().Size()])))
-(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <types.Int> idx (Const64 <types.Int> [t.ElemType().Size()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 4 -> (AddPtr ptr (Mul32 <types.Int> idx (Const32 <types.Int> [t.ElemType().MustSize()])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 8 -> (AddPtr ptr (Mul64 <types.Int> idx (Const64 <types.Int> [t.ElemType().MustSize()])))
// struct operations
(StructSelect (StructMake1 x)) -> x
// un-SSAable values use mem->mem copies
(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t.(Type)) ->
- (Move {t} [t.(Type).Size()] dst src mem)
+ (Move {t} [t.(Type).MustSize()] dst src mem)
(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t.(Type)) ->
- (Move {t} [t.(Type).Size()] dst src (VarDef {x} mem))
+ (Move {t} [t.(Type).MustSize()] dst src (VarDef {x} mem))
// array ops
(ArraySelect (ArrayMake1 x)) -> x
(Arg <types.BytePtr> {n} [off])
(Arg <types.BytePtr> {n} [off+config.PtrSize]))
-(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 16 ->
+(Arg {n} [off]) && v.Type.IsComplex() && v.Type.MustSize() == 16 ->
(ComplexMake
(Arg <types.Float64> {n} [off])
(Arg <types.Float64> {n} [off+8]))
-(Arg {n} [off]) && v.Type.IsComplex() && v.Type.Size() == 8 ->
+(Arg {n} [off]) && v.Type.IsComplex() && v.Type.MustSize() == 8 ->
(ComplexMake
(Arg <types.Float32> {n} [off])
(Arg <types.Float32> {n} [off+4]))
g := test.NewValue1(bb.Pos, OpGetG, pt, mem0)
sp := test.NewValue0(bb.Pos, OpSP, pt)
cmpOp := OpLess64U
- if pt.Size() == 4 {
+ if pt.MustSize() == 4 {
cmpOp = OpLess32U
}
- limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g)
+ limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.MustSize(), g)
lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0)
cmp := test.NewValue2(bb.Pos, cmpOp, types.Bool, sp, lim)
test.SetControl(cmp)
auxSymOff // aux is a symbol, auxInt is an offset
auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff
auxTyp // aux is a type
- auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt
+ auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).MustSize() == AuxInt
auxSymInt32 // aux is a symbol, auxInt is a 32-bit integer
)
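// This is the shape the OpZero and OpMove values built in the ssagen code
// above have: the element type goes in Aux and its t.MustSize() byte count
// in the AuxInt.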
negate = !negate
}
- switch v.Type.Size() {
+ switch v.Type.MustSize() {
case 1:
v.reset(OpCopy)
case 2:
case 8:
v.reset(OpZeroExt8to64)
default:
- v.Fatalf("bad int size %d", v.Type.Size())
+ v.Fatalf("bad int size %d", v.Type.MustSize())
}
a := b0.Control
f := b0.Func
if f.pass.debug > 0 {
- f.Warnl(v.Block.Pos, "converted OpPhi bool -> int%d", v.Type.Size()*8)
+ f.Warnl(v.Block.Pos, "converted OpPhi bool -> int%d", v.Type.MustSize()*8)
}
}
// Common functions called from rewriting rules
func is64BitFloat(t Type) bool {
- return t.Size() == 8 && t.IsFloat()
+ return t.MustSize() == 8 && t.IsFloat()
}
func is32BitFloat(t Type) bool {
- return t.Size() == 4 && t.IsFloat()
+ return t.MustSize() == 4 && t.IsFloat()
}
func is64BitInt(t Type) bool {
- return t.Size() == 8 && t.IsInteger()
+ return t.MustSize() == 8 && t.IsInteger()
}
func is32BitInt(t Type) bool {
- return t.Size() == 4 && t.IsInteger()
+ return t.MustSize() == 4 && t.IsInteger()
}
func is16BitInt(t Type) bool {
- return t.Size() == 2 && t.IsInteger()
+ return t.MustSize() == 2 && t.IsInteger()
}
func is8BitInt(t Type) bool {
- return t.Size() == 1 && t.IsInteger()
+ return t.MustSize() == 1 && t.IsInteger()
}
func isPtr(t Type) bool {
}
func typeSize(t Type) int64 {
- return t.Size()
+ return t.MustSize()
}
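// These helpers are what the generated conditions below consume, e.g.
//	if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) { break }
// where the aux type picks the store width and the predicate picks the
// floating-point versus integer instruction.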
// mergeSym merges two symbolic offsets. There is no real merging of
return true
}
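// The constant-rotate recognitions below rely on t.MustSize(): for a 2-byte
// value, (x<<c) combined with (x>>(16-c)) via ADDL/ORL/XORL is a left-rotate
// by c and collapses to ROLWconst; the 1-byte variants collapse to ROLBconst.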
// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
+ // cond: c < 16 && d == 16-c && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 16 && d == 16-c && t.Size() == 2) {
+ if !(c < 16 && d == 16-c && t.MustSize() == 2) {
break
}
v.reset(Op386ROLWconst)
return true
}
// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
+ // cond: c < 16 && d == 16-c && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 16 && d == 16-c && t.Size() == 2) {
+ if !(c < 16 && d == 16-c && t.MustSize() == 2) {
break
}
v.reset(Op386ROLWconst)
return true
}
// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
+ // cond: c < 8 && d == 8-c && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 8 && d == 8-c && t.Size() == 1) {
+ if !(c < 8 && d == 8-c && t.MustSize() == 1) {
break
}
v.reset(Op386ROLBconst)
return true
}
// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
+ // cond: c < 8 && d == 8-c && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 8 && d == 8-c && t.Size() == 1) {
+ if !(c < 8 && d == 8-c && t.MustSize() == 1) {
break
}
v.reset(Op386ROLBconst)
return true
}
// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
+ // cond: c < 16 && d == 16-c && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 16 && d == 16-c && t.Size() == 2) {
+ if !(c < 16 && d == 16-c && t.MustSize() == 2) {
break
}
v.reset(Op386ROLWconst)
return true
}
// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
+ // cond: c < 16 && d == 16-c && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 16 && d == 16-c && t.Size() == 2) {
+ if !(c < 16 && d == 16-c && t.MustSize() == 2) {
break
}
v.reset(Op386ROLWconst)
return true
}
// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
+ // cond: c < 8 && d == 8-c && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 8 && d == 8-c && t.Size() == 1) {
+ if !(c < 8 && d == 8-c && t.MustSize() == 1) {
break
}
v.reset(Op386ROLBconst)
return true
}
// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
+ // cond: c < 8 && d == 8-c && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 8 && d == 8-c && t.Size() == 1) {
+ if !(c < 8 && d == 8-c && t.MustSize() == 1) {
break
}
v.reset(Op386ROLBconst)
return true
}
// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
+ // cond: c < 16 && d == 16-c && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 16 && d == 16-c && t.Size() == 2) {
+ if !(c < 16 && d == 16-c && t.MustSize() == 2) {
break
}
v.reset(Op386ROLWconst)
return true
}
// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
- // cond: c < 16 && d == 16-c && t.Size() == 2
+ // cond: c < 16 && d == 16-c && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 16 && d == 16-c && t.Size() == 2) {
+ if !(c < 16 && d == 16-c && t.MustSize() == 2) {
break
}
v.reset(Op386ROLWconst)
return true
}
// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
+ // cond: c < 8 && d == 8-c && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 8 && d == 8-c && t.Size() == 1) {
+ if !(c < 8 && d == 8-c && t.MustSize() == 1) {
break
}
v.reset(Op386ROLBconst)
return true
}
// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
- // cond: c < 8 && d == 8-c && t.Size() == 1
+ // cond: c < 8 && d == 8-c && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 8 && d == 8-c && t.Size() == 1) {
+ if !(c < 8 && d == 8-c && t.MustSize() == 1) {
break
}
v.reset(Op386ROLBconst)
}
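// rewriteValue386_OpStore_0 dispatches a generic Store on the aux type's
// MustSize() and on whether the value is a float: 8- and 4-byte floats use
// MOVSDstore/MOVSSstore, and 4/2/1-byte integers use MOVLstore/MOVWstore/MOVBstore.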
func rewriteValue386_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(Op386MOVSDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (MOVSSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(Op386MOVSSstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4
+ // cond: t.(Type).MustSize() == 4
// result: (MOVLstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4) {
+ if !(t.(Type).MustSize() == 4) {
break
}
v.reset(Op386MOVLstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(Op386MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(Op386MOVBstore)
return true
}
// match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: d==16-c && c < 16 && t.Size() == 2
+ // cond: d==16-c && c < 16 && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 16-c && c < 16 && t.Size() == 2) {
+ if !(d == 16-c && c < 16 && t.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLWconst)
return true
}
// match: (ADDL <t> (SHRWconst x [d]) (SHLLconst x [c]))
- // cond: d==16-c && c < 16 && t.Size() == 2
+ // cond: d==16-c && c < 16 && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 16-c && c < 16 && t.Size() == 2) {
+ if !(d == 16-c && c < 16 && t.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLWconst)
return true
}
// match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: d==8-c && c < 8 && t.Size() == 1
+ // cond: d==8-c && c < 8 && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ if !(d == 8-c && c < 8 && t.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLBconst)
return true
}
// match: (ADDL <t> (SHRBconst x [d]) (SHLLconst x [c]))
- // cond: d==8-c && c < 8 && t.Size() == 1
+ // cond: d==8-c && c < 8 && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ if !(d == 8-c && c < 8 && t.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLBconst)
return true
}
// match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: d==16-c && c < 16 && t.Size() == 2
+ // cond: d==16-c && c < 16 && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 16-c && c < 16 && t.Size() == 2) {
+ if !(d == 16-c && c < 16 && t.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLWconst)
return true
}
// match: (ORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
- // cond: d==16-c && c < 16 && t.Size() == 2
+ // cond: d==16-c && c < 16 && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 16-c && c < 16 && t.Size() == 2) {
+ if !(d == 16-c && c < 16 && t.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLWconst)
return true
}
// match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: d==8-c && c < 8 && t.Size() == 1
+ // cond: d==8-c && c < 8 && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ if !(d == 8-c && c < 8 && t.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLBconst)
return true
}
// match: (ORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
- // cond: d==8-c && c < 8 && t.Size() == 1
+ // cond: d==8-c && c < 8 && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ if !(d == 8-c && c < 8 && t.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLBconst)
return true
}
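// The ORL patterns below match the open-coded variable 16-bit rotate:
// x<<(y&15) combined with a masked x>>((16-y)&15), where the SBBLcarrymask
// term zeroes the right-shift half when the effective count would be 16.
// With v.Type.MustSize() == 2 the whole tree collapses to a single ROLW
// (or RORW for the mirrored shift order).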
// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
return true
}
// match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
return true
}
// match: (ORL (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))) (SHLL x (ANDQconst y [15])))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
return true
}
// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16])) (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16])))) (SHLL x (ANDQconst y [15])))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
return true
}
// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
return true
}
// match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
}
func rewriteValueAMD64_OpAMD64ORL_30(v *Value) bool {
// match: (ORL (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))) (SHLL x (ANDLconst y [15])))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
return true
}
// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16])) (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16])))) (SHLL x (ANDLconst y [15])))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (ROLW x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLW)
return true
}
// match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (RORW x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64RORW)
return true
}
// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SHRW x (ANDQconst y [15])))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (RORW x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64RORW)
return true
}
// match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (RORW x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64RORW)
return true
}
// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SHRW x (ANDLconst y [15])))
- // cond: v.Type.Size() == 2
+ // cond: v.Type.MustSize() == 2
// result: (RORW x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 2) {
+ if !(v.Type.MustSize() == 2) {
break
}
v.reset(OpAMD64RORW)
return true
}
// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
return true
}
// match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
return true
}
// match: (ORL (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDQconst y [ 7])))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
return true
}
// match: (ORL (ANDL (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])))) (SHLL x (ANDQconst y [ 7])))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
types := &b.Func.Config.Types
_ = types
// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
return true
}
// match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
return true
}
// match: (ORL (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))) (SHLL x (ANDLconst y [ 7])))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
return true
}
// match: (ORL (ANDL (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8])) (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])))) (SHLL x (ANDLconst y [ 7])))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (ROLB x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLB)
return true
}
// match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (RORB x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64RORB)
return true
}
// match: (ORL (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SHRB x (ANDQconst y [ 7])))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (RORB x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64RORB)
return true
}
// match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (RORB x y)
for {
v_0 := v.Args[0]
if y != v_1_1_0_0.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64RORB)
return true
}
// match: (ORL (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SHRB x (ANDLconst y [ 7])))
- // cond: v.Type.Size() == 1
+ // cond: v.Type.MustSize() == 1
// result: (RORB x y)
for {
v_0 := v.Args[0]
if y != v_1_1.Args[0] {
break
}
- if !(v.Type.Size() == 1) {
+ if !(v.Type.MustSize() == 1) {
break
}
v.reset(OpAMD64RORB)
return true
}
// match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
- // cond: d==16-c && c < 16 && t.Size() == 2
+ // cond: d==16-c && c < 16 && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 16-c && c < 16 && t.Size() == 2) {
+ if !(d == 16-c && c < 16 && t.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLWconst)
return true
}
// match: (XORL <t> (SHRWconst x [d]) (SHLLconst x [c]))
- // cond: d==16-c && c < 16 && t.Size() == 2
+ // cond: d==16-c && c < 16 && t.MustSize() == 2
// result: (ROLWconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 16-c && c < 16 && t.Size() == 2) {
+ if !(d == 16-c && c < 16 && t.MustSize() == 2) {
break
}
v.reset(OpAMD64ROLWconst)
return true
}
// match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
- // cond: d==8-c && c < 8 && t.Size() == 1
+ // cond: d==8-c && c < 8 && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ if !(d == 8-c && c < 8 && t.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLBconst)
return true
}
// match: (XORL <t> (SHRBconst x [d]) (SHLLconst x [c]))
- // cond: d==8-c && c < 8 && t.Size() == 1
+ // cond: d==8-c && c < 8 && t.MustSize() == 1
// result: (ROLBconst x [c])
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(d == 8-c && c < 8 && t.Size() == 1) {
+ if !(d == 8-c && c < 8 && t.MustSize() == 1) {
break
}
v.reset(OpAMD64ROLBconst)
}
func rewriteValueAMD64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (MOVSDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpAMD64MOVSDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (MOVSSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpAMD64MOVSSstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8
+ // cond: t.(Type).MustSize() == 8
// result: (MOVQstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8) {
+ if !(t.(Type).MustSize() == 8) {
break
}
v.reset(OpAMD64MOVQstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4
+ // cond: t.(Type).MustSize() == 4
// result: (MOVLstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4) {
+ if !(t.(Type).MustSize() == 4) {
break
}
v.reset(OpAMD64MOVLstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(OpAMD64MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(OpAMD64MOVBstore)
return true
}
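// Small Move ops (and, further down, Zero) lower by alignment on ARM: when
// t.MustAlignment() permits, a fixed-size copy becomes a short run of aligned
// MOVH/MOVW stores; larger or unaligned copies fall back to DUFFCOPY or to
// LoweredMove, whose AuxInt records the alignment the copy loop may assume.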
// match: (Move [2] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore dst (MOVHUload src mem) mem)
for {
if v.AuxInt != 2 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
if v.AuxInt != 4 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpARMMOVWstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
for {
if v.AuxInt != 4 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.(Type).MustAlignment()%4 == 0 && !config.noDuffDevice
// result: (DUFFCOPY [8 * (128 - int64(s/4))] dst src mem)
for {
s := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice) {
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.(Type).MustAlignment()%4 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpARMDUFFCOPY)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0
- // result: (LoweredMove [t.(Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 512 || config.noDuffDevice) || t.(Type).MustAlignment()%4 != 0
+ // result: (LoweredMove [t.(Type).MustAlignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(Type).MustAlignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !((s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0) {
+ if !((s > 512 || config.noDuffDevice) || t.(Type).MustAlignment()%4 != 0) {
break
}
v.reset(OpARMLoweredMove)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(Type).MustAlignment()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(Type).MustAlignment(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
}
func rewriteValueARM_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(OpARMMOVBstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(OpARMMOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpARMMOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpARMMOVFstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpARMMOVDstore)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 2 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 4 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpARMMOVWstore)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 4 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpARMMOVHstore)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.(Type).MustAlignment()%4 == 0 && !config.noDuffDevice
// result: (DUFFZERO [4 * (128 - int64(s/4))] ptr (MOVWconst [0]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(s%4 == 0 && s > 4 && s <= 512 && t.(Type).Alignment()%4 == 0 && !config.noDuffDevice) {
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.(Type).MustAlignment()%4 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpARMDUFFZERO)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0
- // result: (LoweredZero [t.(Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)]) (MOVWconst [0]) mem)
+ // cond: (s > 512 || config.noDuffDevice) || t.(Type).MustAlignment()%4 != 0
+ // result: (LoweredZero [t.(Type).MustAlignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).MustAlignment(), config)]) (MOVWconst [0]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !((s > 512 || config.noDuffDevice) || t.(Type).Alignment()%4 != 0) {
+ if !((s > 512 || config.noDuffDevice) || t.(Type).MustAlignment()%4 != 0) {
break
}
v.reset(OpARMLoweredZero)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(Type).MustAlignment()
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(Type).MustAlignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
v1 := b.NewValue0(v.Pos, OpARMMOVWconst, types.UInt32)
return true
}
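// The ARM64 constant-rotate patterns below need both the MOVWUreg
// zero-extension and t.MustSize() == 4: for a 4-byte value, (x>>(32-c))
// combined with (x<<c) is a rotate and becomes RORWconst [32-c]
// (or RORWconst [c] when the shift directions are mirrored).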
// match: (ADDshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x)
- // cond: c < 32 && t.Size() == 4
+ // cond: c < 32 && t.MustSize() == 4
// result: (RORWconst [32-c] x)
for {
t := v.Type
if x != v.Args[1] {
break
}
- if !(c < 32 && t.Size() == 4) {
+ if !(c < 32 && t.MustSize() == 4) {
break
}
v.reset(OpARM64RORWconst)
return true
}
// match: (ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
- // cond: c < 32 && t.Size() == 4
+ // cond: c < 32 && t.MustSize() == 4
// result: (RORWconst [ c] x)
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 32 && t.Size() == 4) {
+ if !(c < 32 && t.MustSize() == 4) {
break
}
v.reset(OpARM64RORWconst)
return true
}
// match: (ORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x)
- // cond: c < 32 && t.Size() == 4
+ // cond: c < 32 && t.MustSize() == 4
// result: (RORWconst [32-c] x)
for {
t := v.Type
if x != v.Args[1] {
break
}
- if !(c < 32 && t.Size() == 4) {
+ if !(c < 32 && t.MustSize() == 4) {
break
}
v.reset(OpARM64RORWconst)
return true
}
// match: (ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
- // cond: c < 32 && t.Size() == 4
+ // cond: c < 32 && t.MustSize() == 4
// result: (RORWconst [ c] x)
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 32 && t.Size() == 4) {
+ if !(c < 32 && t.MustSize() == 4) {
break
}
v.reset(OpARM64RORWconst)
return true
}
// match: (XORshiftLL <t> [c] (SRLconst (MOVWUreg x) [32-c]) x)
- // cond: c < 32 && t.Size() == 4
+ // cond: c < 32 && t.MustSize() == 4
// result: (RORWconst [32-c] x)
for {
t := v.Type
if x != v.Args[1] {
break
}
- if !(c < 32 && t.Size() == 4) {
+ if !(c < 32 && t.MustSize() == 4) {
break
}
v.reset(OpARM64RORWconst)
return true
}
// match: (XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
- // cond: c < 32 && t.Size() == 4
+ // cond: c < 32 && t.MustSize() == 4
// result: (RORWconst [ c] x)
for {
t := v.Type
if x != v_1.Args[0] {
break
}
- if !(c < 32 && t.Size() == 4) {
+ if !(c < 32 && t.MustSize() == 4) {
break
}
v.reset(OpARM64RORWconst)
}
func rewriteValueARM64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(OpARM64MOVBstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(OpARM64MOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpARM64MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && !is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && !is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && !is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && !is64BitFloat(val.Type)) {
break
}
v.reset(OpARM64MOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpARM64FMOVSstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpARM64FMOVDstore)
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore dst (MOVHUload src mem) mem)
for {
if v.AuxInt != 2 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
if v.AuxInt != 4 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
for {
if v.AuxInt != 4 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
if v.AuxInt != 8 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
for {
if v.AuxInt != 8 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
types := &b.Func.Config.Types
_ = types
// match: (Move [6] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
for {
if v.AuxInt != 6 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
return true
}
// match: (Move [12] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
if v.AuxInt != 12 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Move [16] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
for {
if v.AuxInt != 16 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: (s > 16 || t.(Type).Alignment()%4 != 0)
- // result: (LoweredMove [t.(Type).Alignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 16 || t.(Type).MustAlignment()%4 != 0)
+ // result: (LoweredMove [t.(Type).MustAlignment()] dst src (ADDconst <src.Type> src [s-moveSize(t.(Type).MustAlignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(s > 16 || t.(Type).Alignment()%4 != 0) {
+ if !(s > 16 || t.(Type).MustAlignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredMove)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(Type).MustAlignment()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(Type).MustAlignment(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
}
func rewriteValueMIPS_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(OpMIPSMOVBstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(OpMIPSMOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVFstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPSMOVDstore)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 2 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore ptr (MOVWconst [0]) mem)
for {
if v.AuxInt != 4 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 4 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
return true
}
// match: (Zero [6] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)))
for {
if v.AuxInt != 6 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPSMOVHstore)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))
for {
if v.AuxInt != 8 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
types := &b.Func.Config.Types
_ = types
// match: (Zero [12] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))
for {
if v.AuxInt != 12 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Zero [16] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))))
for {
if v.AuxInt != 16 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPSMOVWstore)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 16 || t.(Type).Alignment()%4 != 0)
- // result: (LoweredZero [t.(Type).Alignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 16 || t.(Type).MustAlignment()%4 != 0)
+ // result: (LoweredZero [t.(Type).MustAlignment()] ptr (ADDconst <ptr.Type> ptr [s-moveSize(t.(Type).MustAlignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(s > 16 || t.(Type).Alignment()%4 != 0) {
+ if !(s > 16 || t.(Type).MustAlignment()%4 != 0) {
break
}
v.reset(OpMIPSLoweredZero)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(Type).MustAlignment()
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(Type).MustAlignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
return true
}
// match: (Move [2] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore dst (MOVHload src mem) mem)
for {
if v.AuxInt != 2 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore dst (MOVWload src mem) mem)
for {
if v.AuxInt != 4 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Move [4] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
for {
if v.AuxInt != 4 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(Type).MustAlignment()%8 == 0
// result: (MOVVstore dst (MOVVload src mem) mem)
for {
if v.AuxInt != 8 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(Type).MustAlignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
for {
if v.AuxInt != 8 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
for {
if v.AuxInt != 8 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Move [6] {t} dst src mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
for {
if v.AuxInt != 6 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Move [12] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
for {
if v.AuxInt != 12 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Move [16] {t} dst src mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(Type).MustAlignment()%8 == 0
// result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
for {
if v.AuxInt != 16 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(Type).MustAlignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Move [24] {t} dst src mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(Type).MustAlignment()%8 == 0
// result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
for {
if v.AuxInt != 24 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(Type).MustAlignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Move [s] {t} dst src mem)
- // cond: s > 24 || t.(Type).Alignment()%8 != 0
- // result: (LoweredMove [t.(Type).Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: s > 24 || t.(Type).MustAlignment()%8 != 0
+ // result: (LoweredMove [t.(Type).MustAlignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.(Type).MustAlignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(s > 24 || t.(Type).Alignment()%8 != 0) {
+ if !(s > 24 || t.(Type).MustAlignment()%8 != 0) {
break
}
v.reset(OpMIPS64LoweredMove)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(Type).MustAlignment()
v.AddArg(dst)
v.AddArg(src)
v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(Type).MustAlignment(), config)
v0.AddArg(src)
v.AddArg(v0)
v.AddArg(mem)
}
func rewriteValueMIPS64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(OpMIPS64MOVBstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && !is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && !is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && !is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && !is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && !is64BitFloat(val.Type)
// result: (MOVVstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && !is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && !is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (MOVFstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVFstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpMIPS64MOVDstore)
return true
}
// match: (Zero [2] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore ptr (MOVVconst [0]) mem)
for {
if v.AuxInt != 2 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore ptr (MOVVconst [0]) mem)
for {
if v.AuxInt != 4 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Zero [4] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))
for {
if v.AuxInt != 4 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(Type).MustAlignment()%8 == 0
// result: (MOVVstore ptr (MOVVconst [0]) mem)
for {
if v.AuxInt != 8 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(Type).MustAlignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))
for {
if v.AuxInt != 8 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Zero [8] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))))
for {
if v.AuxInt != 8 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Zero [6] {t} ptr mem)
- // cond: t.(Type).Alignment()%2 == 0
+ // cond: t.(Type).MustAlignment()%2 == 0
// result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))
for {
if v.AuxInt != 6 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%2 == 0) {
+ if !(t.(Type).MustAlignment()%2 == 0) {
break
}
v.reset(OpMIPS64MOVHstore)
return true
}
// match: (Zero [12] {t} ptr mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)))
for {
if v.AuxInt != 12 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpMIPS64MOVWstore)
return true
}
// match: (Zero [16] {t} ptr mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(Type).MustAlignment()%8 == 0
// result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))
for {
if v.AuxInt != 16 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(Type).MustAlignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Zero [24] {t} ptr mem)
- // cond: t.(Type).Alignment()%8 == 0
+ // cond: t.(Type).MustAlignment()%8 == 0
// result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)))
for {
if v.AuxInt != 24 {
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.(Type).Alignment()%8 == 0) {
+ if !(t.(Type).MustAlignment()%8 == 0) {
break
}
v.reset(OpMIPS64MOVVstore)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.(Type).Alignment()%8 == 0 && !config.noDuffDevice
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.(Type).MustAlignment()%8 == 0 && !config.noDuffDevice
// result: (DUFFZERO [8 * (128 - int64(s/8))] ptr mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !(s%8 == 0 && s > 24 && s <= 8*128 && t.(Type).Alignment()%8 == 0 && !config.noDuffDevice) {
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.(Type).MustAlignment()%8 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpMIPS64DUFFZERO)
return true
}
// match: (Zero [s] {t} ptr mem)
- // cond: (s > 8*128 || config.noDuffDevice) || t.(Type).Alignment()%8 != 0
- // result: (LoweredZero [t.(Type).Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.(Type).Alignment(), config)]) mem)
+ // cond: (s > 8*128 || config.noDuffDevice) || t.(Type).MustAlignment()%8 != 0
+ // result: (LoweredZero [t.(Type).MustAlignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.(Type).MustAlignment(), config)]) mem)
for {
s := v.AuxInt
t := v.Aux
ptr := v.Args[0]
mem := v.Args[1]
- if !((s > 8*128 || config.noDuffDevice) || t.(Type).Alignment()%8 != 0) {
+ if !((s > 8*128 || config.noDuffDevice) || t.(Type).MustAlignment()%8 != 0) {
break
}
v.reset(OpMIPS64LoweredZero)
- v.AuxInt = t.(Type).Alignment()
+ v.AuxInt = t.(Type).MustAlignment()
v.AddArg(ptr)
v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type)
- v0.AuxInt = s - moveSize(t.(Type).Alignment(), config)
+ v0.AuxInt = s - moveSize(t.(Type).MustAlignment(), config)
v0.AddArg(ptr)
v.AddArg(v0)
v.AddArg(mem)
return true
}
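These matcher functions are generated from the architecture rule files under cmd/compile/internal/ssa/gen, so the Alignment-to-MustAlignment rename visible in the match/cond comments above reflects the same rename in the .rules sources. The AuxInt arithmetic in the Duff's-device rule is easiest to see with a concrete size; a minimal sketch, assuming a 32-byte, 8-byte-aligned Zero:

package main

import "fmt"

func main() {
	// For (Zero [s]) with s%8 == 0, 24 < s <= 8*128, and 8-byte alignment, the
	// rule above emits DUFFZERO [8 * (128 - int64(s/8))]: an entry offset into
	// the Duff's-device zeroing routine chosen so that only the final s/8
	// eight-byte stores are executed.
	s := int64(32)
	fmt.Println(8 * (128 - s/8)) // 992
}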
// match: (Move [8] {t} dst src mem)
- // cond: t.(Type).Alignment()%4 == 0
+ // cond: t.(Type).MustAlignment()%4 == 0
// result: (MOVDstore dst (MOVDload src mem) mem)
for {
if v.AuxInt != 8 {
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Alignment()%4 == 0) {
+ if !(t.(Type).MustAlignment()%4 == 0) {
break
}
v.reset(OpPPC64MOVDstore)
}
func rewriteValuePPC64_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is32BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpPPC64FMOVSstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))
+ // cond: t.(Type).MustSize() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) {
+ if !(t.(Type).MustSize() == 8 && (is64BitInt(val.Type) || isPtr(val.Type))) {
break
}
v.reset(OpPPC64MOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitInt(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitInt(val.Type)
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitInt(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitInt(val.Type)) {
break
}
v.reset(OpPPC64MOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(OpPPC64MOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(OpPPC64MOVBstore)
}
func rewriteValueS390X_OpStore_0(v *Value) bool {
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8 && is64BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 8 && is64BitFloat(val.Type)
// result: (FMOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && is64BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 8 && is64BitFloat(val.Type)) {
break
}
v.reset(OpS390XFMOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4 && is32BitFloat(val.Type)
+ // cond: t.(Type).MustSize() == 4 && is32BitFloat(val.Type)
// result: (FMOVSstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4 && is32BitFloat(val.Type)) {
+ if !(t.(Type).MustSize() == 4 && is32BitFloat(val.Type)) {
break
}
v.reset(OpS390XFMOVSstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 8
+ // cond: t.(Type).MustSize() == 8
// result: (MOVDstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8) {
+ if !(t.(Type).MustSize() == 8) {
break
}
v.reset(OpS390XMOVDstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 4
+ // cond: t.(Type).MustSize() == 4
// result: (MOVWstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 4) {
+ if !(t.(Type).MustSize() == 4) {
break
}
v.reset(OpS390XMOVWstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 2
+ // cond: t.(Type).MustSize() == 2
// result: (MOVHstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 2) {
+ if !(t.(Type).MustSize() == 2) {
break
}
v.reset(OpS390XMOVHstore)
return true
}
// match: (Store {t} ptr val mem)
- // cond: t.(Type).Size() == 1
+ // cond: t.(Type).MustSize() == 1
// result: (MOVBstore ptr val mem)
for {
t := v.Aux
ptr := v.Args[0]
val := v.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 1) {
+ if !(t.(Type).MustSize() == 1) {
break
}
v.reset(OpS390XMOVBstore)
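The PPC64 and S390X Store lowerings above all follow the same shape: the generic Store carries the stored type in its Aux, and the machine op is chosen from MustSize of that type plus whether the value is floating point (or, on PPC64, a 64-bit int or pointer). A minimal sketch of that dispatch, using the S390X op names purely as labels:

package main

import "fmt"

// storeOpFor mirrors the size/class dispatch in the rules above; it is an
// illustration of the selection logic, not compiler code.
func storeOpFor(size int64, isFloat bool) string {
	switch {
	case isFloat && size == 8:
		return "FMOVDstore"
	case isFloat && size == 4:
		return "FMOVSstore"
	case size == 8:
		return "MOVDstore"
	case size == 4:
		return "MOVWstore"
	case size == 2:
		return "MOVHstore"
	default:
		return "MOVBstore"
	}
}

func main() {
	fmt.Println(storeOpFor(8, true))  // FMOVDstore
	fmt.Println(storeOpFor(2, false)) // MOVHstore
}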
types := &b.Func.Config.Types
_ = types
// match: (Load <t> ptr mem)
- // cond: t.IsComplex() && t.Size() == 8
+ // cond: t.IsComplex() && t.MustSize() == 8
// result: (ComplexMake (Load <types.Float32> ptr mem) (Load <types.Float32> (OffPtr <types.Float32Ptr> [4] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.IsComplex() && t.Size() == 8) {
+ if !(t.IsComplex() && t.MustSize() == 8) {
break
}
v.reset(OpComplexMake)
return true
}
// match: (Load <t> ptr mem)
- // cond: t.IsComplex() && t.Size() == 16
+ // cond: t.IsComplex() && t.MustSize() == 16
// result: (ComplexMake (Load <types.Float64> ptr mem) (Load <types.Float64> (OffPtr <types.Float64Ptr> [8] ptr) mem) )
for {
t := v.Type
ptr := v.Args[0]
mem := v.Args[1]
- if !(t.IsComplex() && t.Size() == 16) {
+ if !(t.IsComplex() && t.MustSize() == 16) {
break
}
v.reset(OpComplexMake)
types := &b.Func.Config.Types
_ = types
// match: (Store {t} dst (ComplexMake real imag) mem)
- // cond: t.(Type).Size() == 8
+ // cond: t.(Type).MustSize() == 8
// result: (Store {types.Float32} (OffPtr <types.Float32Ptr> [4] dst) imag (Store {types.Float32} dst real mem))
for {
t := v.Aux
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8) {
+ if !(t.(Type).MustSize() == 8) {
break
}
v.reset(OpStore)
return true
}
// match: (Store {t} dst (ComplexMake real imag) mem)
- // cond: t.(Type).Size() == 16
+ // cond: t.(Type).MustSize() == 16
// result: (Store {types.Float64} (OffPtr <types.Float64Ptr> [8] dst) imag (Store {types.Float64} dst real mem))
for {
t := v.Aux
real := v_1.Args[0]
imag := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 16) {
+ if !(t.(Type).MustSize() == 16) {
break
}
v.reset(OpStore)
config := b.Func.Config
_ = config
// match: (Store {t} dst (Int64Make hi lo) mem)
- // cond: t.(Type).Size() == 8 && !config.BigEndian
+ // cond: t.(Type).MustSize() == 8 && !config.BigEndian
// result: (Store {hi.Type} (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store {lo.Type} dst lo mem))
for {
t := v.Aux
hi := v_1.Args[0]
lo := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && !config.BigEndian) {
+ if !(t.(Type).MustSize() == 8 && !config.BigEndian) {
break
}
v.reset(OpStore)
return true
}
// match: (Store {t} dst (Int64Make hi lo) mem)
- // cond: t.(Type).Size() == 8 && config.BigEndian
+ // cond: t.(Type).MustSize() == 8 && config.BigEndian
// result: (Store {lo.Type} (OffPtr <lo.Type.PtrTo()> [4] dst) lo (Store {hi.Type} dst hi mem))
for {
t := v.Aux
hi := v_1.Args[0]
lo := v_1.Args[1]
mem := v.Args[2]
- if !(t.(Type).Size() == 8 && config.BigEndian) {
+ if !(t.(Type).MustSize() == 8 && config.BigEndian) {
break
}
v.reset(OpStore)
return true
}
// match: (Arg {n} [off])
- // cond: v.Type.IsComplex() && v.Type.Size() == 16
+ // cond: v.Type.IsComplex() && v.Type.MustSize() == 16
// result: (ComplexMake (Arg <types.Float64> {n} [off]) (Arg <types.Float64> {n} [off+8]))
for {
off := v.AuxInt
n := v.Aux
- if !(v.Type.IsComplex() && v.Type.Size() == 16) {
+ if !(v.Type.IsComplex() && v.Type.MustSize() == 16) {
break
}
v.reset(OpComplexMake)
return true
}
// match: (Arg {n} [off])
- // cond: v.Type.IsComplex() && v.Type.Size() == 8
+ // cond: v.Type.IsComplex() && v.Type.MustSize() == 8
// result: (ComplexMake (Arg <types.Float32> {n} [off]) (Arg <types.Float32> {n} [off+4]))
for {
off := v.AuxInt
n := v.Aux
- if !(v.Type.IsComplex() && v.Type.Size() == 8) {
+ if !(v.Type.IsComplex() && v.Type.MustSize() == 8) {
break
}
v.reset(OpComplexMake)
fe := b.Func.fe
_ = fe
// match: (Load <t1> p1 (Store {t2} p2 x _))
- // cond: isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.Size() == t2.(Type).Size()
+ // cond: isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && t1.MustSize() == t2.(Type).MustSize()
// result: x
for {
t1 := v.Type
t2 := v_1.Aux
p2 := v_1.Args[0]
x := v_1.Args[1]
- if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == CMPeq && t1.Size() == t2.(Type).Size()) {
+ if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == CMPeq && t1.MustSize() == t2.(Type).MustSize()) {
break
}
v.reset(OpCopy)
_ = types
// match: (PtrIndex <t> ptr idx)
// cond: config.PtrSize == 4
- // result: (AddPtr ptr (Mul32 <types.Int> idx (Const32 <types.Int> [t.ElemType().Size()])))
+ // result: (AddPtr ptr (Mul32 <types.Int> idx (Const32 <types.Int> [t.ElemType().MustSize()])))
for {
t := v.Type
ptr := v.Args[0]
v0 := b.NewValue0(v.Pos, OpMul32, types.Int)
v0.AddArg(idx)
v1 := b.NewValue0(v.Pos, OpConst32, types.Int)
- v1.AuxInt = t.ElemType().Size()
+ v1.AuxInt = t.ElemType().MustSize()
v0.AddArg(v1)
v.AddArg(v0)
return true
}
// match: (PtrIndex <t> ptr idx)
// cond: config.PtrSize == 8
- // result: (AddPtr ptr (Mul64 <types.Int> idx (Const64 <types.Int> [t.ElemType().Size()])))
+ // result: (AddPtr ptr (Mul64 <types.Int> idx (Const64 <types.Int> [t.ElemType().MustSize()])))
for {
t := v.Type
ptr := v.Args[0]
v0 := b.NewValue0(v.Pos, OpMul64, types.Int)
v0.AddArg(idx)
v1 := b.NewValue0(v.Pos, OpConst64, types.Int)
- v1.AuxInt = t.ElemType().Size()
+ v1.AuxInt = t.ElemType().MustSize()
v0.AddArg(v1)
v.AddArg(v0)
return true
}
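PtrIndex computes the address of element idx, so the constant folded into the multiply is just the element size, now obtained with MustSize; the index math is 32-bit or 64-bit depending on config.PtrSize. A small sketch of the resulting address arithmetic, assuming a hypothetical 4-byte element type:

package main

import "fmt"

func main() {
	const elemSize = 4 // stands in for t.ElemType().MustSize(), e.g. a 4-byte element
	base := uintptr(0x1000)
	idx := int64(3)
	fmt.Printf("%#x\n", base+uintptr(idx)*elemSize) // 0x100c: base + idx*elemSize
}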
// match: (Store {t} dst (Load src mem) mem)
// cond: !fe.CanSSA(t.(Type))
- // result: (Move {t} [t.(Type).Size()] dst src mem)
+ // result: (Move {t} [t.(Type).MustSize()] dst src mem)
for {
t := v.Aux
dst := v.Args[0]
break
}
v.reset(OpMove)
- v.AuxInt = t.(Type).Size()
+ v.AuxInt = t.(Type).MustSize()
v.Aux = t
v.AddArg(dst)
v.AddArg(src)
}
// match: (Store {t} dst (Load src mem) (VarDef {x} mem))
// cond: !fe.CanSSA(t.(Type))
- // result: (Move {t} [t.(Type).Size()] dst src (VarDef {x} mem))
+ // result: (Move {t} [t.(Type).MustSize()] dst src (VarDef {x} mem))
for {
t := v.Aux
dst := v.Args[0]
break
}
v.reset(OpMove)
- v.AuxInt = t.(Type).Size()
+ v.AuxInt = t.(Type).MustSize()
v.Aux = t
v.AddArg(dst)
v.AddArg(src)
// A type interface used to import cmd/internal/gc:Type
// Type instances are not guaranteed to be canonical.
type Type interface {
- Size() int64 // return the size in bytes
- Alignment() int64
+ MustSize() int64 // return the size in bytes; the size must already have been calculated
+ MustAlignment() int64 // return the alignment in bytes; the alignment must already have been calculated
IsBoolean() bool // is a named or unnamed boolean type
IsInteger() bool // ... ditto for the others
Int128 bool
}
-func (t *CompilerType) Size() int64 { return t.size } // Size in bytes
-func (t *CompilerType) Alignment() int64 { return 0 }
+func (t *CompilerType) MustSize() int64 { return t.size } // Size in bytes
+func (t *CompilerType) MustAlignment() int64 { return 0 }
func (t *CompilerType) IsBoolean() bool { return false }
func (t *CompilerType) IsInteger() bool { return false }
func (t *CompilerType) IsSigned() bool { return false }
// Any tuple with a memory type must put that memory type second.
}
-func (t *TupleType) Size() int64 { panic("not implemented") }
-func (t *TupleType) Alignment() int64 { panic("not implemented") }
+func (t *TupleType) MustSize() int64 { panic("not implemented") }
+func (t *TupleType) MustAlignment() int64 { panic("not implemented") }
func (t *TupleType) IsBoolean() bool { return false }
func (t *TupleType) IsInteger() bool { return false }
func (t *TupleType) IsSigned() bool { return false }
Name string
}
-func (t *TypeImpl) Size() int64 { return t.Size_ }
-func (t *TypeImpl) Alignment() int64 { return t.Align }
+func (t *TypeImpl) MustSize() int64 { return t.Size_ }
+func (t *TypeImpl) MustAlignment() int64 { return t.Align }
func (t *TypeImpl) IsBoolean() bool { return t.Boolean }
func (t *TypeImpl) IsInteger() bool { return t.Integer }
func (t *TypeImpl) IsSigned() bool { return t.Signed }
aux := &AutoSymbol{Node: tmp}
mem = b.NewValue1A(pos, OpVarDef, TypeMem, tmp, mem)
tmpaddr := b.NewValue1A(pos, OpAddr, t.PtrTo(), aux, sp)
- siz := t.Size()
+ siz := t.MustSize()
mem = b.NewValue3I(pos, OpMove, TypeMem, siz, tmpaddr, val, mem)
mem.Aux = t
val = tmpaddr
if typ != nil { // for typedmemmove
taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
- off = round(off, taddr.Type.Alignment())
+ off = round(off, taddr.Type.MustAlignment())
arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, taddr, mem)
- off += taddr.Type.Size()
+ off += taddr.Type.MustSize()
}
- off = round(off, ptr.Type.Alignment())
+ off = round(off, ptr.Type.MustAlignment())
arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
mem = b.NewValue3A(pos, OpStore, TypeMem, ptr.Type, arg, ptr, mem)
- off += ptr.Type.Size()
+ off += ptr.Type.MustSize()
if val != nil {
- off = round(off, val.Type.Alignment())
+ off = round(off, val.Type.MustAlignment())
arg = b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
mem = b.NewValue3A(pos, OpStore, TypeMem, val.Type, arg, val, mem)
- off += val.Type.Size()
+ off += val.Type.MustSize()
}
off = round(off, config.PtrSize)
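The argument-packing code above rounds the running offset up to each value's alignment before storing it, advances by the value's size, and finally rounds up to the pointer size. A small, self-contained sketch of that bookkeeping; roundUp here stands in for the rounding helper used above:

package main

import "fmt"

// roundUp returns o rounded up to a multiple of r (r a power of two),
// mirroring the rounding applied to off above.
func roundUp(o, r int64) int64 { return (o + r - 1) &^ (r - 1) }

func main() {
	off := int64(0)
	args := []struct{ size, align int64 }{
		{8, 8}, // e.g. the typ pointer for typedmemmove
		{8, 8}, // the destination pointer
		{2, 2}, // a 2-byte value
	}
	for _, a := range args {
		off = roundUp(off, a.align)
		fmt.Printf("store at offset %d\n", off) // 0, 8, 16
		off += a.size
	}
	off = roundUp(off, 8)                // final round to pointer size
	fmt.Println("total arg size:", off)  // 24
}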
return t.Extra.(*Func).Argwid
}
+// Size returns t's size in bytes, calculating it if necessary.
func (t *Type) Size() int64 {
+ Dowidth(t)
+ return t.Width
+}
+
+// MustSize returns t's Size, which must have been calculated previously.
+// It is intended for use in the backend, where t must be treated as readonly.
+func (t *Type) MustSize() int64 {
t.AssertWidthCalculated()
return t.Width
}
+// Alignment returns t's alignment in bytes, calculating it if necessary.
func (t *Type) Alignment() int64 {
+ Dowidth(t)
+ return int64(t.Align)
+}
+
+// MustAlignment returns t's Alignment, which must have been calculated previously.
+// It is intended for use in the backend, where t must be treated as readonly.
+func (t *Type) MustAlignment() int64 {
t.AssertWidthCalculated()
return int64(t.Align)
}
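The intent of the split: Size and Alignment stay safe to call from the front end because they compute widths on demand via Dowidth, while the Must variants serve the back end, which treats types as read-only and expects widths to be final, so a back-end path that reaches a type whose width was never calculated fails loudly. A toy, self-contained sketch of the same contract; the names here are illustrative, not the compiler's own:

package main

import "fmt"

type typ struct {
	width     int64
	widthDone bool
}

// dowidth stands in for gc.Dowidth: it computes and caches the width.
func (t *typ) dowidth() {
	if !t.widthDone {
		t.width, t.widthDone = 8, true
	}
}

// Size computes the width if needed, as the front end may.
func (t *typ) Size() int64 {
	t.dowidth()
	return t.width
}

// MustSize requires the width to be final already, as the SSA back end now does.
func (t *typ) MustSize() int64 {
	if !t.widthDone {
		panic("MustSize called before width was calculated")
	}
	return t.width
}

func main() {
	t := &typ{}
	fmt.Println(t.Size())     // 8; computes the width lazily
	fmt.Println(t.MustSize()) // 8; would have panicked had Size (Dowidth's analogue) not run
}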
}
push(s, v.Args[0])
var op obj.As
- switch v.Type.Size() {
+ switch v.Type.MustSize() {
case 4:
op = x86.AFMOVFP
case 8:
// loadPush returns the opcode for load+push of the given type.
func loadPush(t ssa.Type) obj.As {
- if t.Size() == 4 {
+ if t.MustSize() == 4 {
return x86.AFMOVF
}
return x86.AFMOVD
func zeroAuto(pp *gc.Progs, n *gc.Node) {
// Note: this code must not clobber any registers.
sym := n.Sym.Linksym()
- size := n.Type.Size()
+ size := n.Type.MustSize()
for i := int64(0); i < size; i += 4 {
p := pp.Prog(x86.AMOVL)
p.From.Type = obj.TYPE_CONST
// loadByType returns the load instruction of the given type.
func loadByType(t ssa.Type) obj.As {
// Avoid partial register write
- if !t.IsFloat() && t.Size() <= 2 {
- if t.Size() == 1 {
+ if !t.IsFloat() && t.MustSize() <= 2 {
+ if t.MustSize() == 1 {
return x86.AMOVBLZX
} else {
return x86.AMOVWLZX
// storeByType returns the store instruction of the given type.
func storeByType(t ssa.Type) obj.As {
- width := t.Size()
+ width := t.MustSize()
if t.IsFloat() {
switch width {
case 4:
// moveByType returns the reg->reg move instruction of the given type.
func moveByType(t ssa.Type) obj.As {
if t.IsFloat() {
- switch t.Size() {
+ switch t.MustSize() {
case 4:
return x86.AMOVSS
case 8:
return x86.AMOVSD
default:
- panic(fmt.Sprintf("bad float register width %d:%s", t.Size(), t))
+ panic(fmt.Sprintf("bad float register width %d:%s", t.MustSize(), t))
}
} else {
- switch t.Size() {
+ switch t.MustSize() {
case 1:
// Avoids partial register write
return x86.AMOVL
case 4:
return x86.AMOVL
default:
- panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
+ panic(fmt.Sprintf("bad int register width %d:%s", t.MustSize(), t))
}
}
}
// IMULB puts the high portion in AH instead of DL,
// so move it to DL for consistency
- if v.Type.Size() == 1 {
+ if v.Type.MustSize() == 1 {
m := s.Prog(x86.AMOVB)
m.From.Type = obj.TYPE_REG
m.From.Reg = x86.REG_AH