Passes toolstash/buildall.
[git-generate]
cd src/cmd/compile/internal/ssa
rf '
ex . ../ir ../gc {
import "cmd/compile/internal/types"
var t *types.Type
t.Etype -> t.Kind()
t.Sym -> t.GetSym()
t.Orig -> t.Underlying()
}
'
cd ../types
rf '
mv EType Kind
mv IRNode Object
mv Type.Etype Type.kind
mv Type.Sym Type.sym
mv Type.Orig Type.underlying
mv Type.Cache Type.cache
mv Type.GetSym Type.Sym
mv Bytetype ByteType
mv Runetype RuneType
mv Errortype ErrorType
'
cd ../gc
sed -i 's/Bytetype/ByteType/; s/Runetype/RuneType/' mkbuiltin.go
git codereview gofmt
go install cmd/compile/internal/...
go test cmd/compile -u || go test cmd/compile
Change-Id: Ibecb2d7100d3318a49238eb4a78d70acb49eedca
Reviewed-on: https://go-review.googlesource.com/c/go/+/274437
Run-TryBot: Matthew Dempsky <mdempsky@google.com>
Reviewed-by: Russ Cox <rsc@golang.org>
Trust: Matthew Dempsky <mdempsky@google.com>
"cmd/compile/internal/syntax.position %s": "",
"cmd/compile/internal/syntax.token %q": "",
"cmd/compile/internal/syntax.token %s": "",
- "cmd/compile/internal/types.EType %d": "",
- "cmd/compile/internal/types.EType %s": "",
- "cmd/compile/internal/types.EType %v": "",
- "cmd/compile/internal/types.IRNode %v": "",
+ "cmd/compile/internal/types.Kind %d": "",
+ "cmd/compile/internal/types.Kind %s": "",
+ "cmd/compile/internal/types.Kind %v": "",
+ "cmd/compile/internal/types.Object %v": "",
"cmd/internal/obj.ABI %v": "",
"error %v": "",
"float64 %.2f": "",
// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
func EqCanPanic(t *types.Type) bool {
- switch t.Etype {
+ switch t.Kind() {
default:
return false
case types.TINTER:
return ANOEQ, t
}
- switch t.Etype {
+ switch t.Kind() {
case types.TANY, types.TFORW:
// will be defined later.
return ANOEQ, t
// (And the closure generated by genhash will also get
// dead-code eliminated, as we call the subtype hashers
// directly.)
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
genhash(t.Elem())
case types.TSTRUCT:
np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
// An array of pure memory would be handled by the
// standard algorithm, so the element type must not be
// We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
// so t must be either an array or a struct.
- switch t.Etype {
+ switch t.Kind() {
default:
base.Fatalf("geneq %v", t)
}
}
- switch t.Elem().Etype {
+ switch t.Elem().Kind() {
case types.TSTRING:
// Do two loops. First, check that all the lengths match (cheap).
// Second, check that all the contents match (expensive).
// We implement a simple DFS loop-finding algorithm. This
// could be faster, but type cycles are rare.
- if t.Sym != nil {
+ if t.Sym() != nil {
// Declared type. Check for loops and otherwise
// recurse on the type expression used in the type
// declaration.
// Type imported from package, so it can't be part of
// a type loop (otherwise that package should have
// failed to compile).
- if t.Sym.Pkg != ir.LocalPkg {
+ if t.Sym().Pkg != ir.LocalPkg {
return false
}
} else {
// Anonymous type. Recurse on contained types.
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
if findTypeLoop(t.Elem(), path) {
return true
t.Width = -2
t.Align = 0 // 0 means use t.Width, below
- et := t.Etype
+ et := t.Kind()
switch et {
case types.TFUNC, types.TCHAN, types.TMAP, types.TSTRING:
break
// simtype == 0 during bootstrap
default:
- if simtype[t.Etype] != 0 {
- et = simtype[t.Etype]
+ if simtype[t.Kind()] != 0 {
+ et = simtype[t.Kind()]
}
}
// only their unexpanded method set (i.e., exclusive of
// interface embeddings), and the switch statement below
// handles their full method set.
- if t.Sym != nil && t.Etype != types.TINTER {
+ if t.Sym() != nil && t.Kind() != types.TINTER {
for _, m := range t.Methods().Slice() {
if types.IsExported(m.Sym.Name) {
p.markObject(ir.AsNode(m.Nname))
// Notably, we don't mark function parameter types, because
// the user already needs some way to construct values of
// those types.
- switch t.Etype {
+ switch t.Kind() {
case types.TPTR, types.TARRAY, types.TSLICE:
p.markType(t.Elem())
types.Types[types.TSTRING],
// basic type aliases
- types.Bytetype,
- types.Runetype,
+ types.ByteType,
+ types.RuneType,
// error
- types.Errortype,
+ types.ErrorType,
// untyped types
types.UntypedBool,
func runtimeTypes() []*types.Type {
var typs [131]*types.Type
- typs[0] = types.Bytetype
+ typs[0] = types.ByteType
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[types.TANY]
typs[3] = types.NewPtr(typs[2])
typs[43] = functype(nil, []*ir.Field{anonfield(typs[42]), anonfield(typs[22])}, []*ir.Field{anonfield(typs[28])})
typs[44] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
typs[45] = functype(nil, []*ir.Field{anonfield(typs[1]), anonfield(typs[15])}, []*ir.Field{anonfield(typs[28])})
- typs[46] = types.Runetype
+ typs[46] = types.RuneType
typs[47] = types.NewSlice(typs[46])
typs[48] = functype(nil, []*ir.Field{anonfield(typs[33]), anonfield(typs[47])}, []*ir.Field{anonfield(typs[28])})
typs[49] = types.NewSlice(typs[0])
}
// Nil is technically not a constant, so handle it specially.
- if n.Type().Etype == types.TNIL {
+ if n.Type().Kind() == types.TNIL {
if n.Op() != ir.ONIL {
base.Fatalf("unexpected op: %v (%v)", n, n.Op())
}
return n
}
- if t == nil || !ir.OKForConst[t.Etype] {
+ if t == nil || !ir.OKForConst[t.Kind()] {
t = defaultType(n.Type())
}
return complexForFloat(t)
}
default:
- if okfor[op][t.Etype] {
+ if okfor[op][t.Kind()] {
return t
}
}
}
case ir.OCONV, ir.ORUNESTR:
- if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL {
+ if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
return origConst(n, convertVal(nl.Val(), n.Type(), true))
}
case ir.OCONVNOP:
- if ir.OKForConst[n.Type().Etype] && nl.Op() == ir.OLITERAL {
+ if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
// set so n.Orig gets OCONV instead of OCONVNOP
n.SetOp(ir.OCONV)
return origConst(n, nl.Val())
return n
case ir.OCAP, ir.OLEN:
- switch nl.Type().Etype {
+ switch nl.Type().Kind() {
case types.TSTRING:
if ir.IsConst(nl, constant.String) {
return origIntConst(n, int64(len(nl.StringVal())))
}
func defaultType(t *types.Type) *types.Type {
- if !t.IsUntyped() || t.Etype == types.TNIL {
+ if !t.IsUntyped() || t.Kind() == types.TNIL {
return t
}
case types.UntypedInt:
return types.Types[types.TINT]
case types.UntypedRune:
- return types.Runetype
+ return types.RuneType
case types.UntypedFloat:
return types.Types[types.TFLOAT64]
case types.UntypedComplex:
if n.Op() != ir.OLITERAL {
return -1
}
- if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
return -1
}
typ := n.Type()
switch typ {
- case types.Bytetype:
+ case types.ByteType:
typ = types.Types[types.TUINT8]
- case types.Runetype:
+ case types.RuneType:
typ = types.Types[types.TINT32]
}
k := constSetKey{typ, ir.ConstValue(n)}
// This happens during import, where the hidden_fndcl rule has
// used functype directly to parse the function's type.
func funcargs2(t *types.Type) {
- if t.Etype != types.TFUNC {
+ if t.Kind() != types.TFUNC {
base.Fatalf("funcargs2 %v", t)
}
return
}
- if t.Sym == nil && t.IsPtr() {
+ if t.Sym() == nil && t.IsPtr() {
t = t.Elem()
if t.IsInterface() {
base.Errorf("embedded type cannot be a pointer to interface")
if t.IsPtr() || t.IsUnsafePtr() {
base.Errorf("embedded type cannot be a pointer")
- } else if t.Etype == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
+ } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
t.ForwardType().Embedlineno = base.Pos
}
}
base.Fatalf("blank method name")
}
- rsym := recv.Sym
+ rsym := recv.Sym()
if recv.IsPtr() {
if rsym != nil {
base.Fatalf("declared pointer receiver type: %v", recv)
}
- rsym = recv.Elem().Sym
+ rsym = recv.Elem().Sym()
}
// Find the package the receiver type appeared in. For
}
mt := methtype(rf.Type)
- if mt == nil || mt.Sym == nil {
+ if mt == nil || mt.Sym() == nil {
pa := rf.Type
t := pa
if t != nil && t.IsPtr() {
- if t.Sym != nil {
+ if t.Sym() != nil {
base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
return nil
}
switch {
case t == nil || t.Broke():
// rely on typecheck having complained before
- case t.Sym == nil:
+ case t.Sym() == nil:
base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
case t.IsPtr():
base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
return nil
}
- if local && mt.Sym.Pkg != ir.LocalPkg {
+ if local && mt.Sym().Pkg != ir.LocalPkg {
base.Errorf("cannot define new methods on non-local type %v", mt)
return nil
}
// embedKind determines the kind of embedding variable.
func embedKind(typ *types.Type) int {
- if typ.Sym != nil && typ.Sym.Name == "FS" && (typ.Sym.Pkg.Path == "embed" || (typ.Sym.Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
+ if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == ir.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
return embedFiles
}
if typ == types.Types[types.TSTRING] {
return embedString
}
- if typ.Sym == nil && typ.IsSlice() && typ.Elem() == types.Bytetype {
+ if typ.Sym() == nil && typ.IsSlice() && typ.Elem() == types.ByteType {
return embedBytes
}
return embedUnknown
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
func (e *Escape) unsafeValue(k EscHole, n ir.Node) {
- if n.Type().Etype != types.TUINTPTR {
+ if n.Type().Kind() != types.TUINTPTR {
base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
var zerosize int64
-var simtype [types.NTYPE]types.EType
+var simtype [types.NTYPE]types.Kind
var (
isInt [types.NTYPE]bool
w.tag('T')
w.pos(n.Pos())
- underlying := n.Type().Orig
- if underlying == types.Errortype.Orig {
+ underlying := n.Type().Underlying()
+ if underlying == types.ErrorType.Underlying() {
// For "type T error", use error as the
// underlying type instead of error's own
// underlying anonymous interface. This
// ensures consistency with how importers may
// declare error (e.g., go/types uses nil Pkg
// for predeclared objects).
- underlying = types.Errortype
+ underlying = types.ErrorType
}
w.typ(underlying)
}
func (w *exportWriter) doTyp(t *types.Type) {
- if t.Sym != nil {
- if t.Sym.Pkg == ir.BuiltinPkg || t.Sym.Pkg == unsafepkg {
+ if t.Sym() != nil {
+ if t.Sym().Pkg == ir.BuiltinPkg || t.Sym().Pkg == unsafepkg {
base.Fatalf("builtin type missing from typIndex: %v", t)
}
return
}
- switch t.Etype {
+ switch t.Kind() {
case types.TPTR:
w.startType(pointerType)
w.typ(t.Elem())
return constant.Complex
}
- switch typ.Etype {
+ switch typ.Kind() {
case types.TBOOL:
return constant.Bool
case types.TSTRING:
return true, Mpprec / 8
}
- switch typ.Etype {
+ switch typ.Kind() {
case types.TFLOAT32, types.TCOMPLEX64:
return true, 3
case types.TFLOAT64, types.TCOMPLEX128:
// The go/types API doesn't expose sizes to importers, so they
// don't know how big these types are.
- switch typ.Etype {
+ switch typ.Kind() {
case types.TINT, types.TUINT, types.TUINTPTR:
maxBytes = 8
}
if rcvr.IsPtr() {
rcvr = rcvr.Elem()
}
- if rcvr.Sym == nil {
+ if rcvr.Sym() == nil {
base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr)
}
- return rcvr.Sym.Pkg
+ return rcvr.Sym().Pkg
}
// non-method
case *ast.Ident:
switch t.Name {
case "byte":
- return "types.Bytetype"
+ return "types.ByteType"
case "rune":
- return "types.Runetype"
+ return "types.RuneType"
}
return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
case *ast.SelectorExpr:
if s.Pkg.Name != "main" {
continue
}
- if n.Type().Etype == types.TFUNC && n.Class() == ir.PFUNC {
+ if n.Type().Kind() == types.TFUNC && n.Class() == ir.PFUNC {
// function
ptabs = append(ptabs, ptabEntry{s: s, t: ir.AsNode(s.Def).Type()})
} else {
case constant.Float:
f, _ := constant.Float64Val(u)
- switch n.Type().Etype {
+ switch n.Type().Kind() {
case types.TFLOAT32:
s.WriteFloat32(base.Ctxt, n.Offset(), float32(f))
case types.TFLOAT64:
case constant.Complex:
re, _ := constant.Float64Val(constant.Real(u))
im, _ := constant.Float64Val(constant.Imag(u))
- switch n.Type().Etype {
+ switch n.Type().Kind() {
case types.TCOMPLEX64:
s.WriteFloat32(base.Ctxt, n.Offset(), float32(re))
s.WriteFloat32(base.Ctxt, n.Offset()+4, float32(im))
n.SetRight(o.expr(n.Right(), nil))
orderBody := true
- switch n.Type().Etype {
+ switch n.Type().Kind() {
default:
base.Fatalf("order.stmt range %v", n.Type())
return
}
- switch t.Etype {
+ switch t.Kind() {
case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
if off&int64(Widthptr-1) != 0 {
base.Fatalf("onebitwalktype1: invalid alignment, %v", t)
// to fully initialize t.
func isfat(t *types.Type) bool {
if t != nil {
- switch t.Etype {
+ switch t.Kind() {
case types.TSLICE, types.TSTRING,
types.TINTER: // maybe remove later
return true
var t1, t2 *types.Type
toomany := false
- switch t.Etype {
+ switch t.Kind() {
default:
base.ErrorfAt(n.Pos(), "cannot range over %L", n.Right())
return
case types.TSTRING:
t1 = types.Types[types.TINT]
- t2 = types.Runetype
+ t2 = types.RuneType
}
if n.List().Len() > 2 || toomany {
var body []ir.Node
var init []ir.Node
- switch t.Etype {
+ switch t.Kind() {
default:
base.Fatalf("walkrange")
hv1 := temp(types.Types[types.TINT])
hv1t := temp(types.Types[types.TINT])
- hv2 := temp(types.Runetype)
+ hv2 := temp(types.RuneType)
// hv1 := 0
init = append(init, ir.Nod(ir.OAS, hv1, nil))
// hv2 := rune(ha[hv1])
nind := ir.Nod(ir.OINDEX, ha, hv1)
nind.SetBounded(true)
- body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.Runetype)))
+ body = append(body, ir.Nod(ir.OAS, hv2, conv(nind, types.RuneType)))
// if hv2 < utf8.RuneSelf
nif := ir.Nod(ir.OIF, nil, nil)
return false
}
- if n.Op() != ir.ORANGE || n.Type().Etype != types.TMAP || n.List().Len() != 1 {
+ if n.Op() != ir.ORANGE || n.Type().Kind() != types.TMAP || n.List().Len() != 1 {
return false
}
func commonSize() int { return 4*Widthptr + 8 + 8 } // Sizeof(runtime._type{})
func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
- if t.Sym == nil && len(methods(t)) == 0 {
+ if t.Sym() == nil && len(methods(t)) == 0 {
return 0
}
return 4 + 2 + 2 + 4 + 4
func imethods(t *types.Type) []*Sig {
var methods []*Sig
for _, f := range t.Fields().Slice() {
- if f.Type.Etype != types.TFUNC || f.Sym == nil {
+ if f.Type.Kind() != types.TFUNC || f.Sym == nil {
continue
}
if f.Sym.IsBlank() {
// backing array of the []method field is written (by dextratypeData).
func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
m := methods(t)
- if t.Sym == nil && len(m) == 0 {
+ if t.Sym() == nil && len(m) == 0 {
return ot
}
noff := int(Rnd(int64(ot), int64(Widthptr)))
}
func typePkg(t *types.Type) *types.Pkg {
- tsym := t.Sym
+ tsym := t.Sym()
if tsym == nil {
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN:
if t.Elem() != nil {
- tsym = t.Elem().Sym
+ tsym = t.Elem().Sym()
}
}
}
- if tsym != nil && t != types.Types[t.Etype] && t != types.Errortype {
+ if tsym != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
return tsym.Pkg
}
return nil
return 0
}
- switch t.Etype {
+ switch t.Kind() {
case types.TPTR,
types.TUNSAFEPTR,
types.TFUNC,
var sptr *obj.LSym
if !t.IsPtr() || t.IsPtrElem() {
tptr := types.NewPtr(t)
- if t.Sym != nil || methods(tptr) != nil {
+ if t.Sym() != nil || methods(tptr) != nil {
sptrWeak = false
}
sptr = dtypesym(tptr)
if uncommonSize(t) != 0 {
tflag |= tflagUncommon
}
- if t.Sym != nil && t.Sym.Name != "" {
+ if t.Sym() != nil && t.Sym().Name != "" {
tflag |= tflagNamed
}
if IsRegularMemory(t) {
if !strings.HasPrefix(p, "*") {
p = "*" + p
tflag |= tflagExtraStar
- if t.Sym != nil {
- exported = types.IsExported(t.Sym.Name)
+ if t.Sym() != nil {
+ exported = types.IsExported(t.Sym().Name)
}
} else {
- if t.Elem() != nil && t.Elem().Sym != nil {
- exported = types.IsExported(t.Elem().Sym.Name)
+ if t.Elem() != nil && t.Elem().Sym() != nil {
+ exported = types.IsExported(t.Elem().Sym().Name)
}
}
ot = duint8(lsym, ot, t.Align) // align
ot = duint8(lsym, ot, t.Align) // fieldAlign
- i = kinds[t.Etype]
+ i = kinds[t.Kind()]
if isdirectiface(t) {
i |= objabi.KindDirectIface
}
// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *types.Type) bool {
- switch t.Etype {
+ switch t.Kind() {
case types.TBOOL,
types.TINT,
types.TUINT,
// needkeyupdate reports whether map updates with t as a key
// need the key to be updated.
func needkeyupdate(t *types.Type) bool {
- switch t.Etype {
+ switch t.Kind() {
case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32,
types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
return false
// hashMightPanic reports whether the hash of a map key of type t might panic.
func hashMightPanic(t *types.Type) bool {
- switch t.Etype {
+ switch t.Kind() {
case types.TINTER:
return true
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
- if t == types.Bytetype || t == types.Runetype {
- return types.Types[t.Etype]
+ if t == types.ByteType || t == types.RuneType {
+ return types.Types[t.Kind()]
}
return t
}
// emit the type structures for int, float, etc.
tbase := t
- if t.IsPtr() && t.Sym == nil && t.Elem().Sym != nil {
+ if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
tbase = t.Elem()
}
dupok := 0
- if tbase.Sym == nil {
+ if tbase.Sym() == nil {
dupok = obj.DUPOK
}
- if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Etype] && tbase != types.Bytetype && tbase != types.Runetype && tbase != types.Errortype) { // int, float, etc
+ if base.Ctxt.Pkgpath != "runtime" || (tbase != types.Types[tbase.Kind()] && tbase != types.ByteType && tbase != types.RuneType && tbase != types.ErrorType) { // int, float, etc
// named types from other files are defined only by those files
- if tbase.Sym != nil && tbase.Sym.Pkg != ir.LocalPkg {
+ if tbase.Sym() != nil && tbase.Sym().Pkg != ir.LocalPkg {
if i, ok := typeSymIdx[tbase]; ok {
- lsym.Pkg = tbase.Sym.Pkg.Prefix
+ lsym.Pkg = tbase.Sym().Pkg.Prefix
if t != tbase {
lsym.SymIdx = int32(i[1])
} else {
return lsym
}
// TODO(mdempsky): Investigate whether this can happen.
- if tbase.Etype == types.TFORW {
+ if tbase.Kind() == types.TFORW {
return lsym
}
}
ot := 0
- switch t.Etype {
+ switch t.Kind() {
default:
ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
ot = dcommontype(lsym, t)
var tpkg *types.Pkg
- if t.Sym != nil && t != types.Types[t.Etype] && t != types.Errortype {
- tpkg = t.Sym.Pkg
+ if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
+ tpkg = t.Sym().Pkg
}
ot = dgopkgpath(lsym, ot, tpkg)
ot = dextratype(lsym, ot, t, 0)
case types.TPTR:
- if t.Elem().Etype == types.TANY {
+ if t.Elem().Kind() == types.TANY {
// ../../../../runtime/type.go:/UnsafePointerType
ot = dcommontype(lsym, t)
ot = dextratype(lsym, ot, t, 0)
// When buildmode=shared, all types are in typelinks so the
// runtime can deduplicate type pointers.
keep := base.Ctxt.Flag_dynlink
- if !keep && t.Sym == nil {
+ if !keep && t.Sym() == nil {
// For an unnamed type, we only need the link if the type can
// be created at run time by reflect.PtrTo and similar
// functions. If the type exists in the program, those
// functions must return the existing type structure rather
// than creating a new one.
- switch t.Etype {
+ switch t.Kind() {
case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT:
keep = true
}
for _, ts := range signats {
t := ts.t
dtypesym(t)
- if t.Sym != nil {
+ if t.Sym() != nil {
dtypesym(types.NewPtr(t))
}
}
// another possible choice would be package main,
// but using runtime means fewer copies in object files.
if base.Ctxt.Pkgpath == "runtime" {
- for i := types.EType(1); i <= types.TBOOL; i++ {
+ for i := types.Kind(1); i <= types.TBOOL; i++ {
dtypesym(types.NewPtr(types.Types[i]))
}
dtypesym(types.NewPtr(types.Types[types.TSTRING]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
- dtypesym(types.NewPtr(types.Errortype))
+ dtypesym(types.NewPtr(types.ErrorType))
- dtypesym(functype(nil, []*ir.Field{anonfield(types.Errortype)}, []*ir.Field{anonfield(types.Types[types.TSTRING])}))
+ dtypesym(functype(nil, []*ir.Field{anonfield(types.ErrorType)}, []*ir.Field{anonfield(types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(Runtimepkg)
// will be equal for the above checks, but different in DWARF output.
// Sort by source position to ensure deterministic order.
// See issues 27013 and 30202.
- if a[i].t.Etype == types.TINTER && a[i].t.Methods().Len() > 0 {
+ if a[i].t.Kind() == types.TINTER && a[i].t.Methods().Len() > 0 {
return a[i].t.Methods().Index(0).Pos.Before(a[j].t.Methods().Index(0).Pos)
}
return false
p.w.Ptr(offset / int64(Widthptr))
return
}
- switch t.Etype {
+ switch t.Kind() {
default:
base.Fatalf("GCProg.emit: unexpected type %v", t)
_ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
_ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
_ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
- _ = types.NewPtr(types.NewPtr(types.Bytetype)) // **byte
- _ = types.NewPtr(types.NewSlice(types.Bytetype)) // *[]byte
+ _ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
_ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
_ = types.NewPtr(types.Types[types.TINT16]) // *int16
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
- _ = types.NewPtr(types.Errortype) // *error
+ _ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
type opAndType struct {
op ir.Op
- etype types.EType
+ etype types.Kind
}
var opToSSA = map[opAndType]ssa.Op{
opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
}
-func (s *state) concreteEtype(t *types.Type) types.EType {
- e := t.Etype
+func (s *state) concreteEtype(t *types.Type) types.Kind {
+ e := t.Kind()
switch e {
default:
return e
}
func floatForComplex(t *types.Type) *types.Type {
- switch t.Etype {
+ switch t.Kind() {
case types.TCOMPLEX64:
return types.Types[types.TFLOAT32]
case types.TCOMPLEX128:
}
func complexForFloat(t *types.Type) *types.Type {
- switch t.Etype {
+ switch t.Kind() {
case types.TFLOAT32:
return types.Types[types.TCOMPLEX64]
case types.TFLOAT64:
type opAndTwoTypes struct {
op ir.Op
- etype1 types.EType
- etype2 types.EType
+ etype1 types.Kind
+ etype2 types.Kind
}
type twoTypes struct {
- etype1 types.EType
- etype2 types.EType
+ etype1 types.Kind
+ etype2 types.Kind
}
type twoOpsAndType struct {
op1 ssa.Op
op2 ssa.Op
- intermediateType types.EType
+ intermediateType types.Kind
}
var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
// CONVNOP closure
- if to.Etype == types.TFUNC && from.IsPtrShaped() {
+ if to.Kind() == types.TFUNC && from.IsPtrShaped() {
return v
}
// named <--> unnamed type or typed <--> untyped const
- if from.Etype == to.Etype {
+ if from.Kind() == to.Kind() {
return v
}
}
// map <--> *hmap
- if to.Etype == types.TMAP && from.IsPtr() &&
+ if to.Kind() == types.TMAP && from.IsPtr() &&
to.MapType().Hmap == from.Elem() {
return v
}
s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
return nil
}
- if etypesign(from.Etype) != etypesign(to.Etype) {
- s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
+ if etypesign(from.Kind()) != etypesign(to.Kind()) {
+ s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
return nil
}
return v
}
- if etypesign(from.Etype) == 0 {
+ if etypesign(from.Kind()) == 0 {
s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
return nil
}
s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
}
- s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Etype, n.Type().Etype)
+ s.Fatalf("unhandled OCONV %s -> %s", n.Left().Type().Kind(), n.Type().Kind())
return nil
case ir.ODOTTYPE:
type sfRtCallDef struct {
rtfn *obj.LSym
- rtype types.EType
+ rtype types.Kind
}
var softFloatOps map[ssa.Op]sfRtCallDef
},
sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
- type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType)
+ type atomicOpEmitter func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind)
- makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.EType, emit atomicOpEmitter) intrinsicBuilder {
+ makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
return func(s *state, n ir.Node, args []*ssa.Value) *ssa.Value {
// Target Atomic feature is identified by dynamic detection
}
}
- atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicXchgXaddEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
},
sys.PPC64)
- atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicCasEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) {
v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
},
sys.AMD64, sys.MIPS, sys.PPC64, sys.S390X)
- atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.EType) {
+ atomicAndOrEmitterARM64 := func(s *state, n ir.Node, args []*ssa.Value, op ssa.Op, typ types.Kind) {
s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
-func etypesign(e types.EType) int8 {
+func etypesign(e types.Kind) int8 {
switch e {
case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
return -1
// Too big and we'll introduce too much register pressure.
return false
}
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
// We can't do larger arrays because dynamic indexing is
// not supported on SSA variables.
return ir.NewLiteral(constant.MakeString(s))
}
-func isptrto(t *types.Type, et types.EType) bool {
+func isptrto(t *types.Type, et types.Kind) bool {
if t == nil {
return false
}
if t == nil {
return false
}
- if t.Etype != et {
+ if t.Kind() != et {
return false
}
return true
// Strip away pointer if it's there.
if t.IsPtr() {
- if t.Sym != nil {
+ if t.Sym() != nil {
return nil
}
t = t.Elem()
}
// Must be a named type or anonymous struct.
- if t.Sym == nil && !t.IsStruct() {
+ if t.Sym() == nil && !t.IsStruct() {
return nil
}
// Check types.
- if issimple[t.Etype] {
+ if issimple[t.Kind()] {
return t
}
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRING, types.TSTRUCT:
return t
}
if src == dst {
return ir.OCONVNOP, ""
}
- if src == nil || dst == nil || src.Etype == types.TFORW || dst.Etype == types.TFORW || src.Orig == nil || dst.Orig == nil {
+ if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
return ir.OXXX, ""
}
// we want to recompute the itab. Recomputing the itab ensures
// that itabs are unique (thus an interface with a compile-time
// type I has an itab with interface type I).
- if types.Identical(src.Orig, dst.Orig) {
+ if types.Identical(src.Underlying(), dst.Underlying()) {
if src.IsEmptyInterface() {
// Conversion between two empty interfaces
// requires no code.
return ir.OCONVNOP, ""
}
- if (src.Sym == nil || dst.Sym == nil) && !src.IsInterface() {
+ if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
// Conversion between two types, at least one unnamed,
// needs no conversion. The exception is nonempty interfaces
// which need to have their itab updated.
}
// 3. dst is an interface type and src implements dst.
- if dst.IsInterface() && src.Etype != types.TNIL {
+ if dst.IsInterface() && src.Kind() != types.TNIL {
var missing, have *types.Field
var ptr int
if implements(src, dst, &missing, &have, &ptr) {
return ir.OXXX, why
}
- if src.IsInterface() && dst.Etype != types.TBLANK {
+ if src.IsInterface() && dst.Kind() != types.TBLANK {
var missing, have *types.Field
var ptr int
var why string
// src and dst have identical element types, and
// either src or dst is not a named type.
if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
- if types.Identical(src.Elem(), dst.Elem()) && (src.Sym == nil || dst.Sym == nil) {
+ if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
return ir.OCONVNOP, ""
}
}
// 5. src is the predeclared identifier nil and dst is a nillable type.
- if src.Etype == types.TNIL {
- switch dst.Etype {
+ if src.Kind() == types.TNIL {
+ switch dst.Kind() {
case types.TPTR,
types.TFUNC,
types.TMAP,
// 6. rule about untyped constants - already converted by defaultlit.
// 7. Any typed value can be assigned to the blank identifier.
- if dst.Etype == types.TBLANK {
+ if dst.Kind() == types.TBLANK {
return ir.OCONVNOP, ""
}
return ir.OXXX, why
}
// (b) Disallow string to []T where T is go:notinheap.
- if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Etype == types.Bytetype.Etype || dst.Elem().Etype == types.Runetype.Etype) {
+ if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
return ir.OXXX, why
}
}
// 2. Ignoring struct tags, src and dst have identical underlying types.
- if types.IdenticalIgnoreTags(src.Orig, dst.Orig) {
+ if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
return ir.OCONVNOP, ""
}
// 3. src and dst are unnamed pointer types and, ignoring struct tags,
// their base types have identical underlying types.
- if src.IsPtr() && dst.IsPtr() && src.Sym == nil && dst.Sym == nil {
- if types.IdenticalIgnoreTags(src.Elem().Orig, dst.Elem().Orig) {
+ if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
+ if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
return ir.OCONVNOP, ""
}
}
// 4. src and dst are both integer or floating point types.
if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
- if simtype[src.Etype] == simtype[dst.Etype] {
+ if simtype[src.Kind()] == simtype[dst.Kind()] {
return ir.OCONVNOP, ""
}
return ir.OCONV, ""
// 5. src and dst are both complex types.
if src.IsComplex() && dst.IsComplex() {
- if simtype[src.Etype] == simtype[dst.Etype] {
+ if simtype[src.Kind()] == simtype[dst.Kind()] {
return ir.OCONVNOP, ""
}
return ir.OCONV, ""
}
if src.IsSlice() && dst.IsString() {
- if src.Elem().Etype == types.Bytetype.Etype {
+ if src.Elem().Kind() == types.ByteType.Kind() {
return ir.OBYTES2STR, ""
}
- if src.Elem().Etype == types.Runetype.Etype {
+ if src.Elem().Kind() == types.RuneType.Kind() {
return ir.ORUNES2STR, ""
}
}
// 7. src is a string and dst is []byte or []rune.
// String to slice.
if src.IsString() && dst.IsSlice() {
- if dst.Elem().Etype == types.Bytetype.Etype {
+ if dst.Elem().Kind() == types.ByteType.Kind() {
return ir.OSTR2BYTES, ""
}
- if dst.Elem().Etype == types.Runetype.Etype {
+ if dst.Elem().Kind() == types.RuneType.Kind() {
return ir.OSTR2RUNES, ""
}
}
// src is map and dst is a pointer to corresponding hmap.
// This rule is needed for the implementation detail that
// go gc maps are implemented as a pointer to a hmap struct.
- if src.Etype == types.TMAP && dst.IsPtr() &&
+ if src.Kind() == types.TMAP && dst.IsPtr() &&
src.MapType().Hmap == dst.Elem() {
return ir.OCONVNOP, ""
}
return n
}
- if t.Etype == types.TBLANK && n.Type().Etype == types.TNIL {
+ if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
base.Errorf("use of untyped nil")
}
if n.Type() == nil {
return n
}
- if t.Etype == types.TBLANK {
+ if t.Kind() == types.TBLANK {
return n
}
// When using soft-float, these ops might be rewritten to function calls
// so we ensure they are evaluated first.
case ir.OADD, ir.OSUB, ir.ONEG, ir.OMUL:
- if thearch.SoftFloat && (isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) {
+ if thearch.SoftFloat && (isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) {
return true
}
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
- if thearch.SoftFloat && (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype]) {
+ if thearch.SoftFloat && (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()]) {
return true
}
case ir.OCONV:
- if thearch.SoftFloat && ((isFloat[n.Type().Etype] || isComplex[n.Type().Etype]) || (isFloat[n.Left().Type().Etype] || isComplex[n.Left().Type().Etype])) {
+ if thearch.SoftFloat && ((isFloat[n.Type().Kind()] || isComplex[n.Type().Kind()]) || (isFloat[n.Left().Type().Kind()] || isComplex[n.Left().Type().Kind()])) {
return true
}
}
}
u = t
- if t.Sym != nil && t.IsPtr() && !t.Elem().IsPtr() {
+ if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
// If t is a defined pointer type, then x.m is shorthand for (*x).m.
u = t.Elem()
}
// Only generate (*T).M wrappers for T.M in T's own package.
if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym != nil && rcvr.Elem().Sym.Pkg != ir.LocalPkg {
+ rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != ir.LocalPkg {
return
}
// Only generate I.M wrappers for I in I's own package
// but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym != nil && rcvr.Sym.Pkg != ir.LocalPkg && rcvr != types.Errortype {
+ if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != ir.LocalPkg && rcvr != types.ErrorType {
return
}
// Inline calls within (*T).M wrappers. This is safe because we only
// generate those wrappers within the same compilation unit as (T).M.
// TODO(mdempsky): Investigate why we can't enable this more generally.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym != nil {
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
inlcalls(fn)
}
escapeFuncs([]*ir.Func{fn}, false)
return false
}
- switch t.Etype {
+ switch t.Kind() {
case types.TPTR:
// Pointers to notinheap types must be stored indirectly. See issue 42076.
return !t.Elem().NotInHeap()
switch {
case t.IsMap():
nilonly = "map"
- case t.Etype == types.TFUNC:
+ case t.Kind() == types.TFUNC:
nilonly = "func"
case t.IsSlice():
nilonly = "slice"
func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
- if okforcmp[s.exprname.Type().Etype] && expr.Op() == ir.OLITERAL {
+ if okforcmp[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
s.clauses = append(s.clauses, c)
return
}
if t.IsUntyped() {
return fmt.Sprintf("%v", t)
}
- et := t.Etype
+ et := t.Kind()
if int(et) < len(_typekind) {
s := _typekind[et]
if s != "" {
// The result of indexlit MUST be assigned back to n, e.g.
// n.Left = indexlit(n.Left)
func indexlit(n ir.Node) ir.Node {
- if n != nil && n.Type() != nil && n.Type().Etype == types.TIDEAL {
+ if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
return defaultlit(n, types.Types[types.TINT])
}
return n
n.SetType(nil)
return n
}
- if n.Implicit() && !okforarith[l.Type().Etype] {
+ if n.Implicit() && !okforarith[l.Type().Kind()] {
base.Errorf("invalid operation: %v (non-numeric type %v)", n, l.Type())
n.SetType(nil)
return n
return n
}
t = l.Type()
- if t != nil && t.Etype != types.TIDEAL && !t.IsInteger() {
+ if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() {
base.Errorf("invalid operation: %v (shift of type %v)", n, t)
n.SetType(nil)
return n
return n
}
t := l.Type()
- if t.Etype == types.TIDEAL {
+ if t.Kind() == types.TIDEAL {
t = r.Type()
}
- et := t.Etype
+ et := t.Kind()
if et == types.TIDEAL {
et = types.TINT
}
aop := ir.OXXX
- if iscmp[n.Op()] && t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ if iscmp[n.Op()] && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
// comparison is okay as long as one side is
// assignable to the other. convert so they have
// the same type.
// in that case, check comparability of the concrete type.
// The conversion allocates, so only do it if the concrete type is huge.
converted := false
- if r.Type().Etype != types.TBLANK {
+ if r.Type().Kind() != types.TBLANK {
aop, _ = assignop(l.Type(), r.Type())
if aop != ir.OXXX {
if r.Type().IsInterface() && !l.Type().IsInterface() && !IsComparable(l.Type()) {
}
}
- if !converted && l.Type().Etype != types.TBLANK {
+ if !converted && l.Type().Kind() != types.TBLANK {
aop, _ = assignop(r.Type(), l.Type())
if aop != ir.OXXX {
if l.Type().IsInterface() && !r.Type().IsInterface() && !IsComparable(r.Type()) {
}
}
- et = t.Etype
+ et = t.Kind()
}
- if t.Etype != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
l, r = defaultlit2(l, r, true)
if l.Type() == nil || r.Type() == nil {
n.SetType(nil)
}
}
- if t.Etype == types.TIDEAL {
+ if t.Kind() == types.TIDEAL {
t = mixUntyped(l.Type(), r.Type())
}
- if dt := defaultType(t); !okfor[op][dt.Etype] {
+ if dt := defaultType(t); !okfor[op][dt.Kind()] {
base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
n.SetType(nil)
return n
return n
}
- if l.Type().Etype == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+ if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
n.SetType(nil)
return n
n.SetType(nil)
return n
}
- if !okfor[n.Op()][defaultType(t).Etype] {
+ if !okfor[n.Op()][defaultType(t).Kind()] {
base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
n.SetType(nil)
return n
n.SetType(nil)
return n
}
- switch t.Etype {
+ switch t.Kind() {
default:
base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
n.SetType(nil)
case types.TSTRING, types.TARRAY, types.TSLICE:
n.SetRight(indexlit(n.Right()))
if t.IsString() {
- n.SetType(types.Bytetype)
+ n.SetType(types.ByteType)
} else {
n.SetType(t.Elem())
}
n.SetLeft(defaultlit(n.Left(), types.Types[types.TINT]))
- if !n.Left().Type().IsInteger() && n.Type().Etype != types.TIDEAL {
+ if !n.Left().Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
base.Errorf("non-integer len argument in OMAKESLICECOPY")
}
default:
n.SetOp(ir.OCALLFUNC)
- if t.Etype != types.TFUNC {
+ if t.Kind() != types.TFUNC {
name := l.String()
if isBuiltinFuncName(name) && l.Name().Defn != nil {
// be more specific when the function
var ok bool
if n.Op() == ir.OLEN {
- ok = okforlen[t.Etype]
+ ok = okforlen[t.Kind()]
} else {
- ok = okforcap[t.Etype]
+ ok = okforcap[t.Kind()]
}
if !ok {
base.Errorf("invalid argument %L for %v", l, n.Op())
}
// Determine result type.
- switch t.Etype {
+ switch t.Kind() {
case types.TIDEAL:
n.SetType(types.UntypedFloat)
case types.TCOMPLEX64:
}
var t *types.Type
- switch l.Type().Etype {
+ switch l.Type().Kind() {
default:
base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
n.SetType(nil)
break
}
- args.SetSecond(assignconv(args.Second(), t.Orig, "append"))
+ args.SetSecond(assignconv(args.Second(), t.Underlying(), "append"))
break
}
// copy([]byte, string)
if n.Left().Type().IsSlice() && n.Right().Type().IsString() {
- if types.Identical(n.Left().Type().Elem(), types.Bytetype) {
+ if types.Identical(n.Left().Type().Elem(), types.ByteType) {
break
}
base.Errorf("arguments to copy have different element types: %L and string", n.Left().Type())
n.SetOp(op)
switch n.Op() {
case ir.OCONVNOP:
- if t.Etype == n.Type().Etype {
- switch t.Etype {
+ if t.Kind() == n.Type().Kind() {
+ switch t.Kind() {
case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
// Floating point casts imply rounding and
// so the conversion must be kept.
i := 1
var nn ir.Node
- switch t.Etype {
+ switch t.Kind() {
default:
base.Errorf("cannot make type %v", t)
n.SetType(nil)
t := n.Type()
if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
- switch t.Etype {
+ switch t.Kind() {
case types.TFUNC, // might have TANY; wait until it's called
types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
break
// types declared at package scope. However, we need
// to make sure to generate wrappers for anonymous
// receiver types too.
- if mt.Sym == nil {
+ if mt.Sym() == nil {
addsignat(t)
}
}
me.SetOpt(m)
// Issue 25065. Make sure that we emit the symbol for a local method.
- if base.Ctxt.Flag_dynlink && !inimport && (t.Sym == nil || t.Sym.Pkg == ir.LocalPkg) {
+ if base.Ctxt.Flag_dynlink && !inimport && (t.Sym() == nil || t.Sym().Pkg == ir.LocalPkg) {
makefuncsym(me.Sym())
}
}
var f2 *types.Field
- if n.Left().Type() == t || n.Left().Type().Sym == nil {
+ if n.Left().Type() == t || n.Left().Type().Sym() == nil {
mt := methtype(t)
if mt != nil {
f2 = lookdot1(n, s, mt, mt.Methods(), dostrcmp)
pll = ll
ll = ll.Left()
}
- if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym != nil && ir.AsNode(ll.Type().Sym.Def) != nil && ir.AsNode(ll.Type().Sym.Def).Op() == ir.OTYPE {
+ if pll.Implicit() && ll.Type().IsPtr() && ll.Type().Sym() != nil && ir.AsNode(ll.Type().Sym().Def) != nil && ir.AsNode(ll.Type().Sym().Def).Op() == ir.OTYPE {
// It is invalid to automatically dereference a named pointer type when selecting a method.
// Make n.Left == ll to clarify error message.
n.SetLeft(ll)
return "bool"
}
- if t.Etype == types.TIDEAL {
+ if t.Kind() == types.TIDEAL {
// "untyped number" is not commonly used
// outside of the compiler, so let's use "number".
// TODO(mdempsky): Revisit this.
// iscomptype reports whether type t is a composite literal type.
func iscomptype(t *types.Type) bool {
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP:
return true
default:
}
n.SetType(t)
- switch t.Etype {
+ switch t.Kind() {
default:
base.Errorf("invalid composite literal type %v", t)
n.SetType(nil)
case ir.OCONV:
// Some conversions can't be reused, such as []byte(str).
// Allow only numeric-ish types. This is a bit conservative.
- return issimple[l.Type().Etype] && samesafeexpr(l.Left(), r.Left())
+ return issimple[l.Type().Kind()] && samesafeexpr(l.Left(), r.Left())
case ir.OINDEX, ir.OINDEXMAP,
ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
n.SetDiag(true)
n.SetType(nil)
}
- if t.Etype == types.TFORW && base.Errors() > errorsBefore {
+ if t.Kind() == types.TFORW && base.Errors() > errorsBefore {
// Something went wrong during type-checking,
// but it was reported. Silence future errors.
t.SetBroke(true)
t := n.Type()
if t != nil {
- if !ir.OKForConst[t.Etype] {
+ if !ir.OKForConst[t.Kind()] {
base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
goto ret
}
func checkmake(t *types.Type, arg string, np *ir.Node) bool {
n := *np
- if !n.Type().IsInteger() && n.Type().Etype != types.TIDEAL {
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
return false
}
var basicTypes = [...]struct {
name string
- etype types.EType
+ etype types.Kind
}{
{"int8", types.TINT8},
{"int16", types.TINT16},
var typedefs = [...]struct {
name string
- etype types.EType
- sameas32 types.EType
- sameas64 types.EType
+ etype types.Kind
+ sameas32 types.Kind
+ sameas64 types.Kind
}{
{"int", types.TINT, types.TINT32, types.TINT64},
{"uint", types.TUINT, types.TUINT32, types.TUINT64},
// string is same as slice wo the cap
sizeofString = Rnd(sliceLenOffset+int64(Widthptr), int64(Widthptr))
- for et := types.EType(0); et < types.NTYPE; et++ {
+ for et := types.Kind(0); et < types.NTYPE; et++ {
simtype[et] = et
}
types.Types[types.TANY] = types.New(types.TANY)
types.Types[types.TINTER] = types.New(types.TINTER) // empty interface
- defBasic := func(kind types.EType, pkg *types.Pkg, name string) *types.Type {
+ defBasic := func(kind types.Kind, pkg *types.Pkg, name string) *types.Type {
sym := pkg.Lookup(name)
n := ir.NewNameAt(src.NoXPos, sym)
n.SetOp(ir.OTYPE)
// of less informative error messages involving bytes and runes)?
// (Alternatively, we could introduce an OTALIAS node representing
// type aliases, albeit at the cost of having to deal with it everywhere).
- types.Bytetype = defBasic(types.TUINT8, ir.BuiltinPkg, "byte")
- types.Runetype = defBasic(types.TINT32, ir.BuiltinPkg, "rune")
+ types.ByteType = defBasic(types.TUINT8, ir.BuiltinPkg, "byte")
+ types.RuneType = defBasic(types.TINT32, ir.BuiltinPkg, "rune")
// error type
s := ir.BuiltinPkg.Lookup("error")
n := ir.NewNameAt(src.NoXPos, s)
n.SetOp(ir.OTYPE)
- types.Errortype = types.NewNamed(n)
- types.Errortype.SetUnderlying(makeErrorInterface())
- n.SetType(types.Errortype)
+ types.ErrorType = types.NewNamed(n)
+ types.ErrorType.SetUnderlying(makeErrorInterface())
+ n.SetType(types.ErrorType)
s.Def = n
- dowidth(types.Errortype)
+ dowidth(types.ErrorType)
types.Types[types.TUNSAFEPTR] = defBasic(types.TUNSAFEPTR, unsafepkg, "Pointer")
isComplex[types.TCOMPLEX128] = true
// initialize okfor
- for et := types.EType(0); et < types.NTYPE; et++ {
+ for et := types.Kind(0); et < types.NTYPE; et++ {
if isInt[et] || et == types.TIDEAL {
okforeq[et] = true
okforcmp[et] = true
// Eagerly checkwidth all expressions for the back end.
if n.Type() != nil && !n.Type().WidthCalculated() {
- switch n.Type().Etype {
+ switch n.Type().Kind() {
case types.TBLANK, types.TNIL, types.TIDEAL:
default:
checkwidth(n.Type())
n.SetRight(walkexpr(n.Right(), init))
// rewrite complex div into function call.
- et := n.Left().Type().Etype
+ et := n.Left().Type().Kind()
if isComplex[et] && n.Op() == ir.ODIV {
t := n.Type()
// name can be derived from the names of the returned types.
//
// If no such function is necessary, it returns (Txxx, Txxx).
-func rtconvfn(src, dst *types.Type) (param, result types.EType) {
+func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
if thearch.SoftFloat {
return types.Txxx, types.Txxx
}
switch thearch.LinkArch.Family {
case sys.ARM, sys.MIPS:
if src.IsFloat() {
- switch dst.Etype {
+ switch dst.Kind() {
case types.TINT64, types.TUINT64:
- return types.TFLOAT64, dst.Etype
+ return types.TFLOAT64, dst.Kind()
}
}
if dst.IsFloat() {
- switch src.Etype {
+ switch src.Kind() {
case types.TINT64, types.TUINT64:
- return src.Etype, types.TFLOAT64
+ return src.Kind(), types.TFLOAT64
}
}
case sys.I386:
if src.IsFloat() {
- switch dst.Etype {
+ switch dst.Kind() {
case types.TINT64, types.TUINT64:
- return types.TFLOAT64, dst.Etype
+ return types.TFLOAT64, dst.Kind()
case types.TUINT32, types.TUINT, types.TUINTPTR:
return types.TFLOAT64, types.TUINT32
}
}
if dst.IsFloat() {
- switch src.Etype {
+ switch src.Kind() {
case types.TINT64, types.TUINT64:
- return src.Etype, types.TFLOAT64
+ return src.Kind(), types.TFLOAT64
case types.TUINT32, types.TUINT, types.TUINTPTR:
return types.TUINT32, types.TFLOAT64
}
for i, n := range nn.List().Slice() {
if n.Op() == ir.OLITERAL {
if n.Type() == types.UntypedRune {
- n = defaultlit(n, types.Runetype)
+ n = defaultlit(n, types.RuneType)
}
switch n.Val().Kind() {
}
}
- if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Etype == types.TIDEAL {
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
n = defaultlit(n, types.Types[types.TINT64])
}
n = defaultlit(n, nil)
nn.List().SetIndex(i, n)
- if n.Type() == nil || n.Type().Etype == types.TFORW {
+ if n.Type() == nil || n.Type().Kind() == types.TFORW {
continue
}
var on ir.Node
- switch n.Type().Etype {
+ switch n.Type().Kind() {
case types.TINTER:
if n.Type().IsEmptyInterface() {
on = syslook("printeface")
on = syslook("printslice")
on = substArgTypes(on, n.Type()) // any-1
case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
- if isRuntimePkg(n.Type().Sym.Pkg) && n.Type().Sym.Name == "hex" {
+ if isRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
on = syslook("printhex")
} else {
on = syslook("printuint")
var tsym *types.Sym
switch l.Op() {
case ir.ODOT:
- tsym = l.Left().Type().Sym
+ tsym = l.Left().Type().Sym()
case ir.ODOTPTR:
- tsym = l.Left().Type().Elem().Sym
+ tsym = l.Left().Type().Elem().Sym()
default:
return false
}
}
func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) ir.Node {
- if fn.Type() == nil || fn.Type().Etype != types.TFUNC {
+ if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
base.Fatalf("mkcall %v %v", fn, fn.Type())
}
maxcmpsize = 2 * int64(thearch.LinkArch.RegSize)
}
- switch t.Etype {
+ switch t.Kind() {
default:
if base.Debug.Libfuzzer != 0 && t.IsInteger() {
n.SetLeft(cheapexpr(n.Left(), init))
return n
case types.TARRAY:
// We can compare several elements at once with 2/4/8 byte integer compares
- inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
+ inline = t.NumElem() <= 1 || (issimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
case types.TSTRUCT:
inline = t.NumComponents(types.IgnoreBlankFields) <= 4
}
}
if res1 == nil {
- if p0.Type.Etype != types.TINT {
+ if p0.Type.Kind() != types.TINT {
return
}
} else {
// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
// (including global variables such as numImports - was issue #19028).
// Also need to check for reflect package itself (see Issue #38515).
- if s := res0.Type.Sym; s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
+ if s := res0.Type.Sym(); s != nil && s.Name == "Method" && isReflectPkg(s.Pkg) {
Curfn.SetReflectMethod(true)
// The LSym is initialized at this point. We need to set the attribute on the LSym.
Curfn.LSym.Set(obj.AttrReflectMethod, true)
if outer.IsPtr() {
outer = outer.Elem()
}
- if outer.Sym == nil {
+ if outer.Sym() == nil {
base.Errorf("tracked field must be in named struct type")
}
if !types.IsExported(field.Sym.Name) {
args[i] = &fmtSym{arg, m}
case Nodes:
args[i] = &fmtNodes{arg, m}
- case int32, int64, string, types.EType, constant.Value:
+ case int32, int64, string, types.Kind, constant.Value:
// OK: printing these types doesn't depend on mode
default:
base.Fatalf("mode.prepareArgs type %T", arg)
b.WriteString("<T>")
return
}
- if t.Etype == types.TSSA {
+ if t.Kind() == types.TSSA {
b.WriteString(t.Extra.(string))
return
}
- if t.Etype == types.TTUPLE {
+ if t.Kind() == types.TTUPLE {
b.WriteString(t.FieldType(0).String())
b.WriteByte(',')
b.WriteString(t.FieldType(1).String())
return
}
- if t.Etype == types.TRESULTS {
+ if t.Kind() == types.TRESULTS {
tys := t.Extra.(*types.Results).Types
for i, et := range tys {
if i > 0 {
if mode == FTypeIdName {
flag |= FmtUnsigned
}
- if t == types.Bytetype || t == types.Runetype {
+ if t == types.ByteType || t == types.RuneType {
// in %-T mode collapse rune and byte with their originals.
switch mode {
case FTypeIdName, FTypeId:
- t = types.Types[t.Etype]
+ t = types.Types[t.Kind()]
default:
- sconv2(b, t.Sym, FmtShort, mode)
+ sconv2(b, t.Sym(), FmtShort, mode)
return
}
}
- if t == types.Errortype {
+ if t == types.ErrorType {
b.WriteString("error")
return
}
// Unless the 'L' flag was specified, if the type has a name, just print that name.
- if flag&FmtLong == 0 && t.Sym != nil && t != types.Types[t.Etype] {
+ if flag&FmtLong == 0 && t.Sym() != nil && t != types.Types[t.Kind()] {
switch mode {
case FTypeId, FTypeIdName:
if flag&FmtShort != 0 {
if t.Vargen != 0 {
- sconv2(b, t.Sym, FmtShort, mode)
+ sconv2(b, t.Sym(), FmtShort, mode)
fmt.Fprintf(b, "·%d", t.Vargen)
return
}
- sconv2(b, t.Sym, FmtShort, mode)
+ sconv2(b, t.Sym(), FmtShort, mode)
return
}
if mode == FTypeIdName {
- sconv2(b, t.Sym, FmtUnsigned, mode)
+ sconv2(b, t.Sym(), FmtUnsigned, mode)
return
}
- if t.Sym.Pkg == LocalPkg && t.Vargen != 0 {
- b.WriteString(mode.Sprintf("%v·%d", t.Sym, t.Vargen))
+ if t.Sym().Pkg == LocalPkg && t.Vargen != 0 {
+ b.WriteString(mode.Sprintf("%v·%d", t.Sym(), t.Vargen))
return
}
}
- sconv2(b, t.Sym, 0, mode)
+ sconv2(b, t.Sym(), 0, mode)
return
}
- if int(t.Etype) < len(BasicTypeNames) && BasicTypeNames[t.Etype] != "" {
+ if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" {
var name string
switch t {
case types.UntypedBool:
case types.UntypedComplex:
name = "untyped complex"
default:
- name = BasicTypeNames[t.Etype]
+ name = BasicTypeNames[t.Kind()]
}
b.WriteString(name)
return
}
if mode == FDbg {
- b.WriteString(t.Etype.String())
+ b.WriteString(t.Kind().String())
b.WriteByte('-')
tconv2(b, t, flag, FErr, visited)
return
visited[t] = b.Len()
defer delete(visited, t)
- switch t.Etype {
+ switch t.Kind() {
case types.TPTR:
b.WriteByte('*')
switch mode {
tconv2(b, t.Elem(), 0, mode, visited)
default:
b.WriteString("chan ")
- if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym == nil && t.Elem().ChanDir() == types.Crecv {
+ if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == types.Crecv {
b.WriteByte('(')
tconv2(b, t.Elem(), 0, mode, visited)
b.WriteByte(')')
case types.TFORW:
b.WriteString("undefined")
- if t.Sym != nil {
+ if t.Sym() != nil {
b.WriteByte(' ')
- sconv2(b, t.Sym, 0, mode)
+ sconv2(b, t.Sym(), 0, mode)
}
case types.TUNSAFEPTR:
b.WriteString("Txxx")
default:
// Don't know how to handle - fall back to detailed prints.
- b.WriteString(mode.Sprintf("%v <%v>", t.Etype, t.Sym))
+ b.WriteString(mode.Sprintf("%v <%v>", t.Kind(), t.Sym()))
}
}
OSTR2BYTES,
OSTR2RUNES,
ORUNESTR:
- if n.Type() == nil || n.Type().Sym == nil {
+ if n.Type() == nil || n.Type().Sym() == nil {
mode.Fprintf(s, "(%v)", n.Type())
} else {
mode.Fprintf(s, "%v", n.Type())
}
if flag&FmtLong != 0 && t != nil {
- if t.Etype == types.TNIL {
+ if t.Kind() == types.TNIL {
fmt.Fprint(s, "nil")
} else if n.Op() == ONAME && n.Name().AutoTemp() {
mode.Fprintf(s, "%v value", t)
GoBuildPragma
)
-func AsNode(n types.IRNode) Node {
+func AsNode(n types.Object) Node {
if n == nil {
return nil
}
func (n *typeNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
func (n *typeNode) rawCopy() Node { c := *n; return &c }
func (n *typeNode) Type() *types.Type { return n.typ }
-func (n *typeNode) Sym() *types.Sym { return n.typ.Sym }
+func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() }
func (n *typeNode) CanBeNtype() {}
// TypeNode returns the Node representing the type t.
func ValidTypeForConst(t *types.Type, v constant.Value) bool {
switch v.Kind() {
case constant.Unknown:
- return OKForConst[t.Etype]
+ return OKForConst[t.Kind()]
case constant.Bool:
return t.IsBoolean()
case constant.String:
// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
// that has no 64-bit integer registers.
- intPairTypes := func(et types.EType) (tHi, tLo *types.Type) {
+ intPairTypes := func(et types.Kind) (tHi, tLo *types.Type) {
tHi = typ.UInt32
if et == types.TINT64 {
tHi = typ.Int32
case OpStructSelect:
w := selector.Args[0]
var ls []LocalSlot
- if w.Type.Etype != types.TSTRUCT { // IData artifact
+ if w.Type.Kind() != types.TSTRUCT { // IData artifact
ls = rewriteSelect(leaf, w, offset)
} else {
ls = rewriteSelect(leaf, w, offset+w.Type.FieldOff(int(selector.AuxInt)))
decomposeOne func(pos src.XPos, b *Block, base, source, mem *Value, t1 *types.Type, offArg, offStore int64) *Value,
decomposeTwo func(pos src.XPos, b *Block, base, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64) *Value) *Value {
u := source.Type
- switch u.Etype {
+ switch u.Kind() {
case types.TARRAY:
elem := u.Elem()
for i := int64(0); i < u.NumElem(); i++ {
if t.Width == regSize {
break
}
- tHi, tLo := intPairTypes(t.Etype)
+ tHi, tLo := intPairTypes(t.Kind())
mem = decomposeOne(pos, b, base, source, mem, tHi, source.AuxInt+hiOffset, offset+hiOffset)
pos = pos.WithNotStmt()
return decomposeOne(pos, b, base, source, mem, tLo, source.AuxInt+lowOffset, offset+lowOffset)
return storeArgOrLoad(pos, b, base, source.Args[0], mem, t.Elem(), offset)
case OpInt64Make:
- tHi, tLo := intPairTypes(t.Etype)
+ tHi, tLo := intPairTypes(t.Kind())
mem = storeArgOrLoad(pos, b, base, source.Args[0], mem, tHi, offset+hiOffset)
pos = pos.WithNotStmt()
return storeArgOrLoad(pos, b, base, source.Args[1], mem, tLo, offset+lowOffset)
}
// For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
- switch t.Etype {
+ switch t.Kind() {
case types.TARRAY:
elt := t.Elem()
if source.Type != t && t.NumElem() == 1 && elt.Width == t.Width && t.Width == regSize {
if t.Width == regSize {
break
}
- tHi, tLo := intPairTypes(t.Etype)
+ tHi, tLo := intPairTypes(t.Kind())
sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
mem = storeArgOrLoad(pos, b, base, sel, mem, tHi, offset+hiOffset)
pos = pos.WithNotStmt()
offset := int64(0)
switch v.Op {
case OpStructSelect:
- if w.Type.Etype == types.TSTRUCT {
+ if w.Type.Kind() == types.TSTRUCT {
offset = w.Type.FieldOff(int(v.AuxInt))
} else { // Immediate interface data artifact, offset is zero.
f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
// so this test setup can share it.
types.Tconv = func(t *types.Type, flag, mode int) string {
- return t.Etype.String()
+ return t.Kind().String()
}
types.Sconv = func(s *types.Sym, flag, mode int) string {
return "sym"
fmt.Fprintf(s, "sym")
}
types.FormatType = func(t *types.Type, s fmt.State, verb rune, mode int) {
- fmt.Fprintf(s, "%v", t.Etype)
+ fmt.Fprintf(s, "%v", t.Kind())
}
types.Dowidth = func(t *types.Type) {}
for _, typ := range [...]struct {
width int64
- et types.EType
+ et types.Kind
}{
{1, types.TINT8},
{1, types.TUINT8},
return 0
}
if t.IsFloat() || t == types.TypeInt128 {
- if t.Etype == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
+ if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
m = s.f.Config.fp32RegMask
- } else if t.Etype == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
+ } else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
m = s.f.Config.fp64RegMask
} else {
m = s.f.Config.fpRegMask
var _EType_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 158, 161, 166, 174, 182, 185, 190, 197, 202}
-func (i EType) String() string {
- if i >= EType(len(_EType_index)-1) {
+func (i Kind) String() string {
+ if i >= Kind(len(_EType_index)-1) {
return "EType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _EType_name[_EType_index[i]:_EType_index[i+1]]
if t1 == t2 {
return true
}
- if t1 == nil || t2 == nil || t1.Etype != t2.Etype || t1.Broke() || t2.Broke() {
+ if t1 == nil || t2 == nil || t1.kind != t2.kind || t1.Broke() || t2.Broke() {
return false
}
- if t1.Sym != nil || t2.Sym != nil {
+ if t1.sym != nil || t2.sym != nil {
// Special case: we keep byte/uint8 and rune/int32
// separate for error messages. Treat them as equal.
- switch t1.Etype {
+ switch t1.kind {
case TUINT8:
- return (t1 == Types[TUINT8] || t1 == Bytetype) && (t2 == Types[TUINT8] || t2 == Bytetype)
+ return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType)
case TINT32:
- return (t1 == Types[TINT32] || t1 == Runetype) && (t2 == Types[TINT32] || t2 == Runetype)
+ return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType)
default:
return false
}
}
assumedEqual[typePair{t1, t2}] = struct{}{}
- switch t1.Etype {
+ switch t1.kind {
case TIDEAL:
// Historically, cmd/compile used a single "untyped
// number" type, so all untyped number types were
// restored once the block scope ends.
type dsym struct {
sym *Sym // sym == nil indicates stack mark
- def IRNode
+ def Object
block int32
lastlineno src.XPos // last declaration for diagnostic
}
}
// PkgDef returns the definition associated with s at package scope.
-func (s *Sym) PkgDef() IRNode {
+func (s *Sym) PkgDef() Object {
return *s.pkgDefPtr()
}
// SetPkgDef sets the definition associated with s at package scope.
-func (s *Sym) SetPkgDef(n IRNode) {
+func (s *Sym) SetPkgDef(n Object) {
*s.pkgDefPtr() = n
}
-func (s *Sym) pkgDefPtr() *IRNode {
+func (s *Sym) pkgDefPtr() *Object {
// Look for outermost saved declaration, which must be the
// package scope definition, if present.
for _, d := range dclstack {
Name string // object name
// saved and restored by dcopy
- Def IRNode // definition: ONAME OTYPE OPACK or OLITERAL
+ Def Object // definition: ONAME OTYPE OPACK or OLITERAL
Block int32 // blocknumber to catch redeclaration
Lastlineno src.XPos // last declaration for diagnostic
// IRNode represents an ir.Node, but without needing to import cmd/compile/internal/ir,
// which would cause an import cycle. The uses in other packages must type assert
// values of type IRNode to ir.Node or a more specific type.
-type IRNode interface {
+type Object interface {
Pos() src.XPos
Sym() *Sym
Type() *Type
//go:generate stringer -type EType -trimprefix T
// EType describes a kind of type.
-type EType uint8
+type Kind uint8
const (
- Txxx EType = iota
+ Txxx Kind = iota
TINT8
TUINT8
var (
// Predeclared alias types. Kept separate for better error messages.
- Bytetype *Type
- Runetype *Type
+ ByteType *Type
+ RuneType *Type
// Predeclared error interface type.
- Errortype *Type
+ ErrorType *Type
// Types to represent untyped string and boolean constants.
UntypedString = New(TSTRING)
methods Fields
allMethods Fields
- nod IRNode // canonical OTYPE node
- Orig *Type // original type (type literal or predefined type)
+ nod Object // canonical OTYPE node
+ underlying *Type // original type (type literal or predefined type)
// Cache of composite types, with this type being the element type.
- Cache struct {
+ cache struct {
ptr *Type // *T, or nil
slice *Type // []T, or nil
}
- Sym *Sym // symbol containing name, for named types
+ sym *Sym // symbol containing name, for named types
Vargen int32 // unique name for OTYPE/ONAME
- Etype EType // kind of type
+ kind Kind // kind of type
Align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
flags bitset8
func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
// Kind returns the kind of type t.
-func (t *Type) Kind() EType { return t.Etype }
+func (t *Type) Kind() Kind { return t.kind }
// Sym returns the name of type t.
-func (t *Type) GetSym() *Sym { return t.Sym }
+func (t *Type) Sym() *Sym { return t.sym }
// Underlying returns the underlying type of type t.
-func (t *Type) Underlying() *Type { return t.Orig }
+func (t *Type) Underlying() *Type { return t.underlying }
// SetNod associates t with syntax node n.
-func (t *Type) SetNod(n IRNode) {
+func (t *Type) SetNod(n Object) {
// t.nod can be non-nil already
// in the case of shared *Types, like []byte or interface{}.
if t.nod == nil {
// cmd/compile itself, but we need to track it because it's exposed by
// the go/types API.
func (t *Type) Pkg() *Pkg {
- switch t.Etype {
+ switch t.kind {
case TFUNC:
return t.Extra.(*Func).pkg
case TSTRUCT:
// SetPkg sets the package that t appeared in.
func (t *Type) SetPkg(pkg *Pkg) {
- switch t.Etype {
+ switch t.kind {
case TFUNC:
t.Extra.(*Func).pkg = pkg
case TSTRUCT:
// For fields that represent function parameters, Nname points
// to the associated ONAME Node.
- Nname IRNode
+ Nname Object
// Offset in bytes of this field or method within its enclosing struct
// or interface Type.
// IsMethod reports whether f represents a method rather than a struct field.
func (f *Field) IsMethod() bool {
- return f.Type.Etype == TFUNC && f.Type.Recv() != nil
+ return f.Type.kind == TFUNC && f.Type.Recv() != nil
}
// Fields is a pointer to a slice of *Field.
}
// New returns a new Type of the specified kind.
-func New(et EType) *Type {
+func New(et Kind) *Type {
t := &Type{
- Etype: et,
+ kind: et,
Width: BADWIDTH,
}
- t.Orig = t
+ t.underlying = t
// TODO(josharian): lazily initialize some of these?
- switch t.Etype {
+ switch t.kind {
case TMAP:
t.Extra = new(Map)
case TFORW:
// NewSlice returns the slice Type with element type elem.
func NewSlice(elem *Type) *Type {
- if t := elem.Cache.slice; t != nil {
+ if t := elem.cache.slice; t != nil {
if t.Elem() != elem {
Fatalf("elem mismatch")
}
t := New(TSLICE)
t.Extra = Slice{Elem: elem}
- elem.Cache.slice = t
+ elem.cache.slice = t
return t
}
Fatalf("NewPtr: pointer to elem Type is nil")
}
- if t := elem.Cache.ptr; t != nil {
+ if t := elem.cache.ptr; t != nil {
if t.Elem() != elem {
Fatalf("NewPtr: elem mismatch")
}
t.Width = int64(Widthptr)
t.Align = uint8(Widthptr)
if NewPtrCacheEnabled {
- elem.Cache.ptr = t
+ elem.cache.ptr = t
}
return t
}
return nil
}
- switch t.Etype {
+ switch t.kind {
default:
// Leave the type unchanged.
}
nt := *t
// copy any *T Extra fields, to avoid aliasing
- switch t.Etype {
+ switch t.kind {
case TMAP:
x := *t.Extra.(*Map)
nt.Extra = &x
Fatalf("ssa types cannot be copied")
}
// TODO(mdempsky): Find out why this is necessary and explain.
- if t.Orig == t {
- nt.Orig = &nt
+ if t.underlying == t {
+ nt.underlying = &nt
}
return &nt
}
return &nf
}
-func (t *Type) wantEtype(et EType) {
- if t.Etype != et {
+func (t *Type) wantEtype(et Kind) {
+ if t.kind != et {
Fatalf("want %v, but have %v", et, t)
}
}
// Elem returns the type of elements of t.
// Usable with pointers, channels, arrays, slices, and maps.
func (t *Type) Elem() *Type {
- switch t.Etype {
+ switch t.kind {
case TPTR:
return t.Extra.(Ptr).Elem
case TARRAY:
case TMAP:
return t.Extra.(*Map).Elem
}
- Fatalf("Type.Elem %s", t.Etype)
+ Fatalf("Type.Elem %s", t.kind)
return nil
}
// IsFuncArgStruct reports whether t is a struct representing function parameters.
func (t *Type) IsFuncArgStruct() bool {
- return t.Etype == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
+ return t.kind == TSTRUCT && t.Extra.(*Struct).Funarg != FunargNone
}
func (t *Type) Methods() *Fields {
}
func (t *Type) Fields() *Fields {
- switch t.Etype {
+ switch t.kind {
case TSTRUCT:
return &t.Extra.(*Struct).fields
case TINTER:
}
func (t *Type) Size() int64 {
- if t.Etype == TSSA {
+ if t.kind == TSSA {
if t == TypeInt128 {
return 16
}
}
func (t *Type) SimpleString() string {
- return t.Etype.String()
+ return t.kind.String()
}
// Cmp is a comparison between values a and b.
return CMPgt
}
- if t.Etype != x.Etype {
- return cmpForNe(t.Etype < x.Etype)
+ if t.kind != x.kind {
+ return cmpForNe(t.kind < x.kind)
}
- if t.Sym != nil || x.Sym != nil {
+ if t.sym != nil || x.sym != nil {
// Special case: we keep byte and uint8 separate
// for error messages. Treat them as equal.
- switch t.Etype {
+ switch t.kind {
case TUINT8:
- if (t == Types[TUINT8] || t == Bytetype) && (x == Types[TUINT8] || x == Bytetype) {
+ if (t == Types[TUINT8] || t == ByteType) && (x == Types[TUINT8] || x == ByteType) {
return CMPeq
}
case TINT32:
- if (t == Types[Runetype.Etype] || t == Runetype) && (x == Types[Runetype.Etype] || x == Runetype) {
+ if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) {
return CMPeq
}
}
}
- if c := t.Sym.cmpsym(x.Sym); c != CMPeq {
+ if c := t.sym.cmpsym(x.sym); c != CMPeq {
return c
}
- if x.Sym != nil {
+ if x.sym != nil {
// Syms non-nil, if vargens match then equal.
if t.Vargen != x.Vargen {
return cmpForNe(t.Vargen < x.Vargen)
}
// both syms nil, look at structure below.
- switch t.Etype {
+ switch t.kind {
case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
return CMPeq
}
// IsKind reports whether t is a Type of the specified kind.
-func (t *Type) IsKind(et EType) bool {
- return t != nil && t.Etype == et
+func (t *Type) IsKind(et Kind) bool {
+ return t != nil && t.kind == et
}
func (t *Type) IsBoolean() bool {
- return t.Etype == TBOOL
+ return t.kind == TBOOL
}
-var unsignedEType = [...]EType{
+var unsignedEType = [...]Kind{
TINT8: TUINT8,
TUINT8: TUINT8,
TINT16: TUINT16,
if !t.IsInteger() {
Fatalf("unsignedType(%v)", t)
}
- return Types[unsignedEType[t.Etype]]
+ return Types[unsignedEType[t.kind]]
}
func (t *Type) IsInteger() bool {
- switch t.Etype {
+ switch t.kind {
case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
return true
}
}
func (t *Type) IsSigned() bool {
- switch t.Etype {
+ switch t.kind {
case TINT8, TINT16, TINT32, TINT64, TINT:
return true
}
}
func (t *Type) IsUnsigned() bool {
- switch t.Etype {
+ switch t.kind {
case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR:
return true
}
}
func (t *Type) IsFloat() bool {
- return t.Etype == TFLOAT32 || t.Etype == TFLOAT64 || t == UntypedFloat
+ return t.kind == TFLOAT32 || t.kind == TFLOAT64 || t == UntypedFloat
}
func (t *Type) IsComplex() bool {
- return t.Etype == TCOMPLEX64 || t.Etype == TCOMPLEX128 || t == UntypedComplex
+ return t.kind == TCOMPLEX64 || t.kind == TCOMPLEX128 || t == UntypedComplex
}
// IsPtr reports whether t is a regular Go pointer type.
// This does not include unsafe.Pointer.
func (t *Type) IsPtr() bool {
- return t.Etype == TPTR
+ return t.kind == TPTR
}
// IsPtrElem reports whether t is the element of a pointer (to t).
func (t *Type) IsPtrElem() bool {
- return t.Cache.ptr != nil
+ return t.cache.ptr != nil
}
// IsUnsafePtr reports whether t is an unsafe pointer.
func (t *Type) IsUnsafePtr() bool {
- return t.Etype == TUNSAFEPTR
+ return t.kind == TUNSAFEPTR
}
// IsUintptr reports whether t is an uintptr.
func (t *Type) IsUintptr() bool {
- return t.Etype == TUINTPTR
+ return t.kind == TUINTPTR
}
// IsPtrShaped reports whether t is represented by a single machine pointer.
// that consist of a single pointer shaped type.
// TODO(mdempsky): Should it? See golang.org/issue/15028.
func (t *Type) IsPtrShaped() bool {
- return t.Etype == TPTR || t.Etype == TUNSAFEPTR ||
- t.Etype == TMAP || t.Etype == TCHAN || t.Etype == TFUNC
+ return t.kind == TPTR || t.kind == TUNSAFEPTR ||
+ t.kind == TMAP || t.kind == TCHAN || t.kind == TFUNC
}
// HasNil reports whether the set of values determined by t includes nil.
func (t *Type) HasNil() bool {
- switch t.Etype {
+ switch t.kind {
case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR:
return true
}
}
func (t *Type) IsString() bool {
- return t.Etype == TSTRING
+ return t.kind == TSTRING
}
func (t *Type) IsMap() bool {
- return t.Etype == TMAP
+ return t.kind == TMAP
}
func (t *Type) IsChan() bool {
- return t.Etype == TCHAN
+ return t.kind == TCHAN
}
func (t *Type) IsSlice() bool {
- return t.Etype == TSLICE
+ return t.kind == TSLICE
}
func (t *Type) IsArray() bool {
- return t.Etype == TARRAY
+ return t.kind == TARRAY
}
func (t *Type) IsStruct() bool {
- return t.Etype == TSTRUCT
+ return t.kind == TSTRUCT
}
func (t *Type) IsInterface() bool {
- return t.Etype == TINTER
+ return t.kind == TINTER
}
// IsEmptyInterface reports whether t is an empty interface type.
return t.Fields().Len()
}
func (t *Type) FieldType(i int) *Type {
- if t.Etype == TTUPLE {
+ if t.kind == TTUPLE {
switch i {
case 0:
return t.Extra.(*Tuple).first
panic("bad tuple index")
}
}
- if t.Etype == TRESULTS {
+ if t.kind == TRESULTS {
return t.Extra.(*Results).Types[i]
}
return t.Field(i).Type
// (and their comprised elements) are excluded from the count.
// struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty.
func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 {
- switch t.Etype {
+ switch t.kind {
case TSTRUCT:
if t.IsFuncArgStruct() {
Fatalf("NumComponents func arg struct")
// if there is exactly one. Otherwise, it returns nil.
// Components are counted as in NumComponents, including blank fields.
func (t *Type) SoleComponent() *Type {
- switch t.Etype {
+ switch t.kind {
case TSTRUCT:
if t.IsFuncArgStruct() {
Fatalf("SoleComponent func arg struct")
}
func (t *Type) IsMemory() bool {
- if t == TypeMem || t.Etype == TTUPLE && t.Extra.(*Tuple).second == TypeMem {
+ if t == TypeMem || t.kind == TTUPLE && t.Extra.(*Tuple).second == TypeMem {
return true
}
- if t.Etype == TRESULTS {
+ if t.kind == TRESULTS {
if types := t.Extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem {
return true
}
}
func (t *Type) IsFlags() bool { return t == TypeFlags }
func (t *Type) IsVoid() bool { return t == TypeVoid }
-func (t *Type) IsTuple() bool { return t.Etype == TTUPLE }
-func (t *Type) IsResults() bool { return t.Etype == TRESULTS }
+func (t *Type) IsTuple() bool { return t.kind == TTUPLE }
+func (t *Type) IsResults() bool { return t.kind == TRESULTS }
// IsUntyped reports whether t is an untyped type.
func (t *Type) IsUntyped() bool {
if t == UntypedString || t == UntypedBool {
return true
}
- switch t.Etype {
+ switch t.kind {
case TNIL, TIDEAL:
return true
}
// HasPointers reports whether t contains a heap pointer.
// Note that this function ignores pointers to go:notinheap types.
func (t *Type) HasPointers() bool {
- switch t.Etype {
+ switch t.kind {
case TINT, TUINT, TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64,
TUINT64, TUINTPTR, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TBOOL, TSSA:
return false
)
// NewNamed returns a new named type for the given type name.
-func NewNamed(obj IRNode) *Type {
+func NewNamed(obj Object) *Type {
t := New(TFORW)
- t.Sym = obj.Sym()
+ t.sym = obj.Sym()
t.nod = obj
return t
}
// Obj returns the type name for the named type t.
-func (t *Type) Obj() IRNode {
- if t.Sym != nil {
+func (t *Type) Obj() Object {
+ if t.sym != nil {
return t.nod
}
return nil
// SetUnderlying sets the underlying type.
func (t *Type) SetUnderlying(underlying *Type) {
- if underlying.Etype == TFORW {
+ if underlying.kind == TFORW {
// This type isn't computed yet; when it is, update n.
underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
return
ft := t.ForwardType()
// TODO(mdempsky): Fix Type rekinding.
- t.Etype = underlying.Etype
+ t.kind = underlying.kind
t.Extra = underlying.Extra
t.Width = underlying.Width
t.Align = underlying.Align
- t.Orig = underlying.Orig
+ t.underlying = underlying.underlying
if underlying.NotInHeap() {
t.SetNotInHeap(true)
}
-// NewNamed returns a new basic type of the given kind.
+// NewBasic returns a new basic type of the given kind.
-func NewBasic(kind EType, obj IRNode) *Type {
+func NewBasic(kind Kind, obj Object) *Type {
t := New(kind)
- t.Sym = obj.Sym()
+ t.sym = obj.Sym()
t.nod = obj
return t
}