w = int64(Widthptr)
checkwidth(t.Type)
- checkwidth(t.Down)
+ checkwidth(t.Key())
case TFORW: // should have been filled in
if !t.Broke {
case TMAP:
p.tag(mapTag)
- p.typ(t.Down) // key
- p.typ(t.Type) // val
+ p.typ(t.Key()) // key
+ p.typ(t.Type) // val
case TCHAN:
p.tag(chanTag)
Fatalf("struct expected")
}
- tp := &t.Type
+ var fields []*Type
for _, n := range l {
- f := structfield(n)
- *tp = f
- tp = &f.Down
+ fields = append(fields, structfield(n))
}
+ t.SetFields(fields)
for f, it := IterFields(t); f != nil && !t.Broke; f = it.Next() {
if f.Broke {
}
func tofunargs(l []*Node) *Type {
- var f *Type
-
t := typ(TSTRUCT)
t.Funarg = true
- tp := &t.Type
+ var fields []*Type
for _, n := range l {
- f = structfield(n)
+ f := structfield(n)
f.Funarg = true
// esc.go needs to find f given a PPARAM to add the tag.
n.Left.Name.Param.Field = f
}
- *tp = f
- tp = &f.Down
+ fields = append(fields, f)
}
+ t.SetFields(fields)
for f, it := IterFields(t); f != nil && !t.Broke; f = it.Next() {
if f.Broke {
Fatalf("interface expected")
}
- tp := &t.Type
+ var fields []*Type
for _, n := range l {
f := interfacefield(n)
if f.Sym != nil {
f.Nname = newname(f.Sym)
}
- *tp = f
- tp = &f.Down
+ fields = append(fields, f)
}
} else {
- *tp = f
- tp = &f.Down
+ fields = append(fields, f)
}
}
+ t.SetFields(fields)
for f, it := IterFields(t); f != nil && !t.Broke; f = it.Next() {
if f.Broke {
return fmt.Sprintf("chan %v", t.Type)
case TMAP:
- return fmt.Sprintf("map[%v]%v", t.Down, t.Type)
+ return fmt.Sprintf("map[%v]%v", t.Key(), t.Type)
case TINTER:
var buf bytes.Buffer
// Format the bucket struct for map[x]y as map.bucket[x]y.
// This avoids a recursive print that generates very long names.
if t.Map.Bucket == t {
- return fmt.Sprintf("map.bucket[%v]%v", t.Map.Down, t.Map.Type)
+ return fmt.Sprintf("map.bucket[%v]%v", t.Map.Key(), t.Map.Type)
}
if t.Map.Hmap == t {
- return fmt.Sprintf("map.hdr[%v]%v", t.Map.Down, t.Map.Type)
+ return fmt.Sprintf("map.hdr[%v]%v", t.Map.Key(), t.Map.Type)
}
if t.Map.Hiter == t {
- return fmt.Sprintf("map.iter[%v]%v", t.Map.Down, t.Map.Type)
+ return fmt.Sprintf("map.iter[%v]%v", t.Map.Key(), t.Map.Type)
}
Yyerror("unknown internal map type")
t2 = t.Type
case TMAP:
- t1 = t.Down
+ t1 = t.Key()
t2 = t.Type
case TCHAN:
hit := prealloc[n]
hit.Type = th
n.Left = nil
- keyname := newname(th.Type.Sym) // depends on layout of iterator struct. See reflect.go:hiter
- valname := newname(th.Type.Down.Sym) // ditto
+ keyname := newname(th.Field(0).Sym) // depends on layout of iterator struct. See reflect.go:hiter
+ valname := newname(th.Field(1).Sym) // ditto
fn := syslook("mapiterinit")
- substArgTypes(&fn, t.Down, t.Type, th)
+ substArgTypes(&fn, t.Key(), t.Type, th)
init = append(init, mkcall1(fn, nil, nil, typename(t), ha, Nod(OADDR, hit, nil)))
n.Left = Nod(ONE, Nod(ODOT, hit, keyname), nodnil())
}
bucket := typ(TSTRUCT)
- keytype := t.Down
+ keytype := t.Key()
valtype := t.Type
dowidth(keytype)
dowidth(valtype)
// so if the struct needs 64-bit padding (because a key or value does)
// then it would end with an extra 32-bit padding field.
// Preempt that by emitting the padding here.
- if int(t.Type.Align) > Widthptr || int(t.Down.Align) > Widthptr {
+ if int(t.Type.Align) > Widthptr || int(t.Key().Align) > Widthptr {
field = append(field, makefield("pad", Types[TUINTPTR]))
}
// the type of the overflow field to uintptr in this case.
// See comment on hmap.overflow in ../../../../runtime/hashmap.go.
otyp := Ptrto(bucket)
- if !haspointers(t.Type) && !haspointers(t.Down) && t.Type.Width <= MAXKEYSIZE && t.Down.Width <= MAXVALSIZE {
+ if !haspointers(t.Type) && !haspointers(t.Key()) && t.Type.Width <= MAXVALSIZE && t.Key().Width <= MAXKEYSIZE {
otyp = Types[TUINTPTR]
}
ovf := makefield("overflow", otyp)
// link up fields
bucket.Noalg = true
bucket.Local = t.Local
- bucket.Type = field[0]
- for n := int32(0); n < int32(len(field)-1); n++ {
- field[n].Down = field[n+1]
- }
- field[len(field)-1].Down = nil
+ bucket.SetFields(field[:])
dowidth(bucket)
// Double-check that overflow field is final memory in struct,
h := typ(TSTRUCT)
h.Noalg = true
h.Local = t.Local
- h.Type = field[0]
- for n := int32(0); n < int32(len(field)-1); n++ {
- field[n].Down = field[n+1]
- }
- field[len(field)-1].Down = nil
+ h.SetFields(field[:])
dowidth(h)
t.Hmap = h
h.Map = t
// }
// must match ../../../../runtime/hashmap.go:hiter.
var field [12]*Type
- field[0] = makefield("key", Ptrto(t.Down))
-
+ field[0] = makefield("key", Ptrto(t.Key()))
field[1] = makefield("val", Ptrto(t.Type))
field[2] = makefield("t", Ptrto(Types[TUINT8]))
field[3] = makefield("h", Ptrto(hmap(t)))
// build iterator struct holding the above fields
i := typ(TSTRUCT)
-
i.Noalg = true
- i.Type = field[0]
- for n := int32(0); n < int32(len(field)-1); n++ {
- field[n].Down = field[n+1]
- }
- field[len(field)-1].Down = nil
+ i.SetFields(field[:])
dowidth(i)
if i.Width != int64(12*Widthptr) {
Yyerror("hash_iter size not correct %d %d", i.Width, 12*Widthptr)
// ../../../../runtime/type.go:/mapType
case TMAP:
- s1 := dtypesym(t.Down)
-
+ s1 := dtypesym(t.Key())
s2 := dtypesym(t.Type)
s3 := dtypesym(mapbucket(t))
s4 := dtypesym(hmap(t))
ot = dsymptr(s, ot, s2, 0)
ot = dsymptr(s, ot, s3, 0)
ot = dsymptr(s, ot, s4, 0)
- if t.Down.Width > MAXKEYSIZE {
+ if t.Key().Width > MAXKEYSIZE {
ot = duint8(s, ot, uint8(Widthptr))
ot = duint8(s, ot, 1) // indirect
} else {
- ot = duint8(s, ot, uint8(t.Down.Width))
+ ot = duint8(s, ot, uint8(t.Key().Width))
ot = duint8(s, ot, 0) // not indirect
}
}
ot = duint16(s, ot, uint16(mapbucket(t).Width))
- ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))
- ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Down))))
+ ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
+ ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
ot = dextratype(s, ot, t, 0)
case TPTR32, TPTR64:
// build list of var[c] = expr.
// use temporary so that mapassign1 can have addressable key, val.
if key == nil {
- key = temp(var_.Type.Down)
+ key = temp(var_.Type.Key())
val = temp(var_.Type.Type)
}
}
func sortinter(t *Type) *Type {
- if t.Type == nil || t.Type.Down == nil {
- return t
- }
-
- var a []*Type
- for f, it := IterFields(t); f != nil; f = it.Next() {
- a = append(a, f)
- }
- sort.Sort(methcmp(a))
-
- n := len(a) // n > 0 due to initial conditions.
- for i := 0; i < n-1; i++ {
- a[i].Down = a[i+1]
- }
- a[n-1].Down = nil
-
- t.Type = a[0]
+ s := t.FieldSlice()
+ sort.Sort(methcmp(s))
+ t.SetFields(s)
return t
}
switch t1.Etype {
case TINTER, TSTRUCT:
- t1 = t1.Type
- t2 = t2.Type
- for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
- if t1.Etype != TFIELD || t2.Etype != TFIELD {
- Fatalf("struct/interface missing field: %v %v", t1, t2)
- }
+ t1, i1 := IterFields(t1)
+ t2, i2 := IterFields(t2)
+ for ; t1 != nil && t2 != nil; t1, t2 = i1.Next(), i2.Next() {
if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, assumedEqual) || !eqnote(t1.Note, t2.Note) {
return false
}
if t1.Chan != t2.Chan {
return false
}
+
+ case TMAP:
+ if !eqtype1(t1.Key(), t2.Key(), assumedEqual) {
+ return false
+ }
}
- return eqtype1(t1.Down, t2.Down, assumedEqual) && eqtype1(t1.Type, t2.Type, assumedEqual)
+ return eqtype1(t1.Type, t2.Type, assumedEqual)
}
// Are t1 and t2 equal struct types when field names are ignored?
return false
}
- t1 = t1.Type
- t2 = t2.Type
+ t1, i1 := IterFields(t1)
+ t2, i2 := IterFields(t2)
for {
if !Eqtype(t1, t2) {
return false
if t1 == nil {
return true
}
- t1 = t1.Down
- t2 = t2.Down
+ t1 = i1.Next()
+ t2 = i2.Next()
}
}
TUNSAFEPTR:
return true
- // Array of 1 direct iface type can be direct.
case TARRAY:
+ // Array of 1 direct iface type can be direct.
return t.Bound == 1 && isdirectiface(t.Type)
- // Struct with 1 field of direct iface type can be direct.
case TSTRUCT:
- return t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type)
+ // Struct with 1 field of direct iface type can be direct.
+ return countfield(t) == 1 && isdirectiface(t.Field(0).Type)
}
return false
(*Type).Recvs, (*Type).Params, (*Type).Results,
}
+// Key returns the key type of map type t.
+func (t *Type) Key() *Type {
+ t.wantEtype(TMAP)
+ return t.Down
+}
+
// Field returns the i'th field/method of struct/interface type t.
func (t *Type) Field(i int) *Type {
// TODO: store fields in a slice so we can
panic("not enough fields")
}
+// FieldSlice returns a slice containing all fields/methods of
+// struct/interface type t.
+func (t *Type) FieldSlice() []*Type {
+ var s []*Type
+ for f, it := IterFields(t); f != nil; f = it.Next() {
+ s = append(s, f)
+ }
+ return s
+}
+
+// SetFields sets struct/interface type t's fields/methods to fields.
+func (t *Type) SetFields(fields []*Type) {
+ if t.Etype != TSTRUCT && t.Etype != TINTER {
+ Fatalf("SetFields: type %v does not have fields", t)
+ }
+ var next *Type
+ for i := len(fields) - 1; i >= 0; i-- {
+ fields[i].Down = next
+ next = fields[i]
+ }
+ t.Type = next
+}
+
func (t *Type) Size() int64 {
dowidth(t)
return t.Width
case TMAP:
n.Etype = 0
- defaultlit(&n.Right, t.Down)
+ defaultlit(&n.Right, t.Key())
if n.Right.Type != nil {
- n.Right = assignconv(n.Right, t.Down, "map index")
+ n.Right = assignconv(n.Right, t.Key(), "map index")
}
n.Type = t.Type
n.Op = OINDEXMAP
return
}
- t = n.List.First().Type.Type
- l = t.Nname
- r = t.Down.Nname
+ t = n.List.First().Type
+ l = t.Field(0).Nname
+ r = t.Field(1).Nname
} else {
if !twoarg(n) {
n.Type = nil
return
}
- args.SetIndex(1, assignconv(r, l.Type.Down, "delete"))
+ args.SetIndex(1, assignconv(r, l.Type.Key(), "delete"))
break OpSwitch
case OAPPEND:
}
r = l.Left
- pushtype(r, t.Down)
+ pushtype(r, t.Key())
typecheck(&r, Erv)
- defaultlit(&r, t.Down)
- l.Left = assignconv(r, t.Down, "map key")
+ defaultlit(&r, t.Key())
+ l.Left = assignconv(r, t.Key(), "map key")
if l.Left.Op != OCONV {
keydup(l.Left, hash)
}
goto bad
}
+ var f [2]*Type
+ f[0] = typ(TFIELD)
+ f[0].Type = Types[TUINT8]
+ f[1] = typ(TFIELD)
+ f[1].Type = tr
+
// make struct { byte; T; }
t := typ(TSTRUCT)
-
- t.Type = typ(TFIELD)
- t.Type.Type = Types[TUINT8]
- t.Type.Down = typ(TFIELD)
- t.Type.Down.Type = tr
+ t.SetFields(f[:])
// compute struct widths
dowidth(t)
// the offset of T is its required alignment
- v = t.Type.Down.Width
+ v = t.Field(1).Width
goto yes
}
t := r.Left.Type
p := ""
if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
- switch algtype(t.Down) {
+ switch algtype(t.Key()) {
case AMEM32:
p = "mapaccess2_fast32"
case AMEM64:
// the boolean result of i.(T) is now untyped so we make it the
// same type as the variable on the lhs.
if !isblank(n.List.Second()) {
- r.Type.Type.Down.Type = n.List.Second().Type
+ r.Type.Field(1).Type = n.List.Second().Type
}
n.Rlist.Set1(r)
n.Op = OAS2FUNC
t := n.Left.Type
p := ""
if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
- switch algtype(t.Down) {
+ switch algtype(t.Key()) {
case AMEM32:
p = "mapaccess1_fast32"
case AMEM64:
}
fn := syslook("makemap")
- substArgTypes(&fn, hmap(t), mapbucket(t), t.Down, t.Type)
+ substArgTypes(&fn, hmap(t), mapbucket(t), t.Key(), t.Type)
n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
case OMAKESLICE:
Fatalf("mapfn %v", t)
}
fn := syslook(name)
- substArgTypes(&fn, t.Down, t.Type, t.Down, t.Type)
+ substArgTypes(&fn, t.Key(), t.Type, t.Key(), t.Type)
return fn
}
Fatalf("mapfn %v", t)
}
fn := syslook(name)
- substArgTypes(&fn, t.Down, t.Type, t.Down)
+ substArgTypes(&fn, t.Key(), t.Type, t.Key())
return fn
}