break
}
}
- end := fields[next-1].Width + fields[next-1].Type.Width
- return end - fields[start].Width, next
+ end := fields[next-1].Offset + fields[next-1].Type.Width
+ return end - fields[start].Offset, next
}
// ispaddedfield reports whether the i'th field of struct type t is followed
// by padding.
}
end := t.Width
if i+1 < len(fields) {
- end = fields[i+1].Width
+ end = fields[i+1].Offset
}
- return fields[i].Width+fields[i].Type.Width != end
+ return fields[i].Offset+fields[i].Type.Width != end
}
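
Aside (not part of the patch): the check above has a direct analogue in package reflect, whose StructField.Offset is the user-visible counterpart of the compiler's new Field.Offset. A minimal runnable sketch, assuming a typical 64-bit layout; the struct S and the helper name are invented for illustration:

package main

import (
	"fmt"
	"reflect"
)

// isPaddedField reports whether field i of struct type t is followed by
// padding: the field's end (Offset plus Size) falls short of the next
// field's Offset, or of the struct's total Size for the last field.
func isPaddedField(t reflect.Type, i int) bool {
	end := t.Size()
	if i+1 < t.NumField() {
		end = t.Field(i + 1).Offset
	}
	f := t.Field(i)
	return f.Offset+f.Type.Size() != end
}

func main() {
	type S struct {
		A byte  // 7 bytes of padding follow on 64-bit targets
		B int64 // last field, flush with the end of the struct
	}
	t := reflect.TypeOf(S{})
	fmt.Println(isPaddedField(t, 0), isPaddedField(t, 1)) // true false
}
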
func offmod(t *Type) {
o := int32(0)
for _, f := range t.Fields().Slice() {
- f.Width = int64(o)
+ f.Offset = int64(o)
o += int32(Widthptr)
if int64(o) >= Thearch.MAXWIDTH {
Yyerror("interface too large")
if f.Type.Align > 0 {
o = Rnd(o, int64(f.Type.Align))
}
- f.Width = o // really offset for TFIELD
+ f.Offset = o
if f.Nname != nil {
// this same stackparam logic is in addrescapes
// in typecheck.go. usually addrescapes runs after
// widstruct, in which case we could drop this,
// but function closure functions are the exception.
for _, p := range recvsParamsResults {
for _, f := range p(t).Fields().Slice() {
- if x := f.Width + f.Type.Width; x > w {
+ if x := f.Offset + f.Type.Width; x > w {
w = x
}
}
a.Op = OINDREG
a.Reg = int16(Thearch.REGSP)
a.Addable = true
- a.Xoffset = fp.Width + Ctxt.FixedFrameSize()
+ a.Xoffset = fp.Offset + Ctxt.FixedFrameSize()
a.Type = n.Type
return
f := t.Results().Field(0)
if f != nil {
- return f.Width + Ctxt.FixedFrameSize()
+ return f.Offset + Ctxt.FixedFrameSize()
}
}
nod.Reg = int16(Thearch.REGSP)
nod.Addable = true
- nod.Xoffset = fp.Width + Ctxt.FixedFrameSize()
+ nod.Xoffset = fp.Offset + Ctxt.FixedFrameSize()
nod.Type = fp.Type
Cgen_as(res, &nod)
}
nod1.Op = OINDREG
nod1.Reg = int16(Thearch.REGSP)
nod1.Addable = true
- nod1.Xoffset = fp.Width + Ctxt.FixedFrameSize()
+ nod1.Xoffset = fp.Offset + Ctxt.FixedFrameSize()
nod1.Type = fp.Type
if res.Op != OREGISTER {
fmt.Fprintf(b, "#define %s__size %d\n", t.Sym.Name, int(t.Width))
for _, t := range t.Fields().Slice() {
if !isblanksym(t.Sym) {
- fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Width))
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym.Name, t.Sym.Name, int(t.Offset))
}
}
}
}
for _, field := range t.Fields().Slice() {
- if !visitComponents(field.Type, startOffset+field.Width, f) {
+ if !visitComponents(field.Type, startOffset+field.Offset, f) {
return false
}
}
if first == nil {
Fatalf("nodarg: bad struct")
}
- if first.Width == BADWIDTH {
+ if first.Offset == BADWIDTH {
Fatalf("nodarg: offset not computed for %v", t)
}
- n.Xoffset = first.Width
+ n.Xoffset = first.Offset
n.Addable = true
case *Field:
if fp == 1 || fp == -1 {
n = Nod(ONAME, nil, nil)
n.Type = t.Type
n.Sym = t.Sym
- if t.Width == BADWIDTH {
+ if t.Offset == BADWIDTH {
Fatalf("nodarg: offset not computed for %v", t)
}
- n.Xoffset = t.Width
+ n.Xoffset = t.Offset
n.Addable = true
n.Orig = t.Nname
default:
case TSTRUCT:
var o int64
for _, t1 := range t.Fields().Slice() {
- fieldoffset := t1.Width
+ fieldoffset := t1.Offset
*xoffset += fieldoffset - o
onebitwalktype1(t1.Type, xoffset, bv)
o = fieldoffset + t1.Type.Width
// Double-check that overflow field is final memory in struct,
// with no padding at end. See comment above.
- if ovf.Width != bucket.Width-int64(Widthptr) {
+ if ovf.Offset != bucket.Width-int64(Widthptr) {
Yyerror("bad math in mapbucket for %v", t)
}
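
To make the invariant concrete: the overflow pointer must be the final word of the bucket, with no padding after it, so its offset equals the bucket's width minus one pointer word. A standalone sketch (the bucket shape here is hypothetical, not the runtime's real map bucket; 64-bit layout assumed):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// A bucket-like shape: fixed payload followed by one overflow pointer.
	type bucket struct {
		tophash  [8]uint8
		keys     [8]int64
		overflow unsafe.Pointer
	}
	var b bucket
	// Analogue of the compiler's check: the offset of the final pointer
	// field equals total size minus one pointer word, i.e. no trailing
	// padding.
	fmt.Println(unsafe.Offsetof(b.overflow) == unsafe.Sizeof(b)-unsafe.Sizeof(b.overflow)) // true
}
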
lastPtrField = t1
}
}
- return lastPtrField.Width + typeptrdata(lastPtrField.Type)
+ return lastPtrField.Offset + typeptrdata(lastPtrField.Type)
default:
Fatalf("typeptrdata: unexpected type, %v", t)
// ../../../../runtime/type.go:/structField
ot = dnameField(s, ot, f)
ot = dsymptr(s, ot, dtypesym(f.Type), 0)
- ot = duintptr(s, ot, uint64(f.Width)) // field offset
+ ot = duintptr(s, ot, uint64(f.Offset))
}
}
case TSTRUCT:
for _, t1 := range t.Fields().Slice() {
- p.emit(t1.Type, offset+t1.Width)
+ p.emit(t1.Type, offset+t1.Offset)
}
}
}
return nil
}
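
Both visitComponents and the emitter above follow the same pattern: a nested field's absolute position is the running base offset plus field.Offset. A self-contained sketch of that recursion over reflect types (names invented for illustration):

package main

import (
	"fmt"
	"reflect"
)

// visit reports the absolute byte offset of every leaf component, adding
// each struct field's Offset to the accumulated base as it recurses.
func visit(t reflect.Type, base uintptr, f func(off uintptr, k reflect.Kind)) {
	if t.Kind() == reflect.Struct {
		for i := 0; i < t.NumField(); i++ {
			fld := t.Field(i)
			visit(fld.Type, base+fld.Offset, f)
		}
		return
	}
	f(base, t.Kind())
}

func main() {
	type Inner struct{ A, B int32 }
	type Outer struct {
		X int64
		I Inner
	}
	visit(reflect.TypeOf(Outer{}), 0, func(off uintptr, k reflect.Kind) {
		fmt.Printf("offset %d: %v\n", off, k)
	})
	// 64-bit output: offset 0: int64, offset 8: int32, offset 12: int32
}
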
fp := res.Field(0)
- return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Width, s.sp)
+ return s.entryNewValue1I(ssa.OpOffPtr, Ptrto(fp.Type), fp.Offset, s.sp)
}
// etypesign returns the signed-ness of e, for integer/pointer etypes.
i++
continue
}
- if t1.Width != n.Xoffset {
+ if t1.Offset != n.Xoffset {
panic("field offset doesn't match")
}
return i
Sym *Sym
Nname *Node
- Type *Type // field type
- Width int64 // TODO(mdempsky): Rename to offset.
- Note *string // literal string annotation
+ Type *Type // field type
+
+ // Offset in bytes of this field or method within its enclosing struct
+ // or interface Type.
+ Offset int64
+
+ Note *string // literal string annotation
}
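
The new doc comment pins down the semantics: a byte offset within the enclosing struct or interface type, with BADWIDTH (set in newField below) marking an offset not yet computed. The same quantity is what reflect exposes to users; a quick runnable check (64-bit layout assumed):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	type Point struct {
		X, Y int32
		Tag  string
	}
	t := reflect.TypeOf(Point{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%-3s at offset %d\n", f.Name, f.Offset) // X 0, Y 4, Tag 8
	}
}
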
// Fields is a pointer to a slice of *Field.
func newField() *Field {
return &Field{
- Width: BADWIDTH,
+ Offset: BADWIDTH,
}
}
return t.Field(i).Type
}
func (t *Type) FieldOff(i int) int64 {
- return t.Field(i).Width
+ return t.Field(i).Offset
}
func (t *Type) NumElem() int64 {
}
n.Sym = methodsym(n.Sym, t, 0)
- n.Xoffset = f1.Width
+ n.Xoffset = f1.Offset
n.Type = f1.Type
n.Op = ODOTINTER
return true
}
n.Sym = methodsym(n.Sym, t, 0)
- n.Xoffset = f2.Width
+ n.Xoffset = f2.Offset
n.Type = f2.Type
n.Op = ODOTMETH
return true
if f2 != nil {
Yyerror("%v is both field and method", n.Sym)
}
- if f1.Width == BADWIDTH {
+ if f1.Offset == BADWIDTH {
Fatalf("lookdot badwidth %v %p", f1, f1)
}
- n.Xoffset = f1.Width
+ n.Xoffset = f1.Offset
n.Type = f1.Type
if obj.Fieldtrack_enabled > 0 {
dotField[typeSym{t.Orig, s}] = f1
}
n.Sym = methodsym(n.Sym, n.Left.Type, 0)
- n.Xoffset = f2.Width
+ n.Xoffset = f2.Offset
n.Type = f2.Type
// print("lookdot found [%p] %T\n", f2->type, f2->type);
n1 = assignconv(n1, f.Type, "field value")
n1 = Nod(OKEY, newname(f.Sym), n1)
n1.Left.Type = structkey
- n1.Left.Xoffset = f.Width
+ n1.Left.Xoffset = f.Offset
n1.Left.Typecheck = 1
ls[i1] = n1
f = it.Next()
l.Left = newname(s)
l.Left.Type = structkey
- l.Left.Xoffset = f.Width
+ l.Left.Xoffset = f.Offset
l.Left.Typecheck = 1
s = f.Sym
fielddup(newname(s), hash)