import (
"bufio"
"cmd/compile/internal/base"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
types.PtrSize = thearch.LinkArch.PtrSize
types.RegSize = thearch.LinkArch.RegSize
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
typecheck.Init()
os.Exit(m.Run())
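// Illustrative sketch (not part of the patch): types.TypeLinkSym is a hook
// installed by the front end so other parts of the compiler can obtain the
// linker symbol for a type's runtime descriptor without importing the package
// that builds descriptors; after this change it resolves through reflectdata:
//
//	lsym := types.TypeLinkSym(t) // equivalent to reflectdata.TypeSym(t).Linksym()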
var pragcgobuf [][]string
-var zerosize int64
-
// interface to back end
type Arch struct {
"cmd/compile/internal/ir"
"cmd/compile/internal/logopt"
"cmd/compile/internal/noder"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
types.RegSize = thearch.LinkArch.RegSize
types.MaxWidth = thearch.MAXWIDTH
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
typecheck.Target = new(ir.Package)
typecheck.NeedFuncSym = staticdata.NeedFuncSym
- typecheck.NeedITab = func(t, iface *types.Type) { itabname(t, iface) }
- typecheck.NeedRuntimeType = addsignat // TODO(rsc): typenamesym for lock?
+ typecheck.NeedITab = func(t, iface *types.Type) { reflectdata.ITabAddr(t, iface) }
+ typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): typenamesym for lock?
base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
types.TypeLinkSym = func(t *types.Type) *obj.LSym {
- return typenamesym(t).Linksym()
+ return reflectdata.TypeSym(t).Linksym()
}
typecheck.Init()
// the right side of OCONVIFACE so that methods
// can be de-virtualized during compilation.
ir.CurFunc = nil
- peekitabs()
+ reflectdata.CompileITabs()
// Compile top level functions.
// Don't use range--walk can add functions to Target.Decls.
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
dumpglobls(typecheck.Target.Externs)
staticdata.WriteFuncSyms()
- addptabs()
+ reflectdata.CollectPTabs()
numExports := len(typecheck.Target.Exports)
addsignats(typecheck.Target.Externs)
- dumpsignats()
- dumptabs()
- numPTabs, numITabs := CountTabs()
- dumpimportstrings()
- dumpbasictypes()
+ reflectdata.WriteRuntimeTypes()
+ reflectdata.WriteTabs()
+ numPTabs, numITabs := reflectdata.CountTabs()
+ reflectdata.WriteImportStrings()
+ reflectdata.WriteBasicTypes()
dumpembeds()
- // Calls to dumpsignats can generate functions,
+ // Calls to WriteRuntimeTypes can generate functions,
}
numDecls = len(typecheck.Target.Decls)
compileFunctions()
- dumpsignats()
+ reflectdata.WriteRuntimeTypes()
if numDecls == len(typecheck.Target.Decls) {
break
}
// Dump extra globals.
dumpglobls(typecheck.Target.Externs[numExterns:])
- if zerosize > 0 {
+ if reflectdata.ZeroSize > 0 {
zero := ir.Pkgs.Map.Lookup("zero")
- objw.Global(zero.Linksym(), int32(zerosize), obj.DUPOK|obj.RODATA)
+ objw.Global(zero.Linksym(), int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
}
addGCLocals()
if numExports != len(typecheck.Target.Exports) {
base.Fatalf("Target.Exports changed after compile functions loop")
}
- newNumPTabs, newNumITabs := CountTabs()
+ newNumPTabs, newNumITabs := reflectdata.CountTabs()
if newNumPTabs != numPTabs {
base.Fatalf("ptabs changed after compile functions loop")
}
obj.WriteObjFile(base.Ctxt, bout)
}
-func addptabs() {
- if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
- return
- }
- for _, exportn := range typecheck.Target.Exports {
- s := exportn.Sym()
- nn := ir.AsNode(s.Def)
- if nn == nil {
- continue
- }
- if nn.Op() != ir.ONAME {
- continue
- }
- n := nn.(*ir.Name)
- if !types.IsExported(s.Name) {
- continue
- }
- if s.Pkg.Name != "main" {
- continue
- }
- if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC {
- // function
- ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
- } else {
- // variable
- ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
- }
- }
-}
-
func dumpGlobal(n *ir.Name) {
if n.Type() == nil {
base.Fatalf("external %v nil type\n", n)
staticdata.WriteEmbed(v)
}
}
+
+func addsignats(dcls []ir.Node) {
+ // copy types from dcl list to signatset
+ for _, n := range dcls {
+ if n.Op() == ir.OTYPE {
+ reflectdata.NeedRuntimeType(n.Type())
+ }
+ }
+}
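// A rough sketch of the flow this helper feeds (illustrative only): a type
// queued via reflectdata.NeedRuntimeType has its descriptor emitted later,
// when the object writer flushes the queue:
//
//	reflectdata.NeedRuntimeType(t)  // record that t needs a runtime type descriptor
//	...
//	reflectdata.WriteRuntimeTypes() // emit descriptors for everything queued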
"cmd/compile/internal/base"
"cmd/compile/internal/escape"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
// n.Prealloc is the temp for the iterator.
// hiter contains pointers and needs to be zeroed.
- n.Prealloc = o.newTemp(hiter(n.Type()), true)
+ n.Prealloc = o.newTemp(reflectdata.MapIterType(n.Type()), true)
}
o.exprListInPlace(n.Vars)
if orderBody {
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
switch n.Class_ {
case ir.PPARAM, ir.PPARAMOUT, ir.PAUTO:
if liveness.ShouldTrack(n) && n.Addrtaken() {
- dtypesym(n.Type())
+ reflectdata.WriteType(n.Type())
// Also make sure we allocate a linker symbol
// for the stack object data, for the same reason.
if fn.LSym.Func().StackObjects == nil {
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/sys"
fn := typecheck.LookupRuntime("mapiterinit")
fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
- init = append(init, mkcall1(fn, nil, nil, typename(t), ha, typecheck.NodAddr(hit)))
+ init = append(init, mkcall1(fn, nil, nil, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
fn = typecheck.LookupRuntime("mapiternext")
// instantiate mapclear(typ *type, hmap map[any]any)
fn := typecheck.LookupRuntime("mapclear")
fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
- n := mkcall1(fn, nil, nil, typename(t), m)
+ n := mkcall1(fn, nil, nil, reflectdata.TypePtr(t), m)
return walkstmt(typecheck.Stmt(n))
}
import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
var itab *ir.AddrExpr
if typ.IsEmptyInterface() {
- itab = typename(val.Type())
+ itab = reflectdata.TypePtr(val.Type())
} else {
- itab = itabname(val.Type(), typ)
+ itab = reflectdata.ITabAddr(val.Type(), typ)
}
// Create a copy of l to modify while we emit data.
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/ssa"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
_ = types.NewPtr(types.Types[types.TINT64]) // *int64
_ = types.NewPtr(types.ErrorType) // *error
types.NewPtrCacheEnabled = false
- ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, *types_, base.Ctxt, base.Flag.N == 0)
+ ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0)
ssaConfig.SoftFloat = thearch.SoftFloat
ssaConfig.Race = base.Flag.Race
ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
// asm funcs with special ABI
- if thearch.LinkArch.Name == "amd64" {
+ if base.Ctxt.Arch.Name == "amd64" {
GCWriteBarrierReg = map[int16]*obj.LSym{
x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
switch {
- case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && thearch.LinkArch.Name == "386":
+ case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
// Don't support open-coded defers for 386 ONLY when using shared
// libraries, because there is extra code (added by rewriteToUseGot())
// preceding the deferreturn/ret code that is generated by gencallret()
if !types.TypeSym(v.Type()).Siggen() {
e.Fatalf(v.Pos(), "stack object's type symbol not generated for type %s", v.Type())
}
- off = objw.SymPtr(x, off, dtypesym(v.Type()), 0)
+ off = objw.SymPtr(x, off, reflectdata.WriteType(v.Type()), 0)
}
// Emit a funcdata pointing at the stack object data.
}
func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
- return itabsym(it, offset)
+ return reflectdata.ITabSym(it, offset)
}
// SplitSlot returns a slot representing the data of parent starting at offset.
}
return b
}
+
+// deferstruct makes a runtime._defer structure, with additional space for
+// stksize bytes of args.
+func deferstruct(stksize int64) *types.Type {
+ makefield := func(name string, typ *types.Type) *types.Field {
+ // Unlike the global makefield function, this one needs to set Pkg
+ // because these types might be compared (in SSA CSE sorting).
+ // TODO: unify this makefield and the global one above.
+ sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
+ return types.NewField(src.NoXPos, sym, typ)
+ }
+ argtype := types.NewArray(types.Types[types.TUINT8], stksize)
+ argtype.Width = stksize
+ argtype.Align = 1
+ // These fields must match the ones in runtime/runtime2.go:_defer and
+ // cmd/compile/internal/gc/ssa.go:(*state).call.
+ fields := []*types.Field{
+ makefield("siz", types.Types[types.TUINT32]),
+ makefield("started", types.Types[types.TBOOL]),
+ makefield("heap", types.Types[types.TBOOL]),
+ makefield("openDefer", types.Types[types.TBOOL]),
+ makefield("sp", types.Types[types.TUINTPTR]),
+ makefield("pc", types.Types[types.TUINTPTR]),
+ // Note: the types here don't really matter. Defer structures
+ // are always scanned explicitly during stack copying and GC,
+ // so we make them uintptr type even though they are real pointers.
+ makefield("fn", types.Types[types.TUINTPTR]),
+ makefield("_panic", types.Types[types.TUINTPTR]),
+ makefield("link", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("fd", types.Types[types.TUINTPTR]),
+ makefield("args", argtype),
+ }
+
+ // build struct holding the above fields
+ s := types.NewStruct(types.NoPkg, fields)
+ s.SetNoalg(true)
+ types.CalcStructSize(s)
+ return s
+}
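// A minimal usage sketch (assumed caller, not shown in this hunk): the SSA
// backend sizes an on-stack defer record with this type, roughly
//
//	t := deferstruct(stksize)  // runtime._defer layout plus stksize arg bytes
//	d := typecheck.Temp(t)     // hypothetical stack slot holding the record
//
// which is why the field layout above must stay in sync with the runtime.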
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/escape"
- "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/src"
return copyexpr(n, n.Type(), init)
}
-// Generate a wrapper function to convert from
-// a receiver of type T to a receiver of type U.
-// That is,
-//
-// func (t T) M() {
-// ...
-// }
-//
-// already exists; this function generates
-//
-// func (u U) M() {
-// u.M()
-// }
-//
-// where the types T and U are such that u.M() is valid
-// and calls the T.M method.
-// The resulting function is for use in method tables.
-//
-// rcvr - U
-// method - M func (t T)(), a TFIELD type struct
-// newnam - the eventual mangled name of this function
-func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
- if false && base.Flag.LowerR != 0 {
- fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
- }
-
- // Only generate (*T).M wrappers for T.M in T's own package.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
- rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
- return
- }
-
- // Only generate I.M wrappers for I in I's own package
- // but keep doing it for error.Error (was issue #29304).
- if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
- return
- }
-
- base.Pos = base.AutogeneratedPos
- typecheck.DeclContext = ir.PEXTERN
-
- tfn := ir.NewFuncType(base.Pos,
- ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr),
- typecheck.NewFuncParams(method.Type.Params(), true),
- typecheck.NewFuncParams(method.Type.Results(), false))
-
- fn := typecheck.DeclFunc(newnam, tfn)
- fn.SetDupok(true)
-
- nthis := ir.AsNode(tfn.Type().Recv().Nname)
-
- methodrcvr := method.Type.Recv().Type
-
- // generate nil pointer check for better error
- if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
- // generating wrapper from *T to T.
- n := ir.NewIfStmt(base.Pos, nil, nil, nil)
- n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
- call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)
- n.Body = []ir.Node{call}
- fn.Body.Append(n)
- }
-
- dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
-
- // generate call
- // It's not possible to use a tail call when dynamic linking on ppc64le. The
- // bad scenario is when a local call is made to the wrapper: the wrapper will
- // call the implementation, which might be in a different module and so set
- // the TOC to the appropriate value for that module. But if it returns
- // directly to the wrapper's caller, nothing will reset it to the correct
- // value for that function.
- if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(thearch.LinkArch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
- // generate tail call: adjust pointer receiver and jump to embedded method.
- left := dot.X // skip final .M
- if !left.Type().IsPtr() {
- left = typecheck.NodAddr(left)
- }
- as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
- fn.Body.Append(as)
- fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
- } else {
- fn.SetWrapper(true) // ignore frame for panic+recover matching
- call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
- call.Args.Set(ir.ParamNames(tfn.Type()))
- call.IsDDD = tfn.Type().IsVariadic()
- if method.Type.NumResults() > 0 {
- ret := ir.NewReturnStmt(base.Pos, nil)
- ret.Results = []ir.Node{call}
- fn.Body.Append(ret)
- } else {
- fn.Body.Append(call)
- }
- }
-
- if false && base.Flag.LowerR != 0 {
- ir.DumpList("genwrapper body", fn.Body)
- }
-
- typecheck.FinishFuncBody()
- if base.Debug.DclStack != 0 {
- types.CheckDclstack()
- }
-
- typecheck.Func(fn)
- ir.CurFunc = fn
- typecheck.Stmts(fn.Body)
-
- // Inline calls within (*T).M wrappers. This is safe because we only
- // generate those wrappers within the same compilation unit as (T).M.
- // TODO(mdempsky): Investigate why we can't enable this more generally.
- if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
- inline.InlineCalls(fn)
- }
- escape.Batch([]*ir.Func{fn}, false)
-
- ir.CurFunc = nil
- typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
-}
-
-func hashmem(t *types.Type) ir.Node {
- sym := ir.Pkgs.Runtime.Lookup("memhash")
-
- n := typecheck.NewName(sym)
- ir.MarkFunc(n)
- n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
- ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
- ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
- ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
- }, []*ir.Field{
- ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
- }))
- return n
-}
-
func ngotype(n ir.Node) *types.Sym {
if n.Type() != nil {
- return typenamesym(n.Type())
+ return reflectdata.TypeSym(n.Type())
}
return nil
}
"cmd/compile/internal/base"
"cmd/compile/internal/escape"
"cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
"cmd/compile/internal/staticdata"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
n := n.(*ir.TypeAssertExpr)
n.X = walkexpr(n.X, init)
// Set up interface type addresses for back end.
- n.Ntype = typename(n.Type())
+ n.Ntype = reflectdata.TypePtr(n.Type())
if n.Op() == ir.ODOTTYPE {
- n.Ntype.(*ir.AddrExpr).Alloc = typename(n.X.Type())
+ n.Ntype.(*ir.AddrExpr).Alloc = reflectdata.TypePtr(n.X.Type())
}
if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
- n.Itab = []ir.Node{itabname(n.Type(), n.X.Type())}
+ n.Itab = []ir.Node{reflectdata.ITabAddr(n.Type(), n.X.Type())}
}
return n
// Left in place for back end.
// Do not add a new write barrier.
// Set up address of type for back end.
- r.(*ir.CallExpr).X = typename(r.Type().Elem())
+ r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
return as
}
// Otherwise, lowered for race detector.
var call *ir.CallExpr
if w := t.Elem().Width; w <= zeroValSize {
fn := mapfn(mapaccess2[fast], t)
- call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key)
+ call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
} else {
fn := mapfn("mapaccess2_fat", t)
- z := zeroaddr(w)
- call = mkcall1(fn, fn.Type().Results(), init, typename(t), r.X, key, z)
+ z := reflectdata.ZeroAddr(w)
+ call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
}
// mapaccess2* returns a typed bool, but due to spec changes,
// order.stmt made sure key is addressable.
key = typecheck.NodAddr(key)
}
- return mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
+ return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
case ir.OAS2DOTTYPE:
n := n.(*ir.AssignListStmt)
// typeword generates the type word of the interface value.
typeword := func() ir.Node {
if toType.IsEmptyInterface() {
- return typename(fromType)
+ return reflectdata.TypePtr(fromType)
}
- return itabname(fromType, toType)
+ return reflectdata.ITabAddr(fromType, toType)
}
// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
var tab ir.Node
if fromType.IsInterface() {
// convI2I
- tab = typename(toType)
+ tab = reflectdata.TypePtr(toType)
} else {
// convT2x
tab = typeword()
// order.expr made sure key is addressable.
key = typecheck.NodAddr(key)
}
- call = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
+ call = mkcall1(mapfn(mapassign[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
} else {
// m[k] is not the target of an assignment.
fast := mapfast(t)
}
if w := t.Elem().Width; w <= zeroValSize {
- call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, typename(t), map_, key)
+ call = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key)
} else {
- z := zeroaddr(w)
- call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, typename(t), map_, key, z)
+ z := reflectdata.ZeroAddr(w)
+ call = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z)
}
}
call.SetType(types.NewPtr(t.Elem()))
argtype = types.Types[types.TINT]
}
- return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, typename(n.Type()), typecheck.Conv(size, argtype))
+ return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
case ir.OMAKEMAP:
n := n.(*ir.MakeExpr)
t := n.Type()
- hmapType := hmap(t)
+ hmapType := reflectdata.MapType(t)
hint := n.Len
// var h *hmap
// Maximum key and elem size is 128 bytes, larger objects
// are stored with an indirection. So max bucket size is 2048+eps.
if !ir.IsConst(hint, constant.Int) ||
- constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
// In case hint is larger than BUCKETSIZE runtime.makemap
// will allocate the buckets on the heap, see #20184
// h.buckets = b
// }
- nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(BUCKETSIZE)), nil, nil)
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
nif.Likely = true
// var bv bmap
- bv := typecheck.Temp(bmap(t))
+ bv := typecheck.Temp(reflectdata.MapBucketType(t))
nif.Body.Append(ir.NewAssignStmt(base.Pos, bv, nil))
// b = &bv
}
}
- if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(BUCKETSIZE)) {
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
// Handling make(map[any]any) and
// make(map[any]any, hint) where hint <= BUCKETSIZE
// special allows for faster map initialization and
fn := typecheck.LookupRuntime(fnname)
fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
- return mkcall1(fn, n.Type(), init, typename(n.Type()), typecheck.Conv(hint, argtype), h)
+ return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
case ir.OMAKESLICE:
n := n.(*ir.MakeExpr)
m.SetType(t)
fn := typecheck.LookupRuntime(fnname)
- m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
+ m.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
m.Ptr.MarkNonNil()
m.LenCap = []ir.Node{typecheck.Conv(len, types.Types[types.TINT]), typecheck.Conv(cap, types.Types[types.TINT])}
return walkexpr(typecheck.Expr(m), init)
// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
fn := typecheck.LookupRuntime("makeslicecopy")
s := ir.NewSliceHeaderExpr(base.Pos, nil, nil, nil, nil)
- s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, typename(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
+ s.Ptr = mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
s.Ptr.MarkNonNil()
s.LenCap = []ir.Node{length, length}
s.SetType(t)
// markTypeUsedInInterface marks that type t is converted to an interface.
// This information is used in the linker in dead method elimination.
func markTypeUsedInInterface(t *types.Type, from *obj.LSym) {
- tsym := typenamesym(t).Linksym()
+ tsym := reflectdata.TypeSym(t).Linksym()
// Emit a marker relocation. The linker will know the type is converted
// to an interface if "from" is reachable.
r := obj.Addrel(from)
func markUsedIfaceMethod(n *ir.CallExpr) {
dot := n.X.(*ir.SelectorExpr)
ityp := dot.X.Type()
- tsym := typenamesym(ityp).Linksym()
+ tsym := reflectdata.TypeSym(ityp).Linksym()
r := obj.Addrel(ir.CurFunc.LSym)
r.Sym = tsym
// dot.Xoffset is the method index * Widthptr (the offset of code pointer
// in itab).
midx := dot.Offset / int64(types.PtrSize)
- r.Add = ifaceMethodOffset(ityp, midx)
+ r.Add = reflectdata.InterfaceMethodOffset(ityp, midx)
r.Type = objabi.R_USEIFACEMETHOD
}
func callnew(t *types.Type) ir.Node {
types.CalcSize(t)
- n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, typename(t))
+ n := ir.NewUnaryExpr(base.Pos, ir.ONEWOBJ, reflectdata.TypePtr(t))
n.SetType(types.NewPtr(t))
n.SetTypecheck(1)
n.MarkNonNil()
if t.Elem().Width > 128 {
return mapslow
}
- switch algtype(t.Key()) {
+ switch reflectdata.AlgType(t.Key()) {
case types.AMEM32:
if !t.Key().HasPointers() {
return mapfast32
fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))}
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
nodes.Append(nif)
// s = s[:n]
fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
ptr1, len1 := backingArrayPtrLen(cheapexpr(slice, &nodes))
ptr2, len2 := backingArrayPtrLen(l2)
- ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, typename(elemtype), ptr1, len1, ptr2, len2)
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
} else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
// rely on runtime to instrument:
// copy(s[len(l1):], l2)
fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
// s = growslice(T, s, n)
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), typename(elemtype), s, nn))}
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
nodes = append(nodes, nif)
// s = s[:n]
fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
- nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), typename(ns.Type().Elem()), ns,
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
l = append(l, nif)
ptrL, lenL := backingArrayPtrLen(n.X)
n.Y = cheapexpr(n.Y, init)
ptrR, lenR := backingArrayPtrLen(n.Y)
- return mkcall1(fn, n.Type(), init, typename(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
+ return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
}
if runtimecall {
n = typecheck.SubstArgTypes(n, t, t)
return n, true
case types.ASPECIAL:
- sym := typesymprefix(".eq", t)
+ sym := reflectdata.TypeSymPrefix(".eq", t)
n := typecheck.NewName(sym)
ir.MarkFunc(n)
n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
// l.tab != nil && l.tab._type == type(r)
var eqtype ir.Node
tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
- rtyp := typename(r.Type())
+ rtyp := reflectdata.TypePtr(r.Type())
if l.Type().IsEmptyInterface() {
tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
tab.SetTypecheck(1)
func walkcompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
n.Y = cheapexpr(n.Y, init)
n.X = cheapexpr(n.X, init)
- eqtab, eqdata := eqinterface(n.X, n.Y)
+ eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y)
var cmp ir.Node
if n.Op() == ir.OEQ {
cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
// prepare for rewrite below
n.X = cheapexpr(n.X, init)
n.Y = cheapexpr(n.Y, init)
- eqlen, eqmem := eqstring(n.X, n.Y)
+ eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
// quick check of len before full compare for == or !=.
// memequal then tests equality up to length len.
if n.Op() == ir.OEQ {
base.Errorf("tracked field must be exported (upper case)")
}
- sym := tracksym(outer, field)
+ sym := reflectdata.TrackSym(outer, field)
if ir.CurFunc.FieldTrack == nil {
ir.CurFunc.FieldTrack = make(map[*types.Sym]struct{})
}
}
n.X = cheapexpr(n.X, init)
- init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), typename(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
+ init.Append(mkcall("checkptrAlignment", nil, init, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]), reflectdata.TypePtr(elem), typecheck.Conv(count, types.Types[types.TUINTPTR])))
return n
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package reflectdata
import (
+ "fmt"
+ "sort"
+
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
- "fmt"
- "sort"
)
-// IsRegularMemory reports whether t can be compared/hashed as regular memory.
-func IsRegularMemory(t *types.Type) bool {
+// isRegularMemory reports whether t can be compared/hashed as regular memory.
+func isRegularMemory(t *types.Type) bool {
a, _ := types.AlgType(t)
return a == types.AMEM
}
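// For intuition (illustrative example, not from the source): "regular memory"
// means == and hashing can treat the value as a flat byte string, e.g.
//
//	type K struct{ A, B int64 } // AMEM: compared with memequal
//	type S struct{ A string }   // not AMEM: strings compare by contents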
-// EqCanPanic reports whether == on type t could panic (has an interface somewhere).
+// eqCanPanic reports whether == on type t could panic (has an interface somewhere).
// t must be comparable.
-func EqCanPanic(t *types.Type) bool {
+func eqCanPanic(t *types.Type) bool {
switch t.Kind() {
default:
return false
case types.TINTER:
return true
case types.TARRAY:
- return EqCanPanic(t.Elem())
+ return eqCanPanic(t.Elem())
case types.TSTRUCT:
for _, f := range t.FieldSlice() {
- if !f.Sym.IsBlank() && EqCanPanic(f.Type) {
+ if !f.Sym.IsBlank() && eqCanPanic(f.Type) {
return true
}
}
}
}
-// algtype is like algtype1, except it returns the fixed-width AMEMxx variants
+// AlgType is like algtype1, except it returns the fixed-width AMEMxx variants
// instead of the general AMEM kind when possible.
-func algtype(t *types.Type) types.AlgKind {
+func AlgType(t *types.Type) types.AlgKind {
a, _ := types.AlgType(t)
if a == types.AMEM {
switch t.Width {
// the hash of a value of type t.
// Note: the generated function must match runtime.typehash exactly.
func genhash(t *types.Type) *obj.LSym {
- switch algtype(t) {
+ switch AlgType(t) {
default:
// genhash is only called for types that have equality
base.Fatalf("genhash %v", t)
break
}
- closure := typesymprefix(".hashfunc", t).Linksym()
+ closure := TypeSymPrefix(".hashfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
}
}
- sym := typesymprefix(".hash", t)
+ sym := TypeSymPrefix(".hash", t)
if base.Flag.LowerR != 0 {
fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
}
// Hash non-memory fields with appropriate hash function.
- if !IsRegularMemory(f.Type) {
+ if !isRegularMemory(f.Type) {
hashel := hashfor(f.Type)
call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
default:
// Note: the caller of hashfor ensured that this symbol
// exists and has a body by calling genhash for t.
- sym = typesymprefix(".hash", t)
+ sym = TypeSymPrefix(".hash", t)
}
n := typecheck.NewName(sym)
// geneq returns a symbol which is the closure used to compute
// equality for two objects of type t.
func geneq(t *types.Type) *obj.LSym {
- switch algtype(t) {
+ switch AlgType(t) {
case types.ANOEQ:
// The runtime will panic if it tries to compare
// a type with a nil equality function.
break
}
- closure := typesymprefix(".eqfunc", t).Linksym()
+ closure := TypeSymPrefix(".eqfunc", t).Linksym()
if len(closure.P) > 0 { // already generated
return closure
}
- sym := typesymprefix(".eq", t)
+ sym := TypeSymPrefix(".eq", t)
if base.Flag.LowerR != 0 {
fmt.Printf("geneq %v\n", t)
}
// TODO: when the array size is small, unroll the length match checks.
checkAll(3, false, func(pi, qi ir.Node) ir.Node {
// Compare lengths.
- eqlen, _ := eqstring(pi, qi)
+ eqlen, _ := EqString(pi, qi)
return eqlen
})
checkAll(1, true, func(pi, qi ir.Node) ir.Node {
// Compare contents.
- _, eqmem := eqstring(pi, qi)
+ _, eqmem := EqString(pi, qi)
return eqmem
})
case types.TFLOAT32, types.TFLOAT64:
}
// Compare non-memory fields with field equality.
- if !IsRegularMemory(f.Type) {
- if EqCanPanic(f.Type) {
+ if !isRegularMemory(f.Type) {
+ if eqCanPanic(f.Type) {
// Enforce ordering by starting a new set of reorderable conditions.
conds = append(conds, []ir.Node{})
}
q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
switch {
case f.Type.IsString():
- eqlen, eqmem := eqstring(p, q)
+ eqlen, eqmem := EqString(p, q)
and(eqlen)
and(eqmem)
default:
and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
}
- if EqCanPanic(f.Type) {
+ if eqCanPanic(f.Type) {
// Also enforce ordering after something that can panic.
conds = append(conds, []ir.Node{})
}
// return (or goto ret)
fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false)))
- if EqCanPanic(t) || anyCall(fn) {
+ if eqCanPanic(t) || anyCall(fn) {
// Epilogue is large, so share it with the equal case.
fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
} else {
return ne
}
-// eqstring returns the nodes
+// EqString returns the nodes
// len(s) == len(t)
// and
// memequal(s.ptr, t.ptr, len(s))
// which can be used to construct string equality comparison.
// eqlen must be evaluated before eqmem, and shortcircuiting is required.
-func eqstring(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
+func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
s = typecheck.Conv(s, types.Types[types.TSTRING])
t = typecheck.Conv(t, types.Types[types.TSTRING])
sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
return cmp, call
}
-// eqinterface returns the nodes
+// EqInterface returns the nodes
// s.tab == t.tab (or s.typ == t.typ, as appropriate)
// and
// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
// which can be used to construct interface equality comparison.
// eqtab must be evaluated before eqdata, and shortcircuiting is required.
-func eqinterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
+func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
if !types.Identical(s.Type(), t.Type()) {
base.Fatalf("eqinterface %v %v", s.Type(), t.Type())
}
break
}
// Also, stop before a blank or non-memory field.
- if f := t.Field(next); f.Sym.IsBlank() || !IsRegularMemory(f.Type) {
+ if f := t.Field(next); f.Sym.IsBlank() || !isRegularMemory(f.Type) {
break
}
}
return t.Field(next-1).End() - t.Field(start).Offset, next
}
+
+func hashmem(t *types.Type) ir.Node {
+ sym := ir.Pkgs.Runtime.Lookup("memhash")
+
+ n := typecheck.NewName(sym)
+ ir.MarkFunc(n)
+ n.SetType(typecheck.NewFuncType(nil, []*ir.Field{
+ ir.NewField(base.Pos, nil, nil, types.NewPtr(t)),
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+ }, []*ir.Field{
+ ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR]),
+ }))
+ return n
+}
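// For reference (an assumption about the runtime side, not stated in this
// hunk): the function type built above mirrors runtime.memhash,
//
//	func memhash(p unsafe.Pointer, h, size uintptr) uintptr
//
// with the pointer parameter specialized to *t so call sites type-check.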
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package gc
+package reflectdata
import (
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
"cmd/compile/internal/base"
"cmd/compile/internal/bitvec"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/liveness"
"cmd/compile/internal/objw"
"cmd/internal/obj"
"cmd/internal/objabi"
"cmd/internal/src"
- "fmt"
- "os"
- "sort"
- "strings"
- "sync"
)
type itabEntry struct {
ptabs []ptabEntry
)
-type Sig struct {
+type typeSig struct {
name *types.Sym
isym *types.Sym
tsym *types.Sym
return types.NewField(src.NoXPos, sym, t)
}
-// bmap makes the map bucket type given the type of the map.
-func bmap(t *types.Type) *types.Type {
+// MapBucketType makes the map bucket type given the type of the map.
+func MapBucketType(t *types.Type) *types.Type {
if t.MapType().Bucket != nil {
return t.MapType().Bucket
}
return bucket
}
-// hmap builds a type representing a Hmap structure for the given map type.
+// MapType builds a type representing a Hmap structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
-func hmap(t *types.Type) *types.Type {
+func MapType(t *types.Type) *types.Type {
if t.MapType().Hmap != nil {
return t.MapType().Hmap
}
- bmap := bmap(t)
+ bmap := MapBucketType(t)
// build a struct:
// type hmap struct {
return hmap
}
-// hiter builds a type representing an Hiter structure for the given map type.
+// MapIterType builds a type representing an Hiter structure for the given map type.
// Make sure this stays in sync with runtime/map.go.
-func hiter(t *types.Type) *types.Type {
+func MapIterType(t *types.Type) *types.Type {
if t.MapType().Hiter != nil {
return t.MapType().Hiter
}
- hmap := hmap(t)
- bmap := bmap(t)
+ hmap := MapType(t)
+ bmap := MapBucketType(t)
// build a struct:
// type hiter struct {
return hiter
}
-// deferstruct makes a runtime._defer structure, with additional space for
-// stksize bytes of args.
-func deferstruct(stksize int64) *types.Type {
- makefield := func(name string, typ *types.Type) *types.Field {
- // Unlike the global makefield function, this one needs to set Pkg
- // because these types might be compared (in SSA CSE sorting).
- // TODO: unify this makefield and the global one above.
- sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
- return types.NewField(src.NoXPos, sym, typ)
- }
- argtype := types.NewArray(types.Types[types.TUINT8], stksize)
- argtype.Width = stksize
- argtype.Align = 1
- // These fields must match the ones in runtime/runtime2.go:_defer and
- // cmd/compile/internal/gc/ssa.go:(*state).call.
- fields := []*types.Field{
- makefield("siz", types.Types[types.TUINT32]),
- makefield("started", types.Types[types.TBOOL]),
- makefield("heap", types.Types[types.TBOOL]),
- makefield("openDefer", types.Types[types.TBOOL]),
- makefield("sp", types.Types[types.TUINTPTR]),
- makefield("pc", types.Types[types.TUINTPTR]),
- // Note: the types here don't really matter. Defer structures
- // are always scanned explicitly during stack copying and GC,
- // so we make them uintptr type even though they are real pointers.
- makefield("fn", types.Types[types.TUINTPTR]),
- makefield("_panic", types.Types[types.TUINTPTR]),
- makefield("link", types.Types[types.TUINTPTR]),
- makefield("framepc", types.Types[types.TUINTPTR]),
- makefield("varp", types.Types[types.TUINTPTR]),
- makefield("fd", types.Types[types.TUINTPTR]),
- makefield("args", argtype),
- }
-
- // build struct holding the above fields
- s := types.NewStruct(types.NoPkg, fields)
- s.SetNoalg(true)
- types.CalcStructSize(s)
- return s
-}
-
// methods returns the methods of the non-interface type t, sorted by name.
// Generates stub functions as needed.
-func methods(t *types.Type) []*Sig {
+func methods(t *types.Type) []*typeSig {
// method type
mt := types.ReceiverBaseType(t)
// make list of methods for t,
// generating code if necessary.
- var ms []*Sig
+ var ms []*typeSig
for _, f := range mt.AllMethods().Slice() {
if !f.IsMethod() {
base.Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
continue
}
- sig := &Sig{
+ sig := &typeSig{
name: method,
isym: ir.MethodSym(it, method),
tsym: ir.MethodSym(t, method),
}
// imethods returns the methods of the interface type t, sorted by name.
-func imethods(t *types.Type) []*Sig {
- var methods []*Sig
+func imethods(t *types.Type) []*typeSig {
+ var methods []*typeSig
for _, f := range t.Fields().Slice() {
if f.Type.Kind() != types.TFUNC || f.Sym == nil {
continue
}
}
- sig := &Sig{
+ sig := &typeSig{
name: f.Sym,
mtype: f.Type,
type_: typecheck.NewMethodType(f.Type, nil),
}
for _, a := range m {
- dtypesym(a.type_)
+ WriteType(a.type_)
}
ot = dgopkgpathOff(lsym, ot, typePkg(t))
nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym)
- ot = dmethodptrOff(lsym, ot, dtypesym(a.mtype))
+ ot = dmethodptrOff(lsym, ot, WriteType(a.mtype))
ot = dmethodptrOff(lsym, ot, a.isym.Linksym())
ot = dmethodptrOff(lsym, ot, a.tsym.Linksym())
}
if t.Sym() != nil || methods(tptr) != nil {
sptrWeak = false
}
- sptr = dtypesym(tptr)
+ sptr = WriteType(tptr)
}
gcsym, useGCProg, ptrdata := dgcsym(t)
if t.Sym() != nil && t.Sym().Name != "" {
tflag |= tflagNamed
}
- if IsRegularMemory(t) {
+ if isRegularMemory(t) {
tflag |= tflagRegularMemory
}
return ot
}
-// tracksym returns the symbol for tracking use of field/method f, assumed
+// TrackSym returns the symbol for tracking use of field/method f, assumed
// to be a member of struct/interface type t.
-func tracksym(t *types.Type, f *types.Field) *types.Sym {
+func TrackSym(t *types.Type, f *types.Field) *types.Sym {
return ir.Pkgs.Track.Lookup(t.ShortString() + "." + f.Sym.Name)
}
-func typesymprefix(prefix string, t *types.Type) *types.Sym {
+// TypeSymPrefix returns the symbol for the type-derived helper prefix+"."+t,
+// e.g. the generated ".eq" or ".hash" function for t.
+func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
p := prefix + "." + t.ShortString()
s := types.TypeSymLookup(p)
// This function is for looking up type-related generated functions
// (e.g. eq and hash). Make sure they are indeed generated.
signatmu.Lock()
- addsignat(t)
+ NeedRuntimeType(t)
signatmu.Unlock()
//print("algsym: %s -> %+S\n", p, s);
return s
}
-func typenamesym(t *types.Type) *types.Sym {
+// TypeSym returns the symbol naming t's runtime type descriptor, and queues
+// the descriptor itself for emission.
+func TypeSym(t *types.Type) *types.Sym {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
base.Fatalf("typenamesym %v", t)
}
s := types.TypeSym(t)
signatmu.Lock()
- addsignat(t)
+ NeedRuntimeType(t)
signatmu.Unlock()
return s
}
-func typename(t *types.Type) *ir.AddrExpr {
- s := typenamesym(t)
+// TypePtr returns an expression that evaluates to the address of t's runtime
+// type descriptor.
+func TypePtr(t *types.Type) *ir.AddrExpr {
+ s := TypeSym(t)
if s.Def == nil {
n := ir.NewNameAt(src.NoXPos, s)
n.SetType(types.Types[types.TUINT8])
return n
}
-func itabname(t, itype *types.Type) *ir.AddrExpr {
+// ITabAddr returns an expression that evaluates to the address of the itab
+// for converting a value of type t to the interface type itype.
+func ITabAddr(t, itype *types.Type) *ir.AddrExpr {
if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() || !itype.IsInterface() || itype.IsEmptyInterface() {
base.Fatalf("itabname(%v, %v)", t, itype)
}
return t
}
-func dtypesym(t *types.Type) *obj.LSym {
+// WriteType ensures the runtime type descriptor for t has been written out
+// and returns its linker symbol.
+func WriteType(t *types.Type) *obj.LSym {
t = formalType(t)
if t.IsUntyped() {
base.Fatalf("dtypesym %v", t)
case types.TARRAY:
// ../../../../runtime/type.go:/arrayType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
t2 := types.NewSlice(t.Elem())
- s2 := dtypesym(t2)
+ s2 := WriteType(t2)
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.SymPtr(lsym, ot, s2, 0)
case types.TSLICE:
// ../../../../runtime/type.go:/sliceType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = dextratype(lsym, ot, t, 0)
case types.TCHAN:
// ../../../../runtime/type.go:/chanType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
case types.TFUNC:
for _, t1 := range t.Recvs().Fields().Slice() {
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
isddd := false
for _, t1 := range t.Params().Fields().Slice() {
isddd = t1.IsDDD()
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
for _, t1 := range t.Results().Fields().Slice() {
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
ot = dcommontype(lsym, t)
// Array of rtype pointers follows funcType.
for _, t1 := range t.Recvs().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
}
for _, t1 := range t.Params().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
}
for _, t1 := range t.Results().Fields().Slice() {
- ot = objw.SymPtr(lsym, ot, dtypesym(t1.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(t1.Type), 0)
}
case types.TINTER:
m := imethods(t)
n := len(m)
for _, a := range m {
- dtypesym(a.type_)
+ WriteType(a.type_)
}
// ../../../../runtime/type.go:/interfaceType
nsym := dname(a.name.Name, "", pkg, exported)
ot = objw.SymPtrOff(lsym, ot, nsym)
- ot = objw.SymPtrOff(lsym, ot, dtypesym(a.type_))
+ ot = objw.SymPtrOff(lsym, ot, WriteType(a.type_))
}
// ../../../../runtime/type.go:/mapType
case types.TMAP:
- s1 := dtypesym(t.Key())
- s2 := dtypesym(t.Elem())
- s3 := dtypesym(bmap(t))
+ s1 := WriteType(t.Key())
+ s2 := WriteType(t.Elem())
+ s3 := WriteType(MapBucketType(t))
hasher := genhash(t.Key())
ot = dcommontype(lsym, t)
} else {
ot = objw.Uint8(lsym, ot, uint8(t.Elem().Width))
}
- ot = objw.Uint16(lsym, ot, uint16(bmap(t).Width))
+ ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Width))
if types.IsReflexive(t.Key()) {
flags |= 4 // reflexive key
}
}
// ../../../../runtime/type.go:/ptrType
- s1 := dtypesym(t.Elem())
+ s1 := WriteType(t.Elem())
ot = dcommontype(lsym, t)
ot = objw.SymPtr(lsym, ot, s1, 0)
case types.TSTRUCT:
fields := t.Fields().Slice()
for _, t1 := range fields {
- dtypesym(t1.Type)
+ WriteType(t1.Type)
}
// All non-exported struct field names within a struct
for _, f := range fields {
// ../../../../runtime/type.go:/structField
ot = dnameField(lsym, ot, spkg, f)
- ot = objw.SymPtr(lsym, ot, dtypesym(f.Type), 0)
+ ot = objw.SymPtr(lsym, ot, WriteType(f.Type), 0)
offsetAnon := uint64(f.Offset) << 1
if offsetAnon>>1 != uint64(f.Offset) {
base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
return lsym
}
-// ifaceMethodOffset returns the offset of the i-th method in the interface
+// InterfaceMethodOffset returns the offset of the i-th method in the interface
// type descriptor, ityp.
-func ifaceMethodOffset(ityp *types.Type, i int64) int64 {
+func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
// interface type descriptor layout is struct {
// _type // commonSize
// pkgpath // 1 word
// for each itabEntry, gather the methods on
// the concrete type that implement the interface
-func peekitabs() {
+func CompileITabs() {
for i := range itabs {
tab := &itabs[i]
methods := genfun(tab.t, tab.itype)
return out
}
-// itabsym uses the information gathered in
+// ITabSym uses the information gathered in
-// peekitabs to de-virtualize interface methods.
+// CompileITabs to de-virtualize interface methods.
// Since this is called by the SSA backend, it shouldn't
// generate additional Nodes, Syms, etc.
-func itabsym(it *obj.LSym, offset int64) *obj.LSym {
+func ITabSym(it *obj.LSym, offset int64) *obj.LSym {
var syms []*obj.LSym
if it == nil {
return nil
return syms[methodnum]
}
-// addsignat ensures that a runtime type descriptor is emitted for t.
-func addsignat(t *types.Type) {
+// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
+func NeedRuntimeType(t *types.Type) {
if _, ok := signatset[t]; !ok {
signatset[t] = struct{}{}
signatslice = append(signatslice, t)
}
}
-func addsignats(dcls []ir.Node) {
- // copy types from dcl list to signatset
- for _, n := range dcls {
- if n.Op() == ir.OTYPE {
- addsignat(n.Type())
- }
- }
-}
-
-func dumpsignats() {
+func WriteRuntimeTypes() {
-// Process signatset. Use a loop, as dtypesym adds
+// Process signatset. Use a loop, as WriteType adds
// entries to signatset while it is being processed.
signats := make([]typeAndStr, len(signatslice))
sort.Sort(typesByString(signats))
for _, ts := range signats {
t := ts.t
- dtypesym(t)
+ WriteType(t)
if t.Sym() != nil {
- dtypesym(types.NewPtr(t))
+ WriteType(types.NewPtr(t))
}
}
}
}
-func dumptabs() {
+func WriteTabs() {
// process itabs
for _, i := range itabs {
// dump empty itab symbol into i.sym
// _ [4]byte
// fun [1]uintptr // variable sized
// }
- o := objw.SymPtr(i.lsym, 0, dtypesym(i.itype), 0)
- o = objw.SymPtr(i.lsym, o, dtypesym(i.t), 0)
+ o := objw.SymPtr(i.lsym, 0, WriteType(i.itype), 0)
+ o = objw.SymPtr(i.lsym, o, WriteType(i.t), 0)
o = objw.Uint32(i.lsym, o, types.TypeHash(i.t)) // copy of type hash
o += 4 // skip unused field
for _, fn := range genfun(i.t, i.itype) {
// typ typeOff // pointer to symbol
// }
nsym := dname(p.s.Name, "", nil, true)
- tsym := dtypesym(p.t)
+ tsym := WriteType(p.t)
ot = objw.SymPtrOff(s, ot, nsym)
ot = objw.SymPtrOff(s, ot, tsym)
// Plugin exports symbols as interfaces. Mark their types
}
}
-func dumpimportstrings() {
+func WriteImportStrings() {
// generate import strings for imported packages
for _, p := range types.ImportedPkgList() {
dimportpath(p)
}
}
-func dumpbasictypes() {
+func WriteBasicTypes() {
// do basic types if compiling package runtime.
// they have to be in at least one package,
// and runtime is always loaded implicitly,
// but using runtime means fewer copies in object files.
if base.Ctxt.Pkgpath == "runtime" {
for i := types.Kind(1); i <= types.TBOOL; i++ {
- dtypesym(types.NewPtr(types.Types[i]))
+ WriteType(types.NewPtr(types.Types[i]))
}
- dtypesym(types.NewPtr(types.Types[types.TSTRING]))
- dtypesym(types.NewPtr(types.Types[types.TUNSAFEPTR]))
+ WriteType(types.NewPtr(types.Types[types.TSTRING]))
+ WriteType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
- dtypesym(types.NewPtr(types.ErrorType))
+ WriteType(types.NewPtr(types.ErrorType))
- dtypesym(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])}))
+ WriteType(typecheck.NewFuncType(nil, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.ErrorType)}, []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TSTRING])}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(ir.Pkgs.Runtime)
if t.Width == types.BADWIDTH {
base.Fatalf("dgcprog: %v badwidth", t)
}
- lsym := typesymprefix(".gcprog", t).Linksym()
- var p GCProg
+ lsym := TypeSymPrefix(".gcprog", t).Linksym()
+ var p gcProg
p.init(lsym)
p.emit(t, 0)
offset := p.w.BitIndex() * int64(types.PtrSize)
return lsym, offset
}
-type GCProg struct {
+type gcProg struct {
lsym *obj.LSym
symoff int
w gcprog.Writer
}
-func (p *GCProg) init(lsym *obj.LSym) {
+func (p *gcProg) init(lsym *obj.LSym) {
p.lsym = lsym
p.symoff = 4 // first 4 bytes hold program length
p.w.Init(p.writeByte)
}
}
-func (p *GCProg) writeByte(x byte) {
+func (p *gcProg) writeByte(x byte) {
p.symoff = objw.Uint8(p.lsym, p.symoff, x)
}
-func (p *GCProg) end() {
+func (p *gcProg) end() {
p.w.End()
objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
}
}
-func (p *GCProg) emit(t *types.Type, offset int64) {
+func (p *gcProg) emit(t *types.Type, offset int64) {
types.CalcSize(t)
if !t.HasPointers() {
return
}
}
-// zeroaddr returns the address of a symbol with at least
+// ZeroAddr returns the address of a symbol with at least
// size bytes of zeros.
-func zeroaddr(size int64) ir.Node {
+func ZeroAddr(size int64) ir.Node {
if size >= 1<<31 {
base.Fatalf("map elem too big %d", size)
}
- if zerosize < size {
- zerosize = size
+ if ZeroSize < size {
+ ZeroSize = size
}
s := ir.Pkgs.Map.Lookup("zero")
if s.Def == nil {
z.SetTypecheck(1)
return z
}
+
+// CollectPTabs records the exported functions and variables of a dynamically
+// linked main package as ptab entries, for WriteTabs to emit.
+func CollectPTabs() {
+ if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
+ return
+ }
+ for _, exportn := range typecheck.Target.Exports {
+ s := exportn.Sym()
+ nn := ir.AsNode(s.Def)
+ if nn == nil {
+ continue
+ }
+ if nn.Op() != ir.ONAME {
+ continue
+ }
+ n := nn.(*ir.Name)
+ if !types.IsExported(s.Name) {
+ continue
+ }
+ if s.Pkg.Name != "main" {
+ continue
+ }
+ if n.Type().Kind() == types.TFUNC && n.Class_ == ir.PFUNC {
+ // function
+ ptabs = append(ptabs, ptabEntry{s: s, t: s.Def.Type()})
+ } else {
+ // variable
+ ptabs = append(ptabs, ptabEntry{s: s, t: types.NewPtr(s.Def.Type())})
+ }
+ }
+}
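// A minimal usage sketch (illustrative; the caller lives in the gc object
// writer, not in this hunk):
//
//	reflectdata.CollectPTabs() // gather exports of a dynlinked main package
//	...
//	reflectdata.WriteTabs()    // emit one name/type offset pair per entry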
+
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+// func (t T) M() {
+// ...
+// }
+//
+// already exists; this function generates
+//
+// func (u U) M() {
+// u.M()
+// }
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+// rcvr - U
+// method - M func (t T)(), a TFIELD type struct
+// newnam - the eventual mangled name of this function
+func genwrapper(rcvr *types.Type, method *types.Field, newnam *types.Sym) {
+ if false && base.Flag.LowerR != 0 {
+ fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
+ }
+
+ // Only generate (*T).M wrappers for T.M in T's own package.
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type &&
+ rcvr.Elem().Sym() != nil && rcvr.Elem().Sym().Pkg != types.LocalPkg {
+ return
+ }
+
+ // Only generate I.M wrappers for I in I's own package
+ // but keep doing it for error.Error (was issue #29304).
+ if rcvr.IsInterface() && rcvr.Sym() != nil && rcvr.Sym().Pkg != types.LocalPkg && rcvr != types.ErrorType {
+ return
+ }
+
+ base.Pos = base.AutogeneratedPos
+ typecheck.DeclContext = ir.PEXTERN
+
+ tfn := ir.NewFuncType(base.Pos,
+ ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr),
+ typecheck.NewFuncParams(method.Type.Params(), true),
+ typecheck.NewFuncParams(method.Type.Results(), false))
+
+ fn := typecheck.DeclFunc(newnam, tfn)
+ fn.SetDupok(true)
+
+ nthis := ir.AsNode(tfn.Type().Recv().Nname)
+
+ methodrcvr := method.Type.Recv().Type
+
+ // generate nil pointer check for better error
+ if rcvr.IsPtr() && rcvr.Elem() == methodrcvr {
+ // generating wrapper from *T to T.
+ n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)
+ n.Body = []ir.Node{call}
+ fn.Body.Append(n)
+ }
+
+ dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
+
+ // generate call
+ // It's not possible to use a tail call when dynamic linking on ppc64le. The
+ // bad scenario is when a local call is made to the wrapper: the wrapper will
+ // call the implementation, which might be in a different module and so set
+ // the TOC to the appropriate value for that module. But if it returns
+ // directly to the wrapper's caller, nothing will reset it to the correct
+ // value for that function.
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
+ // generate tail call: adjust pointer receiver and jump to embedded method.
+ left := dot.X // skip final .M
+ if !left.Type().IsPtr() {
+ left = typecheck.NodAddr(left)
+ }
+ as := ir.NewAssignStmt(base.Pos, nthis, typecheck.ConvNop(left, rcvr))
+ fn.Body.Append(as)
+ fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.ORETJMP, ir.MethodSym(methodrcvr, method.Sym)))
+ } else {
+ fn.SetWrapper(true) // ignore frame for panic+recover matching
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args.Set(ir.ParamNames(tfn.Type()))
+ call.IsDDD = tfn.Type().IsVariadic()
+ if method.Type.NumResults() > 0 {
+ ret := ir.NewReturnStmt(base.Pos, nil)
+ ret.Results = []ir.Node{call}
+ fn.Body.Append(ret)
+ } else {
+ fn.Body.Append(call)
+ }
+ }
+
+ if false && base.Flag.LowerR != 0 {
+ ir.DumpList("genwrapper body", fn.Body)
+ }
+
+ typecheck.FinishFuncBody()
+ if base.Debug.DclStack != 0 {
+ types.CheckDclstack()
+ }
+
+ typecheck.Func(fn)
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+
+ // Inline calls within (*T).M wrappers. This is safe because we only
+ // generate those wrappers within the same compilation unit as (T).M.
+ // TODO(mdempsky): Investigate why we can't enable this more generally.
+ if rcvr.IsPtr() && rcvr.Elem() == method.Type.Recv().Type && rcvr.Elem().Sym() != nil {
+ inline.InlineCalls(fn)
+ }
+ escape.Batch([]*ir.Func{fn}, false)
+
+ ir.CurFunc = nil
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+}
+
+// ZeroSize is the size of the largest zero-valued region requested so far via
+// ZeroAddr; the object writer emits a shared, read-only zero symbol at least
+// this large.
+var ZeroSize int64