import (
"cmd/compile/internal/types"
+ "cmd/internal/obj"
"fmt"
)
return 0, nil
}
-// Generate a helper function to compute the hash of a value of type t.
-func genhash(sym *types.Sym, t *types.Type) {
+// genhash returns a symbol which is the closure used to compute
+// the hash of a value of type t.
+func genhash(t *types.Type) *obj.LSym {
+ switch algtype(t) {
+ default:
+ // genhash is only called for types that have equality
+ Fatalf("genhash %v", t)
+ case AMEM0:
+ return sysClosure("memhash0")
+ case AMEM8:
+ return sysClosure("memhash8")
+ case AMEM16:
+ return sysClosure("memhash16")
+ case AMEM32:
+ return sysClosure("memhash32")
+ case AMEM64:
+ return sysClosure("memhash64")
+ case AMEM128:
+ return sysClosure("memhash128")
+ case ASTRING:
+ return sysClosure("strhash")
+ case AINTER:
+ return sysClosure("interhash")
+ case ANILINTER:
+ return sysClosure("nilinterhash")
+ case AFLOAT32:
+ return sysClosure("f32hash")
+ case AFLOAT64:
+ return sysClosure("f64hash")
+ case ACPLX64:
+ return sysClosure("c64hash")
+ case ACPLX128:
+ return sysClosure("c128hash")
+ case AMEM:
+ // For other sizes of plain memory, we build a closure
+ // that calls memhash_varlen. The size of the memory is
+ // encoded in the first slot of the closure.
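+ // Layout of the generated closure:
+ //   offset 0:        pointer to memhash_varlen
+ //   offset Widthptr: the memory size, as a uintptr
+ // The closure symbol is keyed by size (.hashfunc<size>), so all
+ // AMEM types of the same width share one closure.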
+ closure := typeLookup(fmt.Sprintf(".hashfunc%d", t.Width)).Linksym()
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ if memhashvarlen == nil {
+ memhashvarlen = sysfunc("memhash_varlen")
+ }
+ ot := 0
+ ot = dsymptr(closure, ot, memhashvarlen, 0)
+ ot = duintptr(closure, ot, uint64(t.Width)) // size encoded in closure
+ ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case ASPECIAL:
+ break
+ }
+
+ closure := typesymprefix(".hashfunc", t).Linksym()
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+
+ // Generate hash functions for subtypes.
+ // There are cases where we might not use these hashes,
+ // but in that case they will get dead-code eliminated.
+ // (And the closure generated by genhash will also get
+ // dead-code eliminated, as we call the subtype hashers
+ // directly.)
+ switch t.Etype {
+ case types.TARRAY:
+ genhash(t.Elem())
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ genhash(f.Type)
+ }
+ }
+
+ sym := typesymprefix(".hash", t)
if Debug['r'] != 0 {
- fmt.Printf("genhash %v %v\n", sym, t)
+ fmt.Printf("genhash %v %v %v\n", closure, sym, t)
}
lineno = autogeneratedPos // less confusing than end of input
np := asNode(tfn.Type.Params().Field(0).Nname)
nh := asNode(tfn.Type.Params().Field(1).Nname)
- // genhash is only called for types that have equality but
- // cannot be handled by the standard algorithms,
- // so t must be either an array or a struct.
switch t.Etype {
- default:
- Fatalf("genhash %v", t)
-
case types.TARRAY:
// An array of pure memory would be handled by the
// standard algorithm, so the element type must not be
fn.Func.SetNilCheckDisabled(true)
funccompile(fn)
+
+ // Build closure. It doesn't close over any variables, so
+ // it contains just the function pointer.
+ dsymptr(closure, 0, sym.Linksym(), 0)
+ ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
+
+ return closure
}
func hashfor(t *types.Type) *Node {
case ACPLX128:
sym = Runtimepkg.Lookup("c128hash")
default:
+ // Note: the caller of hashfor ensured that this symbol
+ // exists and has a body by calling genhash for t.
sym = typesymprefix(".hash", t)
}
return n
}
-// geneq generates a helper function to
-// check equality of two values of type t.
-func geneq(sym *types.Sym, t *types.Type) {
+// sysClosure returns a closure which will call the
+// given runtime function (with no closed-over variables).
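+// For example, sysClosure("strhash") returns the single-word closure
+// strhash·f, whose only slot points at runtime.strhash.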
+func sysClosure(name string) *obj.LSym {
+ s := sysvar(name + "·f")
+ if len(s.P) == 0 { // not yet generated
+ f := sysfunc(name)
+ dsymptr(s, 0, f, 0)
+ ggloblsym(s, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ }
+ return s
+}
+
+// geneq returns a symbol which is the closure used to compute
+// equality for two objects of type t.
+func geneq(t *types.Type) *obj.LSym {
+ switch algtype(t) {
+ case ANOEQ:
+ // The runtime will panic if it tries to compare
+ // a type with a nil equality function.
+ return nil
+ case AMEM0:
+ return sysClosure("memequal0")
+ case AMEM8:
+ return sysClosure("memequal8")
+ case AMEM16:
+ return sysClosure("memequal16")
+ case AMEM32:
+ return sysClosure("memequal32")
+ case AMEM64:
+ return sysClosure("memequal64")
+ case AMEM128:
+ return sysClosure("memequal128")
+ case ASTRING:
+ return sysClosure("strequal")
+ case AINTER:
+ return sysClosure("interequal")
+ case ANILINTER:
+ return sysClosure("nilinterequal")
+ case AFLOAT32:
+ return sysClosure("f32equal")
+ case AFLOAT64:
+ return sysClosure("f64equal")
+ case ACPLX64:
+ return sysClosure("c64equal")
+ case ACPLX128:
+ return sysClosure("c128equal")
+ case AMEM:
+ // Make an equality closure that calls memequal_varlen.
+ // The size of the memory to compare is encoded in the
+ // closure, after the function pointer.
+ closure := typeLookup(fmt.Sprintf(".eqfunc%d", t.Width)).Linksym()
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ if memequalvarlen == nil {
+ memequalvarlen = sysvar("memequal_varlen") // asm func
+ }
+ ot := 0
+ ot = dsymptr(closure, ot, memequalvarlen, 0)
+ ot = duintptr(closure, ot, uint64(t.Width))
+ ggloblsym(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case ASPECIAL:
+ break
+ }
+
+ closure := typesymprefix(".eqfunc", t).Linksym()
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ sym := typesymprefix(".eq", t)
if Debug['r'] != 0 {
- fmt.Printf("geneq %v %v\n", sym, t)
+ fmt.Printf("geneq %v\n", t)
}
+ // Autogenerate code for equality of structs and arrays.
+
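+ // The generated function has, in effect, the signature
+ //   func eq(p, q *T) bool
+ // (np and nq below are its two parameters); the closure built
+ // at the end of this function points at it.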
lineno = autogeneratedPos // less confusing than end of input
dclcontext = PEXTERN
np := asNode(tfn.Type.Params().Field(0).Nname)
nq := asNode(tfn.Type.Params().Field(1).Nname)
- // geneq is only called for types that have equality but
+ // We reach here only for types that have equality but
// cannot be handled by the standard algorithms,
// so t must be either an array or a struct.
switch t.Etype {
// are shallow.
fn.Func.SetNilCheckDisabled(true)
funccompile(fn)
+
+ // Generate a closure which points at the function we just generated.
+ dsymptr(closure, 0, sym.Linksym(), 0)
+ ggloblsym(closure, int32(Widthptr), obj.DUPOK|obj.RODATA)
+ return closure
}
// eqfield returns the node
{"memclrNoHeapPointers", funcTag, 103},
{"memclrHasPointers", funcTag, 103},
{"memequal", funcTag, 104},
+ {"memequal0", funcTag, 105},
{"memequal8", funcTag, 105},
{"memequal16", funcTag, 105},
{"memequal32", funcTag, 105},
{"memequal64", funcTag, 105},
{"memequal128", funcTag, 105},
- {"int64div", funcTag, 106},
- {"uint64div", funcTag, 107},
- {"int64mod", funcTag, 106},
- {"uint64mod", funcTag, 107},
- {"float64toint64", funcTag, 108},
- {"float64touint64", funcTag, 109},
- {"float64touint32", funcTag, 110},
- {"int64tofloat64", funcTag, 111},
- {"uint64tofloat64", funcTag, 112},
- {"uint32tofloat64", funcTag, 113},
- {"complex128div", funcTag, 114},
- {"racefuncenter", funcTag, 115},
+ {"f32equal", funcTag, 106},
+ {"f64equal", funcTag, 106},
+ {"c64equal", funcTag, 106},
+ {"c128equal", funcTag, 106},
+ {"strequal", funcTag, 106},
+ {"interequal", funcTag, 106},
+ {"nilinterequal", funcTag, 106},
+ {"memhash", funcTag, 107},
+ {"memhash0", funcTag, 108},
+ {"memhash8", funcTag, 108},
+ {"memhash16", funcTag, 108},
+ {"memhash32", funcTag, 108},
+ {"memhash64", funcTag, 108},
+ {"memhash128", funcTag, 108},
+ {"f32hash", funcTag, 108},
+ {"f64hash", funcTag, 108},
+ {"c64hash", funcTag, 108},
+ {"c128hash", funcTag, 108},
+ {"strhash", funcTag, 108},
+ {"interhash", funcTag, 108},
+ {"nilinterhash", funcTag, 108},
+ {"int64div", funcTag, 109},
+ {"uint64div", funcTag, 110},
+ {"int64mod", funcTag, 109},
+ {"uint64mod", funcTag, 110},
+ {"float64toint64", funcTag, 111},
+ {"float64touint64", funcTag, 112},
+ {"float64touint32", funcTag, 113},
+ {"int64tofloat64", funcTag, 114},
+ {"uint64tofloat64", funcTag, 115},
+ {"uint32tofloat64", funcTag, 116},
+ {"complex128div", funcTag, 117},
+ {"racefuncenter", funcTag, 118},
{"racefuncenterfp", funcTag, 5},
{"racefuncexit", funcTag, 5},
- {"raceread", funcTag, 115},
- {"racewrite", funcTag, 115},
- {"racereadrange", funcTag, 116},
- {"racewriterange", funcTag, 116},
- {"msanread", funcTag, 116},
- {"msanwrite", funcTag, 116},
+ {"raceread", funcTag, 118},
+ {"racewrite", funcTag, 118},
+ {"racereadrange", funcTag, 119},
+ {"racewriterange", funcTag, 119},
+ {"msanread", funcTag, 119},
+ {"msanwrite", funcTag, 119},
{"x86HasPOPCNT", varTag, 15},
{"x86HasSSE41", varTag, 15},
{"arm64HasATOMICS", varTag, 15},
}
func runtimeTypes() []*types.Type {
- var typs [117]*types.Type
+ var typs [120]*types.Type
typs[0] = types.Bytetype
typs[1] = types.NewPtr(typs[0])
typs[2] = types.Types[TANY]
typs[103] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[50])}, nil)
typs[104] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3]), anonfield(typs[50])}, []*Node{anonfield(typs[15])})
typs[105] = functype(nil, []*Node{anonfield(typs[3]), anonfield(typs[3])}, []*Node{anonfield(typs[15])})
- typs[106] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
- typs[107] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
- typs[108] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[19])})
- typs[109] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[21])})
- typs[110] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[64])})
- typs[111] = functype(nil, []*Node{anonfield(typs[19])}, []*Node{anonfield(typs[17])})
- typs[112] = functype(nil, []*Node{anonfield(typs[21])}, []*Node{anonfield(typs[17])})
- typs[113] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[17])})
- typs[114] = functype(nil, []*Node{anonfield(typs[23]), anonfield(typs[23])}, []*Node{anonfield(typs[23])})
- typs[115] = functype(nil, []*Node{anonfield(typs[50])}, nil)
- typs[116] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil)
+ typs[106] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[56])}, []*Node{anonfield(typs[15])})
+ typs[107] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[50]), anonfield(typs[50])}, []*Node{anonfield(typs[50])})
+ typs[108] = functype(nil, []*Node{anonfield(typs[56]), anonfield(typs[50])}, []*Node{anonfield(typs[50])})
+ typs[109] = functype(nil, []*Node{anonfield(typs[19]), anonfield(typs[19])}, []*Node{anonfield(typs[19])})
+ typs[110] = functype(nil, []*Node{anonfield(typs[21]), anonfield(typs[21])}, []*Node{anonfield(typs[21])})
+ typs[111] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[19])})
+ typs[112] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[21])})
+ typs[113] = functype(nil, []*Node{anonfield(typs[17])}, []*Node{anonfield(typs[64])})
+ typs[114] = functype(nil, []*Node{anonfield(typs[19])}, []*Node{anonfield(typs[17])})
+ typs[115] = functype(nil, []*Node{anonfield(typs[21])}, []*Node{anonfield(typs[17])})
+ typs[116] = functype(nil, []*Node{anonfield(typs[64])}, []*Node{anonfield(typs[17])})
+ typs[117] = functype(nil, []*Node{anonfield(typs[23]), anonfield(typs[23])}, []*Node{anonfield(typs[23])})
+ typs[118] = functype(nil, []*Node{anonfield(typs[50])}, nil)
+ typs[119] = functype(nil, []*Node{anonfield(typs[50]), anonfield(typs[50])}, nil)
return typs[:]
}
func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
func memequal(x, y *any, size uintptr) bool
+func memequal0(x, y *any) bool
func memequal8(x, y *any) bool
func memequal16(x, y *any) bool
func memequal32(x, y *any) bool
func memequal64(x, y *any) bool
func memequal128(x, y *any) bool
+func f32equal(p, q unsafe.Pointer) bool
+func f64equal(p, q unsafe.Pointer) bool
+func c64equal(p, q unsafe.Pointer) bool
+func c128equal(p, q unsafe.Pointer) bool
+func strequal(p, q unsafe.Pointer) bool
+func interequal(p, q unsafe.Pointer) bool
+func nilinterequal(p, q unsafe.Pointer) bool
+
+func memhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr
+func memhash0(p unsafe.Pointer, h uintptr) uintptr
+func memhash8(p unsafe.Pointer, h uintptr) uintptr
+func memhash16(p unsafe.Pointer, h uintptr) uintptr
+func memhash32(p unsafe.Pointer, h uintptr) uintptr
+func memhash64(p unsafe.Pointer, h uintptr) uintptr
+func memhash128(p unsafe.Pointer, h uintptr) uintptr
+func f32hash(p unsafe.Pointer, h uintptr) uintptr
+func f64hash(p unsafe.Pointer, h uintptr) uintptr
+func c64hash(p unsafe.Pointer, h uintptr) uintptr
+func c128hash(p unsafe.Pointer, h uintptr) uintptr
+func strhash(a unsafe.Pointer, h uintptr) uintptr
+func interhash(p unsafe.Pointer, h uintptr) uintptr
+func nilinterhash(p unsafe.Pointer, h uintptr) uintptr
// only used on 32-bit
func int64div(int64, int64) int64
// reflect/type.go
// runtime/type.go
const (
- tflagUncommon = 1 << 0
- tflagExtraStar = 1 << 1
- tflagNamed = 1 << 2
+ tflagUncommon = 1 << 0
+ tflagExtraStar = 1 << 1
+ tflagNamed = 1 << 2
+ tflagRegularMemory = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
)
var (
- algarray *obj.LSym
memhashvarlen *obj.LSym
memequalvarlen *obj.LSym
)
// dcommontype dumps the contents of a reflect.rtype (runtime._type).
func dcommontype(lsym *obj.LSym, t *types.Type) int {
- sizeofAlg := 2 * Widthptr
- if algarray == nil {
- algarray = sysvar("algarray")
- }
dowidth(t)
- alg := algtype(t)
- var algsym *obj.LSym
- if alg == ASPECIAL || alg == AMEM {
- algsym = dalgsym(t)
- }
+ eqfunc := geneq(t) // nil if t is not comparable
sptrWeak := true
var sptr *obj.LSym
// align uint8
// fieldAlign uint8
// kind uint8
- // alg *typeAlg
+ // equal func(unsafe.Pointer, unsafe.Pointer) bool
// gcdata *byte
// str nameOff
// ptrToThis typeOff
if t.Sym != nil && t.Sym.Name != "" {
tflag |= tflagNamed
}
+ if IsRegularMemory(t) {
+ tflag |= tflagRegularMemory
+ }
exported := false
p := t.LongString()
i |= objabi.KindGCProg
}
ot = duint8(lsym, ot, uint8(i)) // kind
- if algsym == nil {
- ot = dsymptr(lsym, ot, algarray, int(alg)*sizeofAlg)
+ if eqfunc != nil {
+ ot = dsymptr(lsym, ot, eqfunc, 0) // equality function
} else {
- ot = dsymptr(lsym, ot, algsym, 0)
+ ot = duintptr(lsym, ot, 0) // type we can't do == with
}
ot = dsymptr(lsym, ot, gcsym, 0) // gcdata
s1 := dtypesym(t.Key())
s2 := dtypesym(t.Elem())
s3 := dtypesym(bmap(t))
+ hasher := genhash(t.Key())
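+ // Map keys are now the only types for which the compiler
+ // emits hash functions (genhash recurses for their subtypes);
+ // interface keys and reflect-built maps fall back to
+ // runtime.typehash.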
+
ot = dcommontype(lsym, t)
ot = dsymptr(lsym, ot, s1, 0)
ot = dsymptr(lsym, ot, s2, 0)
ot = dsymptr(lsym, ot, s3, 0)
+ ot = dsymptr(lsym, ot, hasher, 0)
var flags uint32
// Note: flags must match maptype accessors in ../../../../runtime/type.go
// and maptype builder in ../../../../reflect/type.go:MapOf.
}
func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func dalgsym(t *types.Type) *obj.LSym {
- var lsym *obj.LSym
- var hashfunc *obj.LSym
- var eqfunc *obj.LSym
-
- // dalgsym is only called for a type that needs an algorithm table,
- // which implies that the type is comparable (or else it would use ANOEQ).
-
- if algtype(t) == AMEM {
- // we use one algorithm table for all AMEM types of a given size
- p := fmt.Sprintf(".alg%d", t.Width)
-
- s := typeLookup(p)
- lsym = s.Linksym()
- if s.AlgGen() {
- return lsym
- }
- s.SetAlgGen(true)
-
- if memhashvarlen == nil {
- memhashvarlen = sysfunc("memhash_varlen")
- memequalvarlen = sysvar("memequal_varlen") // asm func
- }
-
- // make hash closure
- p = fmt.Sprintf(".hashfunc%d", t.Width)
-
- hashfunc = typeLookup(p).Linksym()
-
- ot := 0
- ot = dsymptr(hashfunc, ot, memhashvarlen, 0)
- ot = duintptr(hashfunc, ot, uint64(t.Width)) // size encoded in closure
- ggloblsym(hashfunc, int32(ot), obj.DUPOK|obj.RODATA)
-
- // make equality closure
- p = fmt.Sprintf(".eqfunc%d", t.Width)
-
- eqfunc = typeLookup(p).Linksym()
-
- ot = 0
- ot = dsymptr(eqfunc, ot, memequalvarlen, 0)
- ot = duintptr(eqfunc, ot, uint64(t.Width))
- ggloblsym(eqfunc, int32(ot), obj.DUPOK|obj.RODATA)
- } else {
- // generate an alg table specific to this type
- s := typesymprefix(".alg", t)
- lsym = s.Linksym()
-
- hash := typesymprefix(".hash", t)
- eq := typesymprefix(".eq", t)
- hashfunc = typesymprefix(".hashfunc", t).Linksym()
- eqfunc = typesymprefix(".eqfunc", t).Linksym()
-
- genhash(hash, t)
- geneq(eq, t)
-
- // make Go funcs (closures) for calling hash and equal from Go
- dsymptr(hashfunc, 0, hash.Linksym(), 0)
- ggloblsym(hashfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
- dsymptr(eqfunc, 0, eq.Linksym(), 0)
- ggloblsym(eqfunc, int32(Widthptr), obj.DUPOK|obj.RODATA)
- }
-
- // ../../../../runtime/alg.go:/typeAlg
- ot := 0
-
- ot = dsymptr(lsym, ot, hashfunc, 0)
- ot = dsymptr(lsym, ot, eqfunc, 0)
- ggloblsym(lsym, int32(ot), obj.DUPOK|obj.RODATA)
- return lsym
-}
-
// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
// which holds 1-bit entries describing where pointers are in a given type.
// Above this length, the GC information is recorded as a GC program,
symUniq
symSiggen // type symbol has been generated
symAsm // on asmlist, for writing to -asmhdr
- symAlgGen // algorithm table has been generated
symFunc // function symbol; uses internal ABI
)
func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 }
func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 }
func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 }
-func (sym *Sym) AlgGen() bool { return sym.flags&symAlgGen != 0 }
func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 }
func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) }
func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) }
func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) }
func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) }
-func (sym *Sym) SetAlgGen(b bool) { sym.flags.set(symAlgGen, b) }
func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) }
func (sym *Sym) IsBlank() bool {
case kindChan: // reflect.chanType
off += 2 * arch.PtrSize
case kindMap: // reflect.mapType
- off += 3*arch.PtrSize + 8
+ off += 4*arch.PtrSize + 8
case kindInterface: // reflect.interfaceType
off += 3 * arch.PtrSize
default:
toc.Attr |= sym.AttrReachable
toc.Attr |= sym.AttrVisibilityHidden
- // XCOFF does not allow relocations of data symbol address to a text symbol.
- // Such case occurs when a RODATA symbol retrieves a data symbol address.
- // When it happens, this RODATA symbol is moved to .data section.
- // runtime.algarray is a readonly symbol but stored inside .data section.
- // If it stays in .data, all type symbols will be moved to .data which
- // cannot be done.
- algarray := ctxt.Syms.Lookup("runtime.algarray", 0)
- algarray.Type = sym.SRODATA
- for {
- again := false
- for _, s := range ctxt.Syms.Allsym {
- if s.Type != sym.SRODATA {
- continue
- }
- for ri := range s.R {
- r := &s.R[ri]
- if r.Type != objabi.R_ADDR {
- continue
- }
- if r.Sym.Type != sym.Sxxx && r.Sym.Type != sym.STEXT && r.Sym.Type != sym.SRODATA {
- s.Type = sym.SDATA
- again = true
- break
- }
- }
-
- }
- if !again {
- break
- }
- }
-
// Add entry point to .loader symbols.
ep := ctxt.Syms.ROLookup(*flagEntrySymbol, 0)
if !ep.Attr.Reachable() {
// tflagNamed means the type has a name.
tflagNamed tflag = 1 << 2
+
+ // tflagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ tflagRegularMemory tflag = 1 << 3
)
// rtype is the common implementation of most values.
// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
size uintptr
- ptrdata uintptr // number of bytes in the type that can contain pointers
- hash uint32 // hash of type; avoids computation in hash tables
- tflag tflag // extra type information flags
- align uint8 // alignment of variable with this type
- fieldAlign uint8 // alignment of struct field with this type
- kind uint8 // enumeration for C
- alg *typeAlg // algorithm table
- gcdata *byte // garbage collection data
- str nameOff // string form
- ptrToThis typeOff // type for pointer to this type, may be zero
-}
-
-// a copy of runtime.typeAlg
-type typeAlg struct {
- // function for hashing objects of this type
- // (ptr to object, seed) -> hash
- hash func(unsafe.Pointer, uintptr) uintptr
+ ptrdata uintptr // number of bytes in the type that can contain pointers
+ hash uint32 // hash of type; avoids computation in hash tables
+ tflag tflag // extra type information flags
+ align uint8 // alignment of variable with this type
+ fieldAlign uint8 // alignment of struct field with this type
+ kind uint8 // enumeration for C
// function for comparing objects of this type
// (ptr to object A, ptr to object B) -> ==?
- equal func(unsafe.Pointer, unsafe.Pointer) bool
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ gcdata *byte // garbage collection data
+ str nameOff // string form
+ ptrToThis typeOff // type for pointer to this type, may be zero
}
// Method on non-interface type
// mapType represents a map type.
type mapType struct {
rtype
- key *rtype // map key type
- elem *rtype // map element (value) type
- bucket *rtype // internal bucket structure
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot
valuesize uint8 // size of value slot
bucketsize uint16 // size of bucket
}
func (t *rtype) Comparable() bool {
- return t.alg != nil && t.alg.equal != nil
+ return t.equal != nil
}
// implements reports whether the type V implements the interface type T.
var ichan interface{} = (chan unsafe.Pointer)(nil)
prototype := *(**chanType)(unsafe.Pointer(&ichan))
ch := *prototype
- ch.tflag = 0
+ ch.tflag = tflagRegularMemory
ch.dir = uintptr(dir)
ch.str = resolveReflectName(newName(s, "", false))
ch.hash = fnv1(typ.hash, 'c', byte(dir))
return ti.(Type)
}
-func ismapkey(*rtype) bool // implemented in runtime
-
// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
ktyp := key.(*rtype)
etyp := elem.(*rtype)
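+ // A nil equal function marks a type that does not support ==
+ // and hence cannot be a map key.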
- if !ismapkey(ktyp) {
+ if ktyp.equal == nil {
panic("reflect.MapOf: invalid key type " + ktyp.String())
}
mt.key = ktyp
mt.elem = etyp
mt.bucket = bucketOf(ktyp, etyp)
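+ // Keys of reflect-built maps have no compiler-generated hash
+ // function, so the hasher defers to the generic runtime.typehash.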
+ mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+ return typehash(ktyp, p, seed)
+ }
mt.flags = 0
if ktyp.size > maxKeySize {
mt.keysize = uint8(ptrSize)
size uintptr
typalign uint8
comparable = true
- hashable = true
methods []method
fs = make([]structField, len(fields))
repr = append(repr, ';')
}
- comparable = comparable && (ft.alg.equal != nil)
- hashable = hashable && (ft.alg.hash != nil)
+ comparable = comparable && (ft.equal != nil)
offset := align(size, uintptr(ft.align))
if ft.align > typalign {
}
typ.str = resolveReflectName(newName(str, "", false))
- typ.tflag = 0
+ typ.tflag = 0 // TODO: set tflagRegularMemory
typ.hash = hash
typ.size = size
typ.ptrdata = typeptrdata(typ.common())
typ.gcdata = &bv.data[0]
}
}
- typ.alg = new(typeAlg)
- if hashable {
- typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr {
- o := seed
- for _, ft := range typ.fields {
- pi := add(p, ft.offset(), "&x.field safe")
- o = ft.typ.alg.hash(pi, o)
- }
- return o
- }
- }
-
+ typ.equal = nil
if comparable {
- typ.alg.equal = func(p, q unsafe.Pointer) bool {
+ typ.equal = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.fields {
pi := add(p, ft.offset(), "&x.field safe")
qi := add(q, ft.offset(), "&x.field safe")
- if !ft.typ.alg.equal(pi, qi) {
+ if !ft.typ.equal(pi, qi) {
return false
}
}
var iarray interface{} = [1]unsafe.Pointer{}
prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := *prototype
- array.tflag = 0
+ array.tflag = typ.tflag & tflagRegularMemory // an array is regular memory exactly when its element type is
array.str = resolveReflectName(newName(s, "", false))
array.hash = fnv1(typ.hash, '[')
for n := uint32(count); n > 0; n >>= 8 {
etyp := typ.common()
esize := etyp.Size()
- ealg := etyp.alg
- array.alg = new(typeAlg)
- if ealg.equal != nil {
- eequal := ealg.equal
- array.alg.equal = func(p, q unsafe.Pointer) bool {
+ array.equal = nil
+ if eequal := etyp.equal; eequal != nil {
+ array.equal = func(p, q unsafe.Pointer) bool {
for i := 0; i < count; i++ {
pi := arrayAt(p, i, esize, "i < count")
qi := arrayAt(q, i, esize, "i < count")
return true
}
}
- if ealg.hash != nil {
- ehash := ealg.hash
- array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
- o := seed
- for i := 0; i < count; i++ {
- o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
- }
- return o
- }
- }
switch {
case count == 1 && !ifaceIndir(typ):
//go:noescape
func typedslicecopy(elemType *rtype, dst, src sliceHeader) int
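+// typehash is implemented in the runtime (linked to
+// runtime.reflect_typehash by go:linkname).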
+//go:noescape
+func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
+
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
// the compiler cannot follow.
alg_max
)
-// typeAlg is also copied/used in reflect/type.go.
-// keep them in sync.
-type typeAlg struct {
- // function for hashing objects of this type
- // (ptr to object, seed) -> hash
- hash func(unsafe.Pointer, uintptr) uintptr
- // function for comparing objects of this type
- // (ptr to object A, ptr to object B) -> ==?
- equal func(unsafe.Pointer, unsafe.Pointer) bool
-}
-
func memhash0(p unsafe.Pointer, h uintptr) uintptr {
return h
}
return memhash(p, h, size)
}
-var algarray = [alg_max]typeAlg{
- alg_NOEQ: {nil, nil},
- alg_MEM0: {memhash0, memequal0},
- alg_MEM8: {memhash8, memequal8},
- alg_MEM16: {memhash16, memequal16},
- alg_MEM32: {memhash32, memequal32},
- alg_MEM64: {memhash64, memequal64},
- alg_MEM128: {memhash128, memequal128},
- alg_STRING: {strhash, strequal},
- alg_INTER: {interhash, interequal},
- alg_NILINTER: {nilinterhash, nilinterequal},
- alg_FLOAT32: {f32hash, f32equal},
- alg_FLOAT64: {f64hash, f64equal},
- alg_CPLX64: {c64hash, c64equal},
- alg_CPLX128: {c128hash, c128equal},
-}
-
+// runtime variable to check if the processor we're running on
+// actually supports the instructions used by the AES-based
+// hash implementation.
var useAeshash bool
// in asm_*.s
return h
}
t := tab._type
- fn := t.alg.hash
- if fn == nil {
+ if t.equal == nil {
+ // Check hashability here. We could do this check inside
+ // typehash, but we want to report the topmost type in
+ // the error text (e.g. in a struct with a field of slice type
+ // we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
}
}
if t == nil {
return h
}
- fn := t.alg.hash
- if fn == nil {
+ if t.equal == nil {
+ // See comment in interhash above.
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
+ }
+}
+
+// typehash computes the hash of the object of type t at address p.
+// h is the seed.
+// This function is seldom used. Most maps hash their keys with either
+// fixed functions (e.g. f32hash) or compiler-generated functions
+// (e.g. for a type like struct { x, y string }). This implementation
+// is slower but more general: it is used for hashing interface values
+// (called from interhash or nilinterhash, above) and for hashing keys
+// in maps built by reflect.MapOf (reflect_typehash, below).
+func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ if t.tflag&tflagRegularMemory != 0 {
+ return memhash(p, h, t.size)
}
+ switch t.kind & kindMask {
+ case kindFloat32:
+ return f32hash(p, h)
+ case kindFloat64:
+ return f64hash(p, h)
+ case kindComplex64:
+ return c64hash(p, h)
+ case kindComplex128:
+ return c128hash(p, h)
+ case kindString:
+ return strhash(p, h)
+ case kindInterface:
+ i := (*interfacetype)(unsafe.Pointer(t))
+ if len(i.mhdr) == 0 {
+ return nilinterhash(p, h)
+ }
+ return interhash(p, h)
+ case kindArray:
+ a := (*arraytype)(unsafe.Pointer(t))
+ for i := uintptr(0); i < a.len; i++ {
+ h = typehash(a.elem, add(p, i*a.elem.size), h)
+ }
+ return h
+ case kindStruct:
+ s := (*structtype)(unsafe.Pointer(t))
+ for _, f := range s.fields {
+ // TODO: maybe we could hash several contiguous fields all at once.
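+ // Skip blank fields: the spec defines struct equality over
+ // non-blank fields only, so they must not affect the hash.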
+ if f.name.isBlank() {
+ continue
+ }
+ h = typehash(f.typ, add(p, f.offset()), h)
+ }
+ return h
+ default:
+ // Should never happen, as typehash should only be called
+ // with comparable types.
+ panic(errorString("hash of unhashable type " + t.string()))
+ }
+}
+
+//go:linkname reflect_typehash reflect.typehash
+func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ return typehash(t, p, h)
}
func memequal0(p, q unsafe.Pointer) bool {
if t == nil {
return true
}
- eq := t.alg.equal
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
return true
}
t := tab._type
- eq := t.alg.equal
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
// Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr {
- return algarray[alg_STRING].hash(noescape(unsafe.Pointer(&s)), seed)
+ return strhash(noescape(unsafe.Pointer(&s)), seed)
}
func bytesHash(b []byte, seed uintptr) uintptr {
}
func int32Hash(i uint32, seed uintptr) uintptr {
- return algarray[alg_MEM32].hash(noescape(unsafe.Pointer(&i)), seed)
+ return memhash32(noescape(unsafe.Pointer(&i)), seed)
}
func int64Hash(i uint64, seed uintptr) uintptr {
- return algarray[alg_MEM64].hash(noescape(unsafe.Pointer(&i)), seed)
+ return memhash64(noescape(unsafe.Pointer(&i)), seed)
}
func efaceHash(i interface{}, seed uintptr) uintptr {
- return algarray[alg_NILINTER].hash(noescape(unsafe.Pointer(&i)), seed)
+ return nilinterhash(noescape(unsafe.Pointer(&i)), seed)
}
func ifaceHash(i interface {
F()
}, seed uintptr) uintptr {
- return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), seed)
+ return interhash(noescape(unsafe.Pointer(&i)), seed)
}
const hashRandomBytes = sys.PtrSize / 4 * 64
c.dataqsiz = uint(size)
if debugChan {
- print("makechan: chan=", c, "; elemsize=", elem.size, "; elemalg=", elem.alg, "; dataqsiz=", size, "\n")
+ print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
}
return c
}
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- t.key.alg.hash(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if alg.equal(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- t.key.alg.hash(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if alg.equal(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
if h == nil || h.count == 0 {
return nil, nil
}
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if alg.equal(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write.
h.flags ^= hashWriting
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if !alg.equal(key, k) {
+ if !t.key.equal(key, k) {
continue
}
// already have a mapping for key. Update it.
}
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- t.key.alg.hash(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return
}
throw("concurrent map writes")
}
- alg := t.key.alg
- hash := alg.hash(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write (delete).
h.flags ^= hashWriting
if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
- if !alg.equal(key, k2) {
+ if !t.key.equal(key, k2) {
continue
}
// Only clear key if there are pointers in it.
b := it.bptr
i := it.i
checkBucket := it.checkBucket
- alg := t.key.alg
next:
if b == nil {
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
- if t.reflexivekey() || alg.equal(k, k) {
+ if t.reflexivekey() || t.key.equal(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
- hash := alg.hash(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&bucketMask(it.B) != checkBucket {
continue
}
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey() || alg.equal(k, k)) {
+ !(t.reflexivekey() || t.key.equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.alg.hash(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) {
+ hash := t.hasher(k2, uintptr(h.hash0))
+ if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
}
}
-func ismapkey(t *_type) bool {
- return t.alg.hash != nil
-}
-
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
// Check invariants and reflects math.
- if !ismapkey(t.key) {
+ if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
return h.count
}
-//go:linkname reflect_ismapkey reflect.ismapkey
-func reflect_ismapkey(t *_type) bool {
- return ismapkey(t)
-}
-
const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go:zeroValSize
var zeroVal [maxZero]byte
})
}
}
+
+var BoolSink bool
+
+func BenchmarkMapInterfaceString(b *testing.B) {
+ m := map[interface{}]bool{}
+
+ for i := 0; i < 100; i++ {
+ m[fmt.Sprintf("%d", i)] = true
+ }
+
+ key := (interface{})("A")
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ BoolSink = m[key]
+ }
+}
+
+func BenchmarkMapInterfacePtr(b *testing.B) {
+ m := map[interface{}]bool{}
+
+ for i := 0; i < 100; i++ {
+ i := i
+ m[&i] = true
+ }
+
+ key := new(int)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ BoolSink = m[key]
+ }
+}
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
throw("concurrent map writes")
}
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.alg.hash(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
throw("concurrent map writes")
}
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.alg.hash(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
return unsafe.Pointer(&zeroVal[0])
}
dohash:
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
throw("concurrent map writes")
}
key := stringStructOf(&s)
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
}
key := stringStructOf(&ky)
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.alg.hash(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
// ("quick keys") as well as long keys.
func TestSingleBucketMapStringKeys_DupLen(t *testing.T) {
testMapLookups(t, map[string]string{
- "x": "x1val",
- "xx": "x2val",
- "foo": "fooval",
- "bar": "barval", // same key length as "foo"
- "xxxx": "x4val",
+ "x": "x1val",
+ "xx": "x2val",
+ "foo": "fooval",
+ "bar": "barval", // same key length as "foo"
+ "xxxx": "x4val",
strings.Repeat("x", 128): "longval1",
strings.Repeat("y", 128): "longval2",
})
}
runtime.MapTombstoneCheck(m)
}
+
+type canString int
+
+func (c canString) String() string {
+ return fmt.Sprintf("%d", int(c))
+}
+
+func TestMapInterfaceKey(t *testing.T) {
+ // Test all the special cases in runtime.typehash.
+ type GrabBag struct {
+ f32 float32
+ f64 float64
+ c64 complex64
+ c128 complex128
+ s string
+ i0 interface{}
+ i1 interface {
+ String() string
+ }
+ a [4]string
+ }
+
+ m := map[interface{}]bool{}
+ // Put a bunch of data in m, so that a bad hash is likely to
+ // lead to a bad bucket, which will lead to a missed lookup.
+ for i := 0; i < 1000; i++ {
+ m[i] = true
+ }
+ m[GrabBag{f32: 1.0}] = true
+ if !m[GrabBag{f32: 1.0}] {
+ panic("f32 not found")
+ }
+ m[GrabBag{f64: 1.0}] = true
+ if !m[GrabBag{f64: 1.0}] {
+ panic("f64 not found")
+ }
+ m[GrabBag{c64: 1.0i}] = true
+ if !m[GrabBag{c64: 1.0i}] {
+ panic("c64 not found")
+ }
+ m[GrabBag{c128: 1.0i}] = true
+ if !m[GrabBag{c128: 1.0i}] {
+ panic("c128 not found")
+ }
+ m[GrabBag{s: "foo"}] = true
+ if !m[GrabBag{s: "foo"}] {
+ panic("string not found")
+ }
+ m[GrabBag{i0: "foo"}] = true
+ if !m[GrabBag{i0: "foo"}] {
+ panic("interface{} not found")
+ }
+ m[GrabBag{i1: canString(5)}] = true
+ if !m[GrabBag{i1: canString(5)}] {
+ panic("interface{String() string} not found")
+ }
+ m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
+ if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
+ panic("array not found")
+ }
+}
type tflag uint8
const (
- tflagUncommon tflag = 1 << 0
- tflagExtraStar tflag = 1 << 1
- tflagNamed tflag = 1 << 2
+ tflagUncommon tflag = 1 << 0
+ tflagExtraStar tflag = 1 << 1
+ tflagNamed tflag = 1 << 2
+ tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
)
// Needs to be in sync with ../cmd/link/internal/ld/decodesym.go:/^func.commonsize,
align uint8
fieldalign uint8
kind uint8
- alg *typeAlg
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
// gcdata stores the GC type data for the garbage collector.
// If the KindGCProg bit is set in kind, gcdata is a GC program.
// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
}
type maptype struct {
- typ _type
- key *_type
- elem *_type
- bucket *_type // internal type representing a hash bucket
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type // internal type representing a hash bucket
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot
elemsize uint8 // size of elem slot
bucketsize uint16 // size of bucket
return pkgPathName.name()
}
+func (n name) isBlank() bool {
+ if n.bytes == nil {
+ return false
+ }
+ if n.nameLen() != 1 {
+ return false
+ }
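+ // The name's string data starts at offset 3, after one flag
+ // byte and a two-byte length.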
+ return *n.data(3) == '_'
+}
+
// typelinksinit scans the types from extra modules and builds the
// moduledata typemap used to de-duplicate type pointers.
func typelinksinit() {
}
func test6() {
- defer mustRecover("unhashable")
+ defer mustRecover("unhashable type main.T")
var x T
var z interface{} = x
m := make(map[interface{}]int)