align uint8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
kind uint8 // enumeration for C
- alg *typeAlg // algorithm table (../runtime/runtime.h:/Alg)
+ alg *typeAlg // algorithm table
gc [2]unsafe.Pointer // garbage collection data
string *string // string form; unnecessary but undeniably useful
*uncommonType // (relatively) uncommon fields
zero unsafe.Pointer // pointer to zero value
}
+// typeAlg is a copy of runtime.typeAlg.
type typeAlg struct {
// function for hashing objects of this type
// (ptr to object, size, seed) -> hash
return h
}
t := tab._type
- fn := goalg(t.alg).hash
+ fn := t.alg.hash
if fn == nil {
panic(errorString("hash of unhashable type " + *t._string))
}
if t == nil {
return h
}
- fn := goalg(t.alg).hash
+ fn := t.alg.hash
if fn == nil {
panic(errorString("hash of unhashable type " + *t._string))
}
if t == nil {
return true
}
- eq := goalg(t.alg).equal
+ eq := t.alg.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + *t._string))
}
return true
}
t := xtab._type
- eq := goalg(t.alg).equal
+ eq := t.alg.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + *t._string))
}
memclr(s.array, uintptr(s.len))
}
-// TODO(dvyukov): remove when Type is converted to Go and contains *typeAlg.
-func goalg(a unsafe.Pointer) *typeAlg {
- return (*typeAlg)(a)
-}
-
// used in asm_{386,amd64}.s
const hashRandomBytes = ptrSize / 4 * 64
if h == nil || h.count == 0 {
return unsafe.Pointer(t.elem.zero)
}
- alg := goalg(t.key.alg)
+ alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if h == nil || h.count == 0 {
return unsafe.Pointer(t.elem.zero), false
}
- alg := goalg(t.key.alg)
+ alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if h == nil || h.count == 0 {
return nil, nil
}
- alg := goalg(t.key.alg)
+ alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
raceReadObjectPC(t.elem, val, callerpc, pc)
}
- alg := goalg(t.key.alg)
+ alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
if h.buckets == nil {
if h == nil || h.count == 0 {
return
}
- alg := goalg(t.key.alg)
+ alg := t.key.alg
hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
bucket := hash & (uintptr(1)<<h.B - 1)
if h.oldbuckets != nil {
b := it.bptr
i := it.i
checkBucket := it.checkBucket
- alg := goalg(t.key.alg)
+ alg := t.key.alg
next:
if b == nil {
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
newbit := uintptr(1) << (h.B - 1)
- alg := goalg(t.key.alg)
+ alg := t.key.alg
if !evacuated(b) {
// TODO: reuse overflow buckets instead of using new ones, if there
// is no iterator using the old buckets. (If !oldIterator.)
}
// ismapkey reports whether values of type t can be used as map keys,
// i.e. whether t's algorithm table provides a hash function.
// (Callers elsewhere panic with "hash of unhashable type" when it is nil.)
func ismapkey(t *_type) bool {
- return goalg(t.alg).hash != nil
+ return t.alg.hash != nil
}
// Reflect stubs. Called from ../reflect/asm_*.s
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
return unsafe.Pointer(t.elem.zero)
}
dohash:
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
return unsafe.Pointer(t.elem.zero), false
}
dohash:
- hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
align uint8
fieldalign uint8
kind uint8
- alg unsafe.Pointer
+ alg *typeAlg
// gc stores _type info required for garbage collector.
// If (kind&KindGCProg)==0, then gc[0] points at sparse GC bitmap
// (no indirection), 4 bits per word.