runtime: hashmap: move overflow pointer to end of bucket
author     Keith Randall <khr@golang.org>
           Sat, 20 Dec 2014 04:44:18 +0000 (20:44 -0800)
committer  Keith Randall <khr@golang.org>
           Mon, 22 Dec 2014 22:25:48 +0000 (22:25 +0000)
Pointers to zero-sized values may end up pointing to the next
object in memory, and possibly off the end of a span.  This
can cause memory leaks and/or confuse the garbage collector.

By putting the overflow pointer at the end of the bucket, we
make sure that pointers to any zero-sized keys or values don't
accidentally point to the next object in memory.
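
For illustration, a rough sketch of the two layouts (field names follow
bmap in src/runtime/hashmap.go; bucketCnt is 8, and K and V stand for
the map's key and value types):

    old layout                      new layout
    ----------                      ----------
    tophash  [8]uint8               tophash  [8]uint8
    overflow *bmap                  keys     [8]K
    keys     [8]K                   values   [8]V
    values   [8]V                   overflow *bmap
                                    pad (nacl/amd64p32 only)

With the old layout, a pointer just past the last value slot of a
zero-sized V is also a pointer just past the bucket; with the new
layout such a pointer lands on the overflow word, still inside the
bucket.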

fixes #9384

Change-Id: I5d434df176984cb0210b4d0195dd106d6eb28f73
Reviewed-on: https://go-review.googlesource.com/1869
Reviewed-by: Russ Cox <rsc@golang.org>
src/cmd/gc/reflect.c
src/cmd/ld/dwarf.c
src/reflect/type.go
src/runtime/hashmap.go
src/runtime/hashmap_fast.go

diff --git a/src/cmd/gc/reflect.c b/src/cmd/gc/reflect.c
index 897fdc635a171d8cf693b431c31cb5ecbb6e0c85..7b17072e59b0f08f129935d41c84220e4e10a225 100644
--- a/src/cmd/gc/reflect.c
+++ b/src/cmd/gc/reflect.c
@@ -143,18 +143,6 @@ mapbucket(Type *t)
        // We don't need to encode it as GC doesn't care about it.
        offset = BUCKETSIZE * 1;
 
-       overflowfield = typ(TFIELD);
-       overflowfield->type = ptrto(bucket);
-       overflowfield->width = offset;         // "width" is offset in structure
-       overflowfield->sym = mal(sizeof(Sym)); // not important but needs to be set to give this type a name
-       overflowfield->sym->name = "overflow";
-       offset += widthptr;
-       
-       // The keys are padded to the native integer alignment.
-       // This is usually the same as widthptr; the exception (as usual) is nacl/amd64.
-       if(widthreg > widthptr)
-               offset += widthreg - widthptr;
-
        keysfield = typ(TFIELD);
        keysfield->type = typ(TARRAY);
        keysfield->type->type = keytype;
@@ -175,11 +163,23 @@ mapbucket(Type *t)
        valuesfield->sym->name = "values";
        offset += BUCKETSIZE * valtype->width;
 
+       overflowfield = typ(TFIELD);
+       overflowfield->type = ptrto(bucket);
+       overflowfield->width = offset;         // "width" is offset in structure
+       overflowfield->sym = mal(sizeof(Sym)); // not important but needs to be set to give this type a name
+       overflowfield->sym->name = "overflow";
+       offset += widthptr;
+       
+       // Pad to the native integer alignment.
+       // This is usually the same as widthptr; the exception (as usual) is nacl/amd64.
+       if(widthreg > widthptr)
+               offset += widthreg - widthptr;
+
        // link up fields
-       bucket->type = overflowfield;
-       overflowfield->down = keysfield;
+       bucket->type = keysfield;
        keysfield->down = valuesfield;
-       valuesfield->down = T;
+       valuesfield->down = overflowfield;
+       overflowfield->down = T;
 
        bucket->width = offset;
        bucket->local = t->local;
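
As a concrete illustration of the new offset arithmetic (not part of the
change), take map[int64]int8 on a 64-bit target, where BUCKETSIZE is 8 and
widthptr == widthreg == 8:

    tophash    offset  0    size 8*1 =  8
    keys       offset  8    size 8*8 = 64
    values     offset 72    size 8*1 =  8
    overflow   offset 80    size        8
    bucket->width = 88    (widthreg == widthptr, so no trailing pad)

On nacl/amd64p32 (widthptr = 4, widthreg = 8) the overflow pointer is
followed by widthreg - widthptr = 4 bytes of padding, so the bucket still
ends on a widthreg boundary and the overflow word sits at
bucket->width - widthreg.
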
diff --git a/src/cmd/ld/dwarf.c b/src/cmd/ld/dwarf.c
index fbb5699bdb6489e72a3026d83fd400516f91af5a..b5331e829f1dc595d316659dc2c9da34403583da 100644
--- a/src/cmd/ld/dwarf.c
+++ b/src/cmd/ld/dwarf.c
@@ -1281,12 +1281,19 @@ synthesizemaptypes(DWDie *die)
                
                fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "keys");
                newrefattr(fld, DW_AT_type, dwhk);
-               newmemberoffsetattr(fld, BucketSize + PtrSize);
+               newmemberoffsetattr(fld, BucketSize);
                fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "values");
                newrefattr(fld, DW_AT_type, dwhv);
-               newmemberoffsetattr(fld, BucketSize + PtrSize + BucketSize * keysize);
-               newattr(dwhb, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize + PtrSize + BucketSize * keysize + BucketSize * valsize, 0);
-               substitutetype(dwhb, "overflow", defptrto(dwhb));
+               newmemberoffsetattr(fld, BucketSize + BucketSize * keysize);
+               fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "overflow");
+               newrefattr(fld, DW_AT_type, defptrto(dwhb));
+               newmemberoffsetattr(fld, BucketSize + BucketSize * (keysize + valsize));
+               if(RegSize > PtrSize) {
+                       fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "pad");
+                       newrefattr(fld, DW_AT_type, find_or_diag(&dwtypes, "uintptr"));
+                       newmemberoffsetattr(fld, BucketSize + BucketSize * (keysize + valsize) + PtrSize);
+               }
+               newattr(dwhb, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize + BucketSize * keysize + BucketSize * valsize + RegSize, 0);
 
                // Construct hash<K,V>
                dwh = newdie(&dwtypes, DW_ABRV_STRUCTTYPE,
diff --git a/src/reflect/type.go b/src/reflect/type.go
index 26285da674cddbe9b9a4f2ce724ef23219011fc7..ec4920d8a3f1c9eacb493ea56a346713af833fb7 100644
--- a/src/reflect/type.go
+++ b/src/reflect/type.go
@@ -1646,10 +1646,6 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
        for i := 0; i < int(bucketSize*unsafe.Sizeof(uint8(0))/ptrSize); i++ {
                gc.append(bitsScalar)
        }
-       gc.append(bitsPointer) // overflow
-       if runtime.GOARCH == "amd64p32" {
-               gc.append(bitsScalar)
-       }
        // keys
        for i := 0; i < bucketSize; i++ {
                gc.appendProg(ktyp)
@@ -1658,6 +1654,11 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
        for i := 0; i < bucketSize; i++ {
                gc.appendProg(etyp)
        }
+       // overflow
+       gc.append(bitsPointer)
+       if runtime.GOARCH == "amd64p32" {
+               gc.append(bitsScalar)
+       }
 
        b := new(rtype)
        b.size = gc.size
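
The GC program describes the bucket word by word in memory order, so moving
the overflow pointer to the end of the bucket also moves its pointer bit to
the end of the program.  A sketch of the sequence bucketOf now emits (names
as in the code above):

    bitsScalar, repeated bucketSize*1/ptrSize times   // tophash bytes
    ktyp's program, repeated bucketSize times         // keys
    etyp's program, repeated bucketSize times         // values
    bitsPointer                                       // overflow pointer
    bitsScalar                                        // pad word, amd64p32 only
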
diff --git a/src/runtime/hashmap.go b/src/runtime/hashmap.go
index 0aa7c60af6cdd78d35f7965a7a822a865e03780e..14557f8835696a23d64f6242d89f677962eb3ae7 100644
--- a/src/runtime/hashmap.go
+++ b/src/runtime/hashmap.go
@@ -118,11 +118,11 @@ type hmap struct {
 // A bucket for a Go map.
 type bmap struct {
        tophash  [bucketCnt]uint8
-       overflow *bmap
        // Followed by bucketCnt keys and then bucketCnt values.
        // NOTE: packing all the keys together and then all the values together makes the
        // code a bit more complicated than alternating key/value/key/value/... but it allows
        // us to eliminate padding which would be needed for, e.g., map[int64]int8.
+       // Followed by an overflow pointer.
 }
 
 // A hash iteration structure.
@@ -149,6 +149,13 @@ func evacuated(b *bmap) bool {
        return h > empty && h < minTopHash
 }
 
+func (b *bmap) overflow(t *maptype) *bmap {
+       return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
+}
+func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
+       *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
+}
+
 func makemap(t *maptype, hint int64) *hmap {
        if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
                gothrow("bad hmap size")
@@ -275,7 +282,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
                                return v
                        }
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero)
                }
@@ -323,7 +330,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
                                return v, true
                        }
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero), false
                }
@@ -366,7 +373,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
                                return k, v
                        }
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return nil, nil
                }
@@ -437,10 +444,11 @@ again:
                        memmove(v2, val, uintptr(t.elem.size))
                        return
                }
-               if b.overflow == nil {
+               ovf := b.overflow(t)
+               if ovf == nil {
                        break
                }
-               b = b.overflow
+               b = ovf
        }
 
        // did not find mapping for key.  Allocate new cell & add entry.
@@ -455,7 +463,7 @@ again:
                        memstats.next_gc = memstats.heap_alloc
                }
                newb := (*bmap)(newobject(t.bucket))
-               b.overflow = newb
+               b.setoverflow(t, newb)
                inserti = &newb.tophash[0]
                insertk = add(unsafe.Pointer(newb), dataOffset)
                insertv = add(insertk, bucketCnt*uintptr(t.keysize))
@@ -525,7 +533,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
                        h.count--
                        return
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return
                }
@@ -720,7 +728,7 @@ next:
                        return
                }
        }
-       b = b.overflow
+       b = b.overflow(t)
        i = 0
        goto next
 }
@@ -778,7 +786,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
                yk := add(unsafe.Pointer(y), dataOffset)
                xv := add(xk, bucketCnt*uintptr(t.keysize))
                yv := add(yk, bucketCnt*uintptr(t.keysize))
-               for ; b != nil; b = b.overflow {
+               for ; b != nil; b = b.overflow(t) {
                        k := add(unsafe.Pointer(b), dataOffset)
                        v := add(k, bucketCnt*uintptr(t.keysize))
                        for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
@@ -828,7 +836,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
                                                        memstats.next_gc = memstats.heap_alloc
                                                }
                                                newx := (*bmap)(newobject(t.bucket))
-                                               x.overflow = newx
+                                               x.setoverflow(t, newx)
                                                x = newx
                                                xi = 0
                                                xk = add(unsafe.Pointer(x), dataOffset)
@@ -855,7 +863,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
                                                        memstats.next_gc = memstats.heap_alloc
                                                }
                                                newy := (*bmap)(newobject(t.bucket))
-                                               y.overflow = newy
+                                               y.setoverflow(t, newy)
                                                y = newy
                                                yi = 0
                                                yk = add(unsafe.Pointer(y), dataOffset)
@@ -881,7 +889,6 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
                // Unlink the overflow buckets & clear key/value to help GC.
                if h.flags&oldIterator == 0 {
                        b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
-                       b.overflow = nil
                        memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
                }
        }
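
With overflow gone from the struct, every bucket-chain walk goes through the
accessors above.  A minimal sketch of the traversal pattern now used
throughout the file (identifiers are the internal ones from
src/runtime/hashmap.go):

    for ; b != nil; b = b.overflow(t) {
            k := add(unsafe.Pointer(b), dataOffset)
            v := add(k, bucketCnt*uintptr(t.keysize))
            for i := uintptr(0); i < bucketCnt; i++ {
                    // examine b.tophash[i], the key at k, and the value at v
                    k = add(k, uintptr(t.keysize))
                    v = add(v, uintptr(t.valuesize))
            }
    }
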
diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go
index 8e21e02d64e2bc1a1d5f91ce1dc30717946eae38..afa6ecc99a9fd5f9e581fe7603ac4b781460e9d9 100644
--- a/src/runtime/hashmap_fast.go
+++ b/src/runtime/hashmap_fast.go
@@ -43,7 +43,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
                        }
                        return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero)
                }
@@ -85,7 +85,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
                        }
                        return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero), false
                }
@@ -127,7 +127,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
                        }
                        return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero)
                }
@@ -169,7 +169,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
                        }
                        return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero), false
                }
@@ -271,7 +271,7 @@ dohash:
                                return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
                        }
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero)
                }
@@ -371,7 +371,7 @@ dohash:
                                return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
                        }
                }
-               b = b.overflow
+               b = b.overflow(t)
                if b == nil {
                        return unsafe.Pointer(t.elem.zero), false
                }