Cypherpunks repositories - gostls13.git/commitdiff
runtime: replace t.keysize with fixed key size in evacuate_fastX
author: Josh Bleecher Snyder <josharian@gmail.com>
Sat, 19 Aug 2017 04:15:44 +0000 (21:15 -0700)
committer: Josh Bleecher Snyder <josharian@gmail.com>
Mon, 28 Aug 2017 18:54:21 +0000 (18:54 +0000)
Change-Id: I89c3c3b21d7a4acbc49b14a52ac8d9a5861c0c39
Reviewed-on: https://go-review.googlesource.com/59131
Run-TryBot: Josh Bleecher Snyder <josharian@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Martin Möhrmann <moehrmann@google.com>
src/runtime/hashmap_fast.go

index 32e7cff0903f58b05eea867bffeb2689aa3c30a5..626b3531f5c17b94deeddda84526cf630f571dea 100644 (file)
@@ -795,7 +795,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
                x := &xy[0]
                x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
                x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+               x.v = add(x.k, bucketCnt*4)
 
                if !h.sameSizeGrow() {
                        // Only calculate y pointers if we're growing bigger.
@@ -803,13 +803,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
                        y := &xy[1]
                        y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
                        y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+                       y.v = add(y.k, bucketCnt*4)
                }
 
                for ; b != nil; b = b.overflow(t) {
                        k := add(unsafe.Pointer(b), dataOffset)
-                       v := add(k, bucketCnt*uintptr(t.keysize))
-                       for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+                       v := add(k, bucketCnt*4)
+                       for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
                                top := b.tophash[i]
                                if top == empty {
                                        b.tophash[i] = evacuatedEmpty
@@ -855,7 +855,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
                                        dst.b = h.newoverflow(t, dst.b)
                                        dst.i = 0
                                        dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+                                       dst.v = add(dst.k, bucketCnt*4)
                                }
                                dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
                                if t.indirectkey {
@@ -873,7 +873,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
                                // key or value arrays.  That's ok, as we have the overflow pointer
                                // at the end of the bucket to protect against pointing past the
                                // end of the bucket.
-                               dst.k = add(dst.k, uintptr(t.keysize))
+                               dst.k = add(dst.k, 4)
                                dst.v = add(dst.v, uintptr(t.valuesize))
                        }
                }
@@ -916,7 +916,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
                x := &xy[0]
                x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
                x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+               x.v = add(x.k, bucketCnt*8)
 
                if !h.sameSizeGrow() {
                        // Only calculate y pointers if we're growing bigger.
@@ -924,13 +924,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
                        y := &xy[1]
                        y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
                        y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+                       y.v = add(y.k, bucketCnt*8)
                }
 
                for ; b != nil; b = b.overflow(t) {
                        k := add(unsafe.Pointer(b), dataOffset)
-                       v := add(k, bucketCnt*uintptr(t.keysize))
-                       for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+                       v := add(k, bucketCnt*8)
+                       for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
                                top := b.tophash[i]
                                if top == empty {
                                        b.tophash[i] = evacuatedEmpty
@@ -976,7 +976,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
                                        dst.b = h.newoverflow(t, dst.b)
                                        dst.i = 0
                                        dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+                                       dst.v = add(dst.k, bucketCnt*8)
                                }
                                dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
                                if t.indirectkey {
@@ -994,7 +994,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
                                // key or value arrays.  That's ok, as we have the overflow pointer
                                // at the end of the bucket to protect against pointing past the
                                // end of the bucket.
-                               dst.k = add(dst.k, uintptr(t.keysize))
+                               dst.k = add(dst.k, 8)
                                dst.v = add(dst.v, uintptr(t.valuesize))
                        }
                }
@@ -1037,7 +1037,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
                x := &xy[0]
                x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
                x.k = add(unsafe.Pointer(x.b), dataOffset)
-               x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+               x.v = add(x.k, bucketCnt*2*sys.PtrSize)
 
                if !h.sameSizeGrow() {
                        // Only calculate y pointers if we're growing bigger.
@@ -1045,13 +1045,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
                        y := &xy[1]
                        y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
                        y.k = add(unsafe.Pointer(y.b), dataOffset)
-                       y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+                       y.v = add(y.k, bucketCnt*2*sys.PtrSize)
                }
 
                for ; b != nil; b = b.overflow(t) {
                        k := add(unsafe.Pointer(b), dataOffset)
-                       v := add(k, bucketCnt*uintptr(t.keysize))
-                       for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+                       v := add(k, bucketCnt*2*sys.PtrSize)
+                       for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
                                top := b.tophash[i]
                                if top == empty {
                                        b.tophash[i] = evacuatedEmpty
@@ -1097,7 +1097,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
                                        dst.b = h.newoverflow(t, dst.b)
                                        dst.i = 0
                                        dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-                                       dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+                                       dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
                                }
                                dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
                                if t.indirectkey {
@@ -1115,7 +1115,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
                                // key or value arrays.  That's ok, as we have the overflow pointer
                                // at the end of the bucket to protect against pointing past the
                                // end of the bucket.
-                               dst.k = add(dst.k, uintptr(t.keysize))
+                               dst.k = add(dst.k, 2*sys.PtrSize)
                                dst.v = add(dst.v, uintptr(t.valuesize))
                        }
                }