Cypherpunks repositories - gostls13.git/commitdiff
runtime: clean up map code
author	mengxiaodong <920432478@qq.com>
Mon, 28 Sep 2020 09:38:13 +0000 (17:38 +0800)
committer	Keith Randall <khr@golang.org>
Wed, 30 Sep 2020 19:59:48 +0000 (19:59 +0000)
1. Revise an ambiguous comment: "all current buckets" reads as if it meant all the buckets in hmap.buckets, when what is actually full is the current bucket plus all the overflow buckets connected to it (a toy sketch of such a chain follows the map.go hunks below).
2. Route all pointer address additions through src/runtime/stubs.go:add, keeping the code style uniform (a sketch of that helper is included directly below).
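
For reference, point 2 refers to the small pointer-arithmetic helper defined in src/runtime/stubs.go. A sketch of what that helper looks like (paraphrased, not a verbatim copy of the runtime source; the package name here is hypothetical):

package sketch // hypothetical package, for illustration only

import "unsafe"

// add advances pointer p by x bytes. This mirrors the helper in
// src/runtime/stubs.go and performs exactly the computation that the
// open-coded unsafe.Pointer(uintptr(p) + x) expressions in the hunks
// below used to spell out inline.
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
        return unsafe.Pointer(uintptr(p) + x)
}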

Change-Id: Idc7224dbe6c391e1b03bf5d009c3734bc75187ce
Reviewed-on: https://go-review.googlesource.com/c/go/+/257979
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Keith Randall <khr@golang.org>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>

src/runtime/map.go
src/runtime/map_fast32.go
src/runtime/map_fast64.go
src/runtime/map_faststr.go

src/runtime/map.go
index 6f31f23d6fba33fdf56901a71b2ea924758777f8..5ac3a9958bd03280cdc1494713f21f646d750db3 100644 (file)
@@ -599,7 +599,7 @@ again:
        if h.growing() {
                growWork(t, h, bucket)
        }
-       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
        top := tophash(hash)
 
        var inserti *uint8
@@ -650,7 +650,7 @@ bucketloop:
        }
 
        if inserti == nil {
-               // all current buckets are full, allocate a new one.
+               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
                newb := h.newoverflow(t, b)
                inserti = &newb.tophash[0]
                insertk = add(unsafe.Pointer(newb), dataOffset)
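
The reworded comment in the hunk above describes the overflow chain hanging off a single hash bucket, not all of hmap.buckets. A simplified, standalone sketch of that shape (toy types and names, not the runtime's real hmap/bmap) might look like this:

package main

import "fmt"

const bucketCnt = 8 // slots per bucket, matching the runtime's bucket size

// toyBucket stands in for the runtime's bmap: a fixed number of slots
// plus a link to the next overflow bucket in the chain.
type toyBucket struct {
        used     int        // how many of the bucketCnt slots are occupied
        overflow *toyBucket // next overflow bucket; nil at the end of the chain
}

// insertSlot walks one bucket and its overflow chain looking for a free
// slot. Only when the current bucket and all the overflow buckets
// connected to it are full does it allocate a new overflow bucket,
// which is the case the revised comment describes.
func insertSlot(b *toyBucket) *toyBucket {
        for {
                if b.used < bucketCnt {
                        b.used++
                        return b
                }
                if b.overflow == nil {
                        break
                }
                b = b.overflow
        }
        // The whole chain is full: allocate a new overflow bucket
        // (the h.newoverflow(t, b) step in the diff above).
        newb := &toyBucket{used: 1}
        b.overflow = newb
        return newb
}

func main() {
        head := &toyBucket{}
        for i := 0; i < 20; i++ {
                insertSlot(head)
        }
        chain := 0
        for b := head; b != nil; b = b.overflow {
                chain++
        }
        fmt.Println(chain) // 20 inserts at 8 slots per bucket -> prints 3
}
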
src/runtime/map_fast32.go
index d80f5eac78e51eaf523d9fef5d065c2621943396..8d52dad217afb0fabce82c73c1a27ef52345eddd 100644 (file)
@@ -114,7 +114,7 @@ again:
        if h.growing() {
                growWork_fast32(t, h, bucket)
        }
-       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
 
        var insertb *bmap
        var inserti uintptr
@@ -158,7 +158,7 @@ bucketloop:
        }
 
        if insertb == nil {
-               // all current buckets are full, allocate a new one.
+               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
                insertb = h.newoverflow(t, b)
                inserti = 0 // not necessary, but avoids needlessly spilling inserti
        }
@@ -204,7 +204,7 @@ again:
        if h.growing() {
                growWork_fast32(t, h, bucket)
        }
-       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
 
        var insertb *bmap
        var inserti uintptr
@@ -248,7 +248,7 @@ bucketloop:
        }
 
        if insertb == nil {
-               // all current buckets are full, allocate a new one.
+               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
                insertb = h.newoverflow(t, b)
                inserti = 0 // not necessary, but avoids needlessly spilling inserti
        }
src/runtime/map_fast64.go
index 3bc84bbdd37a6fe3cde3b510edef2eeac85ca9eb..f1368dc774ec4de8cc6ec4071fe507ee2020d6c0 100644 (file)
@@ -114,7 +114,7 @@ again:
        if h.growing() {
                growWork_fast64(t, h, bucket)
        }
-       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
 
        var insertb *bmap
        var inserti uintptr
@@ -158,7 +158,7 @@ bucketloop:
        }
 
        if insertb == nil {
-               // all current buckets are full, allocate a new one.
+               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
                insertb = h.newoverflow(t, b)
                inserti = 0 // not necessary, but avoids needlessly spilling inserti
        }
@@ -204,7 +204,7 @@ again:
        if h.growing() {
                growWork_fast64(t, h, bucket)
        }
-       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
 
        var insertb *bmap
        var inserti uintptr
@@ -248,7 +248,7 @@ bucketloop:
        }
 
        if insertb == nil {
-               // all current buckets are full, allocate a new one.
+               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
                insertb = h.newoverflow(t, b)
                inserti = 0 // not necessary, but avoids needlessly spilling inserti
        }
src/runtime/map_faststr.go
index 108c50239486ec1917e5b65d764ead64547d5e17..2d1ac762a81e24d16d6b4826530b072513d0cbba 100644 (file)
@@ -225,7 +225,7 @@ again:
        if h.growing() {
                growWork_faststr(t, h, bucket)
        }
-       b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+       b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
        top := tophash(hash)
 
        var insertb *bmap
@@ -274,7 +274,7 @@ bucketloop:
        }
 
        if insertb == nil {
-               // all current buckets are full, allocate a new one.
+               // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
                insertb = h.newoverflow(t, b)
                inserti = 0 // not necessary, but avoids needlessly spilling inserti
        }
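
As a quick sanity check that the refactor is purely stylistic, the standalone snippet below (with made-up names, not runtime code) computes a bucket-style address both the old way and the new way, mirroring the h.buckets + bucket*uintptr(t.bucketsize) pattern in the hunks above, and confirms the results are identical:

package main

import (
        "fmt"
        "unsafe"
)

// add mirrors the src/runtime/stubs.go helper adopted by this commit.
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
        return unsafe.Pointer(uintptr(p) + x)
}

func main() {
        // A flat array of fixed-size cells stands in for the hmap.buckets allocation.
        var buckets [4][16]byte
        base := unsafe.Pointer(&buckets[0])
        bucketsize := unsafe.Sizeof(buckets[0])

        for bucket := uintptr(0); bucket < 4; bucket++ {
                oldStyle := unsafe.Pointer(uintptr(base) + bucket*bucketsize) // open-coded arithmetic
                newStyle := add(base, bucket*bucketsize)                      // via the helper
                fmt.Println(oldStyle == newStyle)                             // prints true for every bucket
        }
}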