Cypherpunks repositories - gostls13.git/commitdiff
all: omit unnecessary type conversions
author Jes Cok <xigua67damn@gmail.com>
Mon, 28 Jul 2025 11:36:17 +0000 (11:36 +0000)
committer Gopher Robot <gobot@golang.org>
Mon, 28 Jul 2025 18:13:58 +0000 (11:13 -0700)
Found by github.com/mdempsky/unconvert

Change-Id: Ib78cceb718146509d96dbb6da87b27dbaeba1306
GitHub-Last-Rev: dedf354811701ce8920c305b6f7aa78914a4171c
GitHub-Pull-Request: golang/go#74771
Reviewed-on: https://go-review.googlesource.com/c/go/+/690735
Reviewed-by: Mark Freeman <mark@golang.org>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Keith Randall <khr@google.com>
Auto-Submit: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
20 files changed:
src/bytes/bytes_test.go
src/debug/dwarf/entry.go
src/net/tcpconn_keepalive_test.go
src/runtime/heapdump.go
src/runtime/mcleanup.go
src/runtime/mgcmark.go
src/runtime/mgcsweep.go
src/runtime/mheap.go
src/runtime/slice.go
src/runtime/traceallocfree.go
src/runtime/tracebuf.go
src/runtime/tracecpu.go
src/runtime/traceevent.go
src/runtime/traceruntime.go
src/runtime/tracestack.go
src/runtime/tracetype.go
src/sync/map_test.go
src/syscall/dirent.go
src/time/example_test.go
src/time/sleep.go
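
Every hunk below removes a conversion whose operand already has the target type. As a point of reference, here is a minimal, self-contained sketch of the kind of code unconvert reports; it is illustrative only and is not taken from the CL (it uses encoding/binary rather than the internal byteorder package touched in src/syscall/dirent.go):

package main

import (
	"encoding/binary"
	"fmt"
)

// readBE64 mirrors the pattern fixed in src/syscall/dirent.go:
// binary.BigEndian.Uint64 already returns uint64, so wrapping the call in
// uint64(...) is a no-op conversion that unconvert reports.
func readBE64(b []byte) uint64 {
	return uint64(binary.BigEndian.Uint64(b)) // unconvert: unnecessary conversion
}

func main() {
	fmt.Println(readBE64([]byte{0, 0, 0, 0, 0, 0, 0, 42}))
}

The tool is typically installed with go install github.com/mdempsky/unconvert@latest and pointed at package patterns (for example, unconvert ./...); exact invocation and flags may vary between versions.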

diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go
index 0f6cf4993af642d6c6ca24981f9f39decac44f56..03f01582c59a4668b678fccac32f00918cd0475e 100644
@@ -693,14 +693,14 @@ func bmIndexRuneUnicode(rt *unicode.RangeTable, needle rune) func(b *testing.B,
        for _, r16 := range rt.R16 {
                for r := rune(r16.Lo); r <= rune(r16.Hi); r += rune(r16.Stride) {
                        if r != needle {
-                               rs = append(rs, rune(r))
+                               rs = append(rs, r)
                        }
                }
        }
        for _, r32 := range rt.R32 {
                for r := rune(r32.Lo); r <= rune(r32.Hi); r += rune(r32.Stride) {
                        if r != needle {
-                               rs = append(rs, rune(r))
+                               rs = append(rs, r)
                        }
                }
        }
diff --git a/src/debug/dwarf/entry.go b/src/debug/dwarf/entry.go
index 07b9259be5a8d66ee2f018e2d54d78e8d9f2ca0c..8741479483348e0c0cfb905af70560320a066df9 100644
@@ -554,7 +554,7 @@ func (b *buf) entry(cu *Entry, u *unit) *Entry {
                case formData16:
                        val = b.bytes(16)
                case formSdata:
-                       val = int64(b.int())
+                       val = b.int()
                case formUdata:
                        val = int64(b.uint())
                case formImplicitConst:
diff --git a/src/net/tcpconn_keepalive_test.go b/src/net/tcpconn_keepalive_test.go
index 53d0be034fb554fce7532dec5dfd71a517839644..4bf2f9ef2064200bee6e03822ab85218b551b34c 100644
@@ -22,7 +22,7 @@ func TestTCPConnKeepAliveConfigDialer(t *testing.T) {
                oldCfg  KeepAliveConfig
        )
        testPreHookSetKeepAlive = func(nfd *netFD) {
-               oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+               oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
        }
 
        handler := func(ls *localServer, ln Listener) {
@@ -80,7 +80,7 @@ func TestTCPConnKeepAliveConfigListener(t *testing.T) {
                oldCfg  KeepAliveConfig
        )
        testPreHookSetKeepAlive = func(nfd *netFD) {
-               oldCfg, errHook = getCurrentKeepAliveSettings(fdType(nfd.pfd.Sysfd))
+               oldCfg, errHook = getCurrentKeepAliveSettings(nfd.pfd.Sysfd)
        }
 
        ch := make(chan Conn, 1)
diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 5476035b2ea5e1bd8a58e6ea411605193237580c..72878d072824fcf34d71d9452176a35a1ba48ce7 100644
@@ -460,7 +460,7 @@ func dumproots() {
                                        continue
                                }
                                spf := (*specialfinalizer)(unsafe.Pointer(sp))
-                               p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
+                               p := unsafe.Pointer(s.base() + spf.special.offset)
                                dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
                        }
                }
@@ -659,7 +659,7 @@ func dumpmemprof() {
                                continue
                        }
                        spp := (*specialprofile)(unsafe.Pointer(sp))
-                       p := s.base() + uintptr(spp.special.offset)
+                       p := s.base() + spp.special.offset
                        dumpint(tagAllocSample)
                        dumpint(uint64(p))
                        dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
diff --git a/src/runtime/mcleanup.go b/src/runtime/mcleanup.go
index c368730c5765e322aec7333fc914890a03d197aa..383217aa057cef2e69907003c3867ecd74d2b0ee 100644
@@ -173,14 +173,14 @@ func (c Cleanup) Stop() {
                                // Reached the end of the linked list. Stop searching at this point.
                                break
                        }
-                       if offset == uintptr(s.offset) && _KindSpecialCleanup == s.kind &&
+                       if offset == s.offset && _KindSpecialCleanup == s.kind &&
                                (*specialCleanup)(unsafe.Pointer(s)).id == c.id {
                                // The special is a cleanup and contains a matching cleanup id.
                                *iter = s.next
                                found = s
                                break
                        }
-                       if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCleanup < s.kind) {
+                       if offset < s.offset || (offset == s.offset && _KindSpecialCleanup < s.kind) {
                                // The special is outside the region specified for that kind of
                                // special. The specials are sorted by kind.
                                break
diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go
index a0087ab6e07b1cf93bc607e2b77b59a849e5ada6..8b306045c5da21758f8b569e58f2a16cfad227be 100644
@@ -415,7 +415,7 @@ func gcScanFinalizer(spf *specialfinalizer, s *mspan, gcw *gcWork) {
        // Don't mark finalized object, but scan it so we retain everything it points to.
 
        // A finalizer can be set for an inner byte of an object, find object beginning.
-       p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+       p := s.base() + spf.special.offset/s.elemsize*s.elemsize
 
        // Mark everything that can be reached from
        // the object (but *not* the object itself or
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index 1605c21966ce1541b35bb2114659f0e905f1444e..b72cc461ba93af3a602a39b4b0fd6c768bdc1690 100644
@@ -553,7 +553,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
        siter := newSpecialsIter(s)
        for siter.valid() {
                // A finalizer can be set for an inner byte of an object, find object beginning.
-               objIndex := uintptr(siter.s.offset) / size
+               objIndex := siter.s.offset / size
                p := s.base() + objIndex*size
                mbits := s.markBitsForIndex(objIndex)
                if !mbits.isMarked() {
@@ -561,7 +561,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
                        // Pass 1: see if it has a finalizer.
                        hasFinAndRevived := false
                        endOffset := p - s.base() + size
-                       for tmp := siter.s; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
+                       for tmp := siter.s; tmp != nil && tmp.offset < endOffset; tmp = tmp.next {
                                if tmp.kind == _KindSpecialFinalizer {
                                        // Stop freeing of object if it has a finalizer.
                                        mbits.setMarkedNonAtomic()
@@ -573,11 +573,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
                                // Pass 2: queue all finalizers and clear any weak handles. Weak handles are cleared
                                // before finalization as specified by the weak package. See the documentation
                                // for that package for more details.
-                               for siter.valid() && uintptr(siter.s.offset) < endOffset {
+                               for siter.valid() && siter.s.offset < endOffset {
                                        // Find the exact byte for which the special was setup
                                        // (as opposed to object beginning).
                                        special := siter.s
-                                       p := s.base() + uintptr(special.offset)
+                                       p := s.base() + special.offset
                                        if special.kind == _KindSpecialFinalizer || special.kind == _KindSpecialWeakHandle {
                                                siter.unlinkAndNext()
                                                freeSpecial(special, unsafe.Pointer(p), size)
@@ -589,11 +589,11 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
                                }
                        } else {
                                // Pass 2: the object is truly dead, free (and handle) all specials.
-                               for siter.valid() && uintptr(siter.s.offset) < endOffset {
+                               for siter.valid() && siter.s.offset < endOffset {
                                        // Find the exact byte for which the special was setup
                                        // (as opposed to object beginning).
                                        special := siter.s
-                                       p := s.base() + uintptr(special.offset)
+                                       p := s.base() + special.offset
                                        siter.unlinkAndNext()
                                        freeSpecial(special, unsafe.Pointer(p), size)
                                }
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index d8193ddb46d572e4860414d16ae303443e6e29e5..cb0d34004899ca7a8799db9aca346f29ed455c61 100644
@@ -1488,7 +1488,7 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
                s.allocBits = newAllocBits(uintptr(s.nelems))
 
                // Adjust s.limit down to the object-containing part of the span.
-               s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
+               s.limit = s.base() + s.elemsize*uintptr(s.nelems)
 
                // It's safe to access h.sweepgen without the heap lock because it's
                // only ever updated with the world stopped and we run on the
@@ -2152,11 +2152,11 @@ func (span *mspan) specialFindSplicePoint(offset uintptr, kind byte) (**special,
                if s == nil {
                        break
                }
-               if offset == uintptr(s.offset) && kind == s.kind {
+               if offset == s.offset && kind == s.kind {
                        found = true
                        break
                }
-               if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) {
+               if offset < s.offset || (offset == s.offset && kind < s.kind) {
                        break
                }
                iter = &s.next
@@ -2323,14 +2323,14 @@ func getCleanupContext(ptr uintptr, cleanupID uint64) *specialCheckFinalizer {
                                // Reached the end of the linked list. Stop searching at this point.
                                break
                        }
-                       if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+                       if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
                                (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
                                // The special is a cleanup and contains a matching cleanup id.
                                *iter = s.next
                                found = (*specialCheckFinalizer)(unsafe.Pointer(s))
                                break
                        }
-                       if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+                       if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
                                // The special is outside the region specified for that kind of
                                // special. The specials are sorted by kind.
                                break
@@ -2373,14 +2373,14 @@ func clearCleanupContext(ptr uintptr, cleanupID uint64) {
                                // Reached the end of the linked list. Stop searching at this point.
                                break
                        }
-                       if offset == uintptr(s.offset) && _KindSpecialCheckFinalizer == s.kind &&
+                       if offset == s.offset && _KindSpecialCheckFinalizer == s.kind &&
                                (*specialCheckFinalizer)(unsafe.Pointer(s)).cleanupID == cleanupID {
                                // The special is a cleanup and contains a matching cleanup id.
                                *iter = s.next
                                found = s
                                break
                        }
-                       if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && _KindSpecialCheckFinalizer < s.kind) {
+                       if offset < s.offset || (offset == s.offset && _KindSpecialCheckFinalizer < s.kind) {
                                // The special is outside the region specified for that kind of
                                // special. The specials are sorted by kind.
                                break
@@ -2476,7 +2476,7 @@ type specialWeakHandle struct {
 
 //go:linkname internal_weak_runtime_registerWeakPointer weak.runtime_registerWeakPointer
 func internal_weak_runtime_registerWeakPointer(p unsafe.Pointer) unsafe.Pointer {
-       return unsafe.Pointer(getOrAddWeakHandle(unsafe.Pointer(p)))
+       return unsafe.Pointer(getOrAddWeakHandle(p))
 }
 
 //go:linkname internal_weak_runtime_makeStrongFromWeak weak.runtime_makeStrongFromWeak
diff --git a/src/runtime/slice.go b/src/runtime/slice.go
index 79d3f6c0de626a0bf47a11b1aab29ba38dd581df..e31d5dccb24b4f68043b8a4aa5988f569516906c 100644
@@ -397,5 +397,5 @@ func bytealg_MakeNoZero(len int) []byte {
                panicmakeslicelen()
        }
        cap := roundupsize(uintptr(len), true)
-       return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
+       return unsafe.Slice((*byte)(mallocgc(cap, nil, false)), cap)[:len]
 }
diff --git a/src/runtime/traceallocfree.go b/src/runtime/traceallocfree.go
index 70e48ea3a673926a22e08ca3a94aa53d7b18bab5..b1b6c63462f4ab4a78e7ca2f6596d132c35fffbc 100644
@@ -37,7 +37,7 @@ func traceSnapshotMemory(gen uintptr) {
        }
 
        // Emit info.
-       w.varint(uint64(trace.minPageHeapAddr))
+       w.varint(trace.minPageHeapAddr)
        w.varint(uint64(pageSize))
        w.varint(uint64(gc.MinHeapAlign))
        w.varint(uint64(fixedStack))
diff --git a/src/runtime/tracebuf.go b/src/runtime/tracebuf.go
index 08a1d46838b67d8dfa8b5181f95a9ec4a0450348..5adaede424c4db3a66b0969e0f2fe315a0de4e11 100644
@@ -183,7 +183,7 @@ func (w traceWriter) refill() traceWriter {
        // Tolerate a nil mp.
        mID := ^uint64(0)
        if w.mp != nil {
-               mID = uint64(w.mp.procid)
+               mID = w.mp.procid
        }
 
        // Write the buffer's header.
@@ -194,7 +194,7 @@ func (w traceWriter) refill() traceWriter {
                w.byte(byte(w.exp))
        }
        w.varint(uint64(w.gen))
-       w.varint(uint64(mID))
+       w.varint(mID)
        w.varint(uint64(ts))
        w.traceBuf.lenPos = w.varintReserve()
        return w
diff --git a/src/runtime/tracecpu.go b/src/runtime/tracecpu.go
index 092c707f83335f4679d358783e428666fc5b188b..e64ca32cdf155e4d51b777e46cdf9e39d6ef0c17 100644
@@ -258,7 +258,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) {
        if gp != nil {
                hdr[1] = gp.goid
        }
-       hdr[2] = uint64(mp.procid)
+       hdr[2] = mp.procid
 
        // Allow only one writer at a time
        for !trace.signalLock.CompareAndSwap(0, 1) {
diff --git a/src/runtime/traceevent.go b/src/runtime/traceevent.go
index 263847be2e12cd44817bd808aba9143d825bb9df..b0bc4c017da2660f50c93790183202b24f8e7d06 100644
@@ -42,7 +42,7 @@ func (tl traceLocker) eventWriter(goStatus tracev2.GoStatus, procStatus tracev2.
                tl.writer().writeProcStatus(uint64(pp.id), procStatus, pp.trace.inSweep).end()
        }
        if gp := tl.mp.curg; gp != nil && !gp.trace.statusWasTraced(tl.gen) && gp.trace.acquireStatus(tl.gen) {
-               tl.writer().writeGoStatus(uint64(gp.goid), int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
+               tl.writer().writeGoStatus(gp.goid, int64(tl.mp.procid), goStatus, gp.inMarkAssist, 0 /* no stack */).end()
        }
        return traceEventWriter{tl}
 }
diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go
index a2775a3427194373f47cd362d75de3495942db85..06e36fd8026d392a608bfd544761614ba75886b5 100644
@@ -457,7 +457,7 @@ func (tl traceLocker) GoPreempt() {
 
 // GoStop emits a GoStop event with the provided reason.
 func (tl traceLocker) GoStop(reason traceGoStopReason) {
-       tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(0))
+       tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoStop, trace.goStopReasons[tl.gen%2][reason], tl.stack(0))
 }
 
 // GoPark emits a GoBlock event with the provided reason.
@@ -465,7 +465,7 @@ func (tl traceLocker) GoStop(reason traceGoStopReason) {
 // TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
 // that we have both, and waitReason is way more descriptive.
 func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
-       tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
+       tl.eventWriter(tracev2.GoRunning, tracev2.ProcRunning).event(tracev2.EvGoBlock, trace.goBlockReasons[tl.gen%2][reason], tl.stack(skip))
 }
 
 // GoUnpark emits a GoUnblock event.
diff --git a/src/runtime/tracestack.go b/src/runtime/tracestack.go
index 76d6b05048f83a96f4188636292b155ff4cac1ef..51f3c29445c5a74c6a279ffea04a1c3d3ee14b1b 100644
@@ -190,7 +190,7 @@ func dumpStacksRec(node *traceMapNode, w traceWriter, stackBuf []uintptr) traceW
 
        // Emit stack event.
        w.byte(byte(tracev2.EvStack))
-       w.varint(uint64(node.id))
+       w.varint(node.id)
        w.varint(uint64(len(frames)))
        for _, frame := range frames {
                w.varint(uint64(frame.PC))
diff --git a/src/runtime/tracetype.go b/src/runtime/tracetype.go
index f54f8125784e24f1774fcc46652bd96003a77556..613fc88202b16f728c8c690f054df3deacdce8c7 100644
@@ -64,7 +64,7 @@ func dumpTypesRec(node *traceMapNode, w traceWriter) traceWriter {
        }
 
        // Emit type.
-       w.varint(uint64(node.id))
+       w.varint(node.id)
        w.varint(uint64(uintptr(unsafe.Pointer(typ))))
        w.varint(uint64(typ.Size()))
        w.varint(uint64(typ.PtrBytes))
diff --git a/src/sync/map_test.go b/src/sync/map_test.go
index 05c81354c87b67edef0a3661f26466727277cd7c..0d6690c74672a8f8954b80e7132dd426c1ce7c36 100644
@@ -161,7 +161,7 @@ func TestConcurrentRange(t *testing.T) {
 
        m := new(sync.Map)
        for n := int64(1); n <= mapSize; n++ {
-               m.Store(n, int64(n))
+               m.Store(n, n)
        }
 
        done := make(chan struct{})
diff --git a/src/syscall/dirent.go b/src/syscall/dirent.go
index f6e78d9bb570e4cbf31ea584d44acaa279656d4e..c12b1193356d21a1457ea83149c8d0ce73b6a7fd 100644
@@ -33,7 +33,7 @@ func readIntBE(b []byte, size uintptr) uint64 {
        case 4:
                return uint64(byteorder.BEUint32(b))
        case 8:
-               return uint64(byteorder.BEUint64(b))
+               return byteorder.BEUint64(b)
        default:
                panic("syscall: readInt with unsupported size")
        }
@@ -48,7 +48,7 @@ func readIntLE(b []byte, size uintptr) uint64 {
        case 4:
                return uint64(byteorder.LEUint32(b))
        case 8:
-               return uint64(byteorder.LEUint64(b))
+               return byteorder.LEUint64(b)
        default:
                panic("syscall: readInt with unsupported size")
        }
diff --git a/src/time/example_test.go b/src/time/example_test.go
index eeadcdb1c10a15f6e83190ba3450eaf78c627f14..05eac86738db946a2f8648d9ac6448953f540cc4 100644
@@ -735,8 +735,8 @@ func ExampleTime_String() {
        timeWithoutNanoseconds := time.Date(2000, 2, 1, 12, 13, 14, 0, time.UTC)
        withoutNanoseconds := timeWithoutNanoseconds.String()
 
-       fmt.Printf("withNanoseconds = %v\n", string(withNanoseconds))
-       fmt.Printf("withoutNanoseconds = %v\n", string(withoutNanoseconds))
+       fmt.Printf("withNanoseconds = %v\n", withNanoseconds)
+       fmt.Printf("withoutNanoseconds = %v\n", withoutNanoseconds)
 
        // Output:
        // withNanoseconds = 2000-02-01 12:13:14.000000015 +0000 UTC
diff --git a/src/time/sleep.go b/src/time/sleep.go
index e9cd483be5d2b09c2b69f4be04a8658ad8449481..4b7750eb9414e881bc91fd683c1cf2353dd10085 100644
@@ -142,7 +142,7 @@ func (t *Timer) Stop() bool {
 // in Go 1.27 or later.
 func NewTimer(d Duration) *Timer {
        c := make(chan Time, 1)
-       t := (*Timer)(newTimer(when(d), 0, sendTime, c, syncTimer(c)))
+       t := newTimer(when(d), 0, sendTime, c, syncTimer(c))
        t.C = c
        return t
 }
@@ -208,7 +208,7 @@ func After(d Duration) <-chan Time {
 // be used to cancel the call using its Stop method.
 // The returned Timer's C field is not used and will be nil.
 func AfterFunc(d Duration, f func()) *Timer {
-       return (*Timer)(newTimer(when(d), 0, goFunc, f, nil))
+       return newTimer(when(d), 0, goFunc, f, nil)
 }
 
 func goFunc(arg any, seq uintptr, delta int64) {
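
As a closing note on the src/time/sleep.go hunks above: removing the (*Timer)(...) wrappers compiles only because newTimer already returns *Timer, so the removed conversions were converting a value to its own type. A standalone sketch of that identity-conversion case, using hypothetical stand-ins for the real time package internals:

package main

import "fmt"

// Timer and newTimer are hypothetical stand-ins for the time package
// internals; as in the diff above, newTimer already returns *Timer, so the
// (*Timer)(...) conversion below is a no-op that unconvert reports.
type Timer struct{ C chan struct{} }

func newTimer(c chan struct{}) *Timer { return &Timer{C: c} }

func main() {
	t := (*Timer)(newTimer(make(chan struct{}, 1))) // unconvert: unnecessary conversion
	fmt.Printf("%T\n", t) // prints *main.Timer
}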