// type zeros struct{}
//
// func (zeros) Read(b []byte) (int, error) {
-// for i := range b {
-// b[i] = 0
-// }
+// clear(b)
// return len(b), nil
// }
//
type zeros struct{}
func (zeros) Read(p []byte) (int, error) {
- for i := range p {
- p[i] = 0
- }
+ clear(p)
return len(p), nil
}
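
Every hunk in this change replaces a hand-written zeroing loop with the clear builtin added in Go 1.21, which, applied to a slice, sets each element to the zero value of its element type without changing the slice's length. A small standalone sketch of that equivalence (illustration only, not part of the change):

package main

import (
	"bytes"
	"fmt"
)

func main() {
	a := []byte{1, 2, 3, 4}
	b := []byte{1, 2, 3, 4}

	// The old pattern: zero each element by hand.
	for i := range a {
		a[i] = 0
	}

	// The new pattern: clear (Go 1.21+) sets every element of a slice
	// to the zero value of its element type.
	clear(b)

	fmt.Println(bytes.Equal(a, b)) // true: both slices are now all zeros
}
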
gcmAesFinish(&g.productTable, &tagMask, &expectedTag, uint64(len(ciphertext)), uint64(len(data)))
if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
- for i := range out {
- out[i] = 0
- }
+ clear(out)
return nil, errOpen
}
ret, out := sliceForAppend(dst, len(ciphertext))
if subtle.ConstantTimeCompare(expectedTag[:g.tagSize], tag) != 1 {
- for i := range out {
- out[i] = 0
- }
+ clear(out)
return nil, errOpen
}
// so overwrites dst in the event of a tag mismatch. That
// behavior is mimicked here in order to be consistent across
// platforms.
- for i := range out {
- out[i] = 0
- }
+ clear(out)
return nil, errOpen
}
// so overwrites dst in the event of a tag mismatch. That
// behavior is mimicked here in order to be consistent across
// platforms.
- for i := range out {
- out[i] = 0
- }
+ clear(out)
return nil, errOpen
}
// so overwrites dst in the event of a tag mismatch. That
// behavior is mimicked here in order to be consistent across
// platforms.
- for i := range out {
- out[i] = 0
- }
+ clear(out)
return nil, errOpen
}
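
The comment repeated in the three GCM hunks above refers to wiping whatever plaintext has already been written into the destination before returning an error, so a caller that ignores the error never observes unauthenticated data. A hedged sketch of that mismatch path; openSketch and its error value are illustrative names, not the real GCM implementation:

package main

import (
	"crypto/subtle"
	"errors"
	"fmt"
)

// openSketch illustrates the wipe-on-mismatch pattern only; it is not the
// actual GCM code.
func openSketch(out, expectedTag, tag []byte) ([]byte, error) {
	if subtle.ConstantTimeCompare(expectedTag, tag) != 1 {
		// Zero whatever was already written into out so a caller that
		// ignores the error cannot observe unauthenticated plaintext.
		clear(out)
		return nil, errors.New("cipher: message authentication failed")
	}
	return out, nil
}

func main() {
	out := []byte("partially decrypted")
	_, err := openSketch(out, []byte{1, 2, 3, 4}, []byte{9, 9, 9, 9})
	fmt.Println(err, out) // out is now all zeros
}
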
// Read replaces the contents of dst with zeros. It is safe for concurrent use.
func (zr) Read(dst []byte) (n int, err error) {
- for i := range dst {
- dst[i] = 0
- }
+ clear(dst)
return len(dst), nil
}
// Read replaces the contents of dst with zeros. It is safe for concurrent use.
func (zr) Read(dst []byte) (n int, err error) {
- for i := range dst {
- dst[i] = 0
- }
+ clear(dst)
return len(dst), nil
}
type zeroReader struct{}
func (zeroReader) Read(buf []byte) (int, error) {
- for i := range buf {
- buf[i] = 0
- }
+ clear(buf)
return len(buf), nil
}
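
The zeros, zr, zeroReader, and zeroSource types in these hunks all implement the same idea: an io.Reader that deterministically yields zero bytes, handy in tests wherever a randomness source is expected. A minimal usage sketch, with names chosen here for illustration:

package main

import (
	"fmt"
	"io"
)

// zeroReader fills any buffer passed to Read with zero bytes and never fails.
type zeroReader struct{}

func (zeroReader) Read(buf []byte) (int, error) {
	clear(buf)
	return len(buf), nil
}

func main() {
	buf := make([]byte, 8)
	// io.ReadFull always reports success here, since Read fills the whole
	// buffer and returns no error.
	if _, err := io.ReadFull(zeroReader{}, buf); err != nil {
		panic(err)
	}
	fmt.Println(buf) // [0 0 0 0 0 0 0 0]
}
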
return x
}
extraLimbs := x.limbs[len(x.limbs):n]
- for i := range extraLimbs {
- extraLimbs[i] = 0
- }
+ clear(extraLimbs)
x.limbs = x.limbs[:n]
return x
}
x.limbs = make([]uint, n)
return x
}
- for i := range x.limbs {
- x.limbs[i] = 0
- }
+ clear(x.limbs)
x.limbs = x.limbs[:n]
return x
}
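
The two bigmod hunks above both follow a grow-or-reuse pattern: when the existing backing array has enough capacity it is zeroed (entirely in reset, only the newly exposed limbs in expandFor) and resliced, and otherwise a fresh, already-zeroed slice is allocated. A rough sketch of the same idea for a plain buffer; resetBuf is a made-up helper, not part of bigmod:

package main

import "fmt"

// resetBuf returns an all-zero slice of length n, reusing buf's backing
// array when it has enough capacity. Illustrative only; not the bigmod code.
func resetBuf(buf []uint, n int) []uint {
	if cap(buf) < n {
		// Not enough room: allocate fresh storage, which is already zeroed.
		return make([]uint, n)
	}
	buf = buf[:n]
	clear(buf) // zero the reused elements
	return buf
}

func main() {
	buf := []uint{7, 8, 9}
	buf = resetBuf(buf, 2)
	fmt.Println(buf, cap(buf)) // [0 0] 3: same backing array, zeroed
}
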
type zeroSource struct{}
func (zeroSource) Read(b []byte) (n int, err error) {
- for i := range b {
- b[i] = 0
- }
-
+ clear(b)
return len(b), nil
}
type zeroSource struct{}
func (zeroSource) Read(b []byte) (n int, err error) {
- for i := range b {
- b[i] = 0
- }
-
+ clear(b)
return len(b), nil
}
}
func (k *bytesKey) clear() {
- for i := range k.b {
- k.b[i] = 0
- }
+ clear(k.b)
}
func (k *bytesKey) random(r *rand.Rand) {
randBytes(r, k.b)
// source code to 0.
func ResetCoverage() {
cov := coverage()
- for i := range cov {
- cov[i] = 0
- }
+ clear(cov)
}
// SnapshotCoverage copies the current counter values into coverageSnapshot,
TestHookDidWritev(int(wrote))
n += int64(wrote)
consume(v, int64(wrote))
- for i := range iovecs {
- iovecs[i] = syscall.Iovec{}
- }
+ clear(iovecs)
if err != nil {
if err == syscall.EINTR {
continue
if len(stats.PauseQuantiles) > 0 {
if n == 0 {
- for i := range stats.PauseQuantiles {
- stats.PauseQuantiles[i] = 0
- }
+ clear(stats.PauseQuantiles)
} else {
// There's room for a second copy of the data in stats.Pause.
// See the allocation at the top of the function.
func (s *mspan) initHeapBits(forceClear bool) {
if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk {
b := s.heapBits()
- for i := range b {
- b[i] = 0
- }
+ clear(b)
}
}
for i := range src {
src[i] = &x
}
- for i := range dst {
- dst[i] = nil
- }
+ clear(dst)
var ready atomic.Uint32
go func() {
}
evts = evts[:len(pollsubs)]
- for i := range evts {
- evts[i] = event{}
- }
+ clear(evts)
retry:
var nevents size
if v == '\n' || writePos == len(dst)-1 {
dst[writePos] = 0
write(writeFD, unsafe.Pointer(&writeBuf[0]), int32(hlen+writePos))
- for i := range dst {
- dst[i] = 0
- }
+ clear(dst)
writePos = 0
}
}