}
func traceEventLocked(extraBytes int, mp *m, pid int32, bufp *traceBufPtr, ev byte, skip int, args ...uint64) {
- buf := (*bufp).ptr()
+ buf := bufp.ptr()
// TODO: test on non-zero extraBytes param.
maxSize := 2 + 5*traceBytesPerNumber + extraBytes // event type, length, sequence, timestamp, stack id and two add params
if buf == nil || len(buf.arr)-buf.pos < maxSize {
buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
- (*bufp).set(buf)
+ bufp.set(buf)
}
ticks := uint64(cputicks()) / traceTickDiv
// memory allocation in above may trigger tracing and
// cause *bufp changes. Following code now works with *bufp,
// so there must be no memory allocation or any activities
// that causes tracing after this point.
- buf := (*bufp).ptr()
+ buf := bufp.ptr()
size := 1 + 2*traceBytesPerNumber + len(s)
if buf == nil || len(buf.arr)-buf.pos < size {
buf = traceFlush(traceBufPtrOf(buf), pid).ptr()
- (*bufp).set(buf)
+ bufp.set(buf)
}
buf.byte(traceEvString)
buf.varint(id)
// double-check the string and the length can fit.
// Otherwise, truncate the string.
slen := len(s)
if room := len(buf.arr) - buf.pos; room < slen+traceBytesPerNumber {
slen = room
}
buf.varint(uint64(slen))
buf.pos += copy(buf.arr[buf.pos:], s[:slen])
- (*bufp).set(buf)
+ bufp.set(buf)
return id, bufp
}
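Both size budgets above are multiples of traceBytesPerNumber because every number is written with buf.varint in little-endian base-128, where a uint64 takes at most 10 bytes. The following is a minimal standalone sketch of that encoding, assuming the standard base-128 layout; appendVarint is an illustrative name, not a function from runtime/trace.go.

package main

import "fmt"

// appendVarint appends v in little-endian base-128: 7 payload bits per byte,
// with the high bit set on every byte except the last. A uint64 needs at
// most 10 bytes in this form, which is where a traceBytesPerNumber-style
// worst-case budget comes from.
func appendVarint(dst []byte, v uint64) []byte {
	for ; v >= 0x80; v >>= 7 {
		dst = append(dst, 0x80|byte(v))
	}
	return append(dst, byte(v))
}

func main() {
	var buf []byte
	buf = appendVarint(buf, 300) // encodes as 0xac 0x02
	buf = appendVarint(buf, 42)  // encodes as 0x2a
	fmt.Printf("% x\n", buf)     // prints: ac 02 2a
}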
traceEventLocked(extraSpace, mp, pid, bufp, traceEvUserLog, 3, id, categoryID)
// traceEventLocked reserved extra space for val and len(val)
// in buf, so buf now has room for the following.
- buf := (*bufp).ptr()
+ buf := bufp.ptr()
// double-check the message and its length can fit.
// Otherwise, truncate the message.
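Per the comment, a message that no longer fits must be shortened so that the length varint and the (possibly truncated) payload still land inside the room left in the buffer. Below is a hedged sketch of that kind of guard; truncateToRoom and its parameters are hypothetical, not the runtime's actual code, whose check may differ in detail.

// truncateToRoom caps payloadLen so that a length prefix of at most
// maxVarintLen bytes plus the (possibly shortened) payload fit into the
// room bytes remaining in the buffer. Hypothetical helper for illustration.
func truncateToRoom(payloadLen, room, maxVarintLen int) int {
	if payloadLen+maxVarintLen <= room {
		return payloadLen // fits as-is, no truncation needed
	}
	if room <= maxVarintLen {
		return 0 // only the length prefix fits
	}
	return room - maxVarintLen
}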