bytes *byte
}
-func (n name) data(off int) *byte {
- return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
+func (n name) data(off int, whySafe string) *byte {
+ return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
}
func (n name) isExported() bool {
}
func (n name) nameLen() int {
- return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
+ return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field")))
}
func (n name) tagLen() int {
- if *n.data(0)&(1<<1) == 0 {
+ if *n.data(0, "name flag field")&(1<<1) == 0 {
return 0
}
off := 3 + n.nameLen()
- return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
+ return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field")))
}
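// Illustrative note, not part of this patch: the layout these accessors
// decode, as implied by the offsets above, is
//
//	data[0]              flag byte (1<<0 exported, 1<<1 tag follows, 1<<2 pkgPath nameOff follows)
//	data[1], data[2]     big-endian uint16 name length nl
//	data[3 : 3+nl]       the name bytes
//	data[3+nl : 3+nl+2]  big-endian uint16 tag length tl (present only if the tag bit is set)
//	data[3+nl+2 : ...]   the tl tag bytes
//
// A stand-alone sketch of the same decoding over a plain []byte (the
// helper names are hypothetical, for illustration only):
//
//	func nameLenOf(blob []byte) int { return int(uint16(blob[1])<<8 | uint16(blob[2])) }
//	func tagLenOf(blob []byte) int {
//		if blob[0]&(1<<1) == 0 {
//			return 0
//		}
//		off := 3 + nameLenOf(blob)
//		return int(uint16(blob[off])<<8 | uint16(blob[off+1]))
//	}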
func (n name) name() (s string) {
}
func (n name) tag() (s string) {
nl := n.nameLen()
hdr := (*stringHeader)(unsafe.Pointer(&s))
- hdr.Data = unsafe.Pointer(n.data(3 + nl + 2))
+ hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string"))
hdr.Len = tl
return s
}
func (n name) pkgPath() string {
- if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
+ if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
return ""
}
off := 3 + n.nameLen()
if tl := n.tagLen(); tl > 0 {
off += 2 + tl
}
var nameOff int32
- copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
+ // Note that this field may not be aligned in memory,
+ // so we cannot use a direct int32 assignment here.
+ copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
return pkgPathName.name()
}
}
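// Illustrative note, not part of this patch: the byte-wise copy in pkgPath
// above is the usual way to read a 4-byte field that may not be 4-byte
// aligned. Copying into an aligned local avoids an unaligned *int32 load;
// a stand-alone sketch (buf and pos are hypothetical):
//
//	var off int32
//	copy((*[4]byte)(unsafe.Pointer(&off))[:], buf[pos:pos+4])
//
// The bytes land in off in memory order, so the value matches what an
// aligned load of the same bytes would produce.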
func (t *uncommonType) methods() []method {
- return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount]
+ if t.mcount == 0 {
+ return nil
+ }
+ return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
}
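// Illustrative note, not part of this patch: when t.mcount is 0, the old
// code still computed t+moff before taking a zero-length slice. If that
// offset falls at the very end of t's allocation, the intermediate pointer
// refers to the next block in memory (the case the add documentation below
// spells out), so the empty case now returns nil without doing any pointer
// arithmetic. funcType.in and funcType.out below get the same guard.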
func (t *funcType) in() []*rtype {
uadd := unsafe.Sizeof(*t)
if t.tflag&tflagUncommon != 0 {
uadd += unsafe.Sizeof(uncommonType{})
}
- return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[:t.inCount]
+ if t.inCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount]
}
func (t *funcType) out() []*rtype {
uadd += unsafe.Sizeof(uncommonType{})
}
outCount := t.outCount & (1<<15 - 1)
- return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
+ if outCount == 0 {
+ return nil
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount]
}
-func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
return unsafe.Pointer(uintptr(p) + x)
}
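// Illustrative sketch, not part of this patch: a hypothetical call site.
// buf is assumed to be an addressable [16]byte; the string changes nothing
// at run time, it only records why the result stays inside the allocation:
//
//	p := unsafe.Pointer(&buf[0])
//	q := add(p, 8, "buf is 16 bytes, so p+8 is still inside buf")
//
// add(p, 16, ...) would be exactly one past the end of buf, the case the
// whySafe contract asks every caller to rule out.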
func typelinks() (sections []unsafe.Pointer, offset [][]int32)
func rtypeOff(section unsafe.Pointer, off int32) *rtype {
- return (*rtype)(add(section, uintptr(off)))
+ return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
}
// typesByString returns the subslice of typelinks() whose elements have
typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr {
o := seed
for _, ft := range typ.fields {
- pi := unsafe.Pointer(uintptr(p) + ft.offset())
+ pi := add(p, ft.offset(), "&x.field safe")
o = ft.typ.alg.hash(pi, o)
}
return o
if comparable {
typ.alg.equal = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.fields {
- pi := unsafe.Pointer(uintptr(p) + ft.offset())
- qi := unsafe.Pointer(uintptr(q) + ft.offset())
+ pi := add(p, ft.offset(), "&x.field safe")
+ qi := add(q, ft.offset(), "&x.field safe")
if !ft.typ.alg.equal(pi, qi) {
return false
}
eequal := ealg.equal
array.alg.equal = func(p, q unsafe.Pointer) bool {
for i := 0; i < count; i++ {
- pi := arrayAt(p, i, esize)
- qi := arrayAt(q, i, esize)
+ pi := arrayAt(p, i, esize, "i < count")
+ qi := arrayAt(q, i, esize, "i < count")
if !eequal(pi, qi) {
return false
}
array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
o := seed
for i := 0; i < count; i++ {
- o = ehash(arrayAt(ptr, i, esize), o)
+ o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
}
return o
}
a := uintptr(targ.align)
off = (off + a - 1) &^ (a - 1)
n := targ.size
- addr := unsafe.Pointer(uintptr(args) + off)
+ if n == 0 {
+ // Not safe to compute args+off pointing at 0 bytes,
+ // because that might point beyond the end of the frame,
+ // but we still need to call assignTo to check assignability.
+ v.assignTo("reflect.Value.Call", targ, nil)
+ continue
+ }
+ addr := add(args, off, "n > 0")
v = v.assignTo("reflect.Value.Call", targ, addr)
if v.flag&flagIndir != 0 {
typedmemmove(targ, addr, v.ptr)
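// Illustrative note, not part of this patch: if the last argument has size
// 0 and its offset equals the frame size, args+off would be one past the
// end of the frame allocation, exactly the pointer the add documentation
// forbids. That is why the n == 0 case above passes a nil address to
// assignTo and skips the typedmemmove entirely.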
off = (off + a - 1) &^ (a - 1)
if tv.Size() != 0 {
fl := flagIndir | flag(tv.Kind())
- ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl}
+ ret[i] = Value{tv.common(), add(args, off, "tv.Size() != 0"), fl}
} else {
// For zero-sized return value, args+off may point to the next object.
// In this case, return the zero value instead.
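// Illustrative note, not part of this patch: for example, a function
// returning (int, struct{}) has a zero-sized second result whose offset
// may coincide with the end of the frame, so its Value is built from the
// type's zero value rather than from a frame pointer.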
in := make([]Value, 0, int(ftyp.inCount))
for _, typ := range ftyp.in() {
off += -off & uintptr(typ.align-1)
- addr := unsafe.Pointer(uintptr(ptr) + off)
v := Value{typ, nil, flag(typ.Kind())}
if ifaceIndir(typ) {
// value cannot be inlined in interface data.
// and we cannot let f keep a reference to the stack frame
// after this function returns, not even a read-only reference.
v.ptr = unsafe_New(typ)
- typedmemmove(typ, v.ptr, addr)
+ if typ.size > 0 {
+ typedmemmove(typ, v.ptr, add(ptr, off, "typ.size > 0"))
+ }
v.flag |= flagIndir
} else {
- v.ptr = *(*unsafe.Pointer)(addr)
+ v.ptr = *(*unsafe.Pointer)(add(ptr, off, "1-ptr"))
}
in = append(in, v)
off += typ.size
" returned value obtained from unexported field")
}
off += -off & uintptr(typ.align-1)
- addr := unsafe.Pointer(uintptr(ptr) + off)
+ if typ.size == 0 {
+ continue
+ }
+ addr := add(ptr, off, "typ.size > 0")
if v.flag&flagIndir != 0 {
typedmemmove(typ, addr, v.ptr)
} else {
// Avoid constructing out-of-bounds pointers if there are no args.
storeRcvr(rcvr, args)
if argSize-ptrSize > 0 {
- typedmemmovepartial(frametype, unsafe.Pointer(uintptr(args)+ptrSize), frame, ptrSize, argSize-ptrSize)
+ typedmemmovepartial(frametype, add(args, ptrSize, "argSize > ptrSize"), frame, ptrSize, argSize-ptrSize)
}
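// Illustrative note, not part of this patch: when the method has no
// arguments beyond the receiver word, argSize equals ptrSize and
// args+ptrSize may already be the end of the frame, so the copy is skipped
// rather than forming that pointer (the "no args" case noted above).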
// Call.
callerRetOffset = align(argSize-ptrSize, 8)
}
typedmemmovepartial(frametype,
- unsafe.Pointer(uintptr(frame)+callerRetOffset),
- unsafe.Pointer(uintptr(args)+retOffset),
+ add(frame, callerRetOffset, "frametype.size > retOffset"),
+ add(args, retOffset, "frametype.size > retOffset"),
retOffset,
frametype.size-retOffset)
}
// or flagIndir is not set and v.ptr is the actual struct data.
// In the former case, we want v.ptr + offset.
// In the latter case, we must have field.offset = 0,
- // so v.ptr + field.offset is still okay.
- ptr := unsafe.Pointer(uintptr(v.ptr) + field.offset())
+ // so v.ptr + field.offset is still the correct address.
+ ptr := add(v.ptr, field.offset(), "same as non-reflect &v.field")
return Value{typ, ptr, fl}
}
// or flagIndir is not set and v.ptr is the actual array data.
// In the former case, we want v.ptr + offset.
// In the latter case, we must be doing Index(0), so offset = 0,
- // so v.ptr + offset is still okay.
- val := unsafe.Pointer(uintptr(v.ptr) + offset)
+ // so v.ptr + offset is still the correct address.
+ val := add(v.ptr, offset, "same as &v[i], i < tt.len")
fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array
return Value{typ, val, fl}
}
tt := (*sliceType)(unsafe.Pointer(v.typ))
typ := tt.elem
- val := arrayAt(s.Data, i, typ.size)
+ val := arrayAt(s.Data, i, typ.size, "i < s.Len")
fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
return Value{typ, val, fl}
if uint(i) >= uint(s.Len) {
panic("reflect: string index out of range")
}
- p := arrayAt(s.Data, i, 1)
+ p := arrayAt(s.Data, i, 1, "i < s.Len")
fl := v.flag.ro() | flag(Uint8) | flagIndir
return Value{uint8Type, p, fl}
}
if i < 0 || j < i || j > s.Len {
panic("reflect.Value.Slice: string slice index out of bounds")
}
- t := stringHeader{arrayAt(s.Data, i, 1), j - i}
+ var t stringHeader
+ if i < s.Len {
+ t = stringHeader{arrayAt(s.Data, i, 1, "i < s.Len"), j - i}
+ }
return Value{v.typ, unsafe.Pointer(&t), v.flag}
}
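// Illustrative example, not part of this patch: for v := ValueOf("hello"),
// v.Slice(5, 5) has i == j == s.Len == 5, and arrayAt(s.Data, 5, 1) would
// point one byte past the string's backing array. The guard above leaves t
// as the zero stringHeader (nil Data, Len 0) for that empty result instead.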
s.Len = j - i
s.Cap = cap - i
if cap-i > 0 {
- s.Data = arrayAt(base, i, typ.elem.Size())
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < cap")
} else {
// do not advance pointer, to avoid pointing beyond end of slice
s.Data = base
s.Len = j - i
s.Cap = k - i
if k-i > 0 {
- s.Data = arrayAt(base, i, typ.elem.Size())
+ s.Data = arrayAt(base, i, typ.elem.Size(), "i < k <= cap")
} else {
// do not advance pointer, to avoid pointing beyond end of slice
s.Data = base
}
}
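// Illustrative note, not part of this patch: the same end-of-allocation
// concern. For an empty result, i may equal the capacity, in which case
// arrayAt(base, i, elemSize) would point just past the end of the backing
// array; leaving Data at base avoids that, and with Len and Cap both 0 the
// pointer is never used to address elements.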
-// arrayAt returns the i-th element of p, a C-array whose elements are
-// eltSize wide (in bytes).
-func arrayAt(p unsafe.Pointer, i int, eltSize uintptr) unsafe.Pointer {
- return unsafe.Pointer(uintptr(p) + uintptr(i)*eltSize)
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+ return add(p, uintptr(i)*eltSize, "i < len")
}
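// Illustrative sketch, not part of this patch: for p pointing at a [4]int64,
// arrayAt(p, 3, 8, "i < len") is the address of the last element, while
// arrayAt(p, 4, 8, ...) would be p+32, exactly one past the array, which is
// what the i < len requirement above rules out.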
// grow grows the slice s so that it can hold extra more values, allocating