"func @\"\".growslice (@\"\".typ·2 *byte, @\"\".old·3 []any, @\"\".n·4 int64) (@\"\".ary·1 []any)\n"
"func @\"\".memmove (@\"\".to·1 *any, @\"\".frm·2 *any, @\"\".length·3 uintptr)\n"
"func @\"\".memequal (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
- "func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
- "func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
- "func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
- "func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
- "func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any, @\"\".size·4 uintptr) (? bool)\n"
+ "func @\"\".memequal8 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
+ "func @\"\".memequal16 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
+ "func @\"\".memequal32 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
+ "func @\"\".memequal64 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
+ "func @\"\".memequal128 (@\"\".x·2 *any, @\"\".y·3 *any) (? bool)\n"
"func @\"\".int64div (? int64, ? int64) (? int64)\n"
"func @\"\".uint64div (? uint64, ? uint64) (? uint64)\n"
"func @\"\".int64mod (? int64, ? int64) (? int64)\n"
SymUniq = 1<<3,
SymSiggen = 1<<4,
SymAsm = 1<<5,
+ SymAlgGen = 1<<6,
};
struct Sym
dowidth(t);
alg = algtype(t);
algsym = S;
- if(alg < 0)
+ if(alg < 0 || alg == AMEM)
algsym = dalgsym(t);
if(t->sym != nil && !isptr[t->etype])
if(gcprog)
i |= KindGCProg;
ot = duint8(s, ot, i); // kind
- if(alg >= 0)
+ if(algsym == S)
ot = dsymptr(s, ot, algarray, alg*sizeofAlg);
else
ot = dsymptr(s, ot, algsym, 0);
{
int ot;
Sym *s, *hash, *hashfunc, *eq, *eqfunc;
+ char *p;
// dalgsym is only called for a type that needs an algorithm table,
// which implies that the type is comparable (or else it would use ANOEQ).
- s = typesymprefix(".alg", t);
- hash = typesymprefix(".hash", t);
- genhash(hash, t);
- eq = typesymprefix(".eq", t);
- geneq(eq, t);
-
- // make Go funcs (closures) for calling hash and equal from Go
- hashfunc = typesymprefix(".hashfunc", t);
- dsymptr(hashfunc, 0, hash, 0);
- ggloblsym(hashfunc, widthptr, DUPOK|RODATA);
- eqfunc = typesymprefix(".eqfunc", t);
- dsymptr(eqfunc, 0, eq, 0);
- ggloblsym(eqfunc, widthptr, DUPOK|RODATA);
-
+ if(algtype(t) == AMEM) {
+ // we use one algorithm table for all AMEM types of a given size
+ p = smprint(".alg%lld", t->width);
+ s = pkglookup(p, typepkg);
+ free(p);
+ if(s->flags & SymAlgGen)
+ return s;
+ s->flags |= SymAlgGen;
+
+ // make hash closure
+ p = smprint(".hashfunc%lld", t->width);
+ hashfunc = pkglookup(p, typepkg);
+ free(p);
+ ot = 0;
+ ot = dsymptr(hashfunc, ot, pkglookup("memhash_varlen", runtimepkg), 0);
+ ot = duintxx(hashfunc, ot, t->width, widthptr); // size encoded in closure
+ ggloblsym(hashfunc, ot, DUPOK|RODATA);
+
+ // make equality closure
+ p = smprint(".eqfunc%lld", t->width);
+ eqfunc = pkglookup(p, typepkg);
+ free(p);
+ ot = 0;
+ ot = dsymptr(eqfunc, ot, pkglookup("memequal_varlen", runtimepkg), 0);
+ ot = duintxx(eqfunc, ot, t->width, widthptr);
+ ggloblsym(eqfunc, ot, DUPOK|RODATA);
+ } else {
+ // generate an alg table specific to this type
+ s = typesymprefix(".alg", t);
+ hash = typesymprefix(".hash", t);
+ eq = typesymprefix(".eq", t);
+ hashfunc = typesymprefix(".hashfunc", t);
+ eqfunc = typesymprefix(".eqfunc", t);
+
+ genhash(hash, t);
+ geneq(eq, t);
+
+ // make Go funcs (closures) for calling hash and equal from Go
+ dsymptr(hashfunc, 0, hash, 0);
+ ggloblsym(hashfunc, widthptr, DUPOK|RODATA);
+ dsymptr(eqfunc, 0, eq, 0);
+ ggloblsym(eqfunc, widthptr, DUPOK|RODATA);
+ }
// ../../runtime/alg.go:/typeAlg
ot = 0;
ot = dsymptr(s, ot, hashfunc, 0);
ot = dsymptr(s, ot, eqfunc, 0);
-
ggloblsym(s, ot, DUPOK|RODATA);
return s;
}
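
For concreteness, the data dalgsym now emits for a 24-byte AMEM type on a 64-bit target can be pictured with the standalone sketch below; the struct names are illustrative, the real output is raw RODATA under the .alg24/.hashfunc24/.eqfunc24 symbols.

package main

import (
	"fmt"
	"unsafe"
)

// closure models the .hashfunc24/.eqfunc24 blobs emitted above: a code pointer
// (runtime·memhash_varlen or runtime·memequal_varlen) followed by the type's
// width, which the assembly stubs read back out of the closure register.
type closure struct {
	fn   uintptr
	size uintptr
}

// algSym models the shared two-pointer .alg24 table that the type descriptor
// points at (same shape as runtime's typeAlg).
type algSym struct {
	hashfunc *closure
	eqfunc   *closure
}

func main() {
	h := closure{size: 24}
	e := closure{size: 24}
	a := algSym{hashfunc: &h, eqfunc: &e}
	// the width sits one pointer past the code pointer: offset 4 or 8
	fmt.Println(unsafe.Offsetof(h.size), a.hashfunc.size, a.eqfunc.size)
}
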
func memmove(to *any, frm *any, length uintptr)
func memequal(x, y *any, size uintptr) bool
-func memequal8(x, y *any, size uintptr) bool
-func memequal16(x, y *any, size uintptr) bool
-func memequal32(x, y *any, size uintptr) bool
-func memequal64(x, y *any, size uintptr) bool
-func memequal128(x, y *any, size uintptr) bool
+func memequal8(x, y *any) bool
+func memequal16(x, y *any) bool
+func memequal32(x, y *any) bool
+func memequal64(x, y *any) bool
+func memequal128(x, y *any) bool
// only used on 32-bit
func int64div(int64, int64) int64
{
Node *tfn, *n;
Sym *sym;
-
+
sym = pkglookup("memhash", runtimepkg);
n = newname(sym);
a = algtype1(t, nil);
switch(a) {
case AMEM:
- return hashmem(t);
+ fatal("hashfor with AMEM type");
case AINTER:
sym = pkglookup("interhash", runtimepkg);
break;
tfn = nod(OTFUNC, N, N);
tfn->list = list(tfn->list, nod(ODCLFIELD, N, typenod(ptrto(t))));
tfn->list = list(tfn->list, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
- tfn->list = list(tfn->list, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
tfn->rlist = list(tfn->rlist, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
typecheck(&tfn, Etype);
n->type = tfn->type;
dclcontext = PEXTERN;
markdcl();
- // func sym(p *T, s uintptr, h uintptr) uintptr
+ // func sym(p *T, h uintptr) uintptr
fn = nod(ODCLFUNC, N, N);
fn->nname = newname(sym);
fn->nname->class = PFUNC;
n = nod(ODCLFIELD, newname(lookup("p")), typenod(ptrto(t)));
tfn->list = list(tfn->list, n);
np = n->left;
- n = nod(ODCLFIELD, newname(lookup("s")), typenod(types[TUINTPTR]));
- tfn->list = list(tfn->list, n);
n = nod(ODCLFIELD, newname(lookup("h")), typenod(types[TUINTPTR]));
tfn->list = list(tfn->list, n);
nh = n->left;
nh,
nod(OMUL, nh, nodintconst(mul))));
- // h = hashel(&p[i], sizeof(p[i]), h)
+ // h = hashel(&p[i], h)
call = nod(OCALL, hashel, N);
nx = nod(OINDEX, np, ni);
nx->bounded = 1;
na = nod(OADDR, nx, N);
na->etype = 1; // no escape to heap
call->list = list(call->list, na);
- call->list = list(call->list, nodintconst(t->type->width));
call->list = list(call->list, nh);
n->nbody = list(n->nbody, nod(OAS, nh, call));
na = nod(OADDR, nx, N);
na->etype = 1; // no escape to heap
call->list = list(call->list, na);
- call->list = list(call->list, nodintconst(size));
call->list = list(call->list, nh);
+ call->list = list(call->list, nodintconst(size));
fn->nbody = list(fn->nbody, nod(OAS, nh, call));
first = T;
continue;
// Run hash for this field.
- hashel = hashfor(t1->type);
- // h = hashel(&p.t1, size, h)
- call = nod(OCALL, hashel, N);
- nx = nod(OXDOT, np, newname(t1->sym)); // TODO: fields from other packages?
- na = nod(OADDR, nx, N);
- na->etype = 1; // no escape to heap
- call->list = list(call->list, na);
- call->list = list(call->list, nodintconst(t1->type->width));
- call->list = list(call->list, nh);
- fn->nbody = list(fn->nbody, nod(OAS, nh, call));
+ if(algtype1(t1->type, nil) == AMEM) {
+ hashel = hashmem(t1->type);
+ // h = memhash(&p.t1, h, size)
+ call = nod(OCALL, hashel, N);
+ nx = nod(OXDOT, np, newname(t1->sym)); // TODO: fields from other packages?
+ na = nod(OADDR, nx, N);
+ na->etype = 1; // no escape to heap
+ call->list = list(call->list, na);
+ call->list = list(call->list, nh);
+ call->list = list(call->list, nodintconst(t1->type->width));
+ fn->nbody = list(fn->nbody, nod(OAS, nh, call));
+ } else {
+ hashel = hashfor(t1->type);
+ // h = hashel(&p.t1, h)
+ call = nod(OCALL, hashel, N);
+ nx = nod(OXDOT, np, newname(t1->sym)); // TODO: fields from other packages?
+ na = nod(OADDR, nx, N);
+ na->etype = 1; // no escape to heap
+ call->list = list(call->list, na);
+ call->list = list(call->list, nh);
+ fn->nbody = list(fn->nbody, nod(OAS, nh, call));
+ }
}
break;
}
}
static Node*
-eqmemfunc(vlong size, Type *type)
+eqmemfunc(vlong size, Type *type, int *needsize)
{
char buf[30];
Node *fn;
switch(size) {
default:
fn = syslook("memequal", 1);
+ *needsize = 1;
break;
case 1:
case 2:
case 16:
snprint(buf, sizeof buf, "memequal%d", (int)size*8);
fn = syslook(buf, 1);
+ *needsize = 0;
break;
}
argtype(fn, type);
}
// Return node for
-// if !memequal(&p.field, &q.field, size) { return false }
+// if !memequal(&p.field, &q.field [, size]) { return false }
static Node*
eqmem(Node *p, Node *q, Node *field, vlong size)
{
Node *nif, *nx, *ny, *call, *r;
+ int needsize;
nx = nod(OADDR, nod(OXDOT, p, field), N);
nx->etype = 1; // does not escape
typecheck(&nx, Erv);
typecheck(&ny, Erv);
- call = nod(OCALL, eqmemfunc(size, nx->type->type), N);
+ call = nod(OCALL, eqmemfunc(size, nx->type->type, &needsize), N);
call->list = list(call->list, nx);
call->list = list(call->list, ny);
- call->list = list(call->list, nodintconst(size));
+ if(needsize)
+ call->list = list(call->list, nodintconst(size));
nif = nod(OIF, N, N);
nif->ninit = list(nif->ninit, call);
dclcontext = PEXTERN;
markdcl();
- // func sym(p, q *T, s uintptr) bool
+ // func sym(p, q *T) bool
fn = nod(ODCLFUNC, N, N);
fn->nname = newname(sym);
fn->nname->class = PFUNC;
n = nod(ODCLFIELD, newname(lookup("q")), typenod(ptrto(t)));
tfn->list = list(tfn->list, n);
nq = n->left;
- n = nod(ODCLFIELD, newname(lookup("s")), typenod(types[TUINTPTR]));
- tfn->list = list(tfn->list, n);
n = nod(ODCLFIELD, N, typenod(types[TBOOL]));
tfn->rlist = list(tfn->rlist, n);
}
static Node*
-eqfor(Type *t)
+eqfor(Type *t, int *needsize)
{
int a;
Node *n;
n = syslook("memequal", 1);
argtype(n, t);
argtype(n, t);
+ *needsize = 1;
return n;
}
ntype = nod(OTFUNC, N, N);
ntype->list = list(ntype->list, nod(ODCLFIELD, N, typenod(ptrto(t))));
ntype->list = list(ntype->list, nod(ODCLFIELD, N, typenod(ptrto(t))));
- ntype->list = list(ntype->list, nod(ODCLFIELD, N, typenod(types[TUINTPTR])));
ntype->rlist = list(ntype->rlist, nod(ODCLFIELD, N, typenod(types[TBOOL])));
typecheck(&ntype, Etype);
n->type = ntype->type;
+ *needsize = 0;
return n;
}
walkcompare(Node **np, NodeList **init)
{
Node *n, *l, *r, *call, *a, *li, *ri, *expr, *cmpl, *cmpr;
- int andor, i;
+ int andor, i, needsize;
Type *t, *t1;
n = *np;
}
// Chose not to inline. Call equality function directly.
- call = nod(OCALL, eqfor(t), N);
+ call = nod(OCALL, eqfor(t, &needsize), N);
call->list = list(call->list, l);
call->list = list(call->list, r);
- call->list = list(call->list, nodintconst(t->width));
+ if(needsize)
+ call->list = list(call->list, nodintconst(t->width));
r = call;
if(n->op != OEQ)
r = nod(ONOT, r, N);
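
The two call shapes the compiler now emits when it chooses not to inline the comparison, sketched as a standalone program; the helpers below are simplified stand-ins, not the runtime's or the generated code.

package main

import (
	"fmt"
	"unsafe"
)

// memequal is a simplified stand-in for the runtime's generic routine: for
// plain-memory (AMEM) types eqfor still returns this and sets needsize, so
// the width stays on the call.
func memequal(p, q unsafe.Pointer, size uintptr) bool {
	a := unsafe.Slice((*byte)(p), size) // Go 1.17+ helper; the runtime uses memeqbody
	b := unsafe.Slice((*byte)(q), size)
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// T is not plain memory (it has a string field), so eqfor hands back the
// generated .eq symbol instead; eqT is a hand-written stand-in for it.
type T struct {
	name string
	id   uint32
}

func eqT(p, q *T) bool { return p.name == q.name && p.id == q.id }

func main() {
	x := [3]uint64{1, 2, 3} // AMEM, width 24: call is memequal(&x, &y, 24)
	y := [3]uint64{1, 2, 3}
	fmt.Println(memequal(unsafe.Pointer(&x), unsafe.Pointer(&y), unsafe.Sizeof(x)))

	a := T{name: "go", id: 1} // non-AMEM: call is eqT(&a, &b), no size argument
	b := T{name: "go", id: 1}
	fmt.Println(eqT(&a, &b))
}
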
type typeAlg struct {
// function for hashing objects of this type
- // (ptr to object, size, seed) -> hash
- hash func(unsafe.Pointer, uintptr, uintptr) uintptr
+ // (ptr to object, seed) -> hash
+ hash func(unsafe.Pointer, uintptr) uintptr
// function for comparing objects of this type
- // (ptr to object A, ptr to object B, size) -> ==?
- equal func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
}
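
A self-contained illustration of how a table with this shape is consumed: callers pass only a pointer and a seed (or two pointers), and size-specific entries such as alg_MEM32's bake the width into the function itself; the functions below are toys, not the runtime's.

package main

import (
	"fmt"
	"unsafe"
)

// toyAlg copies the new shape: hash is (ptr, seed) -> hash and equal is
// (ptrA, ptrB) -> bool, with no size parameter anywhere.
type toyAlg struct {
	hash  func(unsafe.Pointer, uintptr) uintptr
	equal func(unsafe.Pointer, unsafe.Pointer) bool
}

// A size-specialized entry in the style of alg_MEM32; the mixing is made up.
var toyMem32 = toyAlg{
	hash: func(p unsafe.Pointer, seed uintptr) uintptr {
		return seed*0x9e3779b1 + uintptr(*(*uint32)(p))
	},
	equal: func(p, q unsafe.Pointer) bool {
		return *(*uint32)(p) == *(*uint32)(q)
	},
}

func main() {
	k, k2 := uint32(42), uint32(42)
	seed := uintptr(0x1234)
	fmt.Printf("hash=%#x\n", toyMem32.hash(unsafe.Pointer(&k), seed))
	fmt.Println(toyMem32.equal(unsafe.Pointer(&k), unsafe.Pointer(&k2))) // true
}
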
+func memhash0(p unsafe.Pointer, h uintptr) uintptr {
+ return h
+}
+func memhash8(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 1)
+}
+func memhash16(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 2)
+}
+func memhash32(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 4)
+}
+func memhash64(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 8)
+}
+func memhash128(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 16)
+}
+
+// memhash_varlen is defined in assembly because it needs access
+// to the closure. It appears here to provide an argument
+// signature for the assembly routine.
+func memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
+
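
A rough Go-level model of what the assembly stub does, assuming the closure layout emitted by dalgsym above (width one word past the code pointer); names and the hash mixing are illustrative only.

package main

import (
	"fmt"
	"unsafe"
)

// toyMemhash stands in for memhash(p, seed, size); only the argument order
// matters here, the mixing is made up.
func toyMemhash(p unsafe.Pointer, seed, size uintptr) uintptr {
	h := seed
	for _, c := range unsafe.Slice((*byte)(p), size) { // Go 1.17+ helper
		h = h*131 + uintptr(c)
	}
	return h
}

// sizeClosure plays the role of the compiler-built closure: callers see a
// (ptr, seed) hash function, while the width rides along next to the code
// pointer and gets forwarded to the generic hash, as memhash_varlen does.
type sizeClosure struct {
	size uintptr
}

func (c *sizeClosure) hash(p unsafe.Pointer, h uintptr) uintptr {
	return toyMemhash(p, h, c.size)
}

func main() {
	v := [6]byte{1, 2, 3, 4, 5, 6}
	c := &sizeClosure{size: unsafe.Sizeof(v)}
	fmt.Printf("%#x\n", c.hash(unsafe.Pointer(&v), 0x1234))
}
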
var algarray = [alg_max]typeAlg{
- alg_MEM: {memhash, memequal},
- alg_MEM0: {memhash, memequal0},
- alg_MEM8: {memhash, memequal8},
- alg_MEM16: {memhash, memequal16},
- alg_MEM32: {memhash, memequal32},
- alg_MEM64: {memhash, memequal64},
- alg_MEM128: {memhash, memequal128},
+ alg_MEM: {nil, nil}, // not used
+ alg_MEM0: {memhash0, memequal0},
+ alg_MEM8: {memhash8, memequal8},
+ alg_MEM16: {memhash16, memequal16},
+ alg_MEM32: {memhash32, memequal32},
+ alg_MEM64: {memhash64, memequal64},
+ alg_MEM128: {memhash128, memequal128},
alg_NOEQ: {nil, nil},
alg_NOEQ0: {nil, nil},
alg_NOEQ8: {nil, nil},
var useAeshash bool
// in asm_*.s
-func aeshash(p unsafe.Pointer, s, h uintptr) uintptr
-func aeshash32(p unsafe.Pointer, s, h uintptr) uintptr
-func aeshash64(p unsafe.Pointer, s, h uintptr) uintptr
-func aeshashstr(p unsafe.Pointer, s, h uintptr) uintptr
+func aeshash(p unsafe.Pointer, h, s uintptr) uintptr
+func aeshash32(p unsafe.Pointer, h uintptr) uintptr
+func aeshash64(p unsafe.Pointer, h uintptr) uintptr
+func aeshashstr(p unsafe.Pointer, h uintptr) uintptr
-func strhash(a unsafe.Pointer, s, h uintptr) uintptr {
+func strhash(a unsafe.Pointer, h uintptr) uintptr {
x := (*stringStruct)(a)
- return memhash(x.str, uintptr(x.len), h)
+ return memhash(x.str, h, uintptr(x.len))
}
// NOTE: Because NaN != NaN, a map can contain any
// number of (mostly useless) entries keyed with NaNs.
// To avoid long hash chains, we assign a random number
// as the hash value for a NaN.
-func f32hash(p unsafe.Pointer, s, h uintptr) uintptr {
+func f32hash(p unsafe.Pointer, h uintptr) uintptr {
f := *(*float32)(p)
switch {
case f == 0:
case f != f:
return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
default:
- return memhash(p, 4, h)
+ return memhash(p, h, 4)
}
}
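
The map behavior this NOTE is about, as a small runnable illustration:

package main

import (
	"fmt"
	"math"
)

func main() {
	m := map[float64]int{}
	nan := math.NaN()
	// NaN != NaN, so every insert misses the existing keys and adds a fresh
	// entry; the randomized hash just spreads those entries across buckets
	// instead of piling them into one chain.
	m[nan] = 1
	m[nan] = 2
	m[nan] = 3
	fmt.Println(len(m)) // 3
	_, ok := m[nan]
	fmt.Println(ok) // false: a NaN key can never be looked up again
}
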
-func f64hash(p unsafe.Pointer, s, h uintptr) uintptr {
+func f64hash(p unsafe.Pointer, h uintptr) uintptr {
f := *(*float64)(p)
switch {
case f == 0:
case f != f:
return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
default:
- return memhash(p, 8, h)
+ return memhash(p, h, 8)
}
}
-func c64hash(p unsafe.Pointer, s, h uintptr) uintptr {
+func c64hash(p unsafe.Pointer, h uintptr) uintptr {
x := (*[2]float32)(p)
- return f32hash(unsafe.Pointer(&x[1]), 4, f32hash(unsafe.Pointer(&x[0]), 4, h))
+ return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
}
-func c128hash(p unsafe.Pointer, s, h uintptr) uintptr {
+func c128hash(p unsafe.Pointer, h uintptr) uintptr {
x := (*[2]float64)(p)
- return f64hash(unsafe.Pointer(&x[1]), 8, f64hash(unsafe.Pointer(&x[0]), 8, h))
+ return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
}
-func interhash(p unsafe.Pointer, s, h uintptr) uintptr {
+func interhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*iface)(p)
tab := a.tab
if tab == nil {
panic(errorString("hash of unhashable type " + *t._string))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), uintptr(t.size), h^c0)
+ return c1 * fn(unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, uintptr(t.size), h^c0)
+ return c1 * fn(a.data, h^c0)
}
}
-func nilinterhash(p unsafe.Pointer, s, h uintptr) uintptr {
+func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*eface)(p)
t := a._type
if t == nil {
panic(errorString("hash of unhashable type " + *t._string))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), uintptr(t.size), h^c0)
+ return c1 * fn(unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, uintptr(t.size), h^c0)
+ return c1 * fn(a.data, h^c0)
}
}
return memeq(p, q, size)
}
-func memequal0(p, q unsafe.Pointer, size uintptr) bool {
+func memequal0(p, q unsafe.Pointer) bool {
return true
}
-func memequal8(p, q unsafe.Pointer, size uintptr) bool {
+func memequal8(p, q unsafe.Pointer) bool {
return *(*int8)(p) == *(*int8)(q)
}
-func memequal16(p, q unsafe.Pointer, size uintptr) bool {
+func memequal16(p, q unsafe.Pointer) bool {
return *(*int16)(p) == *(*int16)(q)
}
-func memequal32(p, q unsafe.Pointer, size uintptr) bool {
+func memequal32(p, q unsafe.Pointer) bool {
return *(*int32)(p) == *(*int32)(q)
}
-func memequal64(p, q unsafe.Pointer, size uintptr) bool {
+func memequal64(p, q unsafe.Pointer) bool {
return *(*int64)(p) == *(*int64)(q)
}
-func memequal128(p, q unsafe.Pointer, size uintptr) bool {
+func memequal128(p, q unsafe.Pointer) bool {
return *(*[2]int64)(p) == *(*[2]int64)(q)
}
-func f32equal(p, q unsafe.Pointer, size uintptr) bool {
+func f32equal(p, q unsafe.Pointer) bool {
return *(*float32)(p) == *(*float32)(q)
}
-func f64equal(p, q unsafe.Pointer, size uintptr) bool {
+func f64equal(p, q unsafe.Pointer) bool {
return *(*float64)(p) == *(*float64)(q)
}
-func c64equal(p, q unsafe.Pointer, size uintptr) bool {
+func c64equal(p, q unsafe.Pointer) bool {
return *(*complex64)(p) == *(*complex64)(q)
}
-func c128equal(p, q unsafe.Pointer, size uintptr) bool {
+func c128equal(p, q unsafe.Pointer) bool {
return *(*complex128)(p) == *(*complex128)(q)
}
-func strequal(p, q unsafe.Pointer, size uintptr) bool {
+func strequal(p, q unsafe.Pointer) bool {
return *(*string)(p) == *(*string)(q)
}
-func interequal(p, q unsafe.Pointer, size uintptr) bool {
+func interequal(p, q unsafe.Pointer) bool {
return ifaceeq(*(*interface {
f()
})(p), *(*interface {
f()
})(q))
}
-func nilinterequal(p, q unsafe.Pointer, size uintptr) bool {
+func nilinterequal(p, q unsafe.Pointer) bool {
return efaceeq(*(*interface{})(p), *(*interface{})(q))
}
func efaceeq(p, q interface{}) bool {
panic(errorString("comparing uncomparable type " + *t._string))
}
if isDirectIface(t) {
- return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)), uintptr(t.size))
+ return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)))
}
- return eq(x.data, y.data, uintptr(t.size))
+ return eq(x.data, y.data)
}
func ifaceeq(p, q interface {
f()
panic(errorString("comparing uncomparable type " + *t._string))
}
if isDirectIface(t) {
- return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)), uintptr(t.size))
+ return eq(noescape(unsafe.Pointer(&x.data)), noescape(unsafe.Pointer(&y.data)))
}
- return eq(x.data, y.data, uintptr(t.size))
+ return eq(x.data, y.data)
}
// Testing adapters for hash quality tests (see hash_test.go)
func stringHash(s string, seed uintptr) uintptr {
- return algarray[alg_STRING].hash(noescape(unsafe.Pointer(&s)), unsafe.Sizeof(s), seed)
+ return algarray[alg_STRING].hash(noescape(unsafe.Pointer(&s)), seed)
}
func bytesHash(b []byte, seed uintptr) uintptr {
s := (*sliceStruct)(unsafe.Pointer(&b))
- return algarray[alg_MEM].hash(s.array, uintptr(s.len), seed)
+ return memhash(s.array, seed, uintptr(s.len))
}
func int32Hash(i uint32, seed uintptr) uintptr {
- return algarray[alg_MEM32].hash(noescape(unsafe.Pointer(&i)), 4, seed)
+ return algarray[alg_MEM32].hash(noescape(unsafe.Pointer(&i)), seed)
}
func int64Hash(i uint64, seed uintptr) uintptr {
- return algarray[alg_MEM64].hash(noescape(unsafe.Pointer(&i)), 8, seed)
+ return algarray[alg_MEM64].hash(noescape(unsafe.Pointer(&i)), seed)
}
func efaceHash(i interface{}, seed uintptr) uintptr {
- return algarray[alg_NILINTER].hash(noescape(unsafe.Pointer(&i)), unsafe.Sizeof(i), seed)
+ return algarray[alg_NILINTER].hash(noescape(unsafe.Pointer(&i)), seed)
}
func ifaceHash(i interface {
F()
}, seed uintptr) uintptr {
- return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), unsafe.Sizeof(i), seed)
+ return algarray[alg_INTER].hash(noescape(unsafe.Pointer(&i)), seed)
}
// Testing adapter for memclr
cpuid_ecx&(1<<9) != 0 && // ssse3 (pshufb)
cpuid_ecx&(1<<19) != 0 { // sse4.1 (pinsr{d,q})
useAeshash = true
- algarray[alg_MEM].hash = aeshash
- algarray[alg_MEM8].hash = aeshash
- algarray[alg_MEM16].hash = aeshash
algarray[alg_MEM32].hash = aeshash32
algarray[alg_MEM64].hash = aeshash64
- algarray[alg_MEM128].hash = aeshash
algarray[alg_STRING].hash = aeshashstr
// Initialize with random data so hash collisions will be hard to engineer.
getRandomData(aeskeysched[:])
TEXT runtime·abort(SB),NOSPLIT,$0-0
INT $0x3
+// memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
+// redirects to memhash(p, h, size) using the size
+// stored in the closure.
+TEXT runtime·memhash_varlen(SB),NOSPLIT,$16-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVL p+0(FP), AX
+ MOVL h+4(FP), BX
+ MOVL 4(DX), CX // compiler stores size at offset 4 in the closure
+ MOVL AX, 0(SP)
+ MOVL BX, 4(SP)
+ MOVL CX, 8(SP)
+ CALL runtime·memhash(SB)
+ MOVL 12(SP), AX
+ MOVL AX, ret+8(FP)
+ RET
+
// hash function using AES hardware instructions
TEXT runtime·aeshash(SB),NOSPLIT,$0-16
MOVL p+0(FP), AX // ptr to data
- MOVL s+4(FP), CX // size
+ MOVL s+8(FP), CX // size
+ LEAL ret+12(FP), DX
JMP runtime·aeshashbody(SB)
-TEXT runtime·aeshashstr(SB),NOSPLIT,$0-16
+TEXT runtime·aeshashstr(SB),NOSPLIT,$0-12
MOVL p+0(FP), AX // ptr to string object
- // s+4(FP) is ignored, it is always sizeof(String)
MOVL 4(AX), CX // length of string
MOVL (AX), AX // string data
+ LEAL ret+8(FP), DX
JMP runtime·aeshashbody(SB)
// AX: data
// CX: length
-TEXT runtime·aeshashbody(SB),NOSPLIT,$0-16
- MOVL h+8(FP), X6 // seed to low 64 bits of xmm6
+// DX: address to put return value
+TEXT runtime·aeshashbody(SB),NOSPLIT,$0-0
+ MOVL h+4(FP), X6 // seed to low 64 bits of xmm6
PINSRD $2, CX, X6 // size to high 64 bits of xmm6
PSHUFHW $0, X6, X6 // replace size with its low 2 bytes repeated 4 times
MOVO runtime·aeskeysched(SB), X7
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
- MOVL X0, ret+12(FP)
+ MOVL X0, (DX)
RET
endofpage:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
- MOVL X0, ret+12(FP)
+ MOVL X0, (DX)
RET
aes0:
// return input seed
- MOVL h+8(FP), AX
- MOVL AX, ret+12(FP)
+ MOVL h+4(FP), AX
+ MOVL AX, (DX)
RET
aes16:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
- MOVL X0, ret+12(FP)
+ MOVL X0, (DX)
RET
// combine results
PXOR X1, X0
- MOVL X0, ret+12(FP)
+ MOVL X0, (DX)
RET
aes33to64:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
- MOVL X0, ret+12(FP)
+ MOVL X0, (DX)
RET
aes65plus:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
- MOVL X0, ret+12(FP)
+ MOVL X0, (DX)
RET
-TEXT runtime·aeshash32(SB),NOSPLIT,$0-16
+TEXT runtime·aeshash32(SB),NOSPLIT,$0-12
MOVL p+0(FP), AX // ptr to data
- // s+4(FP) is ignored, it is always sizeof(int32)
- MOVL h+8(FP), X0 // seed
+ MOVL h+4(FP), X0 // seed
PINSRD $1, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
- MOVL X0, ret+12(FP)
+ MOVL X0, ret+8(FP)
RET
-TEXT runtime·aeshash64(SB),NOSPLIT,$0-16
+TEXT runtime·aeshash64(SB),NOSPLIT,$0-12
MOVL p+0(FP), AX // ptr to data
- // s+4(FP) is ignored, it is always sizeof(int64)
MOVQ (AX), X0 // data
- PINSRD $2, h+8(FP), X0 // seed
+ PINSRD $2, h+4(FP), X0 // seed
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
- MOVL X0, ret+12(FP)
+ MOVL X0, ret+8(FP)
RET
// simple mask to get rid of data in the high part of the register.
MOVB AX, ret+12(FP)
RET
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
+ MOVL a+0(FP), SI
+ MOVL b+4(FP), DI
+ CMPL SI, DI
+ JEQ eq
+ MOVL 4(DX), BX // compiler stores size at offset 4 in the closure
+ CALL runtime·memeqbody(SB)
+ MOVB AX, ret+8(FP)
+ RET
+eq:
+ MOVB $1, ret+8(FP)
+ RET
+
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
MOVQ AX, ret+0(FP)
RET
+// memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
+// redirects to memhash(p, h, size) using the size
+// stored in the closure.
+TEXT runtime·memhash_varlen(SB),NOSPLIT,$32-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVQ p+0(FP), AX
+ MOVQ h+8(FP), BX
+ MOVQ 8(DX), CX // compiler stores size at offset 8 in the closure
+ MOVQ AX, 0(SP)
+ MOVQ BX, 8(SP)
+ MOVQ CX, 16(SP)
+ CALL runtime·memhash(SB)
+ MOVQ 24(SP), AX
+ MOVQ AX, ret+16(FP)
+ RET
+
// hash function using AES hardware instructions
TEXT runtime·aeshash(SB),NOSPLIT,$0-32
MOVQ p+0(FP), AX // ptr to data
- MOVQ s+8(FP), CX // size
+ MOVQ s+16(FP), CX // size
+ LEAQ ret+24(FP), DX
JMP runtime·aeshashbody(SB)
-TEXT runtime·aeshashstr(SB),NOSPLIT,$0-32
+TEXT runtime·aeshashstr(SB),NOSPLIT,$0-24
MOVQ p+0(FP), AX // ptr to string struct
- // s+8(FP) is ignored, it is always sizeof(String)
MOVQ 8(AX), CX // length of string
MOVQ (AX), AX // string data
+ LEAQ ret+16(FP), DX
JMP runtime·aeshashbody(SB)
// AX: data
// CX: length
-TEXT runtime·aeshashbody(SB),NOSPLIT,$0-32
- MOVQ h+16(FP), X6 // seed to low 64 bits of xmm6
+// DX: address to put return value
+TEXT runtime·aeshashbody(SB),NOSPLIT,$0-0
+ MOVQ h+8(FP), X6 // seed to low 64 bits of xmm6
PINSRQ $1, CX, X6 // size to high 64 bits of xmm6
PSHUFHW $0, X6, X6 // replace size with its low 2 bytes repeated 4 times
MOVO runtime·aeskeysched(SB), X7
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, (DX)
RET
endofpage:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, (DX)
RET
aes0:
// return input seed
- MOVQ h+16(FP), AX
- MOVQ AX, ret+24(FP)
+ MOVQ h+8(FP), AX
+ MOVQ AX, (DX)
RET
aes16:
AESENC X6, X0
AESENC X7, X0
AESENC X7, X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, (DX)
RET
aes17to32:
// combine results
PXOR X1, X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, (DX)
RET
aes33to64:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, (DX)
RET
aes65to128:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, (DX)
RET
aes129plus:
PXOR X2, X0
PXOR X3, X1
PXOR X1, X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, (DX)
RET
-TEXT runtime·aeshash32(SB),NOSPLIT,$0-32
+TEXT runtime·aeshash32(SB),NOSPLIT,$0-24
MOVQ p+0(FP), AX // ptr to data
- // s+8(FP) is ignored, it is always sizeof(int32)
- MOVQ h+16(FP), X0 // seed
+ MOVQ h+8(FP), X0 // seed
PINSRD $2, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, ret+16(FP)
RET
-TEXT runtime·aeshash64(SB),NOSPLIT,$0-32
+TEXT runtime·aeshash64(SB),NOSPLIT,$0-24
MOVQ p+0(FP), AX // ptr to data
- // s+8(FP) is ignored, it is always sizeof(int64)
- MOVQ h+16(FP), X0 // seed
+ MOVQ h+8(FP), X0 // seed
PINSRQ $1, (AX), X0 // data
AESENC runtime·aeskeysched+0(SB), X0
AESENC runtime·aeskeysched+16(SB), X0
AESENC runtime·aeskeysched+32(SB), X0
- MOVQ X0, ret+24(FP)
+ MOVQ X0, ret+16(FP)
RET
// simple mask to get rid of data in the high part of the register.
MOVB AX, ret+24(FP)
RET
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-17
+ MOVQ a+0(FP), SI
+ MOVQ b+8(FP), DI
+ CMPQ SI, DI
+ JEQ eq
+ MOVQ 8(DX), BX // compiler stores size at offset 8 in the closure
+ CALL runtime·memeqbody(SB)
+ MOVB AX, ret+16(FP)
+ RET
+eq:
+ MOVB $1, ret+16(FP)
+ RET
+
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
MOVQ AX, ret+0(FP)
RET
+// memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
+// redirects to memhash(p, h, size) using the size
+// stored in the closure.
+TEXT runtime·memhash_varlen(SB),NOSPLIT,$20-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVL p+0(FP), AX
+ MOVL h+4(FP), BX
+ MOVL 4(DX), CX // compiler stores size at offset 4 in the closure
+ MOVL AX, 0(SP)
+ MOVL BX, 4(SP)
+ MOVL CX, 8(SP)
+ CALL runtime·memhash(SB)
+ MOVL 16(SP), AX
+ MOVL AX, ret+8(FP)
+ RET
+
// hash function using AES hardware instructions
// For now, our one amd64p32 system (NaCl) does not
// support using AES instructions, so have not bothered to
MOVB AX, ret+16(FP)
RET
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
+ MOVL a+0(FP), SI
+ MOVL b+4(FP), DI
+ CMPL SI, DI
+ JEQ eq
+ MOVL 4(DX), BX // compiler stores size at offset 4 in the closure
+ CALL runtime·memeqbody(SB)
+ MOVB AX, ret+8(FP)
+ RET
+eq:
+ MOVB $1, ret+8(FP)
+ RET
+
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
MOVW $0, R0
MOVW (R0), R1
+// memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
+// redirects to memhash(p, h, size) using the size
+// stored in the closure.
+TEXT runtime·memhash_varlen(SB),NOSPLIT,$16-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVW p+0(FP), R0
+ MOVW h+4(FP), R1
+ MOVW 4(R7), R2 // compiler stores size at offset 4 in the closure
+ MOVW R0, 4(R13)
+ MOVW R1, 8(R13)
+ MOVW R2, 12(R13)
+ BL runtime·memhash(SB)
+ MOVW 16(R13), R0
+ MOVW R0, ret+8(FP)
+ RET
+
TEXT runtime·memeq(SB),NOSPLIT,$-4-13
MOVW a+0(FP), R1
MOVW b+4(FP), R2
MOVB R0, ret+12(FP)
RET
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$16-9
+ MOVW a+0(FP), R0
+ MOVW b+4(FP), R1
+ CMP R0, R1
+ BEQ eq
+ MOVW 4(R7), R2 // compiler stores size at offset 4 in the closure
+ MOVW R0, 4(R13)
+ MOVW R1, 8(R13)
+ MOVW R2, 12(R13)
+ BL runtime·memeq(SB)
+ MOVB 16(R13), R0
+ MOVB R0, ret+8(FP)
+ RET
+eq:
+ MOVW $1, R0
+ MOVB R0, ret+8(FP)
+ RET
+
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
MOVD R3, ret+0(FP)
RETURN
+// memhash_varlen(p unsafe.Pointer, h uintptr) uintptr
+// redirects to memhash(p, h, size) using the size
+// stored in the closure.
+TEXT runtime·memhash_varlen(SB),NOSPLIT,$40-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVD p+0(FP), R3
+ MOVD h+8(FP), R4
+ MOVD 8(R11), R5 // compiler stores size at offset 8 in the closure
+ MOVD R3, 8(R1)
+ MOVD R4, 16(R1)
+ MOVD R5, 24(R1)
+ BL runtime·memhash(SB)
+ MOVD 32(R1), R3
+ MOVD R3, ret+16(FP)
+ RETURN
+
// AES hashing not implemented for ppc64
TEXT runtime·aeshash(SB),NOSPLIT,$-8-0
MOVW (R0), R1
MOVB R0, ret+24(FP)
RETURN
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17
+ MOVD a+0(FP), R3
+ MOVD b+8(FP), R4
+ CMP R3, R4
+ BEQ eq
+ MOVD 8(R11), R5 // compiler stores size at offset 8 in the closure
+ MOVD R3, 8(R1)
+ MOVD R4, 16(R1)
+ MOVD R5, 24(R1)
+ BL runtime·memeq(SB)
+ MOVBZ 32(R1), R3
+ MOVB R3, ret+16(FP)
+ RETURN
+eq:
+ MOVD $1, R3
+ MOVB R3, ret+16(FP)
+ RETURN
+
// eqstring tests whether two strings are equal.
// See runtime_test.go:eqstring_generic for
// equivalent Go code.
m4 = 2336365089
)
-func memhash(p unsafe.Pointer, s, seed uintptr) uintptr {
+func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
if GOARCH == "386" && GOOS != "nacl" && useAeshash {
- return aeshash(p, s, seed)
+ return aeshash(p, seed, s)
}
h := uint32(seed + s*hashkey[0])
tail:
m4 = 15839092249703872147
)
-func memhash(p unsafe.Pointer, s, seed uintptr) uintptr {
+func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
if GOARCH == "amd64" && GOOS != "nacl" && useAeshash {
- return aeshash(p, s, seed)
+ return aeshash(p, seed, s)
}
h := uint64(seed + s*hashkey[0])
tail:
return unsafe.Pointer(t.elem.zero)
}
alg := t.key.alg
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+ hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
- if alg.equal(key, k, uintptr(t.key.size)) {
+ if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
return unsafe.Pointer(t.elem.zero), false
}
alg := t.key.alg
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+ hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
- if alg.equal(key, k, uintptr(t.key.size)) {
+ if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
return nil, nil
}
alg := t.key.alg
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+ hash := alg.hash(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
- if alg.equal(key, k, uintptr(t.key.size)) {
+ if alg.equal(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
}
alg := t.key.alg
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+ hash := alg.hash(key, uintptr(h.hash0))
if h.buckets == nil {
if checkgc {
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if !alg.equal(key, k2, uintptr(t.key.size)) {
+ if !alg.equal(key, k2) {
continue
}
// already have a mapping for key. Update it.
return
}
alg := t.key.alg
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
+ hash := alg.hash(key, uintptr(h.hash0))
bucket := hash & (uintptr(1)<<h.B - 1)
if h.oldbuckets != nil {
growWork(t, h, bucket)
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if !alg.equal(key, k2, uintptr(t.key.size)) {
+ if !alg.equal(key, k2) {
continue
}
memclr(k, uintptr(t.keysize))
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if t.reflexivekey || alg.equal(k2, k2, uintptr(t.key.size)) {
+ if t.reflexivekey || alg.equal(k2, k2) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
- hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
+ hash := alg.hash(k2, uintptr(h.hash0))
if hash&(uintptr(1)<<it.B-1) != checkBucket {
continue
}
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if t.reflexivekey || alg.equal(k2, k2, uintptr(t.key.size)) {
+ if t.reflexivekey || alg.equal(k2, k2) {
// Check the current hash table for the data.
// This code handles the case where the key
// has been deleted, updated, or deleted and reinserted.
}
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
- hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
+ hash := alg.hash(k2, uintptr(h.hash0))
if h.flags&iterator != 0 {
- if !t.reflexivekey && !alg.equal(k2, k2, uintptr(t.key.size)) {
+ if !t.reflexivekey && !alg.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
return unsafe.Pointer(t.elem.zero)
}
dohash:
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
return unsafe.Pointer(t.elem.zero), false
}
dohash:
- hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
+ hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if w > 16 {
w = 16
}
- h := memhash(unsafe.Pointer(&r[n-w]), uintptr(w), uintptr(nanotime()))
+ h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w))
for i := 0; i < ptrSize && n < len(r); i++ {
r[n] = byte(h)
n++