import (
"fmt"
+ "go/constant"
+ "strconv"
+ "strings"
"cmd/compile/internal/base"
"cmd/compile/internal/compare"
"cmd/compile/internal/typecheck"
"cmd/compile/internal/types"
"cmd/internal/obj"
- "cmd/internal/src"
)
// AlgType returns the fixed-width AMEMxx variants instead of the general
fmt.Printf("geneq %v\n", t)
}
- fn := eqFunc(t)
+ fn := eqFunc(eqSignature(t))
// Generate a closure which points at the function we just generated.
objw.SymPtr(closure, 0, fn.Linksym(), 0)
return closure
}
-func eqFunc(t *types.Type) *ir.Func {
- // Autogenerate code for equality of structs and arrays.
- sym := TypeSymPrefix(".eq", t)
+// TODO: generate hash function from signatures also?
+// They are slightly different, at least at the moment.
+func eqFunc(sig string) *ir.Func {
+ sym := types.TypeSymLookup(".eq." + sig)
if sym.Def != nil {
return sym.Def.(*ir.Name).Func
}
+ sig0 := sig
pos := base.AutogeneratedPos // less confusing than end of input
base.Pos = pos
- // func sym(p, q *T) bool
+ // func sym(p, q unsafe.Pointer) bool
fn := ir.NewFunc(pos, pos, sym, types.NewSignature(nil,
[]*types.Field{
- types.NewField(pos, typecheck.Lookup("p"), types.NewPtr(t)),
- types.NewField(pos, typecheck.Lookup("q"), types.NewPtr(t)),
+ types.NewField(pos, typecheck.Lookup("p"), types.Types[types.TUNSAFEPTR]),
+ types.NewField(pos, typecheck.Lookup("q"), types.Types[types.TUNSAFEPTR]),
},
[]*types.Field{
types.NewField(pos, typecheck.Lookup("r"), types.Types[types.TBOOL]),
}))
sym.Def = fn.Nname
fn.Pragma |= ir.Noinline // TODO(mdempsky): We need to emit this during the unified frontend instead, to allow inlining.
-
typecheck.DeclFunc(fn)
np := fn.Dcl[0]
nq := fn.Dcl[1]
nr := fn.Dcl[2]
// Label to jump to if an equality test fails.
neq := typecheck.AutoLabel(".neq")
- // We reach here only for types that have equality but
- // cannot be handled by the standard algorithms,
- // so t must be either an array or a struct.
- switch t.Kind() {
- default:
- base.Fatalf("geneq %v", t)
+ // Grab known alignment of argument pointers. (ptrSize is the default.)
+ align := int64(types.PtrSize)
+ if len(sig) > 0 && sig[0] == sigAlign {
+ sig = sig[1:]
+ align, sig = parseNum(sig)
+ }
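+ // For example, a signature starting "A4M12" leaves align == 4 and
+ // sig == "M12" at this point.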
+ unalignedOk := base.Ctxt.Arch.CanMergeLoads
+
+ // offset from np/nq that we're currently working on
+ var off int64
+ var hasCall bool
+
+ // test takes a boolean. If it evaluates to false, short circuit
+ // and return false immediately. Otherwise, keep checking.
+ var lastTest ir.Node
+ test := func(eq ir.Node) {
+ // Buffer one test in lastTest so we can use the
+ // last one as the return value.
+ if lastTest != nil {
+ nif := ir.NewIfStmt(pos, lastTest, nil, []ir.Node{ir.NewBranchStmt(pos, ir.OGOTO, neq)})
+ fn.Body.Append(nif)
+ }
+ lastTest = eq
+ }
+ // load loads data of type t from np+off and nq+off.
+ // Increments off by the size of t.
+ load := func(t *types.Type) (ir.Node, ir.Node) {
+ c := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(off))
+ p := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, c)
+ q := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, nq, c)
+ x := ir.NewStarExpr(pos, ir.NewConvExpr(pos, ir.OCONVNOP, t.PtrTo(), p))
+ y := ir.NewStarExpr(pos, ir.NewConvExpr(pos, ir.OCONVNOP, t.PtrTo(), q))
+ off += t.Size()
+ return x, y
+ }
+ // compare compares x and y and jumps to neq if they are not equal.
+ compare := func(x, y ir.Node) {
+ test(ir.NewBinaryExpr(pos, ir.OEQ, x, y))
+ }
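+ // For instance, compare(load(types.Types[types.TUINT64])) queues the test
+ //   *(*uint64)(unsafe.Add(p, off)) == *(*uint64)(unsafe.Add(q, off))
+ // and advances off by 8.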
- case types.TARRAY:
- nelem := t.NumElem()
-
- // checkAll generates code to check the equality of all array elements.
- // If unroll is greater than nelem, checkAll generates:
- //
- // if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
- // } else {
- // goto neq
- // }
- //
- // And so on.
- //
- // Otherwise it generates:
- //
- // iterateTo := nelem/unroll*unroll
- // for i := 0; i < iterateTo; i += unroll {
- // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
- // } else {
- // goto neq
- // }
- // }
- // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
- // } else {
- // goto neq
- // }
- //
- checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
- // checkIdx generates a node to check for equality at index i.
- checkIdx := func(i ir.Node) ir.Node {
- // pi := p[i]
- pi := ir.NewIndexExpr(base.Pos, np, i)
- pi.SetBounded(true)
- pi.SetType(t.Elem())
- // qi := q[i]
- qi := ir.NewIndexExpr(base.Pos, nq, i)
- qi.SetBounded(true)
- qi.SetType(t.Elem())
- return eq(pi, qi)
- }
+ // We keep track of string contents that we don't compare immediately.
+ // We delay comparing string contents because they might be large and
+ // we'd rather compare scalars farther along in the signature first.
+ var pendingStrings []int64
+ flushStrings := func() {
+ defer func(saveOff int64) {
+ off = saveOff
+ }(off)
+ byte := types.Types[types.TUINT8]
+ for _, x := range pendingStrings {
+ off = x
+ ptrA, ptrB := load(byte.PtrTo())
+ len, _ := load(types.Types[types.TUINTPTR])
+ // Note: we already checked that the lengths are equal.
+ memeq := typecheck.LookupRuntime("memequal", byte, byte)
+ test(typecheck.Call(pos, memeq, []ir.Node{ptrA, ptrB, len}, false))
+ hasCall = true
+ }
+ pendingStrings = pendingStrings[:0]
+ }
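+ // For a pending string at offset x, the test emitted here is roughly
+ //   memequal(*(**byte)(p+x), *(**byte)(q+x), *(*uintptr)(p+x+ptrSize))
+ // where the lengths at x+ptrSize were already compared in the main loop.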
- iterations := nelem / unroll
- iterateTo := iterations * unroll
- // If a loop is iterated only once, there shouldn't be any loop at all.
- if iterations == 1 {
- iterateTo = 0
+ for len(sig) > 0 {
+ kind := sig[0]
+ sig = sig[1:]
+ switch kind {
+ case sigMemory:
+ var n int64
+ n, sig = parseNum(sig)
+ if n > 64 { // TODO: why 64?
+ // For big regions, call memequal.
+ c := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(off))
+ p := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, c)
+ q := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, nq, c)
+ len := ir.NewBasicLit(pos, types.Types[types.TUINTPTR], constant.MakeInt64(n))
+ byte := types.Types[types.TUINT8]
+ p2 := ir.NewConvExpr(pos, ir.OCONVNOP, byte.PtrTo(), p)
+ q2 := ir.NewConvExpr(pos, ir.OCONVNOP, byte.PtrTo(), q)
+ memeq := typecheck.LookupRuntime("memequal", byte, byte)
+ test(typecheck.Call(pos, memeq, []ir.Node{p2, q2, len}, false))
+ hasCall = true
+ off += n
+ n = 0
}
-
- if iterateTo > 0 {
- // Generate an unrolled for loop.
- // for i := 0; i < nelem/unroll*unroll; i += unroll
- i := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
- init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(base.Pos, 0))
- cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(base.Pos, iterateTo))
- loop := ir.NewForStmt(base.Pos, nil, cond, nil, nil, false)
- loop.PtrInit().Append(init)
-
- // if eq(p[i+0], q[i+0]) && eq(p[i+1], q[i+1]) && ... && eq(p[i+unroll-1], q[i+unroll-1]) {
- // } else {
- // goto neq
- // }
- for j := int64(0); j < unroll; j++ {
- // if check {} else { goto neq }
- nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
- nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
- loop.Body.Append(nif)
- post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(base.Pos, 1)))
- loop.Body.Append(post)
- }
-
- fn.Body.Append(loop)
-
- if nelem == iterateTo {
- if last {
- fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
- }
- return
+ n0 := n
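+ // Chop the region into the widest loads that alignment allows,
+ // overlapping loads for the tail when possible. For example, with
+ // unaligned loads a 12-byte region becomes one uint64 compare plus
+ // one uint32 compare, and a 7-byte tail of a larger region becomes
+ // a single uint64 compare starting one byte early.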
+ for n != 0 {
+ switch {
+ case n >= 8 && (unalignedOk || align >= 8 && off%8 == 0):
+ compare(load(types.Types[types.TUINT64]))
+ n -= 8
+ case (n == 5 || n == 6 || n == 7) && unalignedOk && n0 >= 8:
+ off -= 8 - n
+ compare(load(types.Types[types.TUINT64]))
+ n = 0
+ case n >= 4 && (unalignedOk || align >= 4 && off%4 == 0):
+ compare(load(types.Types[types.TUINT32]))
+ n -= 4
+ case n == 3 && unalignedOk && n0 >= 4:
+ off--
+ compare(load(types.Types[types.TUINT32]))
+ n = 0
+ case n >= 2 && (unalignedOk || align >= 2 && off%2 == 0):
+ compare(load(types.Types[types.TUINT16]))
+ n -= 2
+ default:
+ compare(load(types.Types[types.TUINT8]))
+ n--
}
}
-
- // Generate remaining checks, if nelem is not a multiple of unroll.
- if last {
- // Do last comparison in a different manner.
- nelem--
+ case sigFloat32:
+ compare(load(types.Types[types.TFLOAT32]))
+ case sigFloat64:
+ compare(load(types.Types[types.TFLOAT64]))
+ case sigString:
+ // Compare just the lengths right now.
+ // Save the contents for later.
+ pendingStrings = append(pendingStrings, off)
+ off += int64(types.PtrSize)
+ compare(load(types.Types[types.TUINTPTR]))
+ case sigEface, sigIface:
+ // flushStrings here to ensure that we only get a panic from
+ // this interface test if all previous equality checks pass.
+ flushStrings()
+ typeX, typeY := load(types.Types[types.TUINTPTR].PtrTo())
+ compare(typeX, typeY)
+ dataX, dataY := load(types.Types[types.TUNSAFEPTR])
+ var eqFn *ir.Name
+ if kind == sigEface {
+ eqFn = typecheck.LookupRuntime("efaceeq")
+ } else {
+ eqFn = typecheck.LookupRuntime("ifaceeq")
}
- // if eq(p[iterateTo+0], q[iterateTo+0]) && eq(p[iterateTo+1], q[iterateTo+1]) && ... {
- // } else {
- // goto neq
- // }
- for j := iterateTo; j < nelem; j++ {
- // if check {} else { goto neq }
- nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(base.Pos, j)), nil, nil)
- nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ test(typecheck.Call(pos, eqFn, []ir.Node{typeX, dataX, dataY}, false))
+ hasCall = true
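+ // The generated shape is morally:
+ //   if tp != tq { goto neq }             // type/itab words
+ //   if !efaceeq(tp, dp, dq) { goto neq } // or ifaceeq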
+ case sigSkip:
+ var n int64
+ n, sig = parseNum(sig)
+ off += n
+ case sigArrayStart:
+ // Flush any pending test.
+ flushStrings()
+ // TODO: if the element comparison can't panic (no E or I), then
+ // maybe we don't need to do this flushStrings?
+ // On the other hand, maybe the unflushed string is not equal, but
+ // a big following array is all equal.
+ if lastTest != nil {
+ nif := ir.NewIfStmt(pos, lastTest, nil, []ir.Node{ir.NewBranchStmt(pos, ir.OGOTO, neq)})
fn.Body.Append(nif)
- }
- if last {
- fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(base.Pos, nelem))))
- }
- }
-
- switch t.Elem().Kind() {
- case types.TSTRING:
- // Do two loops. First, check that all the lengths match (cheap).
- // Second, check that all the contents match (expensive).
- checkAll(3, false, func(pi, qi ir.Node) ir.Node {
- // Compare lengths.
- eqlen, _ := compare.EqString(pi, qi)
- return eqlen
- })
- checkAll(1, true, func(pi, qi ir.Node) ir.Node {
- // Compare contents.
- _, eqmem := compare.EqString(pi, qi)
- return eqmem
- })
- case types.TFLOAT32, types.TFLOAT64:
- checkAll(2, true, func(pi, qi ir.Node) ir.Node {
- // p[i] == q[i]
- return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
- })
- case types.TSTRUCT:
- isCall := func(n ir.Node) bool {
- return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
- }
- var expr ir.Node
- var hasCallExprs bool
- allCallExprs := true
- and := func(cond ir.Node) {
- if expr == nil {
- expr = cond
- } else {
- expr = ir.NewLogicalExpr(base.Pos, ir.OANDAND, expr, cond)
- }
+ lastTest = nil
}
- var tmpPos src.XPos
- pi := ir.NewIndexExpr(tmpPos, np, ir.NewInt(tmpPos, 0))
- pi.SetBounded(true)
- pi.SetType(t.Elem())
- qi := ir.NewIndexExpr(tmpPos, nq, ir.NewInt(tmpPos, 0))
- qi.SetBounded(true)
- qi.SetType(t.Elem())
- flatConds, canPanic := compare.EqStruct(t.Elem(), pi, qi)
- for _, c := range flatConds {
- if isCall(c) {
- hasCallExprs = true
- } else {
- allCallExprs = false
+ var n int64
+ n, sig = parseNum(sig)
+ // Find matching closing brace.
+ i := 0
+ depth := 1
+ findEndSquareBracket:
+ for {
+ if i == len(sig) {
+ base.Fatalf("mismatched brackets in %s", sig0)
}
- }
- if !hasCallExprs || allCallExprs || canPanic {
- checkAll(1, true, func(pi, qi ir.Node) ir.Node {
- // p[i] == q[i]
- return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
- })
- } else {
- checkAll(4, false, func(pi, qi ir.Node) ir.Node {
- expr = nil
- flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
- if len(flatConds) == 0 {
- return ir.NewBool(base.Pos, true)
- }
- for _, c := range flatConds {
- if !isCall(c) {
- and(c)
- }
- }
- return expr
- })
- checkAll(2, true, func(pi, qi ir.Node) ir.Node {
- expr = nil
- flatConds, _ := compare.EqStruct(t.Elem(), pi, qi)
- for _, c := range flatConds {
- if isCall(c) {
- and(c)
- }
+ switch sig[i] {
+ case sigArrayStart:
+ depth++
+ case sigArrayEnd:
+ depth--
+ if depth == 0 {
+ break findEndSquareBracket
}
- return expr
- })
- }
- default:
- checkAll(1, true, func(pi, qi ir.Node) ir.Node {
- // p[i] == q[i]
- return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
- })
- }
-
- case types.TSTRUCT:
- flatConds, _ := compare.EqStruct(t, np, nq)
- if len(flatConds) == 0 {
- fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, true)))
- } else {
- for _, c := range flatConds[:len(flatConds)-1] {
- // if cond {} else { goto neq }
- n := ir.NewIfStmt(base.Pos, c, nil, nil)
- n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
- fn.Body.Append(n)
+ }
+ i++
}
- fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
+ elemSig := sig[:i]
+ elemSize := sigSize(elemSig)
+ sig = sig[i+1:] // remaining signature after array
+
+ // Loop N times, calling comparison function for the element.
+ // for i := off; i < off + N*elemSize; i += elemSize {
+ // if !eqfn(p+i, q+i) { goto neq }
+ // }
+ elemFn := eqFunc(elemSig).Nname
+ idx := typecheck.TempAt(pos, ir.CurFunc, types.Types[types.TUINTPTR])
+ init := ir.NewAssignStmt(pos, idx, ir.NewInt(pos, off))
+ cond := ir.NewBinaryExpr(pos, ir.OLT, idx, ir.NewInt(pos, off+n*elemSize))
+ post := ir.NewAssignStmt(pos, idx, ir.NewBinaryExpr(pos, ir.OADD, idx, ir.NewInt(pos, elemSize)))
+
+ p := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, np, idx)
+ q := ir.NewBinaryExpr(pos, ir.OUNSAFEADD, nq, idx)
+ call := typecheck.Call(pos, elemFn, []ir.Node{p, q}, false)
+ nif := ir.NewIfStmt(pos, call, nil, []ir.Node{ir.NewBranchStmt(pos, ir.OGOTO, neq)})
+ loop := ir.NewForStmt(pos, init, cond, post, []ir.Node{nif}, false)
+ fn.Body.Append(loop)
+ off += n * elemSize
+
+ // TODO: if the element comparison can't panic, but has strings
+ // in it, maybe we do a loop first without string contents and a
+ // second loop with string contents. There is no way to accomplish
+ // this now, the way this code works (to call the equality
+ // function of the sub-signature).
}
}
+ // Flush any pending tests.
+ // The last test is used directly as the result (instead of branching on it).
+ flushStrings()
+ if lastTest == nil {
+ lastTest = ir.NewBool(pos, true)
+ }
+ as := ir.NewAssignStmt(pos, nr, lastTest)
+ fn.Body.Append(as)
// ret:
// return
ret := typecheck.AutoLabel(".ret")
- fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
- fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+ fn.Body.Append(ir.NewLabelStmt(pos, ret))
+ fn.Body.Append(ir.NewReturnStmt(pos, nil))
// neq:
// r = false
// return (or goto ret)
- fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
- fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(base.Pos, false)))
- if compare.EqCanPanic(t) || anyCall(fn) {
+ fn.Body.Append(ir.NewLabelStmt(pos, neq))
+ fn.Body.Append(ir.NewAssignStmt(pos, nr, ir.NewBool(pos, false)))
+ if hasCall {
// Epilogue is large, so share it with the equal case.
- fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
+ fn.Body.Append(ir.NewBranchStmt(pos, ir.OGOTO, ret))
} else {
// Epilogue is small, so don't bother sharing.
- fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+ fn.Body.Append(ir.NewReturnStmt(pos, nil))
}
// TODO(khr): the epilogue size detection condition above isn't perfect.
// We should really do a generic CL that shares epilogues across
// EqFor returns an ONAME node representing type t's equal function, and a
// boolean indicating whether a length needs to be passed when calling the function.
-func EqFor(t *types.Type) (ir.Node, bool) {
+// Also returns the argument type of the function (TODO: remove somehow).
+func EqFor(t *types.Type) (ir.Node, bool, *types.Type) {
switch types.AlgType(t) {
case types.AMEM:
- return typecheck.LookupRuntime("memequal", t, t), true
+ return typecheck.LookupRuntime("memequal", t, t), true, t.PtrTo()
case types.ASPECIAL:
- fn := eqFunc(t)
- return fn.Nname, false
+ fn := eqFunc(eqSignature(t))
+ return fn.Nname, false, types.Types[types.TUNSAFEPTR]
}
base.Fatalf("EqFor %v", t)
- return nil, false
-}
-
-func anyCall(fn *ir.Func) bool {
- return ir.Any(fn, func(n ir.Node) bool {
- // TODO(rsc): No methods?
- op := n.Op()
- return op == ir.OCALL || op == ir.OCALLFUNC
- })
+ return nil, false, nil
}
func hashmem(t *types.Type) ir.Node {
return typecheck.LookupRuntime("memhash", t)
}
+
+// eqSignature returns a signature of the equality function for type t.
+// If two types have the same signature, they can use the same equality function.
+// The signature lists the comparisons that the equality function needs
+// to make, in order. So for instance, a type like:
+//
+// type S struct {
+// i int32
+// j uint32
+// s string
+// e error
+// }
+//
+// will have the signature "M8SI".
+//
+// M8 = 8 bytes of regular memory
+// S = string
+// I = non-empty interface
+//
+// The content of the signature is not intended for users. It is an
+// internal condensation of the comparison operations that need to be
+// performed.
+// (Although, note that these names might be seen in tracebacks where
+// the equality test panics due to incomparable interfaces.)
+//
+// Full signature spec:
+//
+// M%d = %d bytes of memory that should be compared directly
+// K%d = %d bytes of memory that should not be compared (sKip)
+// F = float32
+// G = float64
+// S = string
+// I = non-empty interface
+// E = empty interface
+// [%d%s] = array: repeat signature %s %d times.
+// A%d = known alignment of type pointers (defaults to ptrSize)
+//
+// An alignment directive is only needed on platforms that can't do
+// unaligned loads.
+// If an alignment directive is present, it must be first.
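+//
+// As another worked example (on a 64-bit target that allows unaligned
+// loads), a type like:
+//
+// type T struct {
+// a [4]string
+// f float64
+// }
+//
+// will have the signature "[2SS]G": the array unrolls by a factor of 2
+// into a loop of two string comparisons, followed by a float64 comparison.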
+func eqSignature(t *types.Type) string {
+ var e eqSigBuilder
+ if !base.Ctxt.Arch.CanMergeLoads { // alignment only matters if we can't use unaligned loads
+ if a := t.Alignment(); a != int64(types.PtrSize) {
+ e.r.WriteString(fmt.Sprintf("%c%d", sigAlign, a))
+ }
+ }
+ e.build(t)
+ e.flush()
+ return e.r.String()
+}
+
+const (
+ sigMemory = 'M' // followed by an integer number of bytes
+ sigSkip = 'K' // followed by an integer number of bytes
+ sigFloat32 = 'F'
+ sigFloat64 = 'G'
+ sigString = 'S'
+ sigIface = 'I' // non-empty interface
+ sigEface = 'E' // empty interface
+ sigArrayStart = '[' // followed by an iteration count, element signature, and sigArrayEnd
+ sigArrayEnd = ']'
+ sigAlign = 'A' // followed by an integer byte alignment
+)
+
+type eqSigBuilder struct {
+ r strings.Builder
+ regMem int64 // queued up region of regular memory
+ skipMem int64 // queued up region of memory to skip
+}
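+
+// Adjacent memory regions coalesce: for example, regular(4); regular(4);
+// skip(8) emits "M8K8" once flushed.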
+
+func (e *eqSigBuilder) flush() {
+ if e.regMem > 0 {
+ e.r.WriteString(fmt.Sprintf("%c%d", sigMemory, e.regMem))
+ e.regMem = 0
+ }
+ if e.skipMem > 0 {
+ e.r.WriteString(fmt.Sprintf("%c%d", sigSkip, e.skipMem))
+ e.skipMem = 0
+ }
+}
+func (e *eqSigBuilder) regular(n int64) {
+ if e.regMem == 0 {
+ e.flush()
+ }
+ e.regMem += n
+}
+func (e *eqSigBuilder) skip(n int64) {
+ if e.skipMem == 0 {
+ e.flush()
+ }
+ e.skipMem += n
+}
+func (e *eqSigBuilder) float32() {
+ e.flush()
+ e.r.WriteByte(sigFloat32)
+}
+func (e *eqSigBuilder) float64() {
+ e.flush()
+ e.r.WriteByte(sigFloat64)
+}
+func (e *eqSigBuilder) string() {
+ e.flush()
+ e.r.WriteByte(sigString)
+}
+func (e *eqSigBuilder) eface() {
+ e.flush()
+ e.r.WriteByte(sigEface)
+}
+func (e *eqSigBuilder) iface() {
+ e.flush()
+ e.r.WriteByte(sigIface)
+}
+
+func (e *eqSigBuilder) build(t *types.Type) {
+ switch t.Kind() {
+ case types.TINT8, types.TUINT8, types.TBOOL:
+ e.regular(1)
+ case types.TINT16, types.TUINT16:
+ e.regular(2)
+ case types.TINT32, types.TUINT32:
+ e.regular(4)
+ case types.TINT64, types.TUINT64:
+ e.regular(8)
+ case types.TINT, types.TUINT, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
+ e.regular(int64(types.PtrSize))
+ case types.TFLOAT32:
+ e.float32()
+ case types.TFLOAT64:
+ e.float64()
+ case types.TCOMPLEX64:
+ e.float32()
+ e.float32()
+ case types.TCOMPLEX128:
+ e.float64()
+ e.float64()
+ case types.TSTRING:
+ e.string()
+ case types.TINTER:
+ if t.IsEmptyInterface() {
+ e.eface()
+ } else {
+ e.iface()
+ }
+ case types.TSTRUCT:
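+ // Padding and blank (_) fields become skip regions. For example,
+ // struct{ a uint8; b int32 } produces "M1K3M4": compare a, skip
+ // 3 bytes of padding, then compare b.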
+ var off int64
+ for _, f := range t.Fields() {
+ if f.Sym.IsBlank() {
+ continue
+ }
+ if off < f.Offset {
+ e.skip(f.Offset - off)
+ }
+ e.build(f.Type)
+ off = f.Offset + f.Type.Size()
+ }
+ if off < t.Size() {
+ e.skip(t.Size() - off)
+ }
+ case types.TARRAY:
+ if types.AlgType(t) == types.AMEM {
+ // TODO: some "regular equality" types don't hit here,
+ // like [8]sync/atomic.Pointer. Figure out how to
+ // handle the subtle difference between "AMEM" and
+ // "can be compared byte-by-byte for equality".
+ e.regular(t.Size())
+ break
+ }
+ et := t.Elem()
+ n := t.NumElem()
+ switch n {
+ case 0:
+ case 1:
+ e.build(et)
+ default:
+ // To keep signatures small, we can't just repeat
+ // the element signature N times. Instead, we issue
+ // an array into the signature. Note that this can
+ // lead to a situation where two types which could
+ // share an equality function do not, like
+ // struct { a, b, c, d string } sig: SSSS
+ // [4]string sig: [4S]
+ // That's ok, just a tad inefficient.
+ //
+ // The generated loops are kind of inefficient as well,
+ // so unroll the loop a bit.
+ const unrollSize = 32 // make loop body compare around this many bytes
+ unroll := max(1, unrollSize/et.Size())
+ // Peel off leftover iterations so the remaining count is a multiple of unroll.
+ for n%unroll != 0 {
+ e.build(et)
+ n--
+ }
+ if n == 0 {
+ break
+ }
+ // If we only have one loop left, do it directly.
+ if n == unroll {
+ for range n {
+ e.build(et)
+ }
+ break
+ }
+ e.flush()
+ e.r.WriteString(fmt.Sprintf("%c%d", sigArrayStart, n/unroll))
+ for range unroll {
+ e.build(et)
+ }
+ e.flush()
+ e.r.WriteByte(sigArrayEnd)
+ }
+ default:
+ base.Fatalf("eqSigBuilder %v", t)
+ }
+}
+
+// parseNum parses and removes the decimal number at the start of s.
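+// For example, parseNum("12SG") returns (12, "SG").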
+func parseNum(s string) (int64, string) {
+ n := 0
+ for n < len(s) && s[n] >= '0' && s[n] <= '9' {
+ n++
+ }
+ x, err := strconv.ParseInt(s[:n], 10, 64)
+ if err != nil {
+ base.Fatalf("bad integer: %s", s[:n])
+ }
+ return x, s[n:]
+}
+
+// sigSize returns the size of the type described by the signature.
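+// For example, on a 64-bit target, sigSize("[2SS]G") = 2*(16+16) + 8 = 72.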
+func sigSize(sig string) int64 {
+ sig0 := sig
+ var size int64
+ for len(sig) > 0 {
+ kind := sig[0]
+ sig = sig[1:]
+ switch kind {
+ case sigMemory, sigSkip:
+ var n int64
+ n, sig = parseNum(sig)
+ size += n
+ case sigFloat32:
+ size += 4
+ case sigFloat64:
+ size += 8
+ case sigString, sigIface, sigEface:
+ size += 2 * int64(types.PtrSize)
+ case sigArrayStart:
+ var n int64
+ n, sig = parseNum(sig)
+ // Find matching closing brace.
+ i := 0
+ depth := 1
+ findEndSquareBracket:
+ for {
+ if i == len(sig) {
+ base.Fatalf("mismatched brackets in %s", sig0)
+ }
+ switch sig[i] {
+ case sigArrayStart:
+ depth++
+ case sigArrayEnd:
+ depth--
+ if depth == 0 {
+ break findEndSquareBracket
+ }
+ }
+ i++
+ }
+ size += n * sigSize(sig[:i])
+ sig = sig[i+1:]
+ }
+ }
+ return size
+}