tmpP3 := Point{}
tmpP1xP1 := projP1xP1{}
for i := 0; i < 7; i++ {
- // Compute (i+1)*Q as Q + i*Q and convert to a ProjCached
+ // Compute (i+1)*Q as Q + i*Q and convert to a projCached
// This is needlessly complicated because the API has explicit
// receivers instead of creating stack objects and relying on RVO
v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
tmpP3 := Point{}
tmpP1xP1 := projP1xP1{}
for i := 0; i < 7; i++ {
- // Compute (i+1)*Q as Q + i*Q and convert to AffineCached
+ // Compute (i+1)*Q as Q + i*Q and convert to affineCached
v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
}
}
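// A minimal standalone sketch of the same table construction, assuming the
// public filippo.io/edwards25519 API rather than this file's internal cached
// types: table[i] ends up holding (i+1)*Q, built by repeated addition of Q.
package main

import (
	"fmt"

	"filippo.io/edwards25519"
)

func main() {
	q := edwards25519.NewGeneratorPoint()
	var table [8]edwards25519.Point
	table[0].Set(q)
	for i := 0; i < 7; i++ {
		// (i+2)*Q = Q + (i+1)*Q
		table[i+1].Add(q, &table[i])
	}
	for i := range table {
		fmt.Printf("%d*Q = %x\n", i+1, table[i].Bytes())
	}
}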
// This file implements encryption and decryption using PKCS #1 v1.5 padding.
-// PKCS1v15DecrypterOpts is for passing options to PKCS #1 v1.5 decryption using
+// PKCS1v15DecryptOptions is for passing options to PKCS #1 v1.5 decryption using
// the crypto.Decrypter interface.
type PKCS1v15DecryptOptions struct {
// SessionKeyLen is the length of the session key that is being
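// A hedged usage sketch of PKCS1v15DecryptOptions via the crypto.Decrypter
// interface: setting SessionKeyLen makes a padding failure yield a random key
// of that length instead of an error, so the caller doesn't act as a padding
// oracle. Key sizes and the 32-byte session key below are illustrative.
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	sessionKey := make([]byte, 32)
	if _, err := rand.Read(sessionKey); err != nil {
		log.Fatal(err)
	}
	ciphertext, err := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, sessionKey)
	if err != nil {
		log.Fatal(err)
	}
	opts := &rsa.PKCS1v15DecryptOptions{SessionKeyLen: 32}
	recovered, err := priv.Decrypt(rand.Reader, ciphertext, opts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(recovered)) // 32
}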
return marshalECPrivateKeyWithOID(key, oid)
}
-// marshalECPrivateKey marshals an EC private key into ASN.1, DER format and
+// marshalECPrivateKeyWithOID marshals an EC private key into ASN.1, DER format and
// sets the curve ID to the given OID, or omits it if OID is nil.
func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) {
if !key.Curve.IsOnCurve(key.X, key.Y) {
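// marshalECPrivateKeyWithOID is unexported; callers normally reach it through
// the exported x509 helpers. A hedged sketch using x509.MarshalECPrivateKey,
// which produces the SEC 1 / RFC 5915 structure with the named-curve OID set.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		log.Fatal(err)
	}
	if err := pem.Encode(os.Stdout, &pem.Block{Type: "EC PRIVATE KEY", Bytes: der}); err != nil {
		log.Fatal(err)
	}
}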
// a map from entry format ids to their descriptions
type abbrevTable map[uint32]abbrev
-// ParseAbbrev returns the abbreviation table that starts at byte off
+// parseAbbrev returns the abbreviation table that starts at byte off
// in the .debug_abbrev section.
func (d *Data) parseAbbrev(off uint64, vers int) (abbrevTable, error) {
if m, ok := d.abbrevCache[off]; ok {
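// parseAbbrev is internal; from the outside, the abbreviation tables are
// consumed implicitly while walking DWARF entries. A hedged sketch using the
// public debug/dwarf reader (the binary path below is a placeholder):
package main

import (
	"debug/dwarf"
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	f, err := elf.Open("./a.out") // placeholder: an ELF binary with DWARF info
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	d, err := f.DWARF()
	if err != nil {
		log.Fatal(err)
	}
	r := d.Reader()
	for {
		e, err := r.Next()
		if err != nil {
			log.Fatal(err)
		}
		if e == nil {
			break
		}
		if e.Tag == dwarf.TagCompileUnit {
			fmt.Println(e.Val(dwarf.AttrName))
		}
	}
}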
consumed := false
if sv.IsValid() {
// unmarshalPath can call unmarshal, so we need to pass the depth through so that
- // we can continue to enforce the maximum recusion limit.
+ // we can continue to enforce the maximum recursion limit.
consumed, err = d.unmarshalPath(tinfo, sv, nil, &t, depth)
if err != nil {
return err
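// A hedged sketch of the behavior this depth bookkeeping enforces: deeply
// nested elements unmarshaled into a recursive struct are rejected rather
// than recursing without bound (the exact limit and error text are internal).
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type node struct {
	Child *node `xml:"node"`
}

func main() {
	const depth = 20000 // assumed to exceed the decoder's maximum depth
	doc := strings.Repeat("<node>", depth) + strings.Repeat("</node>", depth)
	var n node
	err := xml.Unmarshal([]byte(doc), &n)
	fmt.Println(err) // expect an error once the recursion limit is exceeded
}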
case 'T', 'U':
// Types can be recursive. We need to setup a stub
- // declaration before recurring.
+ // declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
// Declare obj before calling r.tparamList, so the new type name is recognized
}
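// A hedged sketch of the stub pattern using the public go/types API: create
// the named type with a nil underlying first, so the underlying type can
// mention the name while it is being built (package path and names made up).
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")

	// Stub first: a TypeName with no type yet, wrapped in a Named with no underlying.
	obj := types.NewTypeName(token.NoPos, pkg, "List", nil)
	named := types.NewNamed(obj, nil, nil)

	// Now the underlying type may refer back to the named type: struct{ Next *List }.
	next := types.NewField(token.NoPos, pkg, "Next", types.NewPointer(named), false)
	named.SetUnderlying(types.NewStruct([]*types.Var{next}, nil))

	fmt.Println(types.ObjectString(obj, nil))
}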
// The data structure of each (provided or inferred) type represents a graph, where
- // each node corresponds to a type and each (directed) vertice points to a component
+ // each node corresponds to a type and each (directed) vertex points to a component
// type. The substitution process described above repeatedly replaces type parameter
// nodes in these graphs with the graphs of the types the type parameters stand for,
// which creates a new (possibly bigger) graph for each type.
// Generally, cycles may occur across multiple type parameters and inferred types
// (for instance, consider [P interface{ *Q }, Q interface{ func(P) }]).
// We eliminate cycles by walking the graphs for all type parameters. If a cycle
- // through a type parameter is detected, cycleFinder nils out the respectice type
+ // through a type parameter is detected, cycleFinder nils out the respective type
// which kills the cycle; this also means that the respective type could not be
// inferred.
//
// TODO(gri) If useful, we could report the respective cycle as an error. We don't
// do this now because type inference will fail anyway, and furthermore,
// constraints with cycles of this kind cannot currently be satisfied by
- // any user-suplied type. But should that change, reporting an error
+ // any user-supplied type. But should that change, reporting an error
// would be wrong.
w := cycleFinder{tparams, types, make(map[Type]bool)}
for _, t := range tparams {
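// A hedged illustration of the kind of cycle meant above, shown as a
// declaration only (hypothetical function f): P's constraint mentions Q and
// Q's constraint mentions P, and, as noted, no user-supplied type arguments
// can currently satisfy both, so inference nils out the cycle and fails.
package p

func f[P interface{ *Q }, Q interface{ func(P) }](p P, q Q) {}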
// nest = A[A[string]]->B[P]
// path = A[A[string]]->B[P]
//
-// Eventutally we reach the type parameter P of type B (P₂):
+// Eventually we reach the type parameter P of type B (P₂):
//
// P₂
// nest = A[A[string]]->B[P]
)
// This package contains APIs and helpers for encoding initial portions
-// of the counter data files emitted at runtime when coverage instrumention
+// of the counter data files emitted at runtime when coverage instrumentation
// is enabled. Counter data files may contain multiple segments; the file
// header and first segment are written via the "Write" method below, and
// additional segments can then be added using "AddSegment".
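// A hedged sketch of producing counter data files from a running program via
// the public runtime/coverage API; the calls only succeed when the binary was
// built with -cover, and the output directory here is a temporary one.
package main

import (
	"fmt"
	"log"
	"os"
	"runtime/coverage"
)

func main() {
	dir, err := os.MkdirTemp("", "covdata")
	if err != nil {
		log.Fatal(err)
	}
	if err := coverage.WriteMetaDir(dir); err != nil {
		log.Printf("meta-data files: %v", err) // fails unless built with -cover
	}
	if err := coverage.WriteCountersDir(dir); err != nil {
		log.Printf("counter data files: %v", err) // fails unless built with -cover
	}
	fmt.Println("coverage data written to", dir)
}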
// For now, this is a simple ring buffer
// (https://en.wikipedia.org/wiki/Circular_buffer).
//
-// TODO(golang.org/issue/46224): use a priotization algorithm based on input
+// TODO(golang.org/issue/46224): use a prioritization algorithm based on input
// size, previous duration, coverage, and any other metrics that seem useful.
type queue struct {
// elems holds a ring buffer.
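// A hedged, self-contained sketch of a growable ring-buffer queue in the same
// spirit (a head index plus an element count over a circular slice); not the
// actual fuzzing queue, just the data-structure idea.
package main

import "fmt"

type ringQueue struct {
	elems []any
	head  int // index of the first element
	len   int // number of elements currently stored
}

func (q *ringQueue) grow() {
	newCap := 2 * len(q.elems)
	if newCap == 0 {
		newCap = 8
	}
	grown := make([]any, newCap)
	for i := 0; i < q.len; i++ {
		grown[i] = q.elems[(q.head+i)%len(q.elems)]
	}
	q.elems, q.head = grown, 0
}

func (q *ringQueue) enqueue(e any) {
	if q.len == len(q.elems) {
		q.grow()
	}
	q.elems[(q.head+q.len)%len(q.elems)] = e
	q.len++
}

func (q *ringQueue) dequeue() (any, bool) {
	if q.len == 0 {
		return nil, false
	}
	e := q.elems[q.head]
	q.elems[q.head] = nil
	q.head = (q.head + 1) % len(q.elems)
	q.len--
	return e, true
}

func main() {
	var q ringQueue
	for i := 0; i < 10; i++ {
		q.enqueue(i)
	}
	for e, ok := q.dequeue(); ok; e, ok = q.dequeue() {
		fmt.Print(e, " ")
	}
	fmt.Println()
}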