var errorMessage strings.Builder
for caseIndex, expectedPos := range expectedPositions {
actualPosition := v.fset.PositionFor(ident.Pos(), true)
- errorOccured := false
+ errorOccurred := false
if expectedPos.Line != actualPosition.Line {
fmt.Fprintf(&errorMessage, "wrong line number for ident %s: expected: %d got: %d\n", ident.Name, expectedPos.Line, actualPosition.Line)
- errorOccured = true
+ errorOccurred = true
}
if expectedPos.Column != actualPosition.Column {
fmt.Fprintf(&errorMessage, "wrong column number for ident %s: expected: %d got: %d\n", ident.Name, expectedPos.Column, actualPosition.Column)
- errorOccured = true
+ errorOccurred = true
}
- if errorOccured {
+ if errorOccurred {
continue
}
gotMatch = true
}
if base.Debug.Checkptr != 0 && types.IsRuntimePkg(callee.Sym().Pkg) {
- // We don't intrument runtime packages for checkptr (see base/flag.go).
+ // We don't instrument runtime packages for checkptr (see base/flag.go).
if log && logopt.Enabled() {
logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn),
- fmt.Sprintf(`call to into runtime package function %s in -d=checkptr build`, ir.PkgFuncName(callee)))
+ fmt.Sprintf(`call into runtime package function %s in -d=checkptr build`, ir.PkgFuncName(callee)))
return nil
}
-// returns.go T_multi_return_nil_anomoly 98 0 1
+// returns.go T_multi_return_nil_anomaly 98 0 1
// ResultFlags
// 0 ResultIsConcreteTypeConvertedToInterface
// <endpropsdump>
// {"Flags":0,"ParamFlags":[0,0],"ResultFlags":[4]}
// <endcallsites>
// <endfuncpreamble>
-func T_multi_return_nil_anomoly(x, y bool) Itf {
+func T_multi_return_nil_anomaly(x, y bool) Itf {
if x && y {
var qnil *Q
return qnil
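
For illustration, a minimal sketch of the typed-nil-in-interface behaviour this case exercises, reusing the Itf and Q names from the test with a hypothetical method M and helper get:

package main

import "fmt"

type Itf interface{ M() }

type Q struct{}

func (*Q) M() {}

// get mirrors the shape of T_multi_return_nil_anomaly: a nil *Q is
// converted to the Itf interface on return.
func get() Itf {
	var qnil *Q
	return qnil
}

func main() {
	// The interface value is non-nil because it carries the concrete
	// type *Q, even though the pointer inside it is nil.
	fmt.Println(get() == nil) // false
}
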
//
// Note the SPanchored: this ensures that the scheduler won't
// move the MOVDaddr earlier than the vardef. With a variable
- // "xyz" that has no pointers, howver, if we start with
+ // "xyz" that has no pointers, however, if we start with
//
// v66 = VarDef <mem> {t2} v65
// v67 = LocalAddr <*T> {t2} v2 v66
// //line directives that change line numbers in strange ways should be rare,
// and failing PGO matching on these files is not too big of a loss.
-// Package pgoir assosciates a PGO profile with the IR of the current package
+// Package pgoir associates a PGO profile with the IR of the current package
// compilation.
package pgoir
if hotAlign > 0 && b.Hotness&ssa.HotPgoInitial == ssa.HotPgoInitial {
// So far this has only been shown profitable for PGO-hot loop headers.
- // The Hotness values allows distinctions betwen initial blocks that are "hot" or not, and "flow-in" or not.
+ // The Hotness values allow distinctions between initial blocks that are "hot" or not, and "flow-in" or not.
// Currently only the initial blocks of loops are tagged in this way;
// there are no blocks tagged "pgo-hot" that are not also tagged "initial".
// TODO more heuristics, more architectures.
}
}
-// collectParams collects (but does not delare) all parameters of list and returns
+// collectParams collects (but does not declare) all parameters of list and returns
// the list of parameter names, corresponding parameter variables, and whether the
// parameter list is variadic. Anonymous parameters are recorded with nil names.
func (check *Checker) collectParams(list []*syntax.Field, variadicOk bool) (names []*syntax.Name, params []*Var, variadic bool) {
go 1.23
-// The indrect comment below is inaccurate. Its purpose
+// The indirect comment below is inaccurate. Its purpose
// is to test that it is corrected when enough packages
// are loaded to correct it.
-1. All generial-prupose register names are written as Rn.
+1. All general-purpose register names are written as Rn.
-2. All floating-poing register names are written as Fn.
+2. All floating-point register names are written as Fn.
# Argument mapping rules
// The NOP is needed to give the jumps somewhere to land.
// It is a liblink NOP, not a hardware NOP: it encodes to 0 instruction bytes.
//
- // We don't generate this for leafs because that means the wrapped
+ // We don't generate this for leaves because that means the wrapped
// function was inlined into the wrapper.
q = obj.Appendp(q, newprog)
} else {
// Non-empty output indicates failure, as mentioned above.
if len(string(sout)) != 0 {
- t.Errorf("unexpected outut from %s:\n%s\n", sprog, string(sout))
+ t.Errorf("unexpected output from %s:\n%s\n", sprog, string(sout))
}
}
rcmd := testenv.Command(t, filepath.Join(dir, targ))
for {
l := &levels[level]
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
- // We've run out of both leafs and pairs.
+ // We've run out of both leaves and pairs.
// End all calculations for this level.
// To make sure we never come back to this level or any lower level,
// set nextPairFreq impossibly large.
}
}
-// collectParams collects (but does not delare) all parameters of list and returns
+// collectParams collects (but does not declare) all parameters of list and returns
// the list of parameter names, corresponding parameter variables, and whether the
// parameter list is variadic. Anonymous parameters are recorded with nil names.
func (check *Checker) collectParams(list *ast.FieldList, variadicOk bool) (names []*ast.Ident, params []*Var, variadic bool) {
Line uint64
}
-// ExperimentalEvent presents a raw view of an experimental event's arguments and thier names.
+// ExperimentalEvent presents a raw view of an experimental event's arguments and their names.
type ExperimentalEvent struct {
// Name is the name of the event.
Name string
// Merge events as long as at least one P has more events
gs := make(map[uint64]gState)
// Note: technically we don't need a priority queue here. We're only ever
- // interested in the earliest elligible event, which means we just have to
+ // interested in the earliest eligible event, which means we just have to
// track the smallest element. However, in practice, the priority queue
// performs better, because for each event we only have to compute its state
- // transition once, not on each iteration. If it was elligible before, it'll
+ // transition once, not on each iteration. If it was eligible before, it'll
// already be in the queue. Furthermore, on average, we only have one P to
// look at in each iteration, because all other Ps are already in the queue.
var frontier orderEventList
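
A minimal sketch of the min-heap alternative described above, with eventTimes standing in for orderEventList and plain ints standing in for events:

package main

import (
	"container/heap"
	"fmt"
)

// eventTimes is a stand-in for orderEventList: a min-heap keyed by
// timestamp, so the earliest eligible event is always at the root and
// each event's ordering cost is paid once, on Push.
type eventTimes []int

func (h eventTimes) Len() int           { return len(h) }
func (h eventTimes) Less(i, j int) bool { return h[i] < h[j] }
func (h eventTimes) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *eventTimes) Push(x any) { *h = append(*h, x.(int)) }
func (h *eventTimes) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &eventTimes{42, 7, 19}
	heap.Init(h)
	heap.Push(h, 3)
	fmt.Println(heap.Pop(h)) // 3, the earliest timestamp
}
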
// Tests a G being created from within a syscall.
//
-// Specifically, it tests a scenerio wherein a C
+// Specifically, it tests a scenario wherein a C
// thread is calling into Go, creating a goroutine in
// a syscall (in the tracer's model). The system is free
// to reuse thread IDs, so first a thread ID is used to
// Tests a G being created from within a syscall.
//
-// Specifically, it tests a scenerio wherein a C
+// Specifically, it tests a scenario wherein a C
// thread is calling into Go, creating a goroutine in
// a syscall (in the tracer's model). Because the actual
// m can be reused, it's possible for that m to have never
// Tests syscall P stealing.
//
-// Specifically, it tests a scenerio wherein, without a
+// Specifically, it tests a scenario wherein, without a
// P sequence number of GoSyscallBegin, the syscall that
// a ProcSteal applies to is ambiguous. This only happens in
// practice when the events aren't already properly ordered
}
func TestRem32(t *testing.T) {
- // Sanity check: for non-oveflowing dividends, the result is the
+ // Sanity check: for non-overflowing dividends, the result is the
// same as the rem returned by Div32
hi, lo, y := uint32(510510), uint32(9699690), uint32(510510+1) // ensure hi < y
for i := 0; i < 1000; i++ {
}
func TestRem64(t *testing.T) {
- // Sanity check: for non-oveflowing dividends, the result is the
+ // Sanity check: for non-overflowing dividends, the result is the
// same as the rem returned by Div64
hi, lo, y := uint64(510510), uint64(9699690), uint64(510510+1) // ensure hi < y
for i := 0; i < 1000; i++ {
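
A minimal sketch of the relationship the sanity check relies on, using the same hi, lo, y values as the test:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Because hi < y, the quotient fits in 64 bits and Div64 does not
	// overflow, so its remainder must match Rem64.
	hi, lo, y := uint64(510510), uint64(9699690), uint64(510510+1)
	_, rem := bits.Div64(hi, lo, y)
	fmt.Println(rem == bits.Rem64(hi, lo, y)) // true
}
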
// always rounded up 8 bytes.
for _, n := range []int{8, 16, 32, 64, 128} {
b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
- // Initialize a new byte slice with pseduo-random data.
+ // Initialize a new byte slice with pseudo-random data.
bits := make([]byte, n)
rand.Read(bits)
return b
}
-// stk returns the slice in b holding the stack. The caller can asssume that the
+// stk returns the slice in b holding the stack. The caller can assume that the
// backing array is immutable.
func (b *bucket) stk() []uintptr {
stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
// spend shows up as a 10% chance of one sample (for an expected value of
// 0.1 samples), and so that "two and six tenths" periods of CPU spend show
// up as a 60% chance of 3 samples and a 40% chance of 2 samples (for an
- // expected value of 2.6). Set the initial delay to a value in the unifom
+ // expected value of 2.6). Set the initial delay to a value in the uniform
// random distribution between 0 and the desired period. And because "0"
// means "disable timer", add 1 so the half-open interval [0,period) turns
// into (0,period].
argLen int // The number of arguments to use for the syscall
expected int // The expected number of allocations
}{
- // For less than or equal to 16 arguments, we expect 1 alloction:
+ // For less than or equal to 16 arguments, we expect 1 allocation:
// - makeValue new(ref)
{0, 1},
{2, 1},
{15, 1},
{16, 1},
- // For greater than 16 arguments, we expect 3 alloction:
+ // For greater than 16 arguments, we expect 3 allocations:
// - makeValue: new(ref)
// - makeArgSlices: argVals = make([]Value, size)
// - makeArgSlices: argRefs = make([]ref, size)
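
A minimal sketch of how per-call allocation counts like these can be measured, using testing.AllocsPerRun with a hypothetical sink and workload rather than the js package itself:

package main

import (
	"fmt"
	"testing"
)

var sink []any // package-level sink so the slice below escapes to the heap

func main() {
	// One backing-array allocation per call, analogous to the single
	// makeValue allocation expected for 16 or fewer arguments.
	allocs := testing.AllocsPerRun(100, func() {
		sink = make([]any, 16)
	})
	fmt.Println(allocs) // 1
}
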