* runtime: add SetFinalizer
* time: Sleep through interruptions (thanks Chris Wedgwood)
add RFC822 formats
- experimental implemenation of Ticker using two goroutines for all tickers
+ experimental implementation of Ticker using two goroutines for all tickers
* xml: allow underscores in XML element names (thanks Michael Hoisie)
allow any scalar type in xml.Unmarshal
</pre>
The 4.8.0 version of GCC shipped in March, 2013 and includes a nearly-Go 1.1 version of <code>gccgo</code>.
Its library is a little behind the release, but the biggest difference is that method values are not implemented.
Sometime around July 2013, we expect 4.8.2 of GCC to ship with a <code>gccgo</code>
-providing a complete Go 1.1 implementaiton.
+providing a complete Go 1.1 implementation.
</p>
<h3 id="gc_flag">Command-line flag parsing</h3>
}
// Note: There are two changes in the expression handling here
-// compared to the old yacc/C implemenatations. Neither has
+// compared to the old yacc/C implementations. Neither has
// much practical consequence because the expressions we
// see in assembly code are simple, but for the record:
//
inlgen++
body := inlsubstlist(fn.Func.Inl)
- body = list(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesnt have return
+ body = list(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesn't have return
body = list(body, Nod(OLABEL, inlretlabel, nil))
typechecklist(body, Etop)
case OASOP:
// Special: rewrite l op= r into l = l op r.
- // This simplies quite a few operations;
+ // This simplifies quite a few operations;
// most important is that it lets us separate
// out map read from map write when l is
// a map index expression.
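
The rewrite described in that comment is observable at the source level: an op= assignment on a map index behaves exactly like its expanded form, one map read followed by one map write. A minimal sketch in ordinary user code (not compiler internals):

package main

import "fmt"

func main() {
	m := map[string]int{"hits": 1}
	// The compiler handles the op= form as if it were written
	// m["hits"] = m["hits"] + 1: a map read, then a map write.
	m["hits"] += 1
	fmt.Println(m["hits"]) // 2
}
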
// If the result had its address taken, it is being tracked
// by the avarinit code, which does not use uevar.
// If we added it to uevar too, we'd not see any kill
- // and decide that the varible was live entry, which it is not.
+ // and decide that the variable was live entry, which it is not.
// So only use uevar in the non-addrtaken case.
// The p->to.type == thearch.D_NONE limits the bvset to
// non-tail-call return instructions; see note above
return l
}
-// Builds a type respresenting a Bucket structure for
+// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// a package that imports the first one and does use *T pointers.
// The second module will end up defining type data for *T and a
// type.*T symbol pointing at it. It's important that calling
- // .PtrTo() on the refect.Type for T returns this type data and
+ // .PtrTo() on the reflect.Type for T returns this type data and
// not some synthesized object, so we need reflect to be able to
// find it!
if !Ctxt.Flag_dynlink {
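
The user-visible property that comment is preserving can be checked from ordinary code: asking reflect for the pointer type of T must yield the same type descriptor that a real *T value carries. A small sketch, assuming nothing beyond the standard reflect package:

package main

import (
	"fmt"
	"reflect"
)

type T struct{ X int }

func main() {
	t := reflect.TypeOf(T{})
	pt := reflect.PtrTo(t)
	fmt.Println(pt)                         // *main.T
	fmt.Println(pt == reflect.TypeOf(&T{})) // true: the same type data, not a synthesized copy
}
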
}
evconst(n)
if n.Op != OLITERAL {
- return // we dont check variables
+ return // we don't check variables
}
var h uint32
apackagesSeen[a.p] = true
if a.p.fake && a.p.external {
// external _tests, if present must come before
- // internal _tests. Store these on a seperate list
+ // internal _tests. Store these on a separate list
// and place them at the head after this loop.
xfiles = append(xfiles, a.target)
} else if a.p.fake {
break
} else {
// if a load/store instruction takes more than 1 word to implement, then
- // we need to seperate the instruction into two:
+ // we need to separate the instruction into two:
// 1. explicitly load the address into R11.
// 2. load/store from R11.
// This won't handle .W/.P, so we should reject such code.
var zeros = []byte{0, 0, 0, 0}
-// pad pads the code sequenc with pops.
+// pad pads the code sequence with pops.
func pad(enc []byte) []byte {
if len(enc) < 4 {
enc = append(enc[:len(enc):len(enc)], zeros[:4-len(enc)]...)
0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f, 0x5f,
}
-// pad pads the code sequenc with pops.
+// pad pads the code sequence with pops.
func pad(enc []byte) []byte {
return append(enc[:len(enc):len(enc)], pops...)
}
ld.Adddynsym(ld.Ctxt, targ)
rel := ld.Linklookup(ld.Ctxt, ".rel", 0)
ld.Addaddrplus(ld.Ctxt, rel, s, int64(r.Off))
- ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynmic reloc
+ ld.Adduint32(ld.Ctxt, rel, ld.ELF32_R_INFO(uint32(targ.Dynid), ld.R_ARM_GLOB_DAT)) // we need a nil + A dynamic reloc
r.Type = obj.R_CONST // write r->add during relocsym
r.Sym = nil
return
case "arm64":
t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
}
- // TODO(jsing): Renable once openbsd/arm has external linking support.
+ // TODO(jsing): Reenable once openbsd/arm has external linking support.
if runtime.GOOS == "openbsd" && runtime.GOARCH == "arm" {
t.Skip("skipping on openbsd/arm, no support for external linking, issue 10619")
}
pp.Location = append(pp.Location, x)
return decodeMessage(b, x)
},
- // repeasted Function function = 5
+ // repeated Function function = 5
func(b *buffer, m message) error {
x := new(Function)
pp := m.(*Profile)
svgClose = regexp.MustCompile(`</svg>`)
)
-// Massage enhances the SVG output from DOT to provide bettern
+// Massage enhances the SVG output from DOT to provide better
// panning inside a web browser. It uses the SVGPan library, which is
// included directly.
func Massage(in bytes.Buffer) string {
//go:generate yacc -o expr.go -p "expr" expr.y
// Expr is a simple expression evaluator that serves as a working example of
-// how to use Go's yacc implemenation.
+// how to use Go's yacc implementation.
package main
CRTValues []CRTValue
}
-// CRTValue contains the precomputed chinese remainder theorem values.
+// CRTValue contains the precomputed Chinese remainder theorem values.
type CRTValue struct {
Exp *big.Int // D mod (prime-1).
Coeff *big.Int // R·Coeff ≡ 1 mod Prime.
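
For a concrete look at these fields: CRTValues is only populated for multi-prime keys, with one entry per prime beyond the first two. A small sketch using the standard crypto/rsa API:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	// Three primes, so Precomputed.CRTValues gets one entry for the third prime.
	key, err := rsa.GenerateMultiPrimeKey(rand.Reader, 3, 2048)
	if err != nil {
		panic(err)
	}
	key.Precompute()
	for _, v := range key.Precomputed.CRTValues {
		// Exp = D mod (prime-1); Coeff satisfies R·Coeff ≡ 1 mod prime,
		// where R is the product of the primes that precede this one.
		fmt.Println(v.Exp.BitLen(), v.Coeff.BitLen(), v.R.BitLen())
	}
}
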
if !ok {
return nil, errors.New("tls: certificate private key does not implement crypto.Decrypter")
}
- // Perform contant time RSA PKCS#1 v1.5 decryption
+ // Perform constant time RSA PKCS#1 v1.5 decryption
preMasterSecret, err := priv.Decrypt(config.rand(), ciphertext, &rsa.PKCS1v15DecryptOptions{SessionKeyLen: 48})
if err != nil {
return nil, err
// into the "line" section.
ClassLinePtr
- // ClassLocListPtr repersents values that are an int64 offset
+ // ClassLocListPtr represents values that are an int64 offset
// into the "loclist" section.
ClassLocListPtr
// CSV with quoted empty strings strictly less useful.
// Not quoting the empty string also makes this package match the behavior
// of Microsoft Excel and Google Drive.
-// For Postgres, quote the data termating string `\.`.
+// For Postgres, quote the data terminating string `\.`.
func (w *Writer) fieldNeedsQuotes(field string) bool {
if field == "" {
return false
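
The effect of these rules is easy to see with the encoding/csv writer: an empty field is emitted without quotes, while the Postgres end-of-data marker gets quoted. A minimal usage sketch; the expected output line is inferred from the rules quoted above:

package main

import (
	"encoding/csv"
	"log"
	"os"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	if err := w.Write([]string{"", `\.`, "plain"}); err != nil {
		log.Fatal(err)
	}
	w.Flush()
	// Expected: ,"\.",plain
	// The empty field is left unquoted (matching Excel/Google Drive);
	// `\.` is quoted so Postgres does not read it as end-of-data.
}
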
g.evStart = ev
p.g = ev.G
if g.evCreate != nil {
- // +1 because symblizer expects return pc.
+ // +1 because symbolizer expects return pc.
ev.Stk = []*Frame{&Frame{PC: g.evCreate.Args[1] + 1}}
g.evCreate = nil
}
EvHeapAlloc = 33 // memstats.heap_alloc change [timestamp, heap_alloc]
EvNextGC = 34 // memstats.next_gc change [timestamp, next_gc]
EvTimerGoroutine = 35 // denotes timer goroutine [timer goroutine id]
- EvFutileWakeup = 36 // denotes that the revious wakeup of this goroutine was futile [timestamp]
+ EvFutileWakeup = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
EvCount = 37
)
{net: "udp6", addr: "[::1]:0"},
- // TODO(mikioh,bradfitz): renable once 10730 is fixed
+ // TODO(mikioh,bradfitz): reenable once 10730 is fixed
// {net: "ip4:icmp", addr: "127.0.0.1"},
{net: "unixgram", addr: "@gotest3/net"},
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
pathSuffix := r.RequestURI[1:]
if r.RequestURI == "/nosetcookie" {
- return // dont set cookies for this path
+ return // don't set cookies for this path
}
SetCookie(w, &Cookie{Name: "name" + pathSuffix, Value: "val" + pathSuffix})
if r.RequestURI == "/" {
// Do a bunch of traffic from different goroutines. Send to activityc
// after each request completes, regardless of whether it failed.
- // If these are too high, OS X exhausts its emphemeral ports
+ // If these are too high, OS X exhausts its ephemeral ports
// and hangs waiting for them to transition TCP states. That's
// not what we want to test. TODO(bradfitz): use an io.Pipe
// dialer for this test instead?
return nil
}
-// Conenct wraps syscall.Connect.
+// Connect wraps syscall.Connect.
func (sw *Switch) Connect(s syscall.Handle, sa syscall.Sockaddr) (err error) {
so := sw.sockso(s)
if so == nil {
return nil
}
-// ConenctEx wraps syscall.ConnectEx.
+// ConnectEx wraps syscall.ConnectEx.
func (sw *Switch) ConnectEx(s syscall.Handle, sa syscall.Sockaddr, b *byte, n uint32, nwr *uint32, o *syscall.Overlapped) (err error) {
so := sw.sockso(s)
if so == nil {
// following applications:
//
// - An endpoint holder that opens a passive stream
- // connenction, known as a stream listener
+ // connection, known as a stream listener
//
// - An endpoint holder that opens a destination-unspecific
// datagram connection, known as a datagram listener
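
In terms of the public net API, the two kinds of listeners named above correspond roughly to net.Listen and net.ListenPacket; a brief sketch (addresses are placeholders):

package main

import (
	"fmt"
	"net"
)

func main() {
	// A passive stream connection: a stream listener.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// A destination-unspecific datagram connection: a datagram listener.
	pc, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer pc.Close()

	fmt.Println(ln.Addr(), pc.LocalAddr())
}
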
// Sending a literal '%' in an HTTP request's Path
req := &http.Request{
Method: "GET",
- Host: "example.com", // takes precendence over URL.Host
+ Host: "example.com", // takes precedence over URL.Host
URL: &url.URL{
Host: "ignored",
Scheme: "https",
},
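
The precedence noted in that test is documented net/http behavior: when Request.Host is set, it is what ends up in the Host header, regardless of URL.Host. A small sketch (the host names are placeholders):

package main

import (
	"net/http"
	"net/http/httputil"
	"os"
)

func main() {
	req, err := http.NewRequest("GET", "https://ignored.example/path", nil)
	if err != nil {
		panic(err)
	}
	req.Host = "example.com" // takes precedence over URL.Host in the Host header

	dump, err := httputil.DumpRequestOut(req, false)
	if err != nil {
		panic(err)
	}
	os.Stdout.Write(dump) // the dumped headers include "Host: example.com"
}
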
// tests commands, like `a.exe`, with c.Dir set
{
- // should not find a.exe in p, becasue LookPath(`a.exe`) will fail
+ // should not find a.exe in p, because LookPath(`a.exe`) will fail
files: []string{`p\a.exe`},
dir: `p`,
arg0: `a.exe`,
stack := new(uint64)
global = stack // force heap allocation
- // Need to keep additional referenfces to nodes, the stack is not all that type-safe.
+ // Need to keep additional references to nodes, the stack is not all that type-safe.
var nodes []*MyNode
// Check the stack is initially empty.
// Callers should call heapBitsBulkBarrier immediately after
// calling memmove(p, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
-// betwen the memmove and the execution of the barriers.
+// between the memmove and the execution of the barriers.
//
// The heap bitmap is not maintained for allocations containing
// no pointers at all; any caller of heapBitsBulkBarrier must first
fractionalMarkTime int64
// idleMarkTime is the nanoseconds spent in idle marking
- // during this cycle. This is udpated atomically throughout
+ // during this cycle. This is updated atomically throughout
// the cycle.
idleMarkTime int64
// stopped ensuring that any object encountered has their normal
// mark bit set. To do this we use an orthogonal bit
// pattern to indicate the object is marked. The following pattern
-// uses the upper two bits in the object's bounday nibble.
+// uses the upper two bits in the object's boundary nibble.
// 01: scalar not marked
// 10: pointer not marked
// 11: pointer marked
// getpartialorempty tries to return a partially empty
// and if none are available returns an empty one.
-// entry is used to provide a brief histoy of ownership
+// entry is used to provide a brief history of ownership
// using entry + xxx00000 to
// indicating that two line numbers in the call chain.
//go:nowritebarrier
// putpartial puts empty buffers on the work.empty queue,
// full buffers on the work.full queue and
// others on the work.partial queue.
-// entry is used to provide a brief histoy of ownership
+// entry is used to provide a brief history of ownership
// using entry + xxx00000 to
// indicating that two call chain line numbers.
//go:nowritebarrier
cachealloc fixalloc // allocator for mcache*
specialfinalizeralloc fixalloc // allocator for specialfinalizer*
specialprofilealloc fixalloc // allocator for specialprofile*
- speciallock mutex // lock for sepcial record allocators.
+ speciallock mutex // lock for special record allocators.
}
var mheap_ mheap
// in a lock-free way by all operations.
// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
// that will blow up when GC starts moving objects.
- lock mutex // protectes the following fields
+ lock mutex // protects the following fields
fd uintptr
closing bool
seq uintptr // protects from stale timers and ready notifications
// Beside calling runtime·netpollopen, the networking code paths
// will call runtime·netpollarm each time goroutines are interested
// in doing network I/O. Because now we know what kind of I/O we
-// are interested in (reading/writting), we can call port_associate
+// are interested in (reading/writing), we can call port_associate
// passing the correct type of event set (POLLIN/POLLOUT). As we made
// sure to have already associated the file descriptor with the port,
// when we now call port_associate, we will unblock the main poller
// Mark defer as started, but keep on list, so that traceback
// can find and update the defer's argument frame if stack growth
- // or a garbage collection hapens before reflectcall starts executing d.fn.
+ // or a garbage collection happens before reflectcall starts executing d.fn.
d.started = true
// Record the panic that is running the defer.
var exitTicks int64
if trace.enabled {
- // Wait till traceGoSysBlock event is emited.
+ // Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
osyield()
ok = exitsyscallfast_pidle()
if ok && trace.enabled {
if oldp != nil {
- // Wait till traceGoSysBlock event is emited.
+ // Wait till traceGoSysBlock event is emitted.
// This ensures consistency of the trace (the goroutine is started after it is blocked).
for oldp.syscalltick == _g_.m.syscalltick {
osyield()
traceProcStop(p)
}
}
- // move all runable goroutines to the global queue
+ // move all runnable goroutines to the global queue
for p.runqhead != p.runqtail {
// pop from tail of local queue
p.runqtail--
// there are two variables, access to one
// of them is synchronized, access to the other
// is not.
-// Select must (unconditionaly) choose the non-synchronized variable
+// Select must (unconditionally) choose the non-synchronized variable
// thus causing exactly one race.
// Currently this test doesn't look like it accomplishes
// this goal.
// the following encode that the GC is scanning the stack and what to do when it is done
_Gscan = 0x1000 // atomicstatus&~Gscan = the non-scan state,
// _Gscanidle = _Gscan + _Gidle, // Not used. Gidle only used with newly malloced gs
- _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning complets make Grunnable (it is already on run queue)
+ _Gscanrunnable = _Gscan + _Grunnable // 0x1001 When scanning completes make Grunnable (it is already on run queue)
_Gscanrunning = _Gscan + _Grunning // 0x1002 Used to tell preemption newstack routine to scan preempted stack.
_Gscansyscall = _Gscan + _Gsyscall // 0x1003 When scanning completes make it Gsyscall
_Gscanwaiting = _Gscan + _Gwaiting // 0x1004 When scanning completes make it Gwaiting
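
The encoding above is a plain bit combination: the scan flag is added onto the base state, and masking it off recovers the state to restore when scanning completes. A standalone sketch with local constants that mirror the values quoted above (these are not the runtime's own declarations):

package main

import "fmt"

const (
	_Gidle = iota // 0
	_Grunnable    // 1
	_Grunning     // 2
	_Gsyscall     // 3
	_Gwaiting     // 4

	_Gscan = 0x1000
)

func main() {
	status := _Gscan + _Grunnable // 0x1001: runnable, currently being scanned
	fmt.Printf("scanning:   %v\n", status&_Gscan != 0)
	fmt.Printf("base state: %#x\n", status&^_Gscan) // back to _Grunnable when the scan completes
}
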
preempt bool // preemption signal, duplicates stackguard0 = stackpreempt
paniconfault bool // panic (instead of crash) on unexpected fault address
preemptscan bool // preempted g does scan for gc
- gcworkdone bool // debug: cleared at begining of gc work phase cycle, set by gcphasework, tested at end of cycle
+ gcworkdone bool // debug: cleared at beginning of gc work phase cycle, set by gcphasework, tested at end of cycle
gcscanvalid bool // false at start of gc cycle, true if G has not run since last scan
throwsplit bool // must not split stack
raceignore int8 // ignore race detection events
memmove(p, old.array, lenmem)
memclr(add(p, lenmem), capmem-lenmem)
} else {
- // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan unitialized memory.
+ // Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
p = newarray(et, uintptr(newcap))
if !writeBarrierEnabled {
memmove(p, old.array, lenmem)
// license that can be found in the LICENSE file.
// A copy of Sqrt tests from the math package to test the
-// purely integer arithmetic implementaiton in sqrt.go.
+// purely integer arithmetic implementation in sqrt.go.
package runtime_test
// Enable exceptions again.
NACL_SYSCALL(SYS_exception_clear_flag)
- // NaCl has abidcated its traditional operating system responsibility
+ // NaCl has abdicated its traditional operating system responsibility
// and declined to implement 'sigreturn'. Instead the only way to return
// to the execution of our program is to restore the registers ourselves.
// Unfortunately, that is impossible to do with strict fidelity, because
return d, nil
}
-// MustLoadDLL is like LoadDLL but panics if load operation failes.
+// MustLoadDLL is like LoadDLL but panics if load operation fails.
func MustLoadDLL(name string) *DLL {
d, e := LoadDLL(name)
if e != nil {
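
Typical usage of the Must variant is at package initialization, where a missing DLL is unrecoverable anyway. A Windows-only sketch; the DLL and procedure names are just common examples:

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	k32 := syscall.MustLoadDLL("kernel32.dll") // panics if kernel32.dll cannot be loaded
	proc := k32.MustFindProc("GetTickCount64") // panics if the procedure is missing
	r, _, _ := proc.Call()
	fmt.Println("ticks:", r)
}
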
return r
}
-// ParseFile adds adition file path to a source set src.
+// ParseFile adds an additional file path to a source set src.
func (src *Source) ParseFile(path string) error {
file, err := os.Open(path)
if err != nil {
// parseLinkLayerAddr parses b as a datalink socket address in
// conventional BSD kernel form.
func parseLinkLayerAddr(b []byte) (*SockaddrDatalink, int, error) {
- // The encoding looks like the follwoing:
+ // The encoding looks like the following:
// +----------------------------+
// | Type (1 octet) |
// +----------------------------+