z.init = false
}
-
/*
* arithmetic
*/
return int64(C.mpz_get_si(&z.i[0]))
}
-
// Neg sets z = -x and returns z.
func (z *Int) Neg(x *Int) *Int {
x.doinit()
return z
}
-
/*
* functions without a clear receiver
*/
"path/filepath"
)
-
var fset = token.NewFileSet()
var start = flag.String("start", "Start", "name of start production")
-
func usage() {
fmt.Fprintf(os.Stderr, "usage: ebnflint [flags] [filename]\n")
flag.PrintDefaults()
os.Exit(1)
}
-
// Markers around EBNF sections in .html files
var (
open = []byte(`<pre class="ebnf">`)
close = []byte(`</pre>`)
)
-
func report(err os.Error) {
scanner.PrintError(os.Stderr, err)
os.Exit(1)
}
-
func extractEBNF(src []byte) []byte {
var buf bytes.Buffer
return buf.Bytes()
}
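-
// Illustrative sketch of the extraction step (hypothetical helper, not the
// original body): collect every region between the open and close markers
// declared above. The real extractEBNF may also preserve source offsets for
// error reporting; this version assumes well-formed, non-nested markers and
// the bytes import already used in this file.
func extractEBNFSketch(src []byte) []byte {
	var buf bytes.Buffer
	for {
		i := bytes.Index(src, open)
		if i < 0 {
			break // no more EBNF sections
		}
		src = src[i+len(open):]
		j := bytes.Index(src, close)
		if j < 0 {
			buf.Write(src) // unterminated section: take the rest
			break
		}
		buf.Write(src[0:j])
		buf.WriteByte('\n')
		src = src[j+len(close):]
	}
	return buf.Bytes()
}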
-
func main() {
flag.Parse()
"xml"
)
-
// Handler for /doc/codewalk/ and below.
func codewalk(w http.ResponseWriter, r *http.Request) {
relpath := r.URL.Path[len("/doc/codewalk/"):]
servePage(w, "Codewalk: "+cw.Title, "", "", b)
}
-
// A Codewalk represents a single codewalk read from an XML file.
type Codewalk struct {
Title string `xml:"attr"`
Step []*Codestep
}
-
// A Codestep is a single step in a codewalk.
type Codestep struct {
// Filled in from XML
Data []byte
}
-
// String method for printing in template.
// Formats file address nicely.
func (st *Codestep) String() string {
return s
}
-
// loadCodewalk reads a codewalk from the named XML file.
func loadCodewalk(filename string) (*Codewalk, os.Error) {
f, err := fs.Open(filename)
return cw, nil
}
-
// codewalkDir serves the codewalk directory listing.
// It scans the directory for subdirectories or files named *.xml
// and prepares a table.
servePage(w, "Codewalks", "", "", b)
}
-
// codewalkFileprint serves requests with ?fileprint=f&lo=lo&hi=hi.
// The filename f has already been retrieved and is passed as an argument.
// Lo and hi are the numbers of the first and last line to highlight
io.WriteString(w, "</pre>")
}
-
// addrToByte evaluates the given address starting at offset start in data.
// It returns the lo and hi byte offset of the matched region within data.
// See http://plan9.bell-labs.com/sys/doc/sam/sam.html Table II
return lo, hi, nil
}
-
// addrNumber applies the given dir, n, and charOffset to the address lo, hi.
// dir is '+' or '-', n is the count, and charOffset is true if the syntax
// used was #n. Applying +n (or +#n) means to advance n lines
return 0, 0, os.NewError("address out of range")
}
-
// addrRegexp searches for pattern in the given direction starting at lo, hi.
// The direction dir is '+' (search forward from hi) or '-' (search backward from lo).
// Backward searches are unimplemented.
return m[0], m[1], nil
}
-
// lineToByte returns the byte index of the first byte of line n.
// Line numbers begin at 1.
func lineToByte(data []byte, n int) int {
return len(data)
}
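-
// Sketch of the two line/byte conversions described above (hypothetical names;
// the real godoc versions may differ in bounds handling). Line numbers are
// 1-based, matching the comments on lineToByte and byteToLine.
func lineToByteSketch(data []byte, n int) int {
	for i, b := range data {
		if n <= 1 {
			return i
		}
		if b == '\n' {
			n--
		}
	}
	return len(data)
}

func byteToLineSketch(data []byte, i int) int {
	l := 1
	for _, b := range data[0:i] {
		if b == '\n' {
			l++
		}
	}
	return l
}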
-
// byteToLine returns the number of the line containing the byte at index i.
func byteToLine(data []byte, i int) int {
l := 1
"unicode"
)
-
type Directory struct {
Depth int
Path string // includes Name
Dirs []*Directory // subdirectories
}
-
func isGoFile(fi FileInfo) bool {
name := fi.Name()
return fi.IsRegular() &&
filepath.Ext(name) == ".go"
}
-
func isPkgFile(fi FileInfo) bool {
return isGoFile(fi) &&
!strings.HasSuffix(fi.Name(), "_test.go") // ignore test files
}
-
func isPkgDir(fi FileInfo) bool {
name := fi.Name()
return fi.IsDirectory() && len(name) > 0 &&
name[0] != '_' && name[0] != '.' // ignore _files and .files
}
-
func firstSentence(s string) string {
i := -1 // index+1 of first terminator (punctuation ending a sentence)
j := -1 // index+1 of first terminator followed by white space
return s[0:j]
}
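-
// Minimal sketch of the heuristic suggested by the i/j comments above: prefer
// the first sentence terminator that is followed by white space, fall back to
// the first terminator, and finally to the whole string. The real godoc
// implementation may handle further special cases.
func firstSentenceSketch(s string) string {
	i := -1 // index+1 of first terminator
	j := -1 // index+1 of first terminator followed by white space
	for k := 0; k < len(s) && j < 0; k++ {
		switch s[k] {
		case '.', '!', '?':
			if i < 0 {
				i = k + 1
			}
			if k+1 < len(s) && (s[k+1] == ' ' || s[k+1] == '\n' || s[k+1] == '\t') {
				j = k + 1
			}
		}
	}
	if j < 0 {
		j = i // no terminator followed by white space
	}
	if j < 0 {
		j = len(s) // no terminator at all: use the whole string
	}
	return s[0:j]
}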
-
type treeBuilder struct {
pathFilter func(string) bool
maxDepth int
}
-
func (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {
if b.pathFilter != nil && !b.pathFilter(path) {
return nil
return &Directory{depth, path, name, synopsis, dirs}
}
-
// newDirectory creates a new package directory tree with at most maxDepth
// levels, anchored at root. The result tree is pruned such that it only
// contains directories that contain package files or that contain
return b.newDirTree(token.NewFileSet(), root, d.Name(), 0)
}
-
func (dir *Directory) writeLeafs(buf *bytes.Buffer) {
if dir != nil {
if len(dir.Dirs) == 0 {
}
}
-
func (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {
if dir != nil {
if !skipRoot {
}
}
-
func (dir *Directory) iter(skipRoot bool) <-chan *Directory {
c := make(chan *Directory)
go func() {
return c
}
-
func (dir *Directory) lookupLocal(name string) *Directory {
for _, d := range dir.Dirs {
if d.Name == name {
return nil
}
-
// lookup looks for the *Directory for a given path, relative to dir.
func (dir *Directory) lookup(path string) *Directory {
d := strings.Split(dir.Path, string(filepath.Separator))
return dir
}
-
// DirEntry describes a directory entry. The Depth and Height values
// are useful for presenting an entry in an indented fashion.
//
Synopsis string
}
-
type DirList struct {
MaxHeight int // directory tree height, > 0
List []DirEntry
}
-
// listing creates a (linear) directory listing from a directory tree.
// If skipRoot is set, the root directory itself is excluded from the list.
//
"os"
)
-
// The FileInfo interface provides access to file information.
type FileInfo interface {
Name() string
IsDirectory() bool
}
-
// The FileSystem interface specifies the methods godoc is using
// to access the file system for which it serves documentation.
type FileSystem interface {
ReadFile(path string) ([]byte, os.Error)
}
-
// ----------------------------------------------------------------------------
// OS-specific FileSystem implementation
var OS FileSystem = osFS{}
-
// osFI is the OS-specific implementation of FileInfo.
type osFI struct {
*os.FileInfo
}
-
func (fi osFI) Name() string {
return fi.FileInfo.Name
}
-
func (fi osFI) Size() int64 {
if fi.IsDirectory() {
return 0
return fi.FileInfo.Size
}
-
// osFS is the OS-specific implementation of FileSystem
type osFS struct{}
return f, nil
}
-
func (osFS) Lstat(path string) (FileInfo, os.Error) {
fi, err := os.Lstat(path)
return osFI{fi}, err
}
-
func (osFS) Stat(path string) (FileInfo, os.Error) {
fi, err := os.Stat(path)
return osFI{fi}, err
}
-
func (osFS) ReadDir(path string) ([]FileInfo, os.Error) {
l0, err := ioutil.ReadDir(path) // l0 is sorted
if err != nil {
return l1, nil
}
-
func (osFS) ReadFile(path string) ([]byte, os.Error) {
return ioutil.ReadFile(path)
}
"template"
)
-
// ----------------------------------------------------------------------------
// Implementation of FormatSelections
//
type Selection func() []int
-
// A LinkWriter writes some start or end "tag" to w for the text offset offs.
// It is called by FormatSelections at the start or end of each link segment.
//
type LinkWriter func(w io.Writer, offs int, start bool)
-
// A SegmentWriter formats a text according to selections and writes it to w.
// The selections parameter is a bit set indicating which selections provided
// to FormatSelections overlap with the text segment: If the n'th bit is set
//
type SegmentWriter func(w io.Writer, text []byte, selections int)
-
// FormatSelections takes a text and writes it to w using link and segment
// writers lw and sw as follows: lw is invoked for consecutive segment starts
// and ends as specified through the links selection, and sw is invoked for
flush()
}
-
// A merger merges a slice of Selections and produces a sequence of
// consecutive segment change events through repeated next() calls.
//
segments [][]int // segments[i] is the next segment of selections[i]
}
-
const infinity int = 2e9
func newMerger(selections []Selection) *merger {
return &merger{selections, segments}
}
-
// next returns the next segment change: index specifies the Selection
// to which the segment belongs, offs is the segment start or end offset
// as determined by the start value. If there are no more segment changes,
return
}
-
// ----------------------------------------------------------------------------
// Implementation of FormatText
}
}
-
// commentSelection returns the sequence of consecutive comments
// in the Go src text as a Selection.
//
}
}
-
// makeSelection is a helper function to make a Selection from a slice of pairs.
func makeSelection(matches [][]int) Selection {
return func() (seg []int) {
}
}
-
// regexpSelection computes the Selection for the regular expression expr in text.
func regexpSelection(text []byte, expr string) Selection {
var matches [][]int
return makeSelection(matches)
}
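-
// Sketch of how the matches above could be obtained (assuming the regexp
// package imported by this file): FindAllIndex yields [lo, hi) byte pairs,
// which is exactly the segment format that makeSelection consumes. Compile is
// used instead of MustCompile so an illegal expression simply produces an
// empty selection.
func regexpSelectionSketch(text []byte, expr string) Selection {
	var matches [][]int
	if rx, err := regexp.Compile(expr); err == nil {
		matches = rx.FindAllIndex(text, -1)
	}
	return makeSelection(matches)
}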
-
var selRx = regexp.MustCompile(`^([0-9]+):([0-9]+)`)
// rangeSelection computes the Selection for a text range described
return nil
}
-
// Span tags for all the possible selection combinations that may
// be generated by FormatText. Selections are indicated by a bitset,
// and the value of the bitset specifies the tag to be used.
var endTag = []byte(`</span>`)
-
func selectionTag(w io.Writer, text []byte, selections int) {
if selections < len(startTags) {
if tag := startTags[selections]; len(tag) > 0 {
template.HTMLEscape(w, text)
}
-
// FormatText HTML-escapes text and writes it to w.
// Consecutive text segments are wrapped in HTML spans (with tags as
// defined by startTags and endTag) as follows:
"time"
)
-
// ----------------------------------------------------------------------------
// Globals
RWValue
}
-
func (dt *delayTime) backoff(max int) {
dt.mutex.Lock()
v := dt.value.(int) * 2
dt.mutex.Unlock()
}
-
var (
verbose = flag.Bool("v", false, "verbose mode")
pkgHandler httpHandler
)
-
func initHandlers() {
paths := filepath.SplitList(*pkgPath)
for _, t := range build.Path {
pkgHandler = httpHandler{"/pkg/", filepath.Join(*goroot, "src", "pkg"), true}
}
-
func registerPublicHandlers(mux *http.ServeMux) {
mux.Handle(cmdHandler.pattern, &cmdHandler)
mux.Handle(pkgHandler.pattern, &pkgHandler)
mux.HandleFunc("/", serveFile)
}
-
func initFSTree() {
fsTree.set(newDirectory(filepath.Join(*goroot, *testDir), nil, -1))
invalidateIndex()
}
-
// ----------------------------------------------------------------------------
// Directory filters
return strings.HasPrefix(q, p) && (len(q) <= n || q[n] == '/')
}
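-
// Example of the prefix test above, assuming n == len(p): for p = "/a/b" the
// condition accepts q = "/a/b" and q = "/a/b/c" but rejects q = "/a/bc",
// because the byte following the prefix must be a path separator.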
-
func setPathFilter(list []string) {
if len(list) == 0 {
pathFilter.set(nil)
})
}
-
func getPathFilter() func(string) bool {
f, _ := pathFilter.get()
if f != nil {
return nil
}
-
// readDirList reads a file containing a newline-separated list
// of directory paths and returns the list of paths.
func readDirList(filename string) ([]string, os.Error) {
return list[0:i], err
}
-
// updateMappedDirs computes the directory tree for
// each user-defined file system mapping. If a filter
// is provided, it is used to filter directories.
}
}
-
func updateFilterFile() {
updateMappedDirs(nil) // no filter for accuracy
}
}
-
func initDirTrees() {
// setup initial path filter
if *filter != "" {
}
}
-
// ----------------------------------------------------------------------------
// Path mapping
return abspath
}
-
func relativeURL(abspath string) string {
relpath := fsMap.ToRelative(abspath)
if relpath == "" {
return relpath
}
-
// ----------------------------------------------------------------------------
// Tab conversion
indent int // valid if state == indenting
}
-
func (p *tconv) writeIndent() (err os.Error) {
i := p.indent
for i >= len(spaces) {
return
}
-
func (p *tconv) Write(data []byte) (n int, err os.Error) {
if len(data) == 0 {
return
return
}
-
// ----------------------------------------------------------------------------
// Templates
(&printer.Config{mode, *tabwidth}).Fprint(&tconv{output: w}, fset, x)
}
-
// Write anything to w.
func writeAny(w io.Writer, fset *token.FileSet, x interface{}) {
switch v := x.(type) {
}
}
-
// Write anything html-escaped to w.
func writeAnyHTML(w io.Writer, fset *token.FileSet, x interface{}) {
switch v := x.(type) {
}
}
-
func fileset(x []interface{}) *token.FileSet {
if len(x) > 1 {
if fset, ok := x[1].(*token.FileSet); ok {
return nil
}
-
// Template formatter for "html-esc" format.
func htmlEscFmt(w io.Writer, format string, x ...interface{}) {
writeAnyHTML(w, fileset(x), x[0])
}
-
// Template formatter for "html-comment" format.
func htmlCommentFmt(w io.Writer, format string, x ...interface{}) {
var buf bytes.Buffer
doc.ToHTML(w, buf.Bytes(), nil) // does html-escaping
}
-
// Template formatter for "" (default) format.
func textFmt(w io.Writer, format string, x ...interface{}) {
writeAny(w, fileset(x), x[0])
}
-
// Template formatter for "urlquery-esc" format.
func urlQueryEscFmt(w io.Writer, format string, x ...interface{}) {
var buf bytes.Buffer
template.HTMLEscape(w, []byte(http.URLEscape(string(buf.Bytes()))))
}
-
// Template formatter for the various "url-xxx" formats excluding url-esc.
func urlFmt(w io.Writer, format string, x ...interface{}) {
var path string
}
}
-
// The strings in infoKinds must be properly html-escaped.
var infoKinds = [nKinds]string{
PackageClause: "package clause",
Use: "use",
}
-
// Template formatter for "infoKind" format.
func infoKindFmt(w io.Writer, format string, x ...interface{}) {
fmt.Fprintf(w, infoKinds[x[0].(SpotKind)]) // infoKind entries are html-escaped
}
-
// Template formatter for "infoLine" format.
func infoLineFmt(w io.Writer, format string, x ...interface{}) {
info := x[0].(SpotInfo)
fmt.Fprintf(w, "%d", line)
}
-
// Template formatter for "infoSnippet" format.
func infoSnippetFmt(w io.Writer, format string, x ...interface{}) {
info := x[0].(SpotInfo)
w.Write(text)
}
-
// Template formatter for "padding" format.
func paddingFmt(w io.Writer, format string, x ...interface{}) {
for i := x[0].(int); i > 0; i-- {
}
}
-
// Template formatter for "time" format.
func timeFmt(w io.Writer, format string, x ...interface{}) {
template.HTMLEscape(w, []byte(time.SecondsToLocalTime(x[0].(int64)/1e9).String()))
}
-
// Template formatter for "dir/" format.
func dirslashFmt(w io.Writer, format string, x ...interface{}) {
if x[0].(FileInfo).IsDirectory() {
}
}
-
// Template formatter for "localname" format.
func localnameFmt(w io.Writer, format string, x ...interface{}) {
_, localname := filepath.Split(x[0].(string))
template.HTMLEscape(w, []byte(localname))
}
-
// Template formatter for "numlines" format.
func numlinesFmt(w io.Writer, format string, x ...interface{}) {
list := x[0].([]int)
fmt.Fprintf(w, "%d", len(list))
}
-
var fmap = template.FormatterMap{
"": textFmt,
"html-esc": htmlEscFmt,
"numlines": numlinesFmt,
}
-
func readTemplate(name string) *template.Template {
path := filepath.Join(*goroot, "lib", "godoc", name)
if *templateDir != "" {
return t
}
-
var (
codewalkHTML,
codewalkdirHTML,
searchText = readTemplate("search.txt")
}
-
// ----------------------------------------------------------------------------
// Generic HTML wrapper
}
}
-
func serveText(w http.ResponseWriter, text []byte) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Write(text)
}
-
// ----------------------------------------------------------------------------
// Files
firstCommentRx = regexp.MustCompile(`<!--([^\-]*)-->`)
)
-
func extractString(src []byte, rx *regexp.Regexp) (s string) {
m := rx.FindSubmatch(src)
if m != nil {
return
}
-
func serveHTMLDoc(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
// get HTML body contents
src, err := fs.ReadFile(abspath)
servePage(w, title, subtitle, "", src)
}
-
func applyTemplate(t *template.Template, name string, data interface{}) []byte {
var buf bytes.Buffer
if err := t.Execute(&buf, data); err != nil {
return buf.Bytes()
}
-
func redirect(w http.ResponseWriter, r *http.Request) (redirected bool) {
if canonical := path.Clean(r.URL.Path) + "/"; r.URL.Path != canonical {
http.Redirect(w, r, canonical, http.StatusMovedPermanently)
servePage(w, title+" "+relpath, "", "", buf.Bytes())
}
-
func serveDirectory(w http.ResponseWriter, r *http.Request, abspath, relpath string) {
if redirect(w, r) {
return
servePage(w, "Directory "+relpath, "", "", contents)
}
-
func serveFile(w http.ResponseWriter, r *http.Request) {
relpath := r.URL.Path[1:] // serveFile URL paths start with '/'
abspath := absolutePath(relpath, *goroot)
fileServer.ServeHTTP(w, r)
}
-
// ----------------------------------------------------------------------------
// Packages
genDoc // generate documentation
)
-
type PageInfo struct {
Dirname string // directory containing the package
PList []string // list of package names found
Err os.Error // directory read error or nil
}
-
func (info *PageInfo) IsEmpty() bool {
return info.Err != nil || info.PAst == nil && info.PDoc == nil && info.Dirs == nil
}
-
type httpHandler struct {
pattern string // url pattern; e.g. "/pkg/"
fsRoot string // file system root to which the pattern is mapped
isPkg bool // true if this handler serves real package documentation (as opposed to command documentation)
}
-
// getPageInfo returns the PageInfo for a package directory abspath. If the
// parameter genAST is set, an AST containing only the package exports is
// computed (PageInfo.PAst), otherwise package documentation (PageInfo.PDoc)
return PageInfo{abspath, plist, fset, past, pdoc, dir.listing(true), timestamp, h.isPkg, nil}
}
-
func (h *httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if redirect(w, r) {
return
servePage(w, title, subtitle, "", contents)
}
-
// ----------------------------------------------------------------------------
// Search
Complete bool // true if all textual occurrences of Query are reported
}
-
func lookup(query string) (result SearchResult) {
result.Query = query
return
}
-
func search(w http.ResponseWriter, r *http.Request) {
query := strings.TrimSpace(r.FormValue("q"))
result := lookup(query)
servePage(w, title, "", query, contents)
}
-
// ----------------------------------------------------------------------------
// Indexer
fsModified.set(nil)
}
-
// indexUpToDate() returns true if the search index is not older
// than any of the file systems under godoc's observation.
//
return fsTime <= siTime
}
-
// feedDirnames feeds the directory names of all directories
// under the file system given by root to channel c.
//
}
}
-
// fsDirnames() returns a channel sending all directory names
// of all the file systems under godoc's observation.
//
return c
}
-
func indexer() {
for {
if !indexUpToDate() {
"strings"
)
-
// ----------------------------------------------------------------------------
// RunList
func (h *RunList) Less(i, j int) bool { return h.less(h.At(i), h.At(j)) }
-
func (h *RunList) sort(less func(x, y interface{}) bool) {
h.less = less
sort.Sort(h)
}
-
// Compress entries that compare equal under the sort criterion
// (specified by less) into "runs".
func (h *RunList) reduce(less func(x, y interface{}) bool, newRun func(h *RunList, i, j int) interface{}) *RunList {
return &hh
}
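-
// Sketch of the run-compression idea on a plain slice (hypothetical helper,
// independent of the vector-based RunList above): in a sorted list, a run
// [i, j) is a maximal range of elements that compare equal under less, and
// newRun collapses each run into a single value.
func reduceSketch(list []interface{}, less func(x, y interface{}) bool,
	newRun func(list []interface{}, i, j int) interface{}) []interface{} {
	var out []interface{}
	i := 0
	for j := 0; j <= len(list); j++ {
		// a run ends at the end of the list or when a strictly larger element appears
		if j == len(list) || (j > i && less(list[i], list[j])) {
			if j > i {
				out = append(out, newRun(list, i, j))
			}
			i = j
		}
	}
	return out
}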
-
// ----------------------------------------------------------------------------
// SpotInfo
nKinds
)
-
func init() {
// sanity check: if nKinds is too large, the SpotInfo
// accessor functions may need to be updated
}
}
-
// makeSpotInfo makes a SpotInfo.
func makeSpotInfo(kind SpotKind, lori int, isIndex bool) SpotInfo {
// encode lori: bits [4..32)
return x
}
-
func (x SpotInfo) Kind() SpotKind { return SpotKind(x >> 1 & 7) }
func (x SpotInfo) Lori() int { return int(x >> 4) }
func (x SpotInfo) IsIndex() bool { return x&1 != 0 }
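-
// Sketch of a packing consistent with the accessors above: bit 0 holds the
// isIndex flag, bits [1, 4) the SpotKind, and bits [4, 32) the lori value.
// The real makeSpotInfo additionally range-checks its arguments.
func packSpotInfoSketch(kind SpotKind, lori int, isIndex bool) SpotInfo {
	x := SpotInfo(lori)<<4 | SpotInfo(kind)<<1
	if isIndex {
		x |= 1
	}
	return x
}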
-
// ----------------------------------------------------------------------------
// KindRun
Infos []SpotInfo
}
-
// KindRuns are sorted by line number or index. Since the isIndex bit
// is always the same for all infos in one list we can compare lori's.
func (f *KindRun) Len() int { return len(f.Infos) }
func (f *KindRun) Less(i, j int) bool { return f.Infos[i].Lori() < f.Infos[j].Lori() }
func (f *KindRun) Swap(i, j int) { f.Infos[i], f.Infos[j] = f.Infos[j], f.Infos[i] }
-
// FileRun contents are sorted by Kind for the reduction into KindRuns.
func lessKind(x, y interface{}) bool { return x.(SpotInfo).Kind() < y.(SpotInfo).Kind() }
-
// newKindRun allocates a new KindRun from the SpotInfo run [i, j) in h.
func newKindRun(h *RunList, i, j int) interface{} {
kind := h.At(i).(SpotInfo).Kind()
return run
}
-
// ----------------------------------------------------------------------------
// FileRun
return p.Name < q.Name || p.Name == q.Name && p.Path < q.Path
}
-
// A File describes a Go file.
type File struct {
Path string // complete file name
Pak Pak // the package to which the file belongs
}
-
// A Spot describes a single occurrence of a word.
type Spot struct {
File *File
Info SpotInfo
}
-
// A FileRun is a list of KindRuns belonging to the same file.
type FileRun struct {
File *File
Groups []*KindRun
}
-
// Spots are sorted by path for the reduction into FileRuns.
func lessSpot(x, y interface{}) bool { return x.(Spot).File.Path < y.(Spot).File.Path }
-
// newFileRun allocates a new FileRun from the Spot run [i, j) in h.
func newFileRun(h0 *RunList, i, j int) interface{} {
file := h0.At(i).(Spot).File
return &FileRun{file, groups}
}
-
// ----------------------------------------------------------------------------
// PakRun
func (p *PakRun) Less(i, j int) bool { return p.Files[i].File.Path < p.Files[j].File.Path }
func (p *PakRun) Swap(i, j int) { p.Files[i], p.Files[j] = p.Files[j], p.Files[i] }
-
// FileRuns are sorted by package for the reduction into PakRuns.
func lessFileRun(x, y interface{}) bool {
return x.(*FileRun).File.Pak.less(&y.(*FileRun).File.Pak)
}
-
// newPakRun allocates a new PakRun from the *FileRun run [i, j) in h.
func newPakRun(h *RunList, i, j int) interface{} {
pak := h.At(i).(*FileRun).File.Pak
return run
}
-
// ----------------------------------------------------------------------------
// HitList
// A HitList describes a list of PakRuns.
type HitList []*PakRun
-
// PakRuns are sorted by package.
func lessPakRun(x, y interface{}) bool { return x.(*PakRun).Pak.less(&y.(*PakRun).Pak) }
-
func reduce(h0 *RunList) HitList {
// reduce a list of Spots into a list of FileRuns
h1 := h0.reduce(lessSpot, newFileRun)
return h
}
-
func (h HitList) filter(pakname string) HitList {
// determine number of matching packages (most of the time just one)
n := 0
return hh
}
-
// ----------------------------------------------------------------------------
// AltWords
alt string // alternative spelling
}
-
// An AltWords describes a list of alternative spellings for a
// canonical (all lowercase) spelling of a word.
type AltWords struct {
Alts []string // alternative spelling for the same word
}
-
// wordPairs are sorted by their canonical spelling.
func lessWordPair(x, y interface{}) bool { return x.(*wordPair).canon < y.(*wordPair).canon }
-
// newAltWords allocates a new AltWords from the *wordPair run [i, j) in h.
func newAltWords(h *RunList, i, j int) interface{} {
canon := h.At(i).(*wordPair).canon
return &AltWords{canon, alts}
}
-
func (a *AltWords) filter(s string) *AltWords {
if len(a.Alts) == 1 && a.Alts[0] == s {
// there are no different alternatives
return &AltWords{a.Canon, alts[0:i]}
}
-
// ----------------------------------------------------------------------------
// Indexer
const includeMainPackages = true
const includeTestFiles = true
-
type IndexResult struct {
Decls RunList // package-level declarations (with snippets)
Others RunList // all other occurrences
}
-
// Statistics provides statistics information for an index.
type Statistics struct {
Bytes int // total size of indexed source files
Spots int // number of identifier occurrences
}
-
// An Indexer maintains the data structures and provides the machinery
// for indexing .go files under a file tree. It implements the path.Visitor
// interface for walking file trees, and the ast.Visitor interface for
stats Statistics
}
-
func (x *Indexer) addSnippet(s *Snippet) int {
index := x.snippets.Len()
x.snippets.Push(s)
return index
}
-
func (x *Indexer) visitComment(c *ast.CommentGroup) {
if c != nil {
ast.Walk(x, c)
}
}
-
func (x *Indexer) visitIdent(kind SpotKind, id *ast.Ident) {
if id != nil {
lists, found := x.words[id.Name]
}
}
-
func (x *Indexer) visitSpec(spec ast.Spec, isVarDecl bool) {
switch n := spec.(type) {
case *ast.ImportSpec:
}
}
-
func (x *Indexer) Visit(node ast.Node) ast.Visitor {
// TODO(gri): methods in interface types are categorized as VarDecl
switch n := node.(type) {
return nil
}
-
func pkgName(filename string) string {
// use a new file set each time in order to not pollute the indexer's
// file set (which must stay in sync with the concatenated source code)
return file.Name.Name
}
-
// addFile adds a file to the index if possible and returns the file set file
// and the file's AST if it was successfully parsed as a Go file. If addFile
// failed (that is, if the file was not added), it returns file == nil.
return
}
-
// Design note: Using an explicit white list of permitted files for indexing
// makes sure that the important files are included and massively reduces the
// number of files to index. The advantage over a blacklist is that unexpected
"README": true,
}
-
// isWhitelisted returns true if a file is on the list
// of "permitted" files for indexing. The filename must
// be the directory-local name of the file.
return whitelisted[key]
}
-
func (x *Indexer) visitFile(dirname string, f FileInfo, fulltextIndex bool) {
if !f.IsRegular() {
return
x.stats.Lines += file.LineCount()
}
-
// ----------------------------------------------------------------------------
// Index
Others HitList // all other occurrences
}
-
type Index struct {
fset *token.FileSet // file set used during indexing; nil if no textindex
suffixes *suffixarray.Index // suffixes for concatenated sources; nil if no textindex
stats Statistics
}
-
func canonical(w string) string { return strings.ToLower(w) }
-
// NewIndex creates a new index for the .go files
// in the directories given by dirnames.
//
return &Index{x.fset, suffixes, words, alts, snippets, x.stats}
}
-
// Stats() returns index statistics.
func (x *Index) Stats() Statistics {
return x.stats
}
-
func (x *Index) LookupWord(w string) (match *LookupResult, alt *AltWords) {
match = x.words[w]
alt = x.alts[canonical(w)]
return
}
-
func isIdentifier(s string) bool {
var S scanner.Scanner
fset := token.NewFileSet()
return false
}
-
// For a given query, which is either a single identifier or a qualified
// identifier, Lookup returns a LookupResult, and a list of alternative
// spellings, if any. If the query syntax is wrong, an error is reported.
return
}
-
func (x *Index) Snippet(i int) *Snippet {
// handle illegal snippet indices gracefully
if 0 <= i && i < len(x.snippets) {
return nil
}
-
type positionList []struct {
filename string
line int
func (list positionList) Less(i, j int) bool { return list[i].filename < list[j].filename }
func (list positionList) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
-
// unique returns the list sorted and with duplicate entries removed
func unique(list []int) []int {
sort.Ints(list)
return list[0:i]
}
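-
// Sketch of the deduplication step implied by the sort.Ints call and the
// list[0:i] result above: the usual in-place compaction over a sorted slice.
func uniqueSketch(list []int) []int {
	sort.Ints(list)
	i := 0
	for _, x := range list {
		if i == 0 || list[i-1] != x {
			list[i] = x
			i++
		}
	}
	return list[0:i]
}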
-
// A FileLines value specifies a file and line numbers within that file.
type FileLines struct {
Filename string
Lines []int
}
-
// LookupRegexp returns the number of matches and the matches where a regular
// expression r is found in the full text index. At most n matches are
// returned (thus found <= n).
query = flag.Bool("q", false, "arguments are considered search queries")
)
-
func serveError(w http.ResponseWriter, r *http.Request, relpath string, err os.Error) {
contents := applyTemplate(errorHTML, "errorHTML", err) // err may contain an absolute path!
w.WriteHeader(http.StatusNotFound)
servePage(w, "File "+relpath, "", "", contents)
}
-
func exec(rw http.ResponseWriter, args []string) (status int) {
r, w, err := os.Pipe()
if err != nil {
return
}
-
func dosync(w http.ResponseWriter, r *http.Request) {
args := []string{"/bin/sh", "-c", *syncCmd}
switch exec(w, args) {
}
}
-
func usage() {
fmt.Fprintf(os.Stderr,
"usage: godoc package [name ...]\n"+
os.Exit(2)
}
-
func loggingHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
log.Printf("%s\t%s", req.RemoteAddr, req.URL)
})
}
-
func remoteSearch(query string) (res *http.Response, err os.Error) {
search := "/search?f=text&q=" + http.URLEscape(query)
return
}
-
// Does s look like a regular expression?
func isRegexp(s string) bool {
return strings.IndexAny(s, ".(|)*+?^$[]") >= 0
}
-
// Make a regular expression of the form
// names[0]|names[1]|...names[len(names)-1].
// Returns nil if the regular expression is illegal.
return
}
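-
// Sketch of the alternation described above (hypothetical helper; the real
// code may anchor or quote individual names differently). Compile rather than
// MustCompile is used so an illegal expression yields nil, as documented.
func makeRxSketch(names []string) *regexp.Regexp {
	rx, err := regexp.Compile(strings.Join(names, "|"))
	if err != nil {
		return nil // illegal regular expression
	}
	return rx
}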
-
func main() {
flag.Usage = usage
flag.Parse()
"strings"
)
-
// A Mapping object maps relative paths (e.g. from URLs)
// to absolute paths (of the file system) and vice versa.
//
prefixes []string // lazily computed from list
}
-
type mapping struct {
prefix, path string
value *RWValue
}
-
// Init initializes the Mapping from a list of paths.
// Empty paths are ignored; relative paths are assumed to be relative to
// the current working directory and converted to absolute paths.
m.list = list
}
-
// IsEmpty returns true if there are no mappings specified.
func (m *Mapping) IsEmpty() bool { return len(m.list) == 0 }
-
// PrefixList returns a list of all prefixes, with duplicates removed.
// For instance, for the mapping:
//
return m.prefixes
}
-
// Fprint prints the mapping.
func (m *Mapping) Fprint(w io.Writer) {
for _, e := range m.list {
}
}
-
func splitFirst(path string) (head, tail string) {
i := strings.Index(path, string(filepath.Separator))
if i > 0 {
return "", path
}
-
// ToAbsolute maps a slash-separated relative path to an absolute filesystem
// path using the Mapping specified by the receiver. If the path cannot
// be mapped, the empty string is returned.
return "" // no match
}
-
// ToRelative maps an absolute filesystem path to a relative slash-separated
// path using the Mapping specified by the receiver. If the path cannot
// be mapped, the empty string is returned.
return "" // no match
}
-
// Iterate calls f for each path and RWValue in the mapping (in unspecified order)
// until f returns false.
//
return
}
-
func parseDir(fset *token.FileSet, path string, filter func(FileInfo) bool) (map[string]*ast.Package, os.Error) {
list, err := fs.ReadDir(path)
if err != nil {
"fmt"
)
-
type Snippet struct {
Line int
Text []byte
}
-
func newSnippet(fset *token.FileSet, decl ast.Decl, id *ast.Ident) *Snippet {
// TODO should use the original source text instead of pretty-printing the node
var buf1 bytes.Buffer
return &Snippet{fset.Position(id.Pos()).Line, buf2.Bytes()}
}
-
func findSpec(list []ast.Spec, id *ast.Ident) ast.Spec {
for _, spec := range list {
switch s := spec.(type) {
return nil
}
-
func genSnippet(fset *token.FileSet, d *ast.GenDecl, id *ast.Ident) *Snippet {
s := findSpec(d.Specs, id)
if s == nil {
return newSnippet(fset, dd, id)
}
-
func funcSnippet(fset *token.FileSet, d *ast.FuncDecl, id *ast.Ident) *Snippet {
if d.Name != id {
return nil // declaration doesn't contain id - exit gracefully
return newSnippet(fset, dd, id)
}
-
// NewSnippet creates a text snippet from a declaration decl containing an
// identifier id. Parts of the declaration not containing the identifier
// may be removed for a more compact snippet.
"io"
)
-
type ebnfParser struct {
out io.Writer // parser output
src []byte // parser source
lit string // token literal
}
-
func (p *ebnfParser) flush() {
offs := p.file.Offset(p.pos)
p.out.Write(p.src[p.prev:offs])
p.prev = offs
}
-
func (p *ebnfParser) next() {
if p.pos.IsValid() {
p.flush()
}
}
-
func (p *ebnfParser) Error(pos token.Position, msg string) {
fmt.Fprintf(p.out, `<span class="alert">error: %s</span>`, msg)
}
-
func (p *ebnfParser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
p.Error(p.file.Position(pos), msg)
}
-
func (p *ebnfParser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
return pos
}
-
func (p *ebnfParser) parseIdentifier(def bool) {
name := p.lit
p.expect(token.IDENT)
p.prev += len(name) // skip identifier when calling flush
}
-
func (p *ebnfParser) parseTerm() bool {
switch p.tok {
case token.IDENT:
return true
}
-
func (p *ebnfParser) parseSequence() {
if !p.parseTerm() {
p.errorExpected(p.pos, "term")
}
}
-
func (p *ebnfParser) parseExpression() {
for {
p.parseSequence()
}
}
-
func (p *ebnfParser) parseProduction() {
p.parseIdentifier(true)
p.expect(token.ASSIGN)
p.expect(token.PERIOD)
}
-
func (p *ebnfParser) parse(fset *token.FileSet, out io.Writer, src []byte) {
// initialize ebnfParser
p.out = out
p.flush()
}
-
// Markers around EBNF sections
var (
openTag = []byte(`<pre class="ebnf">`)
closeTag = []byte(`</pre>`)
)
-
func linkify(out io.Writer, src []byte) {
fset := token.NewFileSet()
for len(src) > 0 {
"utf8"
)
-
// An RWValue wraps a value and permits mutually exclusive
// access to it and records the time the value was last set.
//
timestamp int64 // time of last set(), in seconds since epoch
}
-
func (v *RWValue) set(value interface{}) {
v.mutex.Lock()
v.value = value
v.mutex.Unlock()
}
-
func (v *RWValue) get() (interface{}, int64) {
v.mutex.RLock()
defer v.mutex.RUnlock()
return v.value, v.timestamp
}
-
// TODO(gri) For now, using os.Getwd() is ok here since the functionality
// based on this code is not invoked for the appengine version,
// but this is fragile. Determine what the right thing to do is,
return list[0:i]
}
-
// writeFileAtomically writes data to a temporary file and then
// atomically renames that file to the file named by filename.
//
return os.Rename(f.Name(), filename)
}
-
// isText returns true if a significant prefix of s looks like correct UTF-8;
// that is, if it is likely that s is human-readable text.
//
return true
}
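-
// Sketch of the UTF-8 heuristic described above: decode a bounded prefix and
// reject the data on an encoding error or on a non-printing control character
// (tab, newline and form feed excepted). The prefix size and the exact set of
// accepted control characters are illustrative.
func isTextSketch(s []byte) bool {
	const max = 1024 // at most the first 1024 bytes are considered
	if len(s) > max {
		s = s[0:max]
	}
	for i, c := range string(s) {
		if i+utf8.UTFMax > len(s) {
			break // last rune may be truncated - ignore it
		}
		if c == 0xFFFD || c < ' ' && c != '\n' && c != '\t' && c != '\f' {
			return false // decoding error or control character
		}
	}
	return true
}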
-
// TODO(gri): Should have a mapping from extension to handler, eventually.
// textExt[x] is true if the extension x indicates a text file, and false otherwise.
".js": false, // must be served raw
}
-
// isTextFile returns true if the file has a known extension indicating
// a text file, or if a significant chunk of the specified file looks like
// correct UTF-8; that is, if it is likely that the file contains human-
"strings"
)
-
// zipFI is the zip-file based implementation of FileInfo
type zipFI struct {
name string // directory-local name
file *zip.File // nil for a directory
}
-
func (fi zipFI) Name() string {
return fi.name
}
-
func (fi zipFI) Size() int64 {
if fi.file != nil {
return int64(fi.file.UncompressedSize)
return 0 // directory
}
-
func (fi zipFI) IsDirectory() bool {
return fi.file == nil
}
-
func (fi zipFI) IsRegular() bool {
return fi.file != nil
}
-
// zipFS is the zip-file based implementation of FileSystem
type zipFS struct {
*zip.ReadCloser
list zipList
}
-
func (fs *zipFS) Close() os.Error {
fs.list = nil
return fs.ReadCloser.Close()
}
-
func zipPath(name string) string {
if !path.IsAbs(name) {
panic(fmt.Sprintf("stat: not an absolute path: %s", name))
return name[1:] // strip '/'
}
-
func (fs *zipFS) stat(abspath string) (int, zipFI, os.Error) {
i := fs.list.lookup(abspath)
if i < 0 {
return i, zipFI{name, file}, nil
}
-
func (fs *zipFS) Open(abspath string) (io.ReadCloser, os.Error) {
_, fi, err := fs.stat(zipPath(abspath))
if err != nil {
return fi.file.Open()
}
-
func (fs *zipFS) Lstat(abspath string) (FileInfo, os.Error) {
_, fi, err := fs.stat(zipPath(abspath))
return fi, err
}
-
func (fs *zipFS) Stat(abspath string) (FileInfo, os.Error) {
_, fi, err := fs.stat(zipPath(abspath))
return fi, err
}
-
func (fs *zipFS) ReadDir(abspath string) ([]FileInfo, os.Error) {
path := zipPath(abspath)
i, fi, err := fs.stat(path)
return list, nil
}
-
func (fs *zipFS) ReadFile(abspath string) ([]byte, os.Error) {
rc, err := fs.Open(abspath)
if err != nil {
return ioutil.ReadAll(rc)
}
-
func NewZipFS(rc *zip.ReadCloser) FileSystem {
list := make(zipList, len(rc.File))
copy(list, rc.File) // sort a copy of rc.File
return &zipFS{rc, list}
}
-
type zipList []*zip.File
// zipList implements sort.Interface
func (z zipList) Less(i, j int) bool { return z[i].Name < z[j].Name }
func (z zipList) Swap(i, j int) { z[i], z[j] = z[j], z[i] }
-
// lookup returns the first index in the zipList
// of a path equal to name or beginning with name/.
func (z zipList) lookup(name string) int {
return fixed
}
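-
// Sketch of the lookup described above, assuming the zipList is kept sorted by
// Name (NewZipFS sorts its copy of rc.File): binary-search for the first entry
// >= name and verify that it is name itself or lies below name/.
func (z zipList) lookupSketch(name string) int {
	i := sort.Search(len(z), func(i int) bool { return z[i].Name >= name })
	if i < len(z) {
		if e := z[i].Name; e == name || strings.HasPrefix(e, name+"/") {
			return i
		}
	}
	return -1 // not found
}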
-
// callExpr returns the call expression if x is a call to pkg.name with one argument;
// otherwise it returns nil.
func callExpr(x interface{}, pkg, name string) *ast.CallExpr {
import "os"
-
func f() {
var _ os.Error
_ = os.SIGHUP
import "os"
-
func f() {
var _ os.Error
_ = os.SIGHUP
})
}
-
func sorthelpers(f *ast.File) (fixed bool) {
if !imports(f, "sort") {
return
})
}
-
func sortslice(f *ast.File) (fixed bool) {
if !imports(f, "sort") {
return
// An Enumerated is represented as a plain int.
type Enumerated int
-
// FLAG
// A Flag accepts any data and is set to true if present.
"runtime"
)
-
// ----------------------------------------------------------------------------
// Format representation
//
type Formatter func(state *State, value interface{}, ruleName string) bool
-
// A FormatterMap is a set of custom formatters.
// It maps a rule name to a formatter function.
//
type FormatterMap map[string]Formatter
-
// A parsed format expression is built from the following nodes.
//
type (
}
)
-
// A Format is the result of parsing a format specification.
// The format may be applied repeatedly to format values.
//
type Format map[string]expr
-
// ----------------------------------------------------------------------------
// Formatting
Copy() Environment
}
-
// State represents the current formatting state.
// It is provided as argument to custom formatters.
//
separator expr // possibly nil
}
-
func newState(fmt Format, env Environment, errors chan os.Error) *State {
s := new(State)
s.fmt = fmt
return s
}
-
// Env returns the environment passed to Format.Apply.
func (s *State) Env() interface{} { return s.env }
-
// LinePos returns the position of the current line beginning
// in the state's output buffer. Line numbers start at 1.
//
func (s *State) LinePos() token.Position { return s.linePos }
-
// Pos returns the position of the next byte to be written to the
// output buffer. Line numbers start at 1.
//
return token.Position{Line: s.linePos.Line, Column: offs - s.linePos.Offset, Offset: offs}
}
-
// Write writes data to the output buffer, inserting the indentation
// string after each newline or form feed character. It cannot return an error.
//
return n + n3, nil
}
-
type checkpoint struct {
env Environment
hasOutput bool
linePos token.Position
}
-
func (s *State) save() checkpoint {
saved := checkpoint{nil, s.hasOutput, s.output.Len(), s.linePos}
if s.env != nil {
return saved
}
-
func (s *State) restore(m checkpoint) {
s.env = m.env
s.output.Truncate(m.outputLen)
}
-
func (s *State) error(msg string) {
s.errors <- os.NewError(msg)
runtime.Goexit()
}
-
// TODO At the moment, unnamed types are simply mapped to the default
// names below. For instance, all unnamed arrays are mapped to
// 'array' which is not really sufficient. Eventually one may want
return nil
}
-
// eval applies a format expression fexpr to a value. If the expression
// evaluates internally to a non-nil []byte, that slice is appended to
// the state's output buffer and eval returns true. Otherwise, eval
return false
}
-
// Eval formats each argument according to the format
// f and returns the resulting []byte and os.Error. If
// an error occurred, the []byte contains the partially
return s.output.Bytes(), err
}
-
// ----------------------------------------------------------------------------
// Convenience functions
return w.Write(data)
}
-
// Print formats each argument according to the format f
// and writes to standard output. The result is the total
// number of bytes written and an os.Error, if any.
return f.Fprint(os.Stdout, nil, args...)
}
-
// Sprint formats each argument according to the format f
// and returns the resulting string. If an error occurs
// during formatting, the result string contains the
"runtime"
)
-
// ----------------------------------------------------------------------------
// Format representation
//
type Formatter func(state *State, value interface{}, ruleName string) bool
-
// A FormatterMap is a set of custom formatters.
// It maps a rule name to a formatter function.
//
type FormatterMap map[string]Formatter
-
// A parsed format expression is built from the following nodes.
//
type (
}
)
-
// A Format is the result of parsing a format specification.
// The format may be applied repeatedly to format values.
//
type Format map[string]expr
-
// ----------------------------------------------------------------------------
// Formatting
Copy() Environment
}
-
// State represents the current formatting state.
// It is provided as argument to custom formatters.
//
separator expr // possibly nil
}
-
func newState(fmt Format, env Environment, errors chan os.Error) *State {
s := new(State)
s.fmt = fmt
return s
}
-
// Env returns the environment passed to Format.Apply.
func (s *State) Env() interface{} { return s.env }
-
// LinePos returns the position of the current line beginning
// in the state's output buffer. Line numbers start at 1.
//
func (s *State) LinePos() token.Position { return s.linePos }
-
// Pos returns the position of the next byte to be written to the
// output buffer. Line numbers start at 1.
//
return token.Position{Line: s.linePos.Line, Column: offs - s.linePos.Offset, Offset: offs}
}
-
// Write writes data to the output buffer, inserting the indentation
// string after each newline or form feed character. It cannot return an error.
//
return n + n3, nil
}
-
type checkpoint struct {
env Environment
hasOutput bool
linePos token.Position
}
-
func (s *State) save() checkpoint {
saved := checkpoint{nil, s.hasOutput, s.output.Len(), s.linePos}
if s.env != nil {
return saved
}
-
func (s *State) restore(m checkpoint) {
s.env = m.env
s.output.Truncate(m.outputLen)
}
-
func (s *State) error(msg string) {
s.errors <- os.NewError(msg)
runtime.Goexit()
}
-
// TODO At the moment, unnamed types are simply mapped to the default
// names below. For instance, all unnamed arrays are mapped to
// 'array' which is not really sufficient. Eventually one may want
return nil
}
-
// eval applies a format expression fexpr to a value. If the expression
// evaluates internally to a non-nil []byte, that slice is appended to
// the state's output buffer and eval returns true. Otherwise, eval
return false
}
-
// Eval formats each argument according to the format
// f and returns the resulting []byte and os.Error. If
// an error occurred, the []byte contains the partially
return s.output.Bytes(), err
}
-
// ----------------------------------------------------------------------------
// Convenience functions
return w.Write(data)
}
-
// Print formats each argument according to the format f
// and writes to standard output. The result is the total
// number of bytes written and an os.Error, if any.
return f.Fprint(os.Stdout, nil, args...)
}
-
// Sprint formats each argument according to the format f
// and returns the resulting string. If an error occurs
// during formatting, the result string contains the
UnmarshalJSON([]byte) os.Error
}
-
// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
return m
}
-
// literalInterface is like literal but returns an interface value.
func (d *decodeState) literalInterface() interface{} {
// All bytes inside literal return scanContinue op code.
Header() *dnsRR_Header
}
-
// Specific DNS RR formats for each query type.
type dnsRR_CNAME struct {
extra []dnsRR
}
-
func (dns *dnsMsg) Pack() (msg []byte, ok bool) {
var dh dnsHeader
return s
}
-
// Get the i'th arg of the struct value.
// If the arg itself is an interface, return a value for
// the thing inside the interface, not the interface itself.
return
}
-
var ssFree = newCache(func() interface{} { return new(ss) })
// Allocate a new ss struct or grab a cached one.
}
}
-
// token returns the next space-delimited string from the input. It
// skips white space. For Scanln, it stops at newlines. For Scan,
// newlines are treated as spaces.
"strings"
)
-
var (
// main operation modes
list = flag.Bool("l", false, "list files whose formatting differs from gofmt's")
cpuprofile = flag.String("cpuprofile", "", "write cpu profile to this file")
)
-
var (
fset = token.NewFileSet()
exitCode = 0
printerMode uint
)
-
func report(err os.Error) {
scanner.PrintError(os.Stderr, err)
exitCode = 2
}
-
func usage() {
fmt.Fprintf(os.Stderr, "usage: gofmt [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(2)
}
-
func initParserMode() {
parserMode = uint(0)
if *comments {
}
}
-
func initPrinterMode() {
printerMode = uint(0)
if *tabIndent {
}
}
-
func isGoFile(f *os.FileInfo) bool {
// ignore non-Go files
return f.IsRegular() && !strings.HasPrefix(f.Name, ".") && strings.HasSuffix(f.Name, ".go")
}
-
// If in == nil, the source is the contents of the file with the given filename.
func processFile(filename string, in io.Reader, out io.Writer) os.Error {
if in == nil {
return err
}
-
type fileVisitor chan os.Error
func (v fileVisitor) VisitDir(path string, f *os.FileInfo) bool {
return true
}
-
func (v fileVisitor) VisitFile(path string, f *os.FileInfo) {
if isGoFile(f) {
v <- nil // synchronize error handler
}
}
-
func walkDir(path string) {
v := make(fileVisitor)
go func() {
}
}
-
func main() {
// call gofmtMain in a separate function
// so that it can use defer and have them
os.Exit(exitCode)
}
-
func gofmtMain() {
flag.Usage = usage
flag.Parse()
}
}
-
func diff(b1, b2 []byte) (data []byte, err os.Error) {
f1, err := ioutil.TempFile("", "gofmt")
if err != nil {
"testing"
)
-
func runTest(t *testing.T, dirname, in, out, flags string) {
in = filepath.Join(dirname, in)
out = filepath.Join(dirname, out)
}
}
-
// TODO(gri) Add more test cases!
var tests = []struct {
dirname, in, out, flags string
{"testdata", "rewrite2.input", "rewrite2.golden", "-r=int->bool"},
}
-
func TestRewrite(t *testing.T) {
for _, test := range tests {
runTest(t, test.dirname, test.in, test.out, test.flags)
"utf8"
)
-
func initRewrite() {
if *rewriteRule == "" {
rewrite = nil // disable any previous rewrite
rewrite = func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) }
}
-
// parseExpr parses s as an expression.
// It might make sense to expand this to allow statement patterns,
// but there are problems with preserving formatting and also
return x
}
-
// Keep this function for debugging.
/*
func dump(msg string, val reflect.Value) {
}
*/
-
// rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file.
func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File {
m := make(map[string]reflect.Value)
return apply(f, reflect.ValueOf(p)).Interface().(*ast.File)
}
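-
// Example of a rewrite rule of the kind applied here, of the form described in
// gofmt's documentation:
//
//	gofmt -r 'a[b:len(a)] -> a[b:]'
//
// Single-character lowercase identifiers such as a and b serve as wildcards
// (see isWildcard below) and match arbitrary sub-expressions.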
-
// setValue is a wrapper for x.Set(y); it protects
// the caller from panics if x cannot be changed to y.
func setValue(x, y reflect.Value) {
x.Set(y)
}
-
// Values/types for special cases.
var (
objectPtrNil = reflect.ValueOf((*ast.Object)(nil))
scopePtrType = reflect.TypeOf((*ast.Scope)(nil))
)
-
// apply replaces each AST field x in val with f(x), returning val.
// To avoid extra conversions, f operates on the reflect.Value form.
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value {
return val
}
-
func isWildcard(s string) bool {
rune, size := utf8.DecodeRuneInString(s)
return size == len(s) && unicode.IsLower(rune)
}
-
// match returns true if pattern matches val,
// recording wildcard submatches in m.
// If m == nil, match checks whether pattern == val.
return p.Interface() == v.Interface()
}
-
// subst returns a copy of pattern with values from m substituted in place
// of wildcards and pos used as the position of tokens from the pattern.
// If m == nil, subst returns a copy of pattern and doesn't change the line
"reflect"
)
-
type simplifier struct{}
func (s *simplifier) Visit(node ast.Node) ast.Visitor {
return s
}
-
func simplify(node ast.Node) {
var s simplifier
ast.Walk(&s, node)
return
}
-
// Is this a standard package path? strings container/vector etc.
// Assume that if the first element has a dot, it's a domain name
// and is not the standard package path.
"strings"
)
-
var (
// main operation modes
pkgName = flag.String("p", "", "process only those files in package pkgName")
printAST = flag.Bool("ast", false, "print AST")
)
-
var exitCode = 0
-
func usage() {
fmt.Fprintf(os.Stderr, "usage: gotype [flags] [path ...]\n")
flag.PrintDefaults()
os.Exit(2)
}
-
func report(err os.Error) {
scanner.PrintError(os.Stderr, err)
exitCode = 2
}
-
// parse returns the AST for the Go source src.
// The filename is for error reporting only.
// The result is nil if there were errors or if
return file
}
-
func parseStdin(fset *token.FileSet) (files map[string]*ast.File) {
files = make(map[string]*ast.File)
src, err := ioutil.ReadAll(os.Stdin)
return
}
-
func parseFiles(fset *token.FileSet, filenames []string) (files map[string]*ast.File) {
files = make(map[string]*ast.File)
for _, filename := range filenames {
return
}
-
func isGoFilename(filename string) bool {
// ignore non-Go files
return !strings.HasPrefix(filename, ".") && strings.HasSuffix(filename, ".go")
}
-
func processDirectory(dirname string) {
f, err := os.Open(dirname)
if err != nil {
processFiles(filenames, false)
}
-
func processFiles(filenames []string, allFiles bool) {
i := 0
for _, filename := range filenames {
processPackage(fset, parseFiles(fset, filenames[0:i]))
}
-
func processPackage(fset *token.FileSet, files map[string]*ast.File) {
// make a package (resolve all identifiers)
pkg, err := ast.NewPackage(fset, files, types.GcImporter, types.Universe)
}
}
-
func main() {
flag.Usage = usage
flag.Parse()
"testing"
)
-
func runTest(t *testing.T, path, pkg string) {
exitCode = 0
*pkgName = pkg
}
}
-
var tests = []struct {
path string
pkg string
{filepath.Join(runtime.GOROOT(), "src/pkg/go/types"), "types"},
}
-
func Test(t *testing.T) {
for _, test := range tests {
runTest(t, test.path, test.pkg)
return
}
-
// checkPrint checks a call to an unformatted print routine such as Println.
// The skip argument records how many arguments to ignore; that is,
// call.Args[skip] is the first argument to be printed.
}
}
-
// make parent directory for name, if necessary
func makeParent(name string) {
parent, _ := filepath.Split(name)
}
}
-
// Undo log
type undo func() os.Error
}
}
-
// hgRoot returns the root directory of the repository.
func hgRoot() (string, os.Error) {
out, err := run([]string{"hg", "root"}, nil)
}
}
-
func TestIncrementalRead(t *testing.T) {
test := gnuTarTest
f, err := os.Open(test.file)
_M2 = _B2 - 1 // half digit mask
)
-
// ----------------------------------------------------------------------------
// Elementary operations on words
//
return
}
-
// z1<<_W + z0 = x-y-c, with c == 0 or 1
func subWW_g(x, y, c Word) (z1, z0 Word) {
yc := y + c
return
}
-
// z1<<_W + z0 = x*y
// Adapted from Warren, Hacker's Delight, p. 132.
func mulWW_g(x, y Word) (z1, z0 Word) {
return
}
-
// z1<<_W + z0 = x*y + c
func mulAddWWW_g(x, y, c Word) (z1, z0 Word) {
z1, zz0 := mulWW(x, y)
return
}
-
// Length of x in bits.
func bitLen(x Word) (n int) {
for ; x >= 0x100; x >>= 8 {
return
}
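-
// Sketch of the complete function implied by the byte-at-a-time loop above
// (hypothetical standalone version; the real code may finish the last byte
// with a lookup table instead of a second loop):
func bitLenSketch(x Word) (n int) {
	for ; x >= 0x100; x >>= 8 {
		n += 8
	}
	for ; x > 0; x >>= 1 {
		n++
	}
	return
}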
-
// log2 computes the integer binary logarithm of x.
// The result is the integer n for which 2^n <= x < 2^(n+1).
// If x == 0, the result is -1.
return bitLen(x) - 1
}
-
// Number of leading zeros in x.
func leadingZeros(x Word) uint {
return uint(_W - bitLen(x))
}
-
// q = (u1<<_W + u0 - r)/v
// Adapted from Warren, Hacker's Delight, p. 152.
func divWW_g(u1, u0, v Word) (q, r Word) {
return q1*_B2 + q0, (un21*_B2 + un0 - q0*v) >> s
}
-
func addVV_g(z, x, y []Word) (c Word) {
for i := range z {
c, z[i] = addWW_g(x[i], y[i], c)
return
}
-
func subVV_g(z, x, y []Word) (c Word) {
for i := range z {
c, z[i] = subWW_g(x[i], y[i], c)
return
}
-
func addVW_g(z, x []Word, y Word) (c Word) {
c = y
for i := range z {
return
}
-
func subVW_g(z, x []Word, y Word) (c Word) {
c = y
for i := range z {
return
}
-
func shlVU_g(z, x []Word, s uint) (c Word) {
if n := len(z); n > 0 {
ŝ := _W - s
return
}
-
func shrVU_g(z, x []Word, s uint) (c Word) {
if n := len(z); n > 0 {
ŝ := _W - s
return
}
-
func mulAddVWW_g(z, x []Word, y, r Word) (c Word) {
c = r
for i := range z {
return
}
-
func addMulVVW_g(z, x []Word, y Word) (c Word) {
for i := range z {
z1, z0 := mulAddWWW_g(x[i], y, z[i])
return
}
-
func divWVW_g(z []Word, xn Word, x []Word, y Word) (r Word) {
r = xn
for i := len(z) - 1; i >= 0; i-- {
import "testing"
-
type funWW func(x, y, c Word) (z1, z0 Word)
type argWW struct {
x, y, c, z1, z0 Word
{_M, _M, 1, 1, _M},
}
-
func testFunWW(t *testing.T, msg string, f funWW, a argWW) {
z1, z0 := f(a.x, a.y, a.c)
if z1 != a.z1 || z0 != a.z0 {
}
}
-
func TestFunWW(t *testing.T) {
for _, a := range sumWW {
arg := a
}
}
-
type funVV func(z, x, y []Word) (c Word)
type argVV struct {
z, x, y nat
{nat{0, 0, 0, 0}, nat{_M, 0, _M, 0}, nat{1, _M, 0, _M}, 1},
}
-
func testFunVV(t *testing.T, msg string, f funVV, a argVV) {
z := make(nat, len(a.z))
c := f(z, a.x, a.y)
}
}
-
func TestFunVV(t *testing.T) {
for _, a := range sumVV {
arg := a
}
}
-
type funVW func(z, x []Word, y Word) (c Word)
type argVW struct {
z, x nat
{nat{_M, _M, _M >> 20}, nat{_M, _M, _M}, 20, _M << (_W - 20) & _M},
}
-
func testFunVW(t *testing.T, msg string, f funVW, a argVW) {
z := make(nat, len(a.z))
c := f(z, a.x, a.y)
}
}
-
func makeFunVW(f func(z, x []Word, s uint) (c Word)) funVW {
return func(z, x []Word, s Word) (c Word) {
return f(z, x, uint(s))
}
}
-
func TestFunVW(t *testing.T) {
for _, a := range sumVW {
arg := a
}
}
-
type funVWW func(z, x []Word, y, r Word) (c Word)
type argVWW struct {
z, x nat
{nat{_M<<7&_M + 1<<6, _M, _M, _M}, nat{_M, _M, _M, _M}, 1 << 7, 1 << 6, _M >> (_W - 7)},
}
-
func testFunVWW(t *testing.T, msg string, f funVWW, a argVWW) {
z := make(nat, len(a.z))
c := f(z, a.x, a.y, a.r)
}
}
-
// TODO(gri) mulAddVWW and divWVW are symmetric operations but
// their signature is not symmetric. Try to unify.
}
}
-
func TestFunVWW(t *testing.T) {
for _, a := range prodVWW {
arg := a
}
}
-
var mulWWTests = []struct {
x, y Word
q, r Word
// 32 bit only: {0xc47dfa8c, 50911, 0x98a4, 0x998587f4},
}
-
func TestMulWW(t *testing.T) {
for i, test := range mulWWTests {
q, r := mulWW_g(test.x, test.y)
}
}
-
var mulAddWWWTests = []struct {
x, y, c Word
q, r Word
{_M, _M, _M, _M, 0},
}
-
func TestMulAddWWW(t *testing.T) {
for i, test := range mulAddWWWTests {
q, r := mulAddWWW_g(test.x, test.y, test.c)
"time"
)
-
var calibrate = flag.Bool("calibrate", false, "run calibration test")
-
// measure returns the time to run f once, averaged over N runs
func measure(f func()) int64 {
const N = 100
return (stop - start) / N
}
-
func computeThresholds() {
fmt.Printf("Multiplication times for varying Karatsuba thresholds\n")
fmt.Printf("(run repeatedly for good results)\n")
}
}
-
func TestCalibrate(t *testing.T) {
if *calibrate {
computeThresholds()
"testing"
)
-
type matrix struct {
n, m int
a []*Rat
}
-
func (a *matrix) at(i, j int) *Rat {
if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
panic("index out of range")
return a.a[i*a.m+j]
}
-
func (a *matrix) set(i, j int, x *Rat) {
if !(0 <= i && i < a.n && 0 <= j && j < a.m) {
panic("index out of range")
a.a[i*a.m+j] = x
}
-
func newMatrix(n, m int) *matrix {
if !(0 <= n && 0 <= m) {
panic("illegal matrix")
return a
}
-
func newUnit(n int) *matrix {
a := newMatrix(n, n)
for i := 0; i < n; i++ {
return a
}
-
func newHilbert(n int) *matrix {
a := newMatrix(n, n)
for i := 0; i < n; i++ {
return a
}
-
func newInverseHilbert(n int) *matrix {
a := newMatrix(n, n)
for i := 0; i < n; i++ {
return a
}
-
func (a *matrix) mul(b *matrix) *matrix {
if a.m != b.n {
panic("illegal matrix multiply")
return c
}
-
func (a *matrix) eql(b *matrix) bool {
if a.n != b.n || a.m != b.m {
return false
return true
}
-
func (a *matrix) String() string {
s := ""
for i := 0; i < a.n; i++ {
return s
}
-
func doHilbert(t *testing.T, n int) {
a := newHilbert(n)
b := newInverseHilbert(n)
}
}
-
func TestHilbert(t *testing.T) {
doHilbert(t, 10)
}
-
func BenchmarkHilbert(b *testing.B) {
for i := 0; i < b.N; i++ {
doHilbert(nil, 10)
abs nat // absolute value of the integer
}
-
var intOne = &Int{false, natOne}
-
// Sign returns:
//
// -1 if x < 0
return 1
}
-
// SetInt64 sets z to x and returns z.
func (z *Int) SetInt64(x int64) *Int {
neg := false
return z
}
-
// NewInt allocates and returns a new Int set to x.
func NewInt(x int64) *Int {
return new(Int).SetInt64(x)
}
-
// Set sets z to x and returns z.
func (z *Int) Set(x *Int) *Int {
z.abs = z.abs.set(x.abs)
return z
}
-
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Int) Abs(x *Int) *Int {
z.abs = z.abs.set(x.abs)
return z
}
-
// Neg sets z to -x and returns z.
func (z *Int) Neg(x *Int) *Int {
z.abs = z.abs.set(x.abs)
return z
}
-
// Add sets z to the sum x+y and returns z.
func (z *Int) Add(x, y *Int) *Int {
neg := x.neg
return z
}
-
// Sub sets z to the difference x-y and returns z.
func (z *Int) Sub(x, y *Int) *Int {
neg := x.neg
return z
}
-
// Mul sets z to the product x*y and returns z.
func (z *Int) Mul(x, y *Int) *Int {
// x * y == x * y
// x * (-y) == -(x * y)
// (-x) * y == -(x * y)
// (-x) * (-y) == x * y
return z
}
-
// MulRange sets z to the product of all integers
// in the range [a, b] inclusively and returns z.
// If a > b (empty range), the result is 1.
return z
}
-
// Binomial sets z to the binomial coefficient of (n, k) and returns z.
func (z *Int) Binomial(n, k int64) *Int {
var a, b Int
return z.Quo(&a, &b)
}
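// Usage sketch (not part of the original file), using only NewInt, Binomial
// and Cmp as declared in this file: C(5, 2) == 10.
func binomialSketch() bool {
	return new(Int).Binomial(5, 2).Cmp(NewInt(10)) == 0
}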
-
// Quo sets z to the quotient x/y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// See QuoRem for more details.
return z
}
-
// Rem sets z to the remainder x%y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// See QuoRem for more details.
return z
}
-
// QuoRem sets z to the quotient x/y and r to the remainder x%y
// and returns the pair (z, r) for y != 0.
// If y == 0, a division-by-zero run-time panic occurs.
return z, r
}
-
// Div sets z to the quotient x/y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// See DivMod for more details.
return z
}
-
// Mod sets z to the modulus x%y for y != 0 and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
// See DivMod for more details.
return z
}
-
// DivMod sets z to the quotient x div y and m to the modulus x mod y
// and returns the pair (z, m) for y != 0.
// If y == 0, a division-by-zero run-time panic occurs.
return z, m
}
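// Worked example (not part of the original file) contrasting the two
// division pairs for x = -7, y = 3, assuming Div/Mod take (x, y) arguments
// the same way Quo/Rem do:
//
//	Quo(-7, 3) = -2, Rem(-7, 3) = -1   (quotient truncated toward zero)
//	Div(-7, 3) = -3, Mod(-7, 3) =  2   (modulus always in [0, |y|))
//
// In both cases x == quotient*y + remainder holds.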
-
// Cmp compares x and y and returns:
//
// -1 if x < y
return
}
-
func (x *Int) String() string {
switch {
case x == nil:
return x.abs.decimalString()
}
-
func charset(ch int) string {
switch ch {
case 'b':
return "" // unknown format
}
-
// Format is a support routine for fmt.Formatter. It accepts
// the formats 'b' (binary), 'o' (octal), 'd' (decimal), 'x'
// (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
fmt.Fprint(s, t)
}
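// Usage sketch (not part of the original file): with Format in place the
// standard fmt verbs apply directly to *Int values, for example
//
//	fmt.Sprintf("%x", NewInt(255)) // "ff"
//	fmt.Sprintf("%b", NewInt(5))   // "101"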
-
// scan sets z to the integer value corresponding to the longest possible prefix
// read from r representing a signed integer number in a given conversion base.
// It returns z, the actual conversion base used, and an error, if any. In the
return z, base, nil
}
-
// Scan is a support routine for fmt.Scanner; it sets z to the value of
// the scanned number. It accepts the formats 'b' (binary), 'o' (octal),
// 'd' (decimal), 'x' (lowercase hexadecimal), and 'X' (uppercase hexadecimal).
return err
}
-
// Int64 returns the int64 representation of x.
// If x cannot be represented in an int64, the result is undefined.
func (x *Int) Int64() int64 {
return v
}
-
// SetString sets z to the value of s, interpreted in the given base,
// and returns z and a boolean indicating success. If SetString fails,
// the value of z is undefined.
return z, err == os.EOF // err == os.EOF => scan consumed all of s
}
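// Usage sketch (not part of the original file): with base 0 the prefix of s
// selects the conversion base, as exercised by the scan and SetString tests
// elsewhere in this package ("0x"/"0X" is hexadecimal, a leading "0" is octal).
func setStringSketch() bool {
	x, ok := new(Int).SetString("0x10", 0)
	return ok && x.Cmp(NewInt(16)) == 0
}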
-
// SetBytes interprets buf as the bytes of a big-endian unsigned
// integer, sets z to that value, and returns z.
func (z *Int) SetBytes(buf []byte) *Int {
return z
}
-
// Bytes returns the absolute value of z as a big-endian byte slice.
func (z *Int) Bytes() []byte {
buf := make([]byte, len(z.abs)*_S)
return buf[z.abs.bytes(buf):]
}
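// Sketch (not part of the original file): SetBytes and Bytes use big-endian
// byte order, so the two-byte slice {1, 2} corresponds to the value 0x0102.
func bytesSketch() bool {
	return new(Int).SetBytes([]byte{1, 2}).Cmp(NewInt(0x0102)) == 0
}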
-
// BitLen returns the length of the absolute value of z in bits.
// The bit length of 0 is 0.
func (z *Int) BitLen() int {
return z.abs.bitLen()
}
-
// Exp sets z = x**y mod m. If m is nil, z = x**y.
// See Knuth, volume 2, section 4.6.3.
func (z *Int) Exp(x, y, m *Int) *Int {
return z
}
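// Usage sketch (not part of the original file): modular exponentiation with
// a non-nil modulus, 2**10 mod 1000 == 24.
func expSketch() bool {
	z := new(Int).Exp(NewInt(2), NewInt(10), NewInt(1000))
	return z.Cmp(NewInt(24)) == 0
}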
-
// GcdInt sets d to the greatest common divisor of a and b, which must be
// positive numbers.
// If x and y are not nil, GcdInt sets x and y such that d = a*x + b*y.
*d = *A
}
-
// ProbablyPrime performs n Miller-Rabin tests to check whether z is prime.
// If it returns true, z is prime with probability 1 - 1/4^n.
// If it returns false, z is not prime.
return !z.neg && z.abs.probablyPrime(n)
}
-
// Rand sets z to a pseudo-random number in [0, n) and returns z.
func (z *Int) Rand(rnd *rand.Rand, n *Int) *Int {
z.neg = false
return z
}
-
// ModInverse sets z to the multiplicative inverse of g in the group ℤ/pℤ (where
// p is a prime) and returns z.
func (z *Int) ModInverse(g, p *Int) *Int {
return z
}
-
// Lsh sets z = x << n and returns z.
func (z *Int) Lsh(x *Int, n uint) *Int {
z.abs = z.abs.shl(x.abs, n)
return z
}
-
// Rsh sets z = x >> n and returns z.
func (z *Int) Rsh(x *Int, n uint) *Int {
if x.neg {
return z
}
-
// Bit returns the value of the i'th bit of z. That is, it
// returns (z>>i)&1. The bit index i must be >= 0.
func (z *Int) Bit(i int) uint {
return z.abs.bit(uint(i))
}
-
// SetBit sets the i'th bit of z to bit and returns z.
// That is, if bit is 1 SetBit sets z = x | (1 << i);
// if bit is 0 it sets z = x &^ (1 << i). If bit is not 0 or 1,
return z
}
-
// And sets z = x & y and returns z.
func (z *Int) And(x, y *Int) *Int {
if x.neg == y.neg {
return z
}
-
// AndNot sets z = x &^ y and returns z.
func (z *Int) AndNot(x, y *Int) *Int {
if x.neg == y.neg {
return z
}
-
// Or sets z = x | y and returns z.
func (z *Int) Or(x, y *Int) *Int {
if x.neg == y.neg {
return z
}
-
// Xor sets z = x ^ y and returns z.
func (z *Int) Xor(x, y *Int) *Int {
if x.neg == y.neg {
return z
}
-
// Not sets z = ^x and returns z.
func (z *Int) Not(x *Int) *Int {
if x.neg {
return z
}
-
// Gob codec version. Permits backward-compatible changes to the encoding.
const intGobVersion byte = 1
return buf[i:], nil
}
-
// GobDecode implements the gob.GobDecoder interface.
func (z *Int) GobDecode(buf []byte) os.Error {
if len(buf) == 0 {
"testing/quick"
)
-
func isNormalized(x *Int) bool {
if len(x.abs) == 0 {
return !x.neg
return x.abs[len(x.abs)-1] != 0
}
-
type funZZ func(z, x, y *Int) *Int
type argZZ struct {
z, x, y *Int
}
-
var sumZZ = []argZZ{
{NewInt(0), NewInt(0), NewInt(0)},
{NewInt(1), NewInt(1), NewInt(0)},
{NewInt(-1111111110), NewInt(-123456789), NewInt(-987654321)},
}
-
var prodZZ = []argZZ{
{NewInt(0), NewInt(0), NewInt(0)},
{NewInt(0), NewInt(1), NewInt(0)},
// TODO(gri) add larger products
}
-
func TestSignZ(t *testing.T) {
var zero Int
for _, a := range sumZZ {
}
}
-
func TestSetZ(t *testing.T) {
for _, a := range sumZZ {
var z Int
}
}
-
func TestAbsZ(t *testing.T) {
var zero Int
for _, a := range sumZZ {
}
}
-
func testFunZZ(t *testing.T, msg string, f funZZ, a argZZ) {
var z Int
f(&z, a.x, a.y)
}
}
-
func TestSumZZ(t *testing.T) {
AddZZ := func(z, x, y *Int) *Int { return z.Add(x, y) }
SubZZ := func(z, x, y *Int) *Int { return z.Sub(x, y) }
}
}
-
func TestProdZZ(t *testing.T) {
MulZZ := func(z, x, y *Int) *Int { return z.Mul(x, y) }
for _, a := range prodZZ {
}
}
-
// mulBytes returns x*y via grade school multiplication. Both inputs
// and the result are assumed to be in big-endian representation (to
// match the semantics of Int.Bytes and Int.SetBytes).
return z[i:]
}
-
func checkMul(a, b []byte) bool {
var x, y, z1 Int
x.SetBytes(a)
return z1.Cmp(&z2) == 0
}
-
func TestMul(t *testing.T) {
if err := quick.Check(checkMul, nil); err != nil {
t.Error(err)
}
}
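// Sketch (not part of the original file): the same testing/quick pattern
// used by checkMul above can verify other algebraic properties, for
// example commutativity of Mul.
func checkMulCommutative(a, b []byte) bool {
	var x, y, p, q Int
	x.SetBytes(a)
	y.SetBytes(b)
	p.Mul(&x, &y)
	q.Mul(&y, &x)
	return p.Cmp(&q) == 0
}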
-
var mulRangesZ = []struct {
a, b int64
prod string
},
}
-
func TestMulRangeZ(t *testing.T) {
var tmp Int
// test entirely positive ranges
}
}
-
var stringTests = []struct {
in string
out string
{"1001010111", "1001010111", 2, 0x257, true},
}
-
func format(base int) string {
switch base {
case 2:
return "%d"
}
-
func TestGetString(t *testing.T) {
z := new(Int)
for i, test := range stringTests {
}
}
-
func TestSetString(t *testing.T) {
tmp := new(Int)
for i, test := range stringTests {
}
}
-
var formatTests = []struct {
input string
format string
"12 49 ad 25 94 c3 7c eb 0b 27 84 c4 ce 0b f3 8a ce 40 8e 21 1a 7c aa b2 43 08 a8 2e 8f 10 00 00 00 00 00 00 00 00 00 00 00 00"},
}
-
func TestFormat(t *testing.T) {
for i, test := range formatTests {
var x *Int
}
}
-
var scanTests = []struct {
input string
format string
{"0XABC 12", "%v", "2748", 3},
}
-
func TestScan(t *testing.T) {
var buf bytes.Buffer
for i, test := range scanTests {
}
}
-
// Examples from the Go Language Spec, section "Arithmetic operators"
var divisionSignsTests = []struct {
x, y int64
{8, 4, 2, 0, 2, 0},
}
-
func TestDivisionSigns(t *testing.T) {
for i, test := range divisionSignsTests {
x := NewInt(test.x)
}
}
-
func checkSetBytes(b []byte) bool {
hex1 := hex.EncodeToString(new(Int).SetBytes(b).Bytes())
hex2 := hex.EncodeToString(b)
return hex1 == hex2
}
-
func TestSetBytes(t *testing.T) {
if err := quick.Check(checkSetBytes, nil); err != nil {
t.Error(err)
}
}
-
func checkBytes(b []byte) bool {
b2 := new(Int).SetBytes(b).Bytes()
return bytes.Compare(b, b2) == 0
}
-
func TestBytes(t *testing.T) {
if err := quick.Check(checkSetBytes, nil); err != nil {
t.Error(err)
}
}
-
func checkQuo(x, y []byte) bool {
u := new(Int).SetBytes(x)
v := new(Int).SetBytes(y)
return uprime.Cmp(u) == 0
}
-
var quoTests = []struct {
x, y string
q, r string
},
}
-
func TestQuo(t *testing.T) {
if err := quick.Check(checkQuo, nil); err != nil {
t.Error(err)
}
}
-
func TestQuoStepD6(t *testing.T) {
// See Knuth, Volume 2, section 4.3.1, exercise 21. This code exercises
// a code path that is taken only about 1 in 10^19 cases.
}
}
-
var bitLenTests = []struct {
in string
out int
{"-0x4000000000000000000000", 87},
}
-
func TestBitLen(t *testing.T) {
for i, test := range bitLenTests {
x, ok := new(Int).SetString(test.in, 0)
}
}
-
var expTests = []struct {
x, y, m string
out string
},
}
-
func TestExp(t *testing.T) {
for i, test := range expTests {
x, ok1 := new(Int).SetString(test.x, 0)
}
}
-
func checkGcd(aBytes, bBytes []byte) bool {
a := new(Int).SetBytes(aBytes)
b := new(Int).SetBytes(bBytes)
return x.Cmp(d) == 0
}
-
var gcdTests = []struct {
a, b int64
d, x, y int64
{120, 23, 1, -9, 47},
}
-
func TestGcd(t *testing.T) {
for i, test := range gcdTests {
a := NewInt(test.a)
quick.Check(checkGcd, nil)
}
-
var primes = []string{
"2",
"3",
"203956878356401977405765866929034577280193993314348263094772646453283062722701277632936616063144088173312372882677123879538709400158306567338328279154499698366071906766440037074217117805690872792848149112022286332144876183376326512083574821647933992961249917319836219304274280243803104015000563790123",
}
-
var composites = []string{
"21284175091214687912771199898307297748211672914763848041968395774954376176754",
"6084766654921918907427900243509372380954290099172559290432744450051395395951",
"82793403787388584738507275144194252681",
}
-
func TestProbablyPrime(t *testing.T) {
nreps := 20
if testing.Short() {
}
}
-
type intShiftTest struct {
in string
shift uint
out string
}
-
var rshTests = []intShiftTest{
{"0", 0, "0"},
{"-0", 0, "0"},
{"340282366920938463463374607431768211456", 128, "1"},
}
-
func TestRsh(t *testing.T) {
for i, test := range rshTests {
in, _ := new(Int).SetString(test.in, 10)
}
}
-
func TestRshSelf(t *testing.T) {
for i, test := range rshTests {
z, _ := new(Int).SetString(test.in, 10)
}
}
-
var lshTests = []intShiftTest{
{"0", 0, "0"},
{"0", 1, "0"},
{"1", 128, "340282366920938463463374607431768211456"},
}
-
func TestLsh(t *testing.T) {
for i, test := range lshTests {
in, _ := new(Int).SetString(test.in, 10)
}
}
-
func TestLshSelf(t *testing.T) {
for i, test := range lshTests {
z, _ := new(Int).SetString(test.in, 10)
}
}
-
func TestLshRsh(t *testing.T) {
for i, test := range rshTests {
in, _ := new(Int).SetString(test.in, 10)
}
}
-
var int64Tests = []int64{
0,
1,
-9223372036854775808,
}
-
func TestInt64(t *testing.T) {
for i, testVal := range int64Tests {
in := NewInt(testVal)
}
}
-
var bitwiseTests = []struct {
x, y string
and, or, xor, andNot string
},
}
-
type bitFun func(z, x, y *Int) *Int
func testBitFun(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
}
}
-
func testBitFunSelf(t *testing.T, msg string, f bitFun, x, y *Int, exp string) {
self := new(Int)
self.Set(x)
}
}
-
func altBit(x *Int, i int) uint {
z := new(Int).Rsh(x, uint(i))
z = z.And(z, NewInt(1))
return 0
}
-
func altSetBit(z *Int, x *Int, i int, b uint) *Int {
one := NewInt(1)
m := one.Lsh(one, uint(i))
panic("set bit is not 0 or 1")
}
-
func testBitset(t *testing.T, x *Int) {
n := x.BitLen()
z := new(Int).Set(x)
}
}
-
var bitsetTests = []struct {
x string
i int
{"-0x2000000000000000000000000001", 110, 1},
}
-
func TestBitSet(t *testing.T) {
for _, test := range bitwiseTests {
x := new(Int)
}
}
-
func BenchmarkBitset(b *testing.B) {
z := new(Int)
z.SetBit(z, 512, 1)
}
}
-
func BenchmarkBitsetNeg(b *testing.B) {
z := NewInt(-1)
z.SetBit(z, 512, 0)
}
}
-
func BenchmarkBitsetOrig(b *testing.B) {
z := new(Int)
altSetBit(z, z, 512, 1)
}
}
-
func BenchmarkBitsetNegOrig(b *testing.B) {
z := NewInt(-1)
altSetBit(z, z, 512, 0)
}
}
-
func TestBitwise(t *testing.T) {
x := new(Int)
y := new(Int)
}
}
-
var notTests = []struct {
in string
out string
},
}
-
func TestNot(t *testing.T) {
in := new(Int)
out := new(Int)
}
}
-
var modInverseTests = []struct {
element string
prime string
{"239487239847", "2410312426921032588552076022197566074856950548502459942654116941958108831682612228890093858261341614673227141477904012196503648957050582631942730706805009223062734745341073406696246014589361659774041027169249453200378729434170325843778659198143763193776859869524088940195577346119843545301547043747207749969763750084308926339295559968882457872412993810129130294592999947926365264059284647209730384947211681434464714438488520940127459844288859336526896320919633919"},
}
-
func TestModInverse(t *testing.T) {
var element, prime Int
one := NewInt(1)
}
}
-
// used by TestIntGobEncoding and TestRatGobEncoding
var gobEncodingTests = []string{
"0",
"rand"
)
-
// An unsigned integer x of the form
//
// x = x[n-1]*_B^(n-1) + x[n-2]*_B^(n-2) + ... + x[1]*_B + x[0]
natTen = nat{10}
)
-
func (z nat) clear() {
for i := range z {
z[i] = 0
}
}
-
func (z nat) norm() nat {
i := len(z)
for i > 0 && z[i-1] == 0 {
return z[0:i]
}
-
func (z nat) make(n int) nat {
if n <= cap(z) {
return z[0:n] // reuse z
return make(nat, n, n+e)
}
-
func (z nat) setWord(x Word) nat {
if x == 0 {
return z.make(0)
return z
}
-
func (z nat) setUint64(x uint64) nat {
// single-digit values
if w := Word(x); uint64(w) == x {
return z
}
-
func (z nat) set(x nat) nat {
z = z.make(len(x))
copy(z, x)
return z
}
-
func (z nat) add(x, y nat) nat {
m := len(x)
n := len(y)
return z.norm()
}
-
func (z nat) sub(x, y nat) nat {
m := len(x)
n := len(y)
return z.norm()
}
-
func (x nat) cmp(y nat) (r int) {
m := len(x)
n := len(y)
return
}
-
func (z nat) mulAddWW(x nat, y, r Word) nat {
m := len(x)
if m == 0 || y == 0 {
return z.norm()
}
-
// basicMul multiplies x and y and leaves the result in z.
// The (non-normalized) result is placed in z[0 : len(x) + len(y)].
func basicMul(z, x, y nat) {
}
}
-
// Fast version of z[0:n+n>>1].add(z[0:n+n>>1], x[0:n]) w/o bounds checks.
// Factored out for readability - do not use outside karatsuba.
func karatsubaAdd(z, x nat, n int) {
}
}
-
// Like karatsubaAdd, but subtracts.
func karatsubaSub(z, x nat, n int) {
if c := subVV(z[0:n], z, x); c != 0 {
}
}
-
// Operands that are shorter than karatsubaThreshold are multiplied using
// "grade school" multiplication; for longer operands the Karatsuba algorithm
// is used.
}
}
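// Sketch (not part of the original file) of the identity Karatsuba relies
// on: writing x = x1*B + x0 and y = y1*B + y0,
//
//	x*y = x1*y1*B*B + ((x1+x0)*(y1+y0) - x1*y1 - x0*y0)*B + x0*y0
//
// so three half-size multiplications replace the four needed by grade
// school multiplication. Toy check with B = 10, x = 12, y = 34:
// x1*y1 = 3, x0*y0 = 8, (1+2)*(3+4) = 21, middle = 21-3-8 = 10,
// and 3*100 + 10*10 + 8 = 408 = 12*34.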
-
// alias returns true if x and y share the same base array.
func alias(x, y nat) bool {
return cap(x) > 0 && cap(y) > 0 && &x[0:cap(x)][cap(x)-1] == &y[0:cap(y)][cap(y)-1]
}
-
// addAt implements z += x*(1<<(_W*i)); z must be long enough.
// (we don't use nat.add because we need z to stay the same
// slice, and we don't need to normalize z after each addition)
}
}
-
func max(x, y int) int {
if x > y {
return x
return y
}
-
// karatsubaLen computes an approximation to the maximum k <= n such that
// k = p<<i for a number p <= karatsubaThreshold and an i >= 0. Thus, the
// result is the largest number that can be divided repeatedly by 2 before
return n << i
}
-
func (z nat) mul(x, y nat) nat {
m := len(x)
n := len(y)
return z.norm()
}
-
// mulRange computes the product of all the unsigned integers in the
// range [a, b] inclusively. If a > b (empty range), the result is 1.
func (z nat) mulRange(a, b uint64) nat {
return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b))
}
-
// q = (x-r)/y, with 0 <= r < y
func (z nat) divW(x nat, y Word) (q nat, r Word) {
m := len(x)
return
}
-
func (z nat) div(z2, u, v nat) (q, r nat) {
if len(v) == 0 {
panic("division by zero")
return
}
-
// q = (uIn-r)/v, with 0 <= r < v
// Uses z as storage for q, and u as storage for r if possible.
// See Knuth, Volume 2, section 4.3.1, Algorithm D.
return q, r
}
-
// Length of x in bits. x must be normalized.
func (x nat) bitLen() int {
if i := len(x) - 1; i >= 0 {
return 0
}
-
// MaxBase is the largest number base accepted for string conversions.
const MaxBase = 'z' - 'a' + 10 + 1 // = hexValue('z') + 1
return Word(d)
}
-
// scan sets z to the natural number corresponding to the longest possible prefix
// read from r representing an unsigned integer in a given conversion base.
// It returns z, the actual conversion base used, and an error, if any. In the
return z.norm(), int(b), nil
}
-
// Character sets for string conversion.
const (
lowercaseDigits = "0123456789abcdefghijklmnopqrstuvwxyz"
uppercaseDigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
)
-
// decimalString returns a decimal representation of x.
// It calls x.string with the charset "0123456789".
func (x nat) decimalString() string {
return x.string(lowercaseDigits[0:10])
}
-
// string converts x to a string using digits from a charset; a digit with
// value d is represented by charset[d]. The conversion base is determined
// by len(charset), which must be >= 2.
return string(s[i:])
}
-
const deBruijn32 = 0x077CB531
var deBruijn32Lookup = []byte{
54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}
-
// trailingZeroBits returns the number of consecutive zero bits on the right
// side of the given Word.
// See Knuth, volume 4, section 7.3.1
return 0
}
-
// z = x << s
func (z nat) shl(x nat, s uint) nat {
m := len(x)
return z.norm()
}
-
// z = x >> s
func (z nat) shr(x nat, s uint) nat {
m := len(x)
return z.norm()
}
-
func (z nat) setBit(x nat, i uint, b uint) nat {
j := int(i / _W)
m := Word(1) << (i % _W)
panic("set bit is not 0 or 1")
}
-
func (z nat) bit(i uint) uint {
j := int(i / _W)
if j >= len(z) {
return uint(z[j] >> (i % _W) & 1)
}
-
func (z nat) and(x, y nat) nat {
m := len(x)
n := len(y)
return z.norm()
}
-
func (z nat) andNot(x, y nat) nat {
m := len(x)
n := len(y)
return z.norm()
}
-
func (z nat) or(x, y nat) nat {
m := len(x)
n := len(y)
return z.norm()
}
-
func (z nat) xor(x, y nat) nat {
m := len(x)
n := len(y)
return z.norm()
}
-
// greaterThan returns true iff (x1<<_W + x2) > (y1<<_W + y2)
func greaterThan(x1, x2, y1, y2 Word) bool {
return x1 > y1 || x1 == y1 && x2 > y2
}
-
// modW returns x % d.
func (x nat) modW(d Word) (r Word) {
// TODO(agl): we don't actually need to store the q value.
return divWVW(q, 0, x, d)
}
-
// powersOfTwoDecompose finds q and k such that x = q * 1<<k and q is odd; if x == 0, q and k are 0.
func (x nat) powersOfTwoDecompose() (q nat, k int) {
if len(x) == 0 {
return
}
-
// random creates a random integer in [0..limit), using the space in z if
// possible. n is the bit length of limit.
func (z nat) random(rand *rand.Rand, limit nat, n int) nat {
return z.norm()
}
-
// If m != nil, expNN calculates x**y mod m. Otherwise it calculates x**y. It
// reuses the storage of z if possible.
func (z nat) expNN(x, y, m nat) nat {
return z
}
-
// probablyPrime performs reps Miller-Rabin tests to check whether n is prime.
// If it returns true, n is prime with probability 1 - 1/4^reps.
// If it returns false, n is not prime.
return true
}
-
// bytes writes the value of z into buf using big-endian encoding.
// len(buf) must be >= len(z)*_S. The value of z is encoded in the
// slice buf[i:]. The number i of unused bytes at the beginning of
return
}
-
// setBytes interprets buf as the bytes of a big-endian unsigned
// integer, sets z to that value, and returns z.
func (z nat) setBytes(buf []byte) nat {
{nat{34986, 41, 105, 1957}, nat{56, 7458, 104, 1957}, 1},
}
-
func TestCmp(t *testing.T) {
for i, a := range cmpTests {
r := a.x.cmp(a.y)
}
}
-
type funNN func(z, x, y nat) nat
type argNN struct {
z, x, y nat
}
-
var sumNN = []argNN{
{},
{nat{1}, nil, nat{1}},
{nat{0, 0, 0, 1}, nat{0, 0, _M}, nat{0, 0, 1}},
}
-
var prodNN = []argNN{
{},
{nil, nil, nil},
{nat{4, 11, 20, 30, 20, 11, 4}, nat{1, 2, 3, 4}, nat{4, 3, 2, 1}},
}
-
func TestSet(t *testing.T) {
for _, a := range sumNN {
z := nat(nil).set(a.z)
}
}
-
func testFunNN(t *testing.T, msg string, f funNN, a argNN) {
z := f(nil, a.x, a.y)
if z.cmp(a.z) != 0 {
}
}
-
func TestFunNN(t *testing.T) {
for _, a := range sumNN {
arg := a
}
}
-
var mulRangesN = []struct {
a, b uint64
prod string
},
}
-
func TestMulRangeN(t *testing.T) {
for i, r := range mulRangesN {
prod := nat(nil).mulRange(r.a, r.b).decimalString()
}
}
-
var mulArg, mulTmp nat
func init() {
}
}
-
func benchmarkMulLoad() {
for j := 1; j <= 10; j++ {
x := mulArg[0 : j*100]
}
}
-
func BenchmarkMul(b *testing.B) {
for i := 0; i < b.N; i++ {
benchmarkMulLoad()
}
}
-
func toString(x nat, charset string) string {
base := len(charset)
return string(s[i:])
}
-
var strTests = []struct {
x nat // nat value to be converted
c string // conversion charset
{nat{0x309663e6}, uppercaseDigits[0:32], "O9COV6"},
}
-
func TestString(t *testing.T) {
for _, a := range strTests {
s := a.x.string(a.c)
}
}
-
var natScanTests = []struct {
s string // string to be scanned
base int // input base
{"0XDEADBEEF", 0, nat{0xdeadbeef}, 16, true, 0},
}
-
func TestScanBase(t *testing.T) {
for _, a := range natScanTests {
r := strings.NewReader(a.s)
}
}
-
var pi = "3" +
"14159265358979323846264338327950288419716939937510582097494459230781640628620899862803482534211706798214808651" +
"32823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461" +
"88281613323166636528619326686336062735676303544776280350450777235547105859548702790814356240145171806246436267" +
"94561275318134078330336254232783944975382437205835311477119926063813346776879695970309833913077109870408591337"
-
// Test case for BenchmarkScanPi.
func TestScanPi(t *testing.T) {
var x nat
}
}
-
func BenchmarkScanPi(b *testing.B) {
for i := 0; i < b.N; i++ {
var x nat
}
}
-
const (
// 314**271
// base 2: 2249 digits
longExponent = 27182
)
-
func BenchmarkScanShort2(b *testing.B) {
ScanHelper(b, 2, shortBase, shortExponent)
}
-
func BenchmarkScanShort8(b *testing.B) {
ScanHelper(b, 8, shortBase, shortExponent)
}
-
func BenchmarkScanShort10(b *testing.B) {
ScanHelper(b, 10, shortBase, shortExponent)
}
-
func BenchmarkScanShort16(b *testing.B) {
ScanHelper(b, 16, shortBase, shortExponent)
}
-
func BenchmarkScanMedium2(b *testing.B) {
ScanHelper(b, 2, mediumBase, mediumExponent)
}
-
func BenchmarkScanMedium8(b *testing.B) {
ScanHelper(b, 8, mediumBase, mediumExponent)
}
-
func BenchmarkScanMedium10(b *testing.B) {
ScanHelper(b, 10, mediumBase, mediumExponent)
}
-
func BenchmarkScanMedium16(b *testing.B) {
ScanHelper(b, 16, mediumBase, mediumExponent)
}
-
func BenchmarkScanLong2(b *testing.B) {
ScanHelper(b, 2, longBase, longExponent)
}
-
func BenchmarkScanLong8(b *testing.B) {
ScanHelper(b, 8, longBase, longExponent)
}
-
func BenchmarkScanLong10(b *testing.B) {
ScanHelper(b, 10, longBase, longExponent)
}
-
func BenchmarkScanLong16(b *testing.B) {
ScanHelper(b, 16, longBase, longExponent)
}
-
func ScanHelper(b *testing.B, base int, xv, yv Word) {
b.StopTimer()
var x, y, z nat
}
}
-
func BenchmarkStringShort2(b *testing.B) {
StringHelper(b, 2, shortBase, shortExponent)
}
-
func BenchmarkStringShort8(b *testing.B) {
StringHelper(b, 8, shortBase, shortExponent)
}
-
func BenchmarkStringShort10(b *testing.B) {
StringHelper(b, 10, shortBase, shortExponent)
}
-
func BenchmarkStringShort16(b *testing.B) {
StringHelper(b, 16, shortBase, shortExponent)
}
-
func BenchmarkStringMedium2(b *testing.B) {
StringHelper(b, 2, mediumBase, mediumExponent)
}
-
func BenchmarkStringMedium8(b *testing.B) {
StringHelper(b, 8, mediumBase, mediumExponent)
}
-
func BenchmarkStringMedium10(b *testing.B) {
StringHelper(b, 10, mediumBase, mediumExponent)
}
-
func BenchmarkStringMedium16(b *testing.B) {
StringHelper(b, 16, mediumBase, mediumExponent)
}
-
func BenchmarkStringLong2(b *testing.B) {
StringHelper(b, 2, longBase, longExponent)
}
-
func BenchmarkStringLong8(b *testing.B) {
StringHelper(b, 8, longBase, longExponent)
}
-
func BenchmarkStringLong10(b *testing.B) {
StringHelper(b, 10, longBase, longExponent)
}
-
func BenchmarkStringLong16(b *testing.B) {
StringHelper(b, 16, longBase, longExponent)
}
-
func StringHelper(b *testing.B, base int, xv, yv Word) {
b.StopTimer()
var x, y, z nat
}
}
-
func TestLeadingZeros(t *testing.T) {
var x Word = _B >> 1
for i := 0; i <= _W; i++ {
}
}
-
type shiftTest struct {
in nat
shift uint
out nat
}
-
var leftShiftTests = []shiftTest{
{nil, 0, nil},
{nil, 1, nil},
{nat{1 << (_W - 1), 0}, 1, nat{0, 1}},
}
-
func TestShiftLeft(t *testing.T) {
for i, test := range leftShiftTests {
var z nat
}
}
-
var rightShiftTests = []shiftTest{
{nil, 0, nil},
{nil, 1, nil},
{nat{2, 1, 1}, 1, nat{1<<(_W-1) + 1, 1 << (_W - 1)}},
}
-
func TestShiftRight(t *testing.T) {
for i, test := range rightShiftTests {
var z nat
}
}
-
type modWTest struct {
in string
dividend string
out string
}
-
var modWTests32 = []modWTest{
{"23492635982634928349238759823742", "252341", "220170"},
}
-
var modWTests64 = []modWTest{
{"6527895462947293856291561095690465243862946", "524326975699234", "375066989628668"},
}
-
func runModWTests(t *testing.T, tests []modWTest) {
for i, test := range tests {
in, _ := new(Int).SetString(test.in, 10)
}
}
-
func TestModW(t *testing.T) {
if _W >= 32 {
runModWTests(t, modWTests32)
}
}
-
func TestTrailingZeroBits(t *testing.T) {
var x Word
x--
},
}
-
func TestExpNN(t *testing.T) {
for i, test := range expNNTests {
x, _, _ := nat(nil).scan(strings.NewReader(test.x), 0)
b nat
}
-
// NewRat creates a new Rat with numerator a and denominator b.
func NewRat(a, b int64) *Rat {
return new(Rat).SetFrac64(a, b)
}
-
// SetFrac sets z to a/b and returns z.
func (z *Rat) SetFrac(a, b *Int) *Rat {
z.a.Set(a)
return z.norm()
}
-
// SetFrac64 sets z to a/b and returns z.
func (z *Rat) SetFrac64(a, b int64) *Rat {
z.a.SetInt64(a)
return z.norm()
}
-
// SetInt sets z to x (by making a copy of x) and returns z.
func (z *Rat) SetInt(x *Int) *Rat {
z.a.Set(x)
return z
}
-
// SetInt64 sets z to x and returns z.
func (z *Rat) SetInt64(x int64) *Rat {
z.a.SetInt64(x)
return z
}
-
// Sign returns:
//
// -1 if x < 0
return x.a.Sign()
}
-
// IsInt returns true if the denominator of x is 1.
func (x *Rat) IsInt() bool {
return len(x.b) == 1 && x.b[0] == 1
}
-
// Num returns the numerator of z; it may be <= 0.
// The result is a reference to z's numerator; it
// may change if a new value is assigned to z.
return &z.a
}
-
// Denom returns the denominator of z; it is always > 0.
// The result is a reference to z's denominator; it
// may change if a new value is assigned to z.
return &Int{false, z.b}
}
-
func gcd(x, y nat) nat {
// Euclidean algorithm.
var a, b nat
return a
}
-
func (z *Rat) norm() *Rat {
f := gcd(z.a.abs, z.b)
if len(z.a.abs) == 0 {
return z
}
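// Usage sketch (not part of the original file): because norm divides out
// the gcd, 6/8 and 3/4 compare equal.
func normSketch() bool {
	return NewRat(6, 8).Cmp(NewRat(3, 4)) == 0
}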
-
func mulNat(x *Int, y nat) *Int {
var z Int
z.abs = z.abs.mul(x.abs, y)
return &z
}
-
// Cmp compares x and y and returns:
//
// -1 if x < y
return mulNat(&x.a, y.b).Cmp(mulNat(&y.a, x.b))
}
-
// Abs sets z to |x| (the absolute value of x) and returns z.
func (z *Rat) Abs(x *Rat) *Rat {
z.a.Abs(&x.a)
return z
}
-
// Add sets z to the sum x+y and returns z.
func (z *Rat) Add(x, y *Rat) *Rat {
a1 := mulNat(&x.a, y.b)
return z.norm()
}
-
// Sub sets z to the difference x-y and returns z.
func (z *Rat) Sub(x, y *Rat) *Rat {
a1 := mulNat(&x.a, y.b)
return z.norm()
}
-
// Mul sets z to the product x*y and returns z.
func (z *Rat) Mul(x, y *Rat) *Rat {
z.a.Mul(&x.a, &y.a)
return z.norm()
}
-
// Quo sets z to the quotient x/y and returns z.
// If y == 0, a division-by-zero run-time panic occurs.
func (z *Rat) Quo(x, y *Rat) *Rat {
return z.norm()
}
-
// Neg sets z to -x (by making a copy of x if necessary) and returns z.
func (z *Rat) Neg(x *Rat) *Rat {
z.a.Neg(&x.a)
return z
}
-
// Set sets z to x (by making a copy of x if necessary) and returns z.
func (z *Rat) Set(x *Rat) *Rat {
z.a.Set(&x.a)
return z
}
-
func ratTok(ch int) bool {
return strings.IndexRune("+-/0123456789.eE", ch) >= 0
}
-
// Scan is a support routine for fmt.Scanner. It accepts the formats
// 'e', 'E', 'f', 'F', 'g', 'G', and 'v'. All formats are equivalent.
func (z *Rat) Scan(s fmt.ScanState, ch int) os.Error {
return nil
}
-
// SetString sets z to the value of s and returns z and a boolean indicating
// success. s can be given as a fraction "a/b" or as a floating-point number
// optionally followed by an exponent. If the operation failed, the value of z
return z, true
}
-
// String returns a string representation of z in the form "a/b" (even if b == 1).
func (z *Rat) String() string {
return z.a.String() + "/" + z.b.decimalString()
}
-
// RatString returns a string representation of z in the form "a/b" if b != 1,
// and in the form "a" if b == 1.
func (z *Rat) RatString() string {
return z.String()
}
-
// FloatString returns a string representation of z in decimal form with prec
// digits of precision after the decimal point and the last digit rounded.
func (z *Rat) FloatString(prec int) string {
return s
}
-
// Gob codec version. Permits backward-compatible changes to the encoding.
const ratGobVersion byte = 1
return buf[j:], nil
}
-
// GobDecode implements the gob.GobDecoder interface.
func (z *Rat) GobDecode(buf []byte) os.Error {
if len(buf) == 0 {
"testing"
)
-
var setStringTests = []struct {
in, out string
ok bool
}
}
-
func TestRatScan(t *testing.T) {
var buf bytes.Buffer
for i, test := range setStringTests {
}
}
-
var floatStringTests = []struct {
in string
prec int
}
}
-
func TestRatSign(t *testing.T) {
zero := NewRat(0, 1)
for _, a := range setStringTests {
}
}
-
var ratCmpTests = []struct {
rat1, rat2 string
out int
}
}
-
func TestIsInt(t *testing.T) {
one := NewInt(1)
for _, a := range setStringTests {
}
}
-
func TestRatAbs(t *testing.T) {
zero := NewRat(0, 1)
for _, a := range setStringTests {
}
}
-
type ratBinFun func(z, x, y *Rat) *Rat
type ratBinArg struct {
x, y, z string
}
}
-
var ratBinTests = []struct {
x, y string
sum, prod string
}
}
-
func TestIssue820(t *testing.T) {
x := NewRat(3, 1)
y := NewRat(2, 1)
}
}
-
var setFrac64Tests = []struct {
a, b int64
out string
}
}
-
func TestRatGobEncoding(t *testing.T) {
var medium bytes.Buffer
enc := gob.NewEncoder(&medium)
"utf8"
)
-
const (
defaultBufSize = 4096
)
return "bufio: bad buffer size " + strconv.Itoa(int(b))
}
-
// Buffered input.
// Reader implements buffering for an io.Reader object.
return string(bytes), e
}
-
// buffered output
// Writer implements buffering for an io.Writer object.
}
}
-
type readMaker struct {
name string
fn func(io.Reader) io.Reader
"utf8"
)
-
const N = 10000 // make this bigger for a larger (and slower) test
var data string // test data for write tests
var bytes []byte // test data; same as data but as a slice.
}
}
-
// Fill buf through n writes of string fus.
// The initial contents of buf correspond to the string s;
// the result is the final contents of buf returned as a string.
return s
}
-
// Fill buf through n writes of byte slice fub.
// The initial contents of buf correspond to the string s;
// the result is the final contents of buf returned as a string.
return s
}
-
func TestNewBuffer(t *testing.T) {
buf := NewBuffer(bytes)
check(t, "NewBuffer", buf, data)
}
-
func TestNewBufferString(t *testing.T) {
buf := NewBufferString(data)
check(t, "NewBufferString", buf, data)
}
-
// Empty buf through repeated reads into fub.
// The initial contents of buf correspond to the string s.
func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
check(t, testname+" (empty 4)", buf, "")
}
-
func TestBasicOperations(t *testing.T) {
var buf Buffer
}
}
-
func TestLargeStringWrites(t *testing.T) {
var buf Buffer
limit := 30
check(t, "TestLargeStringWrites (3)", &buf, "")
}
-
func TestLargeByteWrites(t *testing.T) {
var buf Buffer
limit := 30
check(t, "TestLargeByteWrites (3)", &buf, "")
}
-
func TestLargeStringReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
check(t, "TestLargeStringReads (3)", &buf, "")
}
-
func TestLargeByteReads(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
check(t, "TestLargeByteReads (3)", &buf, "")
}
-
func TestMixedReadsAndWrites(t *testing.T) {
var buf Buffer
s := ""
empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
}
-
func TestNil(t *testing.T) {
var b *Buffer
if b.String() != "<nil>" {
}
}
-
func TestReadFrom(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
}
}
-
func TestWriteTo(t *testing.T) {
var buf Buffer
for i := 3; i < 30; i += 3 {
}
}
-
func TestRuneIO(t *testing.T) {
const NRune = 1000
// Build a test array while we write the data
}
}
-
func TestNext(t *testing.T) {
b := []byte{0, 1, 2, 3, 4}
tmp := make([]byte, 5)
return Map(func(r int) int { return _case.ToTitle(r) }, s)
}
-
// isSeparator reports whether the rune could mark a word boundary.
// TODO: update when package unicode captures more of the properties.
func isSeparator(rune int) bool {
}
}
-
type SplitTest struct {
s string
sep string
}
}
-
type TrimTest struct {
f func([]byte, string) []byte
in, cutset, out string
}
}
-
func testToFromWithLevel(t *testing.T, level int, input []byte, name string) os.Error {
buffer := bytes.NewBuffer(nil)
w := NewWriter(buffer, level)
Pop() interface{}
}
-
// A heap must be initialized before any of the heap operations
// can be used. Init is idempotent with respect to the heap invariants
// and may be called whenever the heap invariants may have been invalidated.
}
}
-
// Push pushes the element x onto the heap. The complexity is
// O(log(n)) where n = h.Len().
//
up(h, h.Len()-1)
}
-
// Pop removes the minimum element (according to Less) from the heap
// and returns it. The complexity is O(log(n)) where n = h.Len().
// Same as Remove(h, 0).
return h.Pop()
}
-
// Remove removes the element at index i from the heap.
// The complexity is O(log(n)) where n = h.Len().
//
return h.Pop()
}
-
func up(h Interface, j int) {
for {
i := (j - 1) / 2 // parent
}
}
-
func down(h Interface, i, n int) {
for {
j1 := 2*i + 1
. "container/heap"
)
-
type myHeap struct {
// A vector.Vector implements sort.Interface except for Less,
// and it implements Push and Pop as required for heap.Interface.
vector.Vector
}
-
func (h *myHeap) Less(i, j int) bool { return h.At(i).(int) < h.At(j).(int) }
-
func (h *myHeap) verify(t *testing.T, i int) {
n := h.Len()
j1 := 2*i + 1
}
}
-
func TestInit0(t *testing.T) {
h := new(myHeap)
for i := 20; i > 0; i-- {
}
}
-
func TestInit1(t *testing.T) {
h := new(myHeap)
for i := 20; i > 0; i-- {
}
}
-
func Test(t *testing.T) {
h := new(myHeap)
h.verify(t, 0)
}
}
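// Usage sketch (not part of the original file): with the myHeap defined
// above, Init establishes the heap invariant and Pop always returns the
// current minimum.
func heapUsageSketch() []int {
	h := new(myHeap)
	for _, v := range []int{3, 1, 2} {
		h.Push(v)
	}
	Init(h)
	out := make([]int, 0, h.Len())
	for h.Len() > 0 {
		out = append(out, Pop(h).(int))
	}
	return out // [1 2 3]
}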
-
func TestRemove0(t *testing.T) {
h := new(myHeap)
for i := 0; i < 10; i++ {
}
}
-
func TestRemove1(t *testing.T) {
h := new(myHeap)
for i := 0; i < 10; i++ {
}
}
-
func TestRemove2(t *testing.T) {
N := 10
Value interface{} // for use by client; untouched by this library
}
-
func (r *Ring) init() *Ring {
r.next = r
r.prev = r
return r
}
-
// Next returns the next ring element. r must not be empty.
func (r *Ring) Next() *Ring {
if r.next == nil {
return r.next
}
-
// Prev returns the previous ring element. r must not be empty.
func (r *Ring) Prev() *Ring {
if r.next == nil {
return r.prev
}
-
// Move moves n % r.Len() elements backward (n < 0) or forward (n >= 0)
// in the ring and returns that ring element. r must not be empty.
//
return r
}
-
// New creates a ring of n elements.
func New(n int) *Ring {
if n <= 0 {
return r
}
-
// Link connects ring r with ring s such that r.Next()
// becomes s and returns the original value for r.Next().
// r must not be empty.
return n
}
-
// Unlink removes n % r.Len() elements from the ring r, starting
// at r.Next(). If n % r.Len() == 0, r remains unchanged.
// The result is the removed subring. r must not be empty.
return r.Link(r.Move(n + 1))
}
-
// Len computes the number of elements in ring r.
// It executes in time proportional to the number of elements.
//
return n
}
-
// Do calls function f on each element of the ring, in forward order.
// The behavior of Do is undefined if f changes *r.
func (r *Ring) Do(f func(interface{})) {
"testing"
)
-
// For debugging - keep around.
func dump(r *Ring) {
if r == nil {
fmt.Println()
}
-
func verify(t *testing.T, r *Ring, N int, sum int) {
// Len
n := r.Len()
}
}
-
func TestCornerCases(t *testing.T) {
var (
r0 *Ring
verify(t, &r1, 1, 0)
}
-
func makeN(n int) *Ring {
r := New(n)
for i := 1; i <= n; i++ {
func sumN(n int) int { return (n*n + n) / 2 }
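// Usage sketch (not part of the original file): build a three-element ring
// the same way makeN does and sum its values with Do.
func ringSumSketch() int {
	r := New(3)
	for i := 1; i <= 3; i++ {
		r.Value = i
		r = r.Next()
	}
	sum := 0
	r.Do(func(x interface{}) {
		if x != nil {
			sum += x.(int)
		}
	})
	return sum // 6
}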
-
func TestNew(t *testing.T) {
for i := 0; i < 10; i++ {
r := New(i)
}
}
-
func TestLink1(t *testing.T) {
r1a := makeN(1)
var r1b Ring
verify(t, r2b, 1, 0)
}
-
func TestLink2(t *testing.T) {
var r0 *Ring
r1a := &Ring{Value: 42}
verify(t, r10, 12, sumN(10)+42+77)
}
-
func TestLink3(t *testing.T) {
var r Ring
n := 1
}
}
-
func TestUnlink(t *testing.T) {
r10 := makeN(10)
s10 := r10.Move(6)
verify(t, r10, 9, sum10-2)
}
-
func TestLinkUnlink(t *testing.T) {
for i := 1; i < 4; i++ {
ri := New(i)
// Vectors grow and shrink dynamically as necessary.
package vector
-
// Vector is a container for numbered sequences of elements of type interface{}.
// A vector's length and capacity adjust automatically as necessary.
// The zero value for Vector is an empty vector ready to use.
type Vector []interface{}
-
// IntVector is a container for numbered sequences of elements of type int.
// A vector's length and capacity adjust automatically as necessary.
// The zero value for IntVector is an empty vector ready to use.
type IntVector []int
-
// StringVector is a container for numbered sequences of elements of type string.
// A vector's length and capacity adjust automatically as necessary.
// The zero value for StringVector is an empty vector ready to use.
type StringVector []string
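// Usage sketch (not part of the original file): the zero value of each
// vector type is ready to use, e.g. for IntVector (Push, Pop and Len are
// defined later in this package):
//
//	var v IntVector
//	v.Push(1)
//	v.Push(2)
//	x := v.Pop() // x == 2, v.Len() == 1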
-
// Initial underlying array size
const initialSize = 8
-
// Partial sort.Interface support
// LessInterface provides partial support of the sort.Interface.
Less(y interface{}) bool
}
-
// Less returns a boolean denoting whether the i'th element is less than the j'th element.
func (p *Vector) Less(i, j int) bool { return (*p)[i].(LessInterface).Less((*p)[j]) }
-
// sort.Interface support
// Less returns a boolean denoting whether the i'th element is less than the j'th element.
func (p *IntVector) Less(i, j int) bool { return (*p)[i] < (*p)[j] }
-
// Less returns a boolean denoting whether the i'th element is less than the j'th element.
func (p *StringVector) Less(i, j int) bool { return (*p)[i] < (*p)[j] }
package vector
-
func (p *IntVector) realloc(length, capacity int) (b []int) {
if capacity < initialSize {
capacity = initialSize
return
}
-
// Insert n elements at position i.
func (p *IntVector) Expand(i, n int) {
a := *p
*p = a
}
-
// Insert n elements at the end of a vector.
func (p *IntVector) Extend(n int) { p.Expand(len(*p), n) }
-
// Resize changes the length and capacity of a vector.
// If the new length is shorter than the current length, Resize discards
// trailing elements. If the new length is longer than the current length,
return p
}
-
// Len returns the number of elements in the vector.
// Same as len(*p).
func (p *IntVector) Len() int { return len(*p) }
-
// Cap returns the capacity of the vector; that is, the
// maximum length the vector can grow without resizing.
// Same as cap(*p).
func (p *IntVector) Cap() int { return cap(*p) }
-
// At returns the i'th element of the vector.
func (p *IntVector) At(i int) int { return (*p)[i] }
-
// Set sets the i'th element of the vector to value x.
func (p *IntVector) Set(i int, x int) { (*p)[i] = x }
-
// Last returns the element in the vector of highest index.
func (p *IntVector) Last() int { return (*p)[len(*p)-1] }
-
// Copy makes a copy of the vector and returns it.
func (p *IntVector) Copy() IntVector {
arr := make(IntVector, len(*p))
return arr
}
-
// Insert inserts into the vector an element of value x before
// the current element at index i.
func (p *IntVector) Insert(i int, x int) {
(*p)[i] = x
}
-
// Delete deletes the i'th element of the vector. The gap is closed so the old
// element at index i+1 has index i afterwards.
func (p *IntVector) Delete(i int) {
*p = a[0 : n-1]
}
-
// InsertVector inserts into the vector the contents of the vector
// x such that the 0th element of x appears at index i after insertion.
func (p *IntVector) InsertVector(i int, x *IntVector) {
copy((*p)[i:i+len(b)], b)
}
-
// Cut deletes elements i through j-1, inclusive.
func (p *IntVector) Cut(i, j int) {
a := *p
*p = a[0:m]
}
-
// Slice returns a new sub-vector by slicing the old one to extract slice [i:j].
// The elements are copied. The original vector is unchanged.
func (p *IntVector) Slice(i, j int) *IntVector {
return &s
}
-
// Convenience wrappers
// Push appends x to the end of the vector.
func (p *IntVector) Push(x int) { p.Insert(len(*p), x) }
-
// Pop deletes the last element of the vector.
func (p *IntVector) Pop() int {
a := *p
return x
}
-
// AppendVector appends the entire vector x to the end of this vector.
func (p *IntVector) AppendVector(x *IntVector) { p.InsertVector(len(*p), x) }
-
// Swap exchanges the elements at indexes i and j.
func (p *IntVector) Swap(i, j int) {
a := *p
a[i], a[j] = a[j], a[i]
}
-
// Do calls function f for each element of the vector, in order.
// The behavior of Do is undefined if f changes *p.
func (p *IntVector) Do(f func(elem int)) {
import "testing"
-
func TestIntZeroLen(t *testing.T) {
a := new(IntVector)
if a.Len() != 0 {
}
}
-
func TestIntResize(t *testing.T) {
var a IntVector
checkSize(t, &a, 0, 0)
checkSize(t, a.Resize(11, 100), 11, 100)
}
-
func TestIntResize2(t *testing.T) {
var a IntVector
checkSize(t, &a, 0, 0)
}
}
-
func checkIntZero(t *testing.T, a *IntVector, i int) {
for j := 0; j < i; j++ {
if a.At(j) == intzero {
}
}
-
func TestIntTrailingElements(t *testing.T) {
var a IntVector
for i := 0; i < 10; i++ {
checkIntZero(t, &a, 5)
}
-
func TestIntAccess(t *testing.T) {
const n = 100
var a IntVector
}
}
-
func TestIntInsertDeleteClear(t *testing.T) {
const n = 100
var a IntVector
}
}
-
func verify_sliceInt(t *testing.T, x *IntVector, elt, i, j int) {
for k := i; k < j; k++ {
if elem2IntValue(x.At(k)) != int2IntValue(elt) {
}
}
-
func verify_patternInt(t *testing.T, x *IntVector, a, b, c int) {
n := a + b + c
if x.Len() != n {
verify_sliceInt(t, x, 0, a+b, n)
}
-
func make_vectorInt(elt, len int) *IntVector {
x := new(IntVector).Resize(len, 0)
for i := 0; i < len; i++ {
return x
}
-
func TestIntInsertVector(t *testing.T) {
// 1
a := make_vectorInt(0, 0)
verify_patternInt(t, a, 8, 1000, 2)
}
-
func TestIntDo(t *testing.T) {
const n = 25
const salt = 17
}
-
func TestIntVectorCopy(t *testing.T) {
// verify Copy() returns a copy, not simply a slice of the original vector
const Len = 10
package vector
-
import (
"fmt"
"sort"
strzero string
)
-
func int2Value(x int) int { return x }
func int2IntValue(x int) int { return x }
func int2StrValue(x int) string { return string(x) }
-
func elem2Value(x interface{}) int { return x.(int) }
func elem2IntValue(x int) int { return x }
func elem2StrValue(x string) string { return x }
-
func intf2Value(x interface{}) int { return x.(int) }
func intf2IntValue(x interface{}) int { return x.(int) }
func intf2StrValue(x interface{}) string { return x.(string) }
-
type VectorInterface interface {
Len() int
Cap() int
}
-
func checkSize(t *testing.T, v VectorInterface, len, cap int) {
if v.Len() != len {
t.Errorf("%T expected len = %d; found %d", v, len, v.Len())
}
}
-
func val(i int) int { return i*991 - 1234 }
-
func TestSorting(t *testing.T) {
const n = 100
}
}
-
func tname(x interface{}) string { return fmt.Sprintf("%T: ", x) }
"testing"
)
-
const memTestN = 1000000
-
func s(n uint64) string {
str := fmt.Sprintf("%d", n)
lens := len(str)
return strings.Join(a, " ")
}
-
func TestVectorNums(t *testing.T) {
if testing.Short() {
return
t.Logf("%T.Push(%#v), n = %s: Alloc/n = %.2f\n", v, c, s(memTestN), float64(n)/memTestN)
}
-
func TestIntVectorNums(t *testing.T) {
if testing.Short() {
return
t.Logf("%T.Push(%#v), n = %s: Alloc/n = %.2f\n", v, c, s(memTestN), float64(n)/memTestN)
}
-
func TestStringVectorNums(t *testing.T) {
if testing.Short() {
return
t.Logf("%T.Push(%#v), n = %s: Alloc/n = %.2f\n", v, c, s(memTestN), float64(n)/memTestN)
}
-
func BenchmarkVectorNums(b *testing.B) {
c := int(0)
var v Vector
}
}
-
func BenchmarkIntVectorNums(b *testing.B) {
c := int(0)
var v IntVector
}
}
-
func BenchmarkStringVectorNums(b *testing.B) {
c := ""
var v StringVector
package vector
-
func (p *StringVector) realloc(length, capacity int) (b []string) {
if capacity < initialSize {
capacity = initialSize
return
}
-
// Insert n elements at position i.
func (p *StringVector) Expand(i, n int) {
a := *p
*p = a
}
-
// Insert n elements at the end of a vector.
func (p *StringVector) Extend(n int) { p.Expand(len(*p), n) }
-
// Resize changes the length and capacity of a vector.
// If the new length is shorter than the current length, Resize discards
// trailing elements. If the new length is longer than the current length,
return p
}
-
// Len returns the number of elements in the vector.
// Same as len(*p).
func (p *StringVector) Len() int { return len(*p) }
-
// Cap returns the capacity of the vector; that is, the
// maximum length the vector can grow without resizing.
// Same as cap(*p).
func (p *StringVector) Cap() int { return cap(*p) }
-
// At returns the i'th element of the vector.
func (p *StringVector) At(i int) string { return (*p)[i] }
-
// Set sets the i'th element of the vector to value x.
func (p *StringVector) Set(i int, x string) { (*p)[i] = x }
-
// Last returns the element in the vector of highest index.
func (p *StringVector) Last() string { return (*p)[len(*p)-1] }
-
// Copy makes a copy of the vector and returns it.
func (p *StringVector) Copy() StringVector {
arr := make(StringVector, len(*p))
return arr
}
-
// Insert inserts into the vector an element of value x before
// the current element at index i.
func (p *StringVector) Insert(i int, x string) {
(*p)[i] = x
}
-
// Delete deletes the i'th element of the vector. The gap is closed so the old
// element at index i+1 has index i afterwards.
func (p *StringVector) Delete(i int) {
*p = a[0 : n-1]
}
-
// InsertVector inserts into the vector the contents of the vector
// x such that the 0th element of x appears at index i after insertion.
func (p *StringVector) InsertVector(i int, x *StringVector) {
copy((*p)[i:i+len(b)], b)
}
-
// Cut deletes elements i through j-1, inclusive.
func (p *StringVector) Cut(i, j int) {
a := *p
*p = a[0:m]
}
-
// Slice returns a new sub-vector by slicing the old one to extract slice [i:j].
// The elements are copied. The original vector is unchanged.
func (p *StringVector) Slice(i, j int) *StringVector {
return &s
}
-
// Convenience wrappers
// Push appends x to the end of the vector.
func (p *StringVector) Push(x string) { p.Insert(len(*p), x) }
-
// Pop deletes the last element of the vector.
func (p *StringVector) Pop() string {
a := *p
return x
}
-
// AppendVector appends the entire vector x to the end of this vector.
func (p *StringVector) AppendVector(x *StringVector) { p.InsertVector(len(*p), x) }
-
// Swap exchanges the elements at indexes i and j.
func (p *StringVector) Swap(i, j int) {
a := *p
a[i], a[j] = a[j], a[i]
}
-
// Do calls function f for each element of the vector, in order.
// The behavior of Do is undefined if f changes *p.
func (p *StringVector) Do(f func(elem string)) {
import "testing"
-
func TestStrZeroLen(t *testing.T) {
a := new(StringVector)
if a.Len() != 0 {
}
}
-
func TestStrResize(t *testing.T) {
var a StringVector
checkSize(t, &a, 0, 0)
checkSize(t, a.Resize(11, 100), 11, 100)
}
-
func TestStrResize2(t *testing.T) {
var a StringVector
checkSize(t, &a, 0, 0)
}
}
-
func checkStrZero(t *testing.T, a *StringVector, i int) {
for j := 0; j < i; j++ {
if a.At(j) == strzero {
}
}
-
func TestStrTrailingElements(t *testing.T) {
var a StringVector
for i := 0; i < 10; i++ {
checkStrZero(t, &a, 5)
}
-
func TestStrAccess(t *testing.T) {
const n = 100
var a StringVector
}
}
-
func TestStrInsertDeleteClear(t *testing.T) {
const n = 100
var a StringVector
}
}
-
func verify_sliceStr(t *testing.T, x *StringVector, elt, i, j int) {
for k := i; k < j; k++ {
if elem2StrValue(x.At(k)) != int2StrValue(elt) {
}
}
-
func verify_patternStr(t *testing.T, x *StringVector, a, b, c int) {
n := a + b + c
if x.Len() != n {
verify_sliceStr(t, x, 0, a+b, n)
}
-
func make_vectorStr(elt, len int) *StringVector {
x := new(StringVector).Resize(len, 0)
for i := 0; i < len; i++ {
return x
}
-
func TestStrInsertVector(t *testing.T) {
// 1
a := make_vectorStr(0, 0)
verify_patternStr(t, a, 8, 1000, 2)
}
-
func TestStrDo(t *testing.T) {
const n = 25
const salt = 17
}
-
func TestStrVectorCopy(t *testing.T) {
// verify Copy() returns a copy, not simply a slice of the original vector
const Len = 10
package vector
-
func (p *Vector) realloc(length, capacity int) (b []interface{}) {
if capacity < initialSize {
capacity = initialSize
return
}
-
// Insert n elements at position i.
func (p *Vector) Expand(i, n int) {
a := *p
*p = a
}
-
// Insert n elements at the end of a vector.
func (p *Vector) Extend(n int) { p.Expand(len(*p), n) }
-
// Resize changes the length and capacity of a vector.
// If the new length is shorter than the current length, Resize discards
// trailing elements. If the new length is longer than the current length,
return p
}
-
// Len returns the number of elements in the vector.
// Same as len(*p).
func (p *Vector) Len() int { return len(*p) }
-
// Cap returns the capacity of the vector; that is, the
// maximum length the vector can grow without resizing.
// Same as cap(*p).
func (p *Vector) Cap() int { return cap(*p) }
-
// At returns the i'th element of the vector.
func (p *Vector) At(i int) interface{} { return (*p)[i] }
-
// Set sets the i'th element of the vector to value x.
func (p *Vector) Set(i int, x interface{}) { (*p)[i] = x }
-
// Last returns the element in the vector of highest index.
func (p *Vector) Last() interface{} { return (*p)[len(*p)-1] }
-
// Copy makes a copy of the vector and returns it.
func (p *Vector) Copy() Vector {
arr := make(Vector, len(*p))
return arr
}
-
// Insert inserts into the vector an element of value x before
// the current element at index i.
func (p *Vector) Insert(i int, x interface{}) {
(*p)[i] = x
}
-
// Delete deletes the i'th element of the vector. The gap is closed so the old
// element at index i+1 has index i afterwards.
func (p *Vector) Delete(i int) {
*p = a[0 : n-1]
}
-
// InsertVector inserts into the vector the contents of the vector
// x such that the 0th element of x appears at index i after insertion.
func (p *Vector) InsertVector(i int, x *Vector) {
copy((*p)[i:i+len(b)], b)
}
-
// Cut deletes elements i through j-1, inclusive.
func (p *Vector) Cut(i, j int) {
a := *p
*p = a[0:m]
}
-
// Slice returns a new sub-vector by slicing the old one to extract slice [i:j].
// The elements are copied. The original vector is unchanged.
func (p *Vector) Slice(i, j int) *Vector {
return &s
}
-
// Convenience wrappers
// Push appends x to the end of the vector.
func (p *Vector) Push(x interface{}) { p.Insert(len(*p), x) }
-
// Pop deletes the last element of the vector.
func (p *Vector) Pop() interface{} {
a := *p
return x
}
-
// AppendVector appends the entire vector x to the end of this vector.
func (p *Vector) AppendVector(x *Vector) { p.InsertVector(len(*p), x) }
-
// Swap exchanges the elements at indexes i and j.
func (p *Vector) Swap(i, j int) {
a := *p
a[i], a[j] = a[j], a[i]
}
-
// Do calls function f for each element of the vector, in order.
// The behavior of Do is undefined if f changes *p.
func (p *Vector) Do(f func(elem interface{})) {
import "testing"
-
func TestZeroLen(t *testing.T) {
a := new(Vector)
if a.Len() != 0 {
}
}
-
func TestResize(t *testing.T) {
var a Vector
checkSize(t, &a, 0, 0)
checkSize(t, a.Resize(11, 100), 11, 100)
}
-
func TestResize2(t *testing.T) {
var a Vector
checkSize(t, &a, 0, 0)
}
}
-
func checkZero(t *testing.T, a *Vector, i int) {
for j := 0; j < i; j++ {
if a.At(j) == zero {
}
}
-
func TestTrailingElements(t *testing.T) {
var a Vector
for i := 0; i < 10; i++ {
checkZero(t, &a, 5)
}
-
func TestAccess(t *testing.T) {
const n = 100
var a Vector
}
}
-
func TestInsertDeleteClear(t *testing.T) {
const n = 100
var a Vector
}
}
-
func verify_slice(t *testing.T, x *Vector, elt, i, j int) {
for k := i; k < j; k++ {
if elem2Value(x.At(k)) != int2Value(elt) {
}
}
-
func verify_pattern(t *testing.T, x *Vector, a, b, c int) {
n := a + b + c
if x.Len() != n {
verify_slice(t, x, 0, a+b, n)
}
-
func make_vector(elt, len int) *Vector {
x := new(Vector).Resize(len, 0)
for i := 0; i < len; i++ {
return x
}
-
func TestInsertVector(t *testing.T) {
// 1
a := make_vector(0, 0)
verify_pattern(t, a, 8, 1000, 2)
}
-
func TestDo(t *testing.T) {
const n = 25
const salt = 17
}
-
func TestVectorCopy(t *testing.T) {
// verify Copy() returns a copy, not simply a slice of the original vector
const Len = 10
ocspUnauthorized = 5
)
-
type certID struct {
HashAlgorithm pkix.AlgorithmIdentifier
NameHash []byte
panic("shouldn't be called")
}
-
func testCanonicalText(t *testing.T, input, expected string) {
r := recordingHash{bytes.NewBuffer(nil)}
c := NewCanonicalTextHash(r)
return b
}
-
var encryptedKeyPub = rsa.PublicKey{
E: 65537,
N: bigFromBase10("115804063926007623305902631768113868327816898845124614648849934718568541074358183759250136204762053879858102352159854352727097033322663029387610959884180306668628526686121021235757016368038585212410610742029286439607686208110250133174279811431933746643015923132833417396844716207301518956640020862630546868823"),
}
}
-
var iteratedTests = []struct {
in, out string
}{
}
}
-
var parseTests = []struct {
spec, in, out string
}{
}
}
-
func TestSerialize(t *testing.T) {
buf := bytes.NewBuffer(nil)
key := make([]byte, 16)
}
}
-
func testServerScript(t *testing.T, name string, serverScript [][]byte, config *Config) {
c, s := net.Pipe()
srv := Server(s, config)
return preMasterSecret, ckx, nil
}
-
// md5SHA1Hash implements TLS 1.0's hybrid hash function which consists of the
// concatenation of an MD5 and SHA1 hash.
func md5SHA1Hash(slices ...[]byte) []byte {
return "certificate is valid for " + valid + ", not " + h.Host
}
-
// UnknownAuthorityError results when the certificate issuer is unknown
type UnknownAuthorityError struct {
cert *Certificate
panic("unreachable")
}
-
// parseField parses the next field in the record. The read field is
// located in r.field. Delim is the first character not part of the field
// (r.Comma or '\n').
return d
}
-
func TestTypedefsELF(t *testing.T) { testTypedefs(t, elfData(t, "testdata/typedef.elf")) }
func TestTypedefsMachO(t *testing.T) {
// Magic number for the elf trampoline, chosen wisely to be an immediate value.
const ARM_MAGIC_TRAMP_NUMBER = 0x5c000003
-
// ELF32 File header.
type Header32 struct {
Ident [EI_NIDENT]byte /* File identification. */
func R_TYPE64(info uint64) uint32 { return uint32(info) }
func R_INFO(sym, typ uint32) uint64 { return uint64(sym)<<32 | uint64(typ) }
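// exampleRelocInfo is a hypothetical sketch, not part of the original source:
// R_INFO packs a symbol index and relocation type into one uint64, and
// R_TYPE64 recovers the type, so the two round-trip.
func exampleRelocInfo() {
	info := R_INFO(12, 7) // symbol index 12, relocation type 7
	if R_TYPE64(info) != 7 {
		panic("relocation type lost in round trip")
	}
}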
-
// ELF64 symbol table entries.
type Sym64 struct {
Name uint32 /* String table index of name. */
Characteristics uint32
}
-
type Section struct {
SectionHeader
// Open returns a new ReadSeeker reading the PE section.
func (s *Section) Open() io.ReadSeeker { return io.NewSectionReader(s.sr, 0, 1<<63-1) }
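// readTextSection is a hypothetical sketch, not part of the original source.
// It assumes this package's File.Section lookup and "io/ioutil" and "os"
// imports, none of which are shown in this excerpt.
func readTextSection(f *File) ([]byte, os.Error) {
	s := f.Section(".text")
	if s == nil {
		return nil, os.NewError("no .text section")
	}
	return ioutil.ReadAll(s.Open())
}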
-
type FormatError struct {
off int64
msg string
"utf8"
)
-
// ----------------------------------------------------------------------------
// Internal representation
Grammar map[string]*Production
)
-
func (x Alternative) Pos() token.Pos { return x[0].Pos() } // the parser always generates non-empty Alternatives
func (x Sequence) Pos() token.Pos { return x[0].Pos() } // the parser always generates non-empty Sequences
func (x *Name) Pos() token.Pos { return x.StringPos }
func (x *Bad) Pos() token.Pos { return x.TokPos }
func (x *Production) Pos() token.Pos { return x.Name.Pos() }
-
// ----------------------------------------------------------------------------
// Grammar verification
return !unicode.IsUpper(ch)
}
-
type verifier struct {
fset *token.FileSet
scanner.ErrorVector
grammar Grammar
}
-
func (v *verifier) error(pos token.Pos, msg string) {
v.Error(v.fset.Position(pos), msg)
}
-
func (v *verifier) push(prod *Production) {
name := prod.Name.String
if _, found := v.reached[name]; !found {
}
}
-
func (v *verifier) verifyChar(x *Token) int {
s := x.String
if utf8.RuneCountInString(s) != 1 {
return ch
}
-
func (v *verifier) verifyExpr(expr Expression, lexical bool) {
switch x := expr.(type) {
case nil:
}
}
-
func (v *verifier) verify(fset *token.FileSet, grammar Grammar, start string) {
// find root production
root, found := grammar[start]
}
}
-
// Verify checks that:
// - all productions used are defined
// - all productions defined are used when beginning at start
"testing"
)
-
var fset = token.NewFileSet()
-
var goodGrammars = []string{
`Program = .`,
ti = "b" .`,
}
-
var badGrammars = []string{
`Program = | .`,
`Program = | b .`,
`Program = {} .`,
}
-
func checkGood(t *testing.T, filename string, src []byte) {
grammar, err := Parse(fset, filename, src)
if err != nil {
}
}
-
func checkBad(t *testing.T, filename string, src []byte) {
_, err := Parse(fset, filename, src)
if err == nil {
}
}
-
func TestGrammars(t *testing.T) {
for _, src := range goodGrammars {
checkGood(t, "", []byte(src))
}
}
-
var files = []string{
// TODO(gri) add some test files
}
-
func TestFiles(t *testing.T) {
for _, filename := range files {
src, err := ioutil.ReadFile(filename)
"strconv"
)
-
type parser struct {
fset *token.FileSet
scanner.ErrorVector
lit string // token literal
}
-
func (p *parser) next() {
p.pos, p.tok, p.lit = p.scanner.Scan()
if p.tok.IsKeyword() {
}
}
-
func (p *parser) error(pos token.Pos, msg string) {
p.Error(p.fset.Position(pos), msg)
}
-
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
p.error(pos, msg)
}
-
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
return pos
}
-
func (p *parser) parseIdentifier() *Name {
pos := p.pos
name := p.lit
return &Name{pos, name}
}
-
func (p *parser) parseToken() *Token {
pos := p.pos
value := ""
return &Token{pos, value}
}
-
// ParseTerm returns nil if no term was found.
func (p *parser) parseTerm() (x Expression) {
pos := p.pos
return x
}
-
func (p *parser) parseSequence() Expression {
var list Sequence
return list
}
-
func (p *parser) parseExpression() Expression {
var list Alternative
return list
}
-
func (p *parser) parseProduction() *Production {
name := p.parseIdentifier()
p.expect(token.ASSIGN)
return &Production{name, expr}
}
-
func (p *parser) parse(fset *token.FileSet, filename string, src []byte) Grammar {
// initialize parser
p.fset = fset
return grammar
}
-
// Parse parses a set of EBNF productions from source src.
// It returns a set of productions. Errors are reported
// for incorrect syntax and if a production is declared
{"fooba", "MZXW6YTB"},
{"foobar", "MZXW6YTBOI======"},
-
// Wikipedia examples, converted to base32
{"sure.", "ON2XEZJO"},
{"sure", "ON2XEZI="},
return "invalid hex char: " + strconv.Itoa(int(e))
}
-
func DecodedLen(x int) int { return x / 2 }
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
}
}
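// decodeHex is a hypothetical sketch, not part of the original source: size
// the destination with DecodedLen, then let Decode report how many bytes were
// actually written. The (int, os.Error) return of Decode is assumed from the
// doc comment above.
func decodeHex(src []byte) ([]byte, os.Error) {
	dst := make([]byte, DecodedLen(len(src)))
	n, err := Decode(dst, src)
	return dst[0:n], err
}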
-
func TestNoExistBinary(t *testing.T) {
// Can't run a non-existent binary
err := Command("/no-exist-binary").Run()
"runtime"
)
-
// ----------------------------------------------------------------------------
// Format representation
//
type Formatter func(state *State, value interface{}, ruleName string) bool
-
// A FormatterMap is a set of custom formatters.
// It maps a rule name to a formatter function.
//
type FormatterMap map[string]Formatter
-
// A parsed format expression is built from the following nodes.
//
type (
}
)
-
// A Format is the result of parsing a format specification.
// The format may be applied repeatedly to format values.
//
type Format map[string]expr
-
// ----------------------------------------------------------------------------
// Formatting
Copy() Environment
}
-
// State represents the current formatting state.
// It is provided as argument to custom formatters.
//
separator expr // possibly nil
}
-
func newState(fmt Format, env Environment, errors chan os.Error) *State {
s := new(State)
s.fmt = fmt
return s
}
-
// Env returns the environment passed to Format.Apply.
func (s *State) Env() interface{} { return s.env }
-
// LinePos returns the position of the current line beginning
// in the state's output buffer. Line numbers start at 1.
//
func (s *State) LinePos() token.Position { return s.linePos }
-
// Pos returns the position of the next byte to be written to the
// output buffer. Line numbers start at 1.
//
return token.Position{Line: s.linePos.Line, Column: offs - s.linePos.Offset, Offset: offs}
}
-
// Write writes data to the output buffer, inserting the indentation
// string after each newline or form feed character. It cannot return an error.
//
return n + n3, nil
}
-
type checkpoint struct {
env Environment
hasOutput bool
linePos token.Position
}
-
func (s *State) save() checkpoint {
saved := checkpoint{nil, s.hasOutput, s.output.Len(), s.linePos}
if s.env != nil {
return saved
}
-
func (s *State) restore(m checkpoint) {
s.env = m.env
s.output.Truncate(m.outputLen)
}
-
func (s *State) error(msg string) {
s.errors <- os.NewError(msg)
runtime.Goexit()
}
-
// TODO At the moment, unnamed types are simply mapped to the default
// names below. For instance, all unnamed arrays are mapped to
// 'array' which is not really sufficient. Eventually one may want
return nil
}
-
// eval applies a format expression fexpr to a value. If the expression
// evaluates internally to a non-nil []byte, that slice is appended to
// the state's output buffer and eval returns true. Otherwise, eval
return false
}
-
// Eval formats each argument according to the format
// f and returns the resulting []byte and os.Error. If
// an error occurred, the []byte contains the partially
return s.output.Bytes(), err
}
-
// ----------------------------------------------------------------------------
// Convenience functions
return w.Write(data)
}
-
// Print formats each argument according to the format f
// and writes to standard output. The result is the total
// number of bytes written and an os.Error, if any.
return f.Fprint(os.Stdout, nil, args...)
}
-
// Sprint formats each argument according to the format f
// and returns the resulting string. If an error occurs
// during formatting, the result string contains the
"go/token"
)
-
var fset = token.NewFileSet()
-
func parse(t *testing.T, form string, fmap FormatterMap) Format {
f, err := Parse(fset, "", []byte(form), fmap)
if err != nil {
return f
}
-
func verify(t *testing.T, f Format, expected string, args ...interface{}) {
if f == nil {
return // allow other tests to run
}
}
-
func formatter(s *State, value interface{}, rule_name string) bool {
switch rule_name {
case "/":
return false
}
-
func TestCustomFormatters(t *testing.T) {
fmap0 := FormatterMap{"/": formatter}
fmap1 := FormatterMap{"int": formatter, "blank": formatter, "nil": formatter}
// TODO needs more tests
}
-
// ----------------------------------------------------------------------------
// Formatting of basic and simple composite types
}
}
-
func TestBasicTypes(t *testing.T) {
check(t, ``, ``)
check(t, `bool=":%v"`, `:true:false`, true, false)
check(t, `float64="%g"`, fs, float64(f))
}
-
func TestArrayTypes(t *testing.T) {
var a0 [10]int
check(t, `array="array";`, `array`, a0)
check(t, `array={* / ", "}; interface=*; string="bar"; default="%v";`, `42, bar, 3.14`, a2)
}
-
func TestChanTypes(t *testing.T) {
var c0 chan int
check(t, `chan="chan"`, `chan`, c0)
// check(t, `chan=*`, `42`, c1); // reflection support for chans incomplete
}
-
func TestFuncTypes(t *testing.T) {
var f0 func() int
check(t, `func="func"`, `func`, f0)
// check(t, `func=*`, `42`, f1); // reflection support for funcs incomplete
}
-
func TestMapTypes(t *testing.T) {
var m0 map[string]int
check(t, `map="map"`, `map`, m0)
// check(t, `map=*`, ``, m1); // reflection support for maps incomplete
}
-
func TestPointerTypes(t *testing.T) {
var p0 *int
check(t, `ptr="ptr"`, `ptr`, p0)
check(t, `ptr=*; int="%d"`, `99991`, p1)
}
-
func TestDefaultRule(t *testing.T) {
check(t, `default="%v"`, `42foo3.14`, 42, "foo", 3.14)
check(t, `default="%v"; int="%x"`, `abcdef`, 10, 11, 12, 13, 14, 15)
check(t, `default="%x"; int=@:default`, `abcdef`, 10, 11, 12, 13, 14, 15)
}
-
func TestGlobalSeparatorRule(t *testing.T) {
check(t, `int="%d"; / ="-"`, `1-2-3-4`, 1, 2, 3, 4)
check(t, `int="%x%x"; / ="*"`, `aa*aa`, 10, 10)
}
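// exampleSeparator is a hypothetical sketch, not part of the original tests:
// parse a format with a global separator rule and apply it with Sprint, whose
// (f Format) Sprint(args ...interface{}) string form is described elsewhere
// in this package.
func exampleSeparator() string {
	f, err := Parse(fset, "", []byte(`int="%d"; / ="-"`), nil)
	if err != nil {
		panic(err.String())
	}
	return f.Sprint(1, 2, 3) // "1-2-3", matching TestGlobalSeparatorRule above
}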
-
// ----------------------------------------------------------------------------
// Formatting of a struct
func TestStruct1(t *testing.T) { check(t, F1, "<42>", T1{42}) }
-
// ----------------------------------------------------------------------------
// Formatting of a struct with an optional field (ptr)
check(t, F2b, "fooempty", T2{"foo", nil})
}
-
// ----------------------------------------------------------------------------
// Formatting of a struct with a repetitive field (slice)
check(t, F3b, "bal: 2-3-5", T3{"bal", []int{2, 3, 5}})
}
-
// ----------------------------------------------------------------------------
// Formatting of a struct with alternative field
check(t, F4b, "<2, 3, 7>", T4{nil, []int{2, 3, 7}})
}
-
// ----------------------------------------------------------------------------
// Formatting a struct (documentation example)
check(t, FPoint, "---foo---{3, 0xf}", p)
}
-
// ----------------------------------------------------------------------------
// Formatting a slice (documentation example)
func TestSlice(t *testing.T) { check(t, FSlice, "10, 11, 101, 111", []int{2, 3, 5, 7}) }
-
// TODO add more tests
rules map[string]expr // RuleName -> Expression
}
-
func (p *parser) next() {
p.pos, p.tok, p.lit = p.scanner.Scan()
switch p.tok {
}
}
-
func (p *parser) init(fset *token.FileSet, filename string, src []byte) {
p.ErrorVector.Reset()
p.file = fset.AddFile(filename, fset.Base(), len(src))
p.rules = make(map[string]expr)
}
-
func (p *parser) error(pos token.Pos, msg string) {
p.Error(p.file.Position(pos), msg)
}
-
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
p.error(pos, msg)
}
-
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
return pos
}
-
func (p *parser) parseIdentifier() string {
name := p.lit
p.expect(token.IDENT)
return name
}
-
func (p *parser) parseTypeName() (string, bool) {
pos := p.pos
name, isIdent := p.parseIdentifier(), true
return name, isIdent
}
-
// Parses a rule name and returns it. If the rule name is
// a package-qualified type name, the package name is resolved.
// The 2nd result value is true iff the rule name consists of a
return name, isIdent
}
-
func (p *parser) parseString() string {
s := ""
if p.tok == token.STRING {
return s
}
-
func (p *parser) parseLiteral() literal {
s := []byte(p.parseString())
return lit
}
-
func (p *parser) parseField() expr {
var fname string
switch p.tok {
return &field{fname, ruleName}
}
-
func (p *parser) parseOperand() (x expr) {
switch p.tok {
case token.STRING:
return x
}
-
func (p *parser) parseSequence() expr {
var list vector.Vector
return seq
}
-
func (p *parser) parseExpression() expr {
var list vector.Vector
return alt
}
-
func (p *parser) parseFormat() {
for p.tok != token.EOF {
pos := p.pos
p.expect(token.EOF)
}
-
func remap(p *parser, name string) string {
i := strings.Index(name, ".")
if i >= 0 {
return name
}
-
// Parse parses a set of format productions from source src. Custom
// formatters may be provided via a map of formatter functions. If
// there are no errors, the result is a Format and the error is nil.
p[i], p[i+1], p[j], p[j+1] = p[j], p[j+1], p[i], p[i+1]
}
-
func checkUTF8(s string) os.Error {
for s != "" {
rune, size := utf8.DecodeRuneInString(s)
panic("unknown empty width arg")
}
-
func (i *Inst) String() string {
var b bytes.Buffer
dumpInst(&b, i)
jsGt = []byte(`\x3E`)
)
-
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
func JSEscape(w io.Writer, b []byte) {
last := 0
return fmt.Sprintf("({{with %s}} %s)", w.pipe, w.list)
}
-
// Parsing.
// New allocates a new template with the given name.
}
}
-
var setExecTests = []execTest{
{"empty", "", "", nil, true},
{"text", "some text", "some text", nil, true},
return string(v)
}
-
// All published variables.
var vars map[string]Var = make(map[string]Var)
var mutex sync.Mutex
}
}
-
const b32 uint32 = 1<<32 - 1
const b64 uint64 = 1<<64 - 1
}
}
-
// Check Formatter with Sprint, Sprintln, Sprintf
func TestFormatterPrintln(t *testing.T) {
f := F(1)
return s
}
-
// Get the i'th arg of the struct value.
// If the arg itself is an interface, return a value for
// the thing inside the interface, not the interface itself.
return !unicode.IsSpace(r)
}
-
// skipSpace provides Scan() methods the ability to skip space and newline characters
// in keeping with the current scanning mode set by format strings and Scan()/Scanln().
func (s *ss) SkipSpace() {
s.skipSpace(false)
}
-
// readRune is a structure to enable reading UTF-8 encoded code points
// from an io.Reader. It is used if the Reader given to the scanner does
// not already implement io.RuneReader.
return
}
-
var ssFree = newCache(func() interface{} { return new(ss) })
// Allocate a new ss struct or grab a cached one.
}
}
-
// token returns the next space-delimited string from the input. It
// skips white space. For Scanln, it stops at newlines. For Scan,
// newlines are treated as spaces.
"utf8"
)
-
// ----------------------------------------------------------------------------
// Interfaces
//
// That position information is needed to properly position comments
// when printing the construct.
-
// All node types implement the Node interface.
type Node interface {
Pos() token.Pos // position of first character belonging to the node
End() token.Pos // position of first character immediately after the node
}
-
// All expression nodes implement the Expr interface.
type Expr interface {
Node
exprNode()
}
-
// All statement nodes implement the Stmt interface.
type Stmt interface {
Node
stmtNode()
}
-
// All declaration nodes implement the Decl interface.
type Decl interface {
Node
declNode()
}
-
// ----------------------------------------------------------------------------
// Comments
Text string // comment text (excluding '\n' for //-style comments)
}
-
func (c *Comment) Pos() token.Pos { return c.Slash }
func (c *Comment) End() token.Pos { return token.Pos(int(c.Slash) + len(c.Text)) }
-
// A CommentGroup represents a sequence of comments
// with no other tokens and no empty lines between.
//
List []*Comment // len(List) > 0
}
-
func (g *CommentGroup) Pos() token.Pos { return g.List[0].Pos() }
func (g *CommentGroup) End() token.Pos { return g.List[len(g.List)-1].End() }
-
// ----------------------------------------------------------------------------
// Expressions and types
Comment *CommentGroup // line comments; or nil
}
-
func (f *Field) Pos() token.Pos {
if len(f.Names) > 0 {
return f.Names[0].Pos()
return f.Type.Pos()
}
-
func (f *Field) End() token.Pos {
if f.Tag != nil {
return f.Tag.End()
return f.Type.End()
}
-
// A FieldList represents a list of Fields, enclosed by parentheses or braces.
type FieldList struct {
Opening token.Pos // position of opening parenthesis/brace, if any
Closing token.Pos // position of closing parenthesis/brace, if any
}
-
func (f *FieldList) Pos() token.Pos {
if f.Opening.IsValid() {
return f.Opening
return token.NoPos
}
-
func (f *FieldList) End() token.Pos {
if f.Closing.IsValid() {
return f.Closing + 1
return token.NoPos
}
-
// NumFields returns the number of (named and anonymous) fields in a FieldList.
func (f *FieldList) NumFields() int {
n := 0
return n
}
-
// An expression is represented by a tree consisting of one
// or more of the following concrete expression nodes.
//
}
)
-
// The direction of a channel type is indicated by one
// of the following constants.
//
RECV
)
-
// A type is represented by a tree consisting of one
// or more of the following type-specific expression
// nodes.
}
)
-
// Pos and End implementations for expression/type nodes.
//
func (x *BadExpr) Pos() token.Pos { return x.From }
func (x *MapType) Pos() token.Pos { return x.Map }
func (x *ChanType) Pos() token.Pos { return x.Begin }
-
func (x *BadExpr) End() token.Pos { return x.To }
func (x *Ident) End() token.Pos { return token.Pos(int(x.NamePos) + len(x.Name)) }
func (x *Ellipsis) End() token.Pos {
func (x *MapType) End() token.Pos { return x.Value.End() }
func (x *ChanType) End() token.Pos { return x.Value.End() }
-
// exprNode() ensures that only expression/type nodes can be
// assigned to an ExprNode.
//
func (x *MapType) exprNode() {}
func (x *ChanType) exprNode() {}
-
// ----------------------------------------------------------------------------
// Convenience functions for Idents
//
func NewIdent(name string) *Ident { return &Ident{noPos, name, nil} }
-
// IsExported returns whether name is an exported Go symbol
// (i.e., whether it begins with an uppercase letter).
//
return unicode.IsUpper(ch)
}
-
// IsExported returns whether id is an exported Go symbol
// (i.e., whether it begins with an uppercase letter).
//
func (id *Ident) IsExported() bool { return IsExported(id.Name) }
-
func (id *Ident) String() string {
if id != nil {
return id.Name
return "<nil>"
}
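// exampleIsExported is a hypothetical sketch, not part of the original source:
// both forms only inspect the first letter of the name.
func exampleIsExported() {
	_ = IsExported("Reader")                   // true
	_ = IsExported("reader")                   // false
	_ = (&Ident{Name: "Println"}).IsExported() // true, via the method form
}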
-
// ----------------------------------------------------------------------------
// Statements
}
)
-
// Pos and End implementations for statement nodes.
//
func (s *BadStmt) Pos() token.Pos { return s.From }
func (s *ForStmt) Pos() token.Pos { return s.For }
func (s *RangeStmt) Pos() token.Pos { return s.For }
-
func (s *BadStmt) End() token.Pos { return s.To }
func (s *DeclStmt) End() token.Pos { return s.Decl.End() }
func (s *EmptyStmt) End() token.Pos {
func (s *ForStmt) End() token.Pos { return s.Body.End() }
func (s *RangeStmt) End() token.Pos { return s.Body.End() }
-
// stmtNode() ensures that only statement nodes can be
// assigned to a StmtNode.
//
func (s *ForStmt) stmtNode() {}
func (s *RangeStmt) stmtNode() {}
-
// ----------------------------------------------------------------------------
// Declarations
}
)
-
// Pos and End implementations for spec nodes.
//
func (s *ImportSpec) Pos() token.Pos {
func (s *ValueSpec) Pos() token.Pos { return s.Names[0].Pos() }
func (s *TypeSpec) Pos() token.Pos { return s.Name.Pos() }
-
func (s *ImportSpec) End() token.Pos { return s.Path.End() }
func (s *ValueSpec) End() token.Pos {
if n := len(s.Values); n > 0 {
}
func (s *TypeSpec) End() token.Pos { return s.Type.End() }
-
// specNode() ensures that only spec nodes can be
// assigned to a Spec.
//
func (s *ValueSpec) specNode() {}
func (s *TypeSpec) specNode() {}
-
// A declaration is represented by one of the following declaration nodes.
//
type (
}
)
-
// Pos and End implementations for declaration nodes.
//
func (d *BadDecl) Pos() token.Pos { return d.From }
func (d *GenDecl) Pos() token.Pos { return d.TokPos }
func (d *FuncDecl) Pos() token.Pos { return d.Type.Pos() }
-
func (d *BadDecl) End() token.Pos { return d.To }
func (d *GenDecl) End() token.Pos {
if d.Rparen.IsValid() {
return d.Type.End()
}
-
// declNode() ensures that only declaration nodes can be
// assigned to a DeclNode.
//
func (d *GenDecl) declNode() {}
func (d *FuncDecl) declNode() {}
-
// ----------------------------------------------------------------------------
// Files and packages
Comments []*CommentGroup // list of all comments in the source file
}
-
func (f *File) Pos() token.Pos { return f.Package }
func (f *File) End() token.Pos {
if n := len(f.Decls); n > 0 {
return f.Name.End()
}
-
// A Package node represents a set of source files
// collectively building a Go package.
//
Files map[string]*File // Go source files by filename
}
-
func (p *Package) Pos() token.Pos { return token.NoPos }
func (p *Package) End() token.Pos { return token.NoPos }
return list[0:j]
}
-
// fieldName assumes that x is the type of an anonymous field and
// returns the corresponding field name. If x is not an acceptable
// anonymous field, the result is nil.
return nil
}
-
func fieldListExports(fields *FieldList) (removedFields bool) {
if fields == nil {
return
return
}
-
func paramListExports(fields *FieldList) {
if fields == nil {
return
}
}
-
func typeExports(typ Expr) {
switch t := typ.(type) {
case *ArrayType:
}
}
-
func specExports(spec Spec) bool {
switch s := spec.(type) {
case *ValueSpec:
return false
}
-
func specListExports(list []Spec) []Spec {
j := 0
for _, s := range list {
return list[0:j]
}
-
func declExports(decl Decl) bool {
switch d := decl.(type) {
case *GenDecl:
return false
}
-
// FileExports trims the AST for a Go source file in place such that only
// exported nodes remain: all top-level identifiers which are not exported
// and their associated information (such as type, initial value, or function
return j > 0
}
-
// PackageExports trims the AST for a Go package in place such that only
// exported nodes remain. The pkg.Files list is not changed, so that file
// names and top-level package comments don't get lost.
return hasExports
}
-
// ----------------------------------------------------------------------------
// General filtering
return list[0:j]
}
-
func filterFieldList(fields *FieldList, filter Filter) (removedFields bool) {
if fields == nil {
return false
return
}
-
func filterSpec(spec Spec, f Filter) bool {
switch s := spec.(type) {
case *ValueSpec:
return false
}
-
func filterSpecList(list []Spec, f Filter) []Spec {
j := 0
for _, s := range list {
return list[0:j]
}
-
// FilterDecl trims the AST for a Go declaration in place by removing
// all names (including struct field and interface method names, but
// not from parameter lists) that don't pass through the filter f.
return false
}
-
// FilterFile trims the AST for a Go file in place by removing all
// names from top-level declarations (including struct field and
// interface method names, but not from parameter lists) that don't
return j > 0
}
-
// FilterPackage trims the AST for a Go package in place by removing all
// names from top-level declarations (including struct field and
// interface method names, but not from parameter lists) that don't
return hasDecls
}
-
// ----------------------------------------------------------------------------
// Merging of package files
//
var separator = &Comment{noPos, "//"}
-
// MergePackageFiles creates a file AST by merging the ASTs of the
// files belonging to a package. The mode flags control merging behavior.
//
"reflect"
)
-
// A FieldFilter may be provided to Fprint to control the output.
type FieldFilter func(name string, value reflect.Value) bool
-
// NotNilFilter returns true for field values that are not nil;
// it returns false otherwise.
func NotNilFilter(_ string, v reflect.Value) bool {
return true
}
-
// Fprint prints the (sub-)tree starting at AST node x to w.
// If fset != nil, position information is interpreted relative
// to that file set. Otherwise positions are printed as integer
return
}
-
// Print prints x to standard output, skipping nil fields.
// Print(fset, x) is the same as Fprint(os.Stdout, fset, x, NotNilFilter).
func Print(fset *token.FileSet, x interface{}) (int, os.Error) {
return Fprint(os.Stdout, fset, x, NotNilFilter)
}
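// A hypothetical usage sketch, not part of the original source. Since go/ast
// cannot import go/parser, it is written as client code; the pre-Go1
// ParseExpr signature is taken from the parser documentation shown elsewhere
// in this document.
//
//	fset := token.NewFileSet()
//	x, err := parser.ParseExpr(fset, "example", "x * (y + z)")
//	if err == nil {
//		ast.Print(fset, x) // indented dump of the expression tree to stdout
//	}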
-
type printer struct {
output io.Writer
fset *token.FileSet
line int // current line number
}
-
var indent = []byte(". ")
func (p *printer) Write(data []byte) (n int, err os.Error) {
return
}
-
// localError wraps locally caught os.Errors so we can distinguish
// them from genuine panics which we don't want to return as errors.
type localError struct {
err os.Error
}
-
// printf is a convenience wrapper that takes care of print errors.
func (p *printer) printf(format string, args ...interface{}) {
n, err := fmt.Fprintf(p, format, args...)
}
}
-
// Implementation note: Print is written for AST nodes but could be
// used to print arbitrary data structures; such a version should
// probably be in a different package.
"testing"
)
-
var tests = []struct {
x interface{} // x is printed as s
s string
3 }`},
}
-
// Split s into lines, trim whitespace from all lines, and return
// the concatenated non-empty lines.
func trim(s string) string {
return strings.Join(lines[0:i], "\n")
}
-
func TestPrint(t *testing.T) {
var buf bytes.Buffer
for _, test := range tests {
"strconv"
)
-
type pkgBuilder struct {
scanner.ErrorVector
fset *token.FileSet
}
-
func (p *pkgBuilder) error(pos token.Pos, msg string) {
p.Error(p.fset.Position(pos), msg)
}
-
func (p *pkgBuilder) errorf(pos token.Pos, format string, args ...interface{}) {
p.error(pos, fmt.Sprintf(format, args...))
}
-
func (p *pkgBuilder) declare(scope, altScope *Scope, obj *Object) {
alt := scope.Insert(obj)
if alt == nil && altScope != nil {
}
}
-
func resolve(scope *Scope, ident *Ident) bool {
for ; scope != nil; scope = scope.Outer {
if obj := scope.Lookup(ident.Name); obj != nil {
return false
}
-
// An Importer resolves import paths to package Objects.
// The imports map records the packages already imported,
// indexed by package id (canonical import path).
// return pkg.
type Importer func(imports map[string]*Object, path string) (pkg *Object, err os.Error)
-
// NewPackage creates a new Package node from a set of File nodes. It resolves
// unresolved identifiers across files and updates each file's Unresolved list
// accordingly. If a non-nil importer and universe scope are provided, they are
"go/token"
)
-
// A Scope maintains the set of named language entities declared
// in the scope and a link to the immediately surrounding (outer)
// scope.
Objects map[string]*Object
}
-
// NewScope creates a new scope nested in the outer scope.
func NewScope(outer *Scope) *Scope {
const n = 4 // initial scope capacity
return &Scope{outer, make(map[string]*Object, n)}
}
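// exampleScopes is a hypothetical sketch, not part of the original source.
// The Var object kind is assumed from this package's ObjKind constants.
func exampleScopes() {
	universe := NewScope(nil)
	universe.Insert(NewObj(Var, "x"))
	inner := NewScope(universe)
	// Lookup ignores outer scopes, so resolution walks the Outer chain,
	// just like the resolve helper above.
	for s := inner; s != nil; s = s.Outer {
		if obj := s.Lookup("x"); obj != nil {
			_ = obj // found in the outermost scope
			break
		}
	}
}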
-
// Lookup returns the object with the given name if it is
// found in scope s, otherwise it returns nil. Outer scopes
// are ignored.
return s.Objects[name]
}
-
// Insert attempts to insert a named object obj into the scope s.
// If the scope already contains an object alt with the same name,
// Insert leaves the scope unchanged and returns alt. Otherwise
return
}
-
// Debugging support
func (s *Scope) String() string {
var buf bytes.Buffer
return buf.String()
}
-
// ----------------------------------------------------------------------------
// Objects
Type interface{} // place holder for type information; may be nil
}
-
// NewObj creates a new object of a given kind and name.
func NewObj(kind ObjKind, name string) *Object {
return &Object{Kind: kind, Name: name}
}
-
// Pos computes the source position of the declaration of an object name.
// The result may be an invalid position if it cannot be computed
// (obj.Decl may be nil or not correct).
return token.NoPos
}
-
// ObjKind describes what an object represents.
type ObjKind int
Lbl // label
)
-
var objKindStrings = [...]string{
Bad: "bad",
Pkg: "package",
Lbl: "label",
}
-
func (kind ObjKind) String() string { return objKindStrings[kind] }
Visit(node Node) (w Visitor)
}
-
// Helper functions for common node lists. They may be empty.
func walkIdentList(v Visitor, list []*Ident) {
}
}
-
func walkExprList(v Visitor, list []Expr) {
for _, x := range list {
Walk(v, x)
}
}
-
func walkStmtList(v Visitor, list []Stmt) {
for _, x := range list {
Walk(v, x)
}
}
-
func walkDeclList(v Visitor, list []Decl) {
for _, x := range list {
Walk(v, x)
}
}
-
// TODO(gri): Investigate if providing a closure to Walk leads to
// simpler use (and may help eliminate Inspect in turn).
v.Visit(nil)
}
-
type inspector func(Node) bool
func (f inspector) Visit(node Node) Visitor {
return nil
}
-
// Inspect traverses an AST in depth-first order: It starts by calling
// f(node); node must not be nil. If f returns true, Inspect invokes f
// for all the non-nil children of node, recursively.
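// countIdents is a hypothetical usage sketch, not part of the original source:
// Inspect visits nodes depth-first as long as the callback returns true.
func countIdents(f *File) int {
	n := 0
	Inspect(f, func(node Node) bool {
		if _, ok := node.(*Ident); ok {
			n++
		}
		return true // keep descending into children
	})
	return n
}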
"template" // for htmlEscape
)
-
func isWhitespace(ch byte) bool { return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r' }
-
func stripTrailingWhitespace(s string) string {
i := len(s)
for i > 0 && isWhitespace(s[i-1]) {
return s[0:i]
}
-
// CommentText returns the text of comment,
// with the comment markers - //, /*, and */ - removed.
func CommentText(comment *ast.CommentGroup) string {
return strings.Join(lines, "\n")
}
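// exampleCommentText is a hypothetical sketch, not part of the original
// source: the // markers are stripped and the remaining lines are joined.
func exampleCommentText() string {
	g := &ast.CommentGroup{List: []*ast.Comment{
		&ast.Comment{Text: "// Package doc extracts"},
		&ast.Comment{Text: "// source code documentation."},
	}}
	return CommentText(g)
}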
-
// Split bytes into lines.
func split(text []byte) [][]byte {
// count lines
return out
}
-
var (
ldquo = []byte("“")
rdquo = []byte("”")
template.HTMLEscape(w, s[last:])
}
-
const (
// Regexp for Go identifiers
identRx = `[a-zA-Z_][a-zA-Z_0-9]*` // TODO(gri) ASCII only for now - fix this
html_endpre = []byte("</pre>\n")
)
-
// Emphasize and escape a line of text for HTML. URLs are converted into links;
// if the URL also appears in the words map, the link is taken from the map (if
// the corresponding map value is the empty string, the URL is not converted
commentEscape(w, line, nice)
}
-
func indentLen(s []byte) int {
i := 0
for i < len(s) && (s[i] == ' ' || s[i] == '\t') {
return i
}
-
func isBlank(s []byte) bool { return len(s) == 0 || (len(s) == 1 && s[0] == '\n') }
-
func commonPrefix(a, b []byte) []byte {
i := 0
for i < len(a) && i < len(b) && a[i] == b[i] {
return a[0:i]
}
-
func unindent(block [][]byte) {
if len(block) == 0 {
return
}
}
-
// Convert comment text to formatted HTML.
// The comment was prepared by DocReader,
// so it is known not to have leading, trailing blank lines
"sort"
)
-
// ----------------------------------------------------------------------------
type typeDoc struct {
methods map[string]*ast.FuncDecl
}
-
// docReader accumulates documentation for a single package.
// It modifies the AST: Comments (declaration documentation)
// that have been collected by the DocReader are set to nil
bugs []*ast.CommentGroup
}
-
func (doc *docReader) init(pkgName string) {
doc.pkgName = pkgName
doc.types = make(map[string]*typeDoc)
doc.funcs = make(map[string]*ast.FuncDecl)
}
-
func (doc *docReader) addDoc(comments *ast.CommentGroup) {
if doc.doc == nil {
// common case: just one package comment
doc.doc = &ast.CommentGroup{list}
}
-
func (doc *docReader) addType(decl *ast.GenDecl) {
spec := decl.Specs[0].(*ast.TypeSpec)
typ := doc.lookupTypeDoc(spec.Name.Name)
}
}
-
func (doc *docReader) lookupTypeDoc(name string) *typeDoc {
if name == "" {
return nil // no type docs for anonymous types
return tdoc
}
-
func baseTypeName(typ ast.Expr) string {
switch t := typ.(type) {
case *ast.Ident:
return ""
}
-
func (doc *docReader) addValue(decl *ast.GenDecl) {
// determine if decl should be associated with a type
// Heuristic: For each typed entry, determine the type name, if any.
*values = append(*values, decl)
}
-
// Helper function to set the table entry for function f. Makes sure that
// at least one f with associated documentation is stored in table, if there
// are multiple f's with the same name.
table[name] = f
}
-
func (doc *docReader) addFunc(fun *ast.FuncDecl) {
name := fun.Name.Name
setFunc(doc.funcs, fun)
}
-
func (doc *docReader) addDecl(decl ast.Decl) {
switch d := decl.(type) {
case *ast.GenDecl:
}
}
-
func copyCommentList(list []*ast.Comment) []*ast.Comment {
return append([]*ast.Comment(nil), list...)
}
bug_content = regexp.MustCompile("[^ \n\r\t]+") // at least one non-whitespace char
)
-
// addFile adds the AST for a source file to the docReader.
// Adding the same AST multiple times is a no-op.
//
src.Comments = nil // consumed unassociated comments - remove from ast.File node
}
-
func NewFileDoc(file *ast.File) *PackageDoc {
var r docReader
r.init(file.Name.Name)
return r.newDoc("", nil)
}
-
func NewPackageDoc(pkg *ast.Package, importpath string) *PackageDoc {
var r docReader
r.init(pkg.Name)
return r.newDoc(importpath, filenames)
}
-
// ----------------------------------------------------------------------------
// Conversion to external representation
func (p sortValueDoc) Len() int { return len(p) }
func (p sortValueDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
func declName(d *ast.GenDecl) string {
if len(d.Specs) != 1 {
return ""
return ""
}
-
func (p sortValueDoc) Less(i, j int) bool {
// sort by name
// pull blocks (name = "") up to top
return p[i].order < p[j].order
}
-
func makeValueDocs(list []*ast.GenDecl, tok token.Token) []*ValueDoc {
d := make([]*ValueDoc, len(list)) // big enough in any case
n := 0
return d
}
-
// FuncDoc is the documentation for a func declaration,
// either a top-level function or a method function.
//
func (p sortFuncDoc) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p sortFuncDoc) Less(i, j int) bool { return p[i].Name < p[j].Name }
-
func makeFuncDocs(m map[string]*ast.FuncDecl) []*FuncDoc {
d := make([]*FuncDoc, len(m))
i := 0
return d
}
-
// TypeDoc is the documentation for a declared type.
// Consts and Vars are sorted lists of constants and variables of (mostly) that type.
// Factories is a sorted list of factory functions that return that type.
return p[i].order < p[j].order
}
-
// NOTE(rsc): This would appear not to be correct for type ( )
// blocks, but the doc extractor above has split them into
// individual declarations.
return d
}
-
func makeBugDocs(list []*ast.CommentGroup) []string {
d := make([]string, len(list))
for i, g := range list {
return d
}
-
// PackageDoc is the documentation for an entire package.
//
type PackageDoc struct {
Bugs []string
}
-
// newDoc returns the accumulated documentation for the package.
//
func (doc *docReader) newDoc(importpath string, filenames []string) *PackageDoc {
return p
}
-
// ----------------------------------------------------------------------------
// Filtering by name
type Filter func(string) bool
-
func matchFields(fields *ast.FieldList, f Filter) bool {
if fields != nil {
for _, field := range fields.List {
return false
}
-
func matchDecl(d *ast.GenDecl, f Filter) bool {
for _, d := range d.Specs {
switch v := d.(type) {
return false
}
-
func filterValueDocs(a []*ValueDoc, f Filter) []*ValueDoc {
w := 0
for _, vd := range a {
return a[0:w]
}
-
func filterFuncDocs(a []*FuncDoc, f Filter) []*FuncDoc {
w := 0
for _, fd := range a {
return a[0:w]
}
-
func filterTypeDocs(a []*TypeDoc, f Filter) []*TypeDoc {
w := 0
for _, td := range a {
return a[0:w]
}
-
// Filter eliminates documentation for names that don't pass through the filter f.
// TODO: Recognize "Type.Method" as a name.
//
"path/filepath"
)
-
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
return ioutil.ReadFile(filename)
}
-
func (p *parser) parseEOF() os.Error {
p.expect(token.EOF)
return p.GetError(scanner.Sorted)
}
-
// ParseExpr parses a Go expression and returns the corresponding
// AST node. The fset, filename, and src arguments have the same interpretation
// as for ParseFile. If there is an error, the result expression
return x, p.parseEOF()
}
-
// ParseStmtList parses a list of Go statements and returns the list
// of corresponding AST nodes. The fset, filename, and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
return p.parseStmtList(), p.parseEOF()
}
-
// ParseDeclList parses a list of Go declarations and returns the list
// of corresponding AST nodes. The fset, filename, and src arguments have the same
// interpretation as for ParseFile. If there is an error, the node
return p.parseDeclList(), p.parseEOF()
}
-
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
return p.parseFile(), p.GetError(scanner.NoMultiples) // parseFile() reads to EOF
}
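// parseFromString is a hypothetical sketch, not part of the original source:
// because readSource accepts in-memory source, no file needs to exist on disk.
func parseFromString(fset *token.FileSet) (*ast.File, os.Error) {
	src := "package main\nfunc main() {}\n"
	return ParseFile(fset, "hello.go", src, DeclarationErrors)
}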
-
// ParseFiles calls ParseFile for each file in the filenames list and returns
// a map of package name -> package AST with all the packages found. The mode
// bits are passed to ParseFile unchanged. Position information is recorded
return
}
-
// ParseDir calls ParseFile for the files in the directory specified by path and
// returns a map of package name -> package AST with all the packages found. If
// filter != nil, only the files with os.FileInfo entries passing through the filter
"go/token"
)
-
// The mode parameter to the Parse* functions is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality.
DeclarationErrors // report declaration errors
)
-
// The parser structure holds the parser's internal state.
type parser struct {
file *token.File
targetStack [][]*ast.Ident // stack of unresolved labels
}
-
// scannerMode returns the scanner mode bits given the parser's mode bits.
func scannerMode(mode uint) uint {
var m uint = scanner.InsertSemis
return m
}
-
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
p.file = fset.AddFile(filename, fset.Base(), len(src))
p.scanner.Init(p.file, src, p, scannerMode(mode))
p.openLabelScope()
}
-
// ----------------------------------------------------------------------------
// Scoping support
p.topScope = ast.NewScope(p.topScope)
}
-
func (p *parser) closeScope() {
p.topScope = p.topScope.Outer
}
-
func (p *parser) openLabelScope() {
p.labelScope = ast.NewScope(p.labelScope)
p.targetStack = append(p.targetStack, nil)
}
-
func (p *parser) closeLabelScope() {
// resolve labels
n := len(p.targetStack) - 1
p.labelScope = p.labelScope.Outer
}
-
func (p *parser) declare(decl, data interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
for _, ident := range idents {
assert(ident.Obj == nil, "identifier already declared or resolved")
}
}
-
func (p *parser) shortVarDecl(idents []*ast.Ident) {
// Go spec: A short variable declaration may redeclare variables
// provided they were originally declared in the same block with
}
}
-
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
-
func (p *parser) resolve(x ast.Expr) {
// nothing to do if x is not an identifier or the blank identifier
ident, _ := x.(*ast.Ident)
p.unresolved = append(p.unresolved, ident)
}
-
// ----------------------------------------------------------------------------
// Parsing support
fmt.Println(a...)
}
-
func trace(p *parser, msg string) *parser {
p.printTrace(msg, "(")
p.indent++
return p
}
-
// Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
p.indent--
p.printTrace(")")
}
-
// Advance to the next token.
func (p *parser) next0() {
// Because of one-token look-ahead, print the previous token
return
}
-
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. An empty line or non-comment
return
}
-
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
}
}
-
func (p *parser) error(pos token.Pos, msg string) {
p.Error(p.file.Position(pos), msg)
}
-
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
p.error(pos, msg)
}
-
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
return pos
}
-
func (p *parser) expectSemi() {
if p.tok != token.RPAREN && p.tok != token.RBRACE {
p.expect(token.SEMICOLON)
}
}
-
func assert(cond bool, msg string) {
if !cond {
panic("go/parser internal error: " + msg)
}
}
-
// ----------------------------------------------------------------------------
// Identifiers
return &ast.Ident{pos, name, nil}
}
-
func (p *parser) parseIdentList() (list []*ast.Ident) {
if p.trace {
defer un(trace(p, "IdentList"))
return
}
-
// ----------------------------------------------------------------------------
// Common productions
return
}
-
func (p *parser) parseLhsList() []ast.Expr {
list := p.parseExprList(true)
switch p.tok {
return list
}
-
func (p *parser) parseRhsList() []ast.Expr {
return p.parseExprList(false)
}
-
// ----------------------------------------------------------------------------
// Types
return typ
}
-
// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
if p.trace {
return ident
}
-
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
if p.trace {
defer un(trace(p, "ArrayType"))
return &ast.ArrayType{lbrack, len, elt}
}
-
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
idents := make([]*ast.Ident, len(list))
for i, x := range list {
return idents
}
-
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "FieldDecl"))
return field
}
-
func (p *parser) parseStructType() *ast.StructType {
if p.trace {
defer un(trace(p, "StructType"))
return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
-
func (p *parser) parsePointerType() *ast.StarExpr {
if p.trace {
defer un(trace(p, "PointerType"))
return &ast.StarExpr{star, base}
}
-
func (p *parser) tryVarType(isParam bool) ast.Expr {
if isParam && p.tok == token.ELLIPSIS {
pos := p.pos
return p.tryIdentOrType(false)
}
-
func (p *parser) parseVarType(isParam bool) ast.Expr {
typ := p.tryVarType(isParam)
if typ == nil {
return typ
}
-
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
if p.trace {
defer un(trace(p, "VarList"))
return
}
-
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
if p.trace {
defer un(trace(p, "ParameterList"))
return
}
-
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
if p.trace {
defer un(trace(p, "Parameters"))
return &ast.FieldList{lparen, params, rparen}
}
-
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
if p.trace {
defer un(trace(p, "Result"))
return nil
}
-
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
if p.trace {
defer un(trace(p, "Signature"))
return
}
-
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
if p.trace {
defer un(trace(p, "FuncType"))
return &ast.FuncType{pos, params, results}, scope
}
-
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "MethodSpec"))
return spec
}
-
func (p *parser) parseInterfaceType() *ast.InterfaceType {
if p.trace {
defer un(trace(p, "InterfaceType"))
return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
-
func (p *parser) parseMapType() *ast.MapType {
if p.trace {
defer un(trace(p, "MapType"))
return &ast.MapType{pos, key, value}
}
-
func (p *parser) parseChanType() *ast.ChanType {
if p.trace {
defer un(trace(p, "ChanType"))
return &ast.ChanType{pos, dir, value}
}
-
// If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
switch p.tok {
return nil
}
-
func (p *parser) tryType() ast.Expr {
typ := p.tryIdentOrType(false)
if typ != nil {
return typ
}
-
// ----------------------------------------------------------------------------
// Blocks
return
}
-
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
if p.trace {
defer un(trace(p, "Body"))
return &ast.BlockStmt{lbrace, list, rbrace}
}
-
func (p *parser) parseBlockStmt() *ast.BlockStmt {
if p.trace {
defer un(trace(p, "BlockStmt"))
return &ast.BlockStmt{lbrace, list, rbrace}
}
-
// ----------------------------------------------------------------------------
// Expressions
return &ast.FuncLit{typ, body}
}
-
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
return &ast.BadExpr{pos, p.pos}
}
-
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "Selector"))
return &ast.SelectorExpr{x, sel}
}
-
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "TypeAssertion"))
return &ast.TypeAssertExpr{x, typ}
}
-
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "IndexOrSlice"))
return &ast.IndexExpr{x, lbrack, low, rbrack}
}
-
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
if p.trace {
defer un(trace(p, "CallOrConversion"))
return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
}
-
func (p *parser) parseElement(keyOk bool) ast.Expr {
if p.trace {
defer un(trace(p, "Element"))
return x
}
-
func (p *parser) parseElementList() (list []ast.Expr) {
if p.trace {
defer un(trace(p, "ElementList"))
return
}
-
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "LiteralValue"))
return &ast.CompositeLit{typ, lbrace, elts, rbrace}
}
-
// checkExpr checks that x is an expression (and not a type).
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
switch t := unparen(x).(type) {
return x
}
-
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
return true
}
-
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
return true
}
-
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
return x
}
-
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
return x
}
-
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
return x
}
-
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
if p.trace {
return x
}
-
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
if p.trace {
return p.parsePrimaryExpr(lhs)
}
-
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
if p.trace {
return x
}
-
// If lhs is set and the result is an identifier, it is not resolved.
// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
// should reject when a type/raw type is obviously not allowed
return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
-
func (p *parser) parseRhs() ast.Expr {
return p.parseExpr(false)
}
-
// ----------------------------------------------------------------------------
// Statements
return &ast.ExprStmt{x[0]}
}
-
func (p *parser) parseCallExpr() *ast.CallExpr {
x := p.parseRhs()
if call, isCall := x.(*ast.CallExpr); isCall {
return nil
}
-
func (p *parser) parseGoStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "GoStmt"))
return &ast.GoStmt{pos, call}
}
-
func (p *parser) parseDeferStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "DeferStmt"))
return &ast.DeferStmt{pos, call}
}
-
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
if p.trace {
defer un(trace(p, "ReturnStmt"))
return &ast.ReturnStmt{pos, x}
}
-
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
if p.trace {
defer un(trace(p, "BranchStmt"))
return &ast.BranchStmt{pos, tok, label}
}
-
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
if s == nil {
return nil
return &ast.BadExpr{s.Pos(), s.End()}
}
-
func (p *parser) parseIfStmt() *ast.IfStmt {
if p.trace {
defer un(trace(p, "IfStmt"))
return &ast.IfStmt{pos, s, x, body, else_}
}
-
func (p *parser) parseTypeList() (list []ast.Expr) {
if p.trace {
defer un(trace(p, "TypeList"))
return
}
-
func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
if p.trace {
defer un(trace(p, "CaseClause"))
return &ast.CaseClause{pos, list, colon, body}
}
-
func isExprSwitch(s ast.Stmt) bool {
if s == nil {
return true
return false
}
-
func (p *parser) parseSwitchStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "SwitchStmt"))
return &ast.TypeSwitchStmt{pos, s1, s2, body}
}
-
func (p *parser) parseCommClause() *ast.CommClause {
if p.trace {
defer un(trace(p, "CommClause"))
return &ast.CommClause{pos, comm, colon, body}
}
-
func (p *parser) parseSelectStmt() *ast.SelectStmt {
if p.trace {
defer un(trace(p, "SelectStmt"))
return &ast.SelectStmt{pos, body}
}
-
func (p *parser) parseForStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "ForStmt"))
return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
}
-
func (p *parser) parseStmt() (s ast.Stmt) {
if p.trace {
defer un(trace(p, "Statement"))
return
}
-
// ----------------------------------------------------------------------------
// Declarations
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
-
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "ImportSpec"))
return spec
}
-
func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
if p.trace {
defer un(trace(p, "ConstSpec"))
return spec
}
-
func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "TypeSpec"))
return spec
}
-
func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "VarSpec"))
return spec
}
-
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
if p.trace {
defer un(trace(p, "GenDecl("+keyword.String()+")"))
return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
}
-
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
if p.trace {
defer un(trace(p, "Receiver"))
return par
}
-
func (p *parser) parseFuncDecl() *ast.FuncDecl {
if p.trace {
defer un(trace(p, "FunctionDecl"))
return decl
}
-
func (p *parser) parseDecl() ast.Decl {
if p.trace {
defer un(trace(p, "Declaration"))
return p.parseGenDecl(p.tok, f)
}
-
func (p *parser) parseDeclList() (list []ast.Decl) {
if p.trace {
defer un(trace(p, "DeclList"))
return
}
-
// ----------------------------------------------------------------------------
// Source files
"testing"
)
-
var fset = token.NewFileSet()
var illegalInputs = []interface{}{
`package p; const c; /* should have constant value */`,
}
-
func TestParseIllegalInputs(t *testing.T) {
for _, src := range illegalInputs {
_, err := ParseFile(fset, "", src, 0)
}
}
-
var validPrograms = []interface{}{
"package p\n",
`package p;`,
`package p; func f() { switch ; {} };`,
}
-
func TestParseValidPrograms(t *testing.T) {
for _, src := range validPrograms {
_, err := ParseFile(fset, "", src, 0)
}
}
-
var validFiles = []string{
"parser.go",
"parser_test.go",
}
-
func TestParse3(t *testing.T) {
for _, filename := range validFiles {
_, err := ParseFile(fset, filename, nil, DeclarationErrors)
}
}
-
func nameFilter(filename string) bool {
switch filename {
case "parser.go":
return true
}
-
func dirFilter(f *os.FileInfo) bool { return nameFilter(f.Name) }
-
func TestParse4(t *testing.T) {
path := "."
pkgs, err := ParseDir(fset, path, dirFilter, 0)
"go/token"
)
-
// Other formatting issues:
// - better comment formatting for /*-style comments at the end of a line (e.g. a declaration)
// when the comment spans multiple lines; if such a comment is just two lines, formatting is
// - should use blank instead of tab to separate one-line function bodies from
// the function header unless there is a group of consecutive one-liners
-
// ----------------------------------------------------------------------------
// Common AST nodes.
return
}
-
// setComment sets g as the next comment if g != nil and if node comments
// are enabled - this mode is used when printing source code fragments such
// as exports only. It assumes that there are no other pending comments to
p.cindex = 0
}
-
type exprListMode uint
const (
periodSep // elements are separated by periods
)
-
// Sets multiLine to true if the identifier list spans multiple lines.
// If indent is set, a multi-line identifier list is indented after the
// first linebreak encountered.
p.exprList(token.NoPos, xlist, 1, mode, multiLine, token.NoPos)
}
-
// Print a list of expressions. If the list spans multiple
// source lines, the original line breaks are respected between
// expressions. Sets multiLine to true if the list spans multiple
}
}
-
// Sets multiLine to true if the parameter list spans multiple lines.
func (p *printer) parameters(fields *ast.FieldList, multiLine *bool) {
p.print(fields.Opening, token.LPAREN)
p.print(fields.Closing, token.RPAREN)
}
-
// Sets multiLine to true if the signature spans multiple lines.
func (p *printer) signature(params, result *ast.FieldList, multiLine *bool) {
p.parameters(params, multiLine)
}
}
-
func identListSize(list []*ast.Ident, maxSize int) (size int) {
for i, x := range list {
if i > 0 {
return
}
-
func (p *printer) isOneLineFieldList(list []*ast.Field) bool {
if len(list) != 1 {
return false // allow only one field
return namesSize+typeSize <= maxSize
}
-
func (p *printer) setLineComment(text string) {
p.setComment(&ast.CommentGroup{[]*ast.Comment{&ast.Comment{token.NoPos, text}}})
}
-
func (p *printer) fieldList(fields *ast.FieldList, isStruct, isIncomplete bool) {
p.nesting++
defer func() {
p.print(unindent, formfeed, rbrace, token.RBRACE)
}
-
// ----------------------------------------------------------------------------
// Expressions
return
}
-
func cutoff(e *ast.BinaryExpr, depth int) int {
has4, has5, maxProblem := walkBinary(e)
if maxProblem > 0 {
return 4
}
-
func diffPrec(expr ast.Expr, prec int) int {
x, ok := expr.(*ast.BinaryExpr)
if !ok || prec != x.Op.Precedence() {
return 0
}
-
func reduceDepth(depth int) int {
depth--
if depth < 1 {
return depth
}
-
// Format the binary expression: decide the cutoff and then format.
// Let's call depth == 1 Normal mode, and depth > 1 Compact mode.
// (Algorithm suggestion by Russ Cox.)
}
}
-
func isBinary(expr ast.Expr) bool {
_, ok := expr.(*ast.BinaryExpr)
return ok
}
-
// If the expression contains one or more selector expressions, splits it into
// two expressions at the rightmost period. Writes entire expr to suffix when
// selector isn't found. Rewrites AST nodes for calls, index expressions and
return
}
-
// Convert an expression into an expression list split at the periods of
// selector expressions.
func selectorExprList(expr ast.Expr) (list []ast.Expr) {
return
}
-
// Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr1(expr ast.Expr, prec1, depth int, multiLine *bool) {
p.print(expr.Pos())
return
}
-
func (p *printer) expr0(x ast.Expr, depth int, multiLine *bool) {
p.expr1(x, token.LowestPrec, depth, multiLine)
}
-
// Sets multiLine to true if the expression spans multiple lines.
func (p *printer) expr(x ast.Expr, multiLine *bool) {
const depth = 1
p.expr1(x, token.LowestPrec, depth, multiLine)
}
-
// ----------------------------------------------------------------------------
// Statements
}
}
-
// block prints an *ast.BlockStmt; it always spans at least two lines.
func (p *printer) block(s *ast.BlockStmt, indent int) {
p.print(s.Pos(), token.LBRACE)
p.print(s.Rbrace, token.RBRACE)
}
-
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.Ident:
return false
}
-
func stripParens(x ast.Expr) ast.Expr {
if px, strip := x.(*ast.ParenExpr); strip {
// parentheses must not be stripped if there are any
return x
}
-
func (p *printer) controlClause(isForStmt bool, init ast.Stmt, expr ast.Expr, post ast.Stmt) {
p.print(blank)
needsBlank := false
}
}
-
// Sets multiLine to true if the statements spans multiple lines.
func (p *printer) stmt(stmt ast.Stmt, nextIsRBrace bool, multiLine *bool) {
p.print(stmt.Pos())
return
}
-
// ----------------------------------------------------------------------------
// Declarations
return m
}
-
func (p *printer) valueSpec(s *ast.ValueSpec, keepType, doIndent bool, multiLine *bool) {
p.setComment(s.Doc)
p.identList(s.Names, doIndent, multiLine) // always present
}
}
-
// The parameter n is the number of specs in the group. If doIndent is set,
// multi-line identifier lists in the spec are indented when the first
// linebreak is encountered.
}
}
-
// Sets multiLine to true if the declaration spans multiple lines.
func (p *printer) genDecl(d *ast.GenDecl, multiLine *bool) {
p.setComment(d.Doc)
}
}
-
// nodeSize determines the size of n in chars after formatting.
// The result is <= maxSize if the node fits on one line with at
// most maxSize chars and the formatted output doesn't contain
return
}
-
func (p *printer) isOneLineFunc(b *ast.BlockStmt, headerSize int) bool {
pos1 := b.Pos()
pos2 := b.Rbrace
return headerSize+bodySize <= maxSize
}
-
// Sets multiLine to true if the function body spans multiple lines.
func (p *printer) funcBody(b *ast.BlockStmt, headerSize int, isLit bool, multiLine *bool) {
if b == nil {
*multiLine = true
}
-
// distance returns the column difference between from and to if both
// are on the same line; if they are on different lines (or unknown)
// the result is infinity.
return infinity
}
-
// Sets multiLine to true if the declaration spans multiple lines.
func (p *printer) funcDecl(d *ast.FuncDecl, multiLine *bool) {
p.setComment(d.Doc)
p.funcBody(d.Body, p.distance(d.Pos(), p.pos), false, multiLine)
}
-
// Sets multiLine to true if the declaration spans multiple lines.
func (p *printer) decl(decl ast.Decl, multiLine *bool) {
switch d := decl.(type) {
}
}
-
// ----------------------------------------------------------------------------
// Files
return
}
-
func (p *printer) file(src *ast.File) {
p.setComment(src.Doc)
p.print(src.Pos(), token.PACKAGE, blank)
"testing"
)
-
var testfile *ast.File
-
func testprint(out io.Writer, file *ast.File) {
if _, err := (&Config{TabIndent | UseSpaces, 8}).Fprint(out, fset, file); err != nil {
log.Fatalf("print error: %s", err)
}
}
-
// cannot initialize in init because (printer) Fprint launches goroutines.
func initialize() {
const filename = "testdata/parser.go"
testfile = file
}
-
func BenchmarkPrint(b *testing.B) {
if testfile == nil {
initialize()
"tabwriter"
)
-
const debug = false // enable for debugging
unindent = whiteSpace('<')
)
-
var (
esc = []byte{tabwriter.Escape}
htab = []byte{'\t'}
formfeeds = []byte("\f\f\f\f\f\f\f\f") // more than the max determined by nlines
)
-
// Special positions
var noPos token.Position // use noPos when a position is needed but not known
var infinity = 1 << 30
-
// Use ignoreMultiLine if the multiLine information is not important.
var ignoreMultiLine = new(bool)
-
// A pmode value represents the current printer mode.
type pmode int
noExtraLinebreak
)
-
type printer struct {
// Configuration (does not change after initialization)
output io.Writer
nodeSizes map[ast.Node]int
}
-
func (p *printer) init(output io.Writer, cfg *Config, fset *token.FileSet, nodeSizes map[ast.Node]int) {
p.output = output
p.Config = *cfg
p.nodeSizes = nodeSizes
}
-
func (p *printer) internalError(msg ...interface{}) {
if debug {
fmt.Print(p.pos.String() + ": ")
}
}
-
// escape escapes string s by bracketing it with tabwriter.Escape.
// Escaped strings pass through tabwriter unchanged. (Note that
// valid Go programs cannot contain tabwriter.Escape bytes since
return p.litbuf.String()
}
-
// nlines returns the adjusted number of linebreaks given the desired number
// of breaks n such that min <= result <= max where max depends on the current
// nesting level.
if n < min {
return min
}
- max := 3 // max. number of newlines at the top level (p.nesting == 0)
+ max := 2 // max. number of newlines at the top level (p.nesting == 0)
if p.nesting > 0 {
max = 2 // max. number of newlines everywhere else
}
return n
}
-
// write0 writes raw (uninterpreted) data to p.output and handles errors.
// write0 does not indent after newlines, and does not HTML-escape or update p.pos.
//
}
}
-
// write interprets data and writes it to p.output. It inserts indentation
// after a line break unless in a tabwriter escape sequence.
// It updates p.pos as a side-effect.
p.pos.Column += d
}
-
func (p *printer) writeNewlines(n int, useFF bool) {
if n > 0 {
n = p.nlines(n, 0)
}
}
-
// writeItem writes data at position pos. data is the text corresponding to
// a single lexical token, but may also be comment text. pos is the actual
// (or at least very accurately estimated) position of the data in the original
p.last = p.pos
}
-
// writeCommentPrefix writes the whitespace before a comment.
// If there is any pending whitespace, it consumes as much of
// it as is likely to help position the comment nicely.
}
}
-
// TODO(gri): It should be possible to convert the code below from using
// []byte to string and in the process eliminate some conversions.
return lines
}
-
func isBlank(s []byte) bool {
for _, b := range s {
if b > ' ' {
return true
}
-
func commonPrefix(a, b []byte) []byte {
i := 0
for i < len(a) && i < len(b) && a[i] == b[i] && (a[i] <= ' ' || a[i] == '*') {
return a[0:i]
}
-
func stripCommonPrefix(lines [][]byte) {
if len(lines) < 2 {
return // at most one line - nothing to do
}
}
-
func (p *printer) writeComment(comment *ast.Comment) {
text := comment.Text
}
}
-
// writeCommentSuffix writes a line break after a comment if indicated
// and processes any leftover indentation information. If a line break
// is needed, the kind of break (newline vs formfeed) depends on the
return
}
-
// intersperseComments consumes all comments that appear before the next token
// tok and prints it together with the buffered whitespace (i.e., the whitespace
// that needs to be written before the next token). A heuristic is used to mix
return false
}
-
// writeWhitespace writes the first n whitespace entries.
func (p *printer) writeWhitespace(n int) {
// write entries
p.wsbuf = p.wsbuf[0:i]
}
-
// ----------------------------------------------------------------------------
// Printing interface
return
}
-
// print prints a list of "items" (roughly corresponding to syntactic
// tokens, but also including whitespace and formatting information).
// It is the only print function that should be called directly from
}
}
-
// commentBefore returns true iff the current comment occurs
// before the next position in the source code.
//
return p.cindex < len(p.comments) && p.fset.Position(p.comments[p.cindex].List[0].Pos()).Offset < next.Offset
}
-
// Flush prints any pending comments and whitespace occurring
// textually before the position of the next token tok. Flush
// returns true if a pending formfeed character was dropped
return
}
-
// ----------------------------------------------------------------------------
// Trimmer
space bytes.Buffer
}
-
// trimmer is implemented as a state machine.
// It can be in one of the following states:
const (
inText // inside text
)
-
// Design note: It is tempting to eliminate extra blanks occurring in
// whitespace in this function as it could simplify some
// of the blanks logic in the node printing functions.
return
}
-
// ----------------------------------------------------------------------------
// Public interface
UseSpaces // use spaces instead of tabs for alignment
)
-
// A Config node controls the output of Fprint.
type Config struct {
Mode uint // default: 0
Tabwidth int // default: 8
}
-
// fprint implements Fprint and takes a nodeSizes map for setting up the printer state.
func (cfg *Config) fprint(output io.Writer, fset *token.FileSet, node interface{}, nodeSizes map[ast.Node]int) (int, os.Error) {
// redirect output through a trimmer to eliminate trailing whitespace
return p.written, err
}
-
// Fprint "pretty-prints" an AST node to output and returns the number
// of bytes written and an error (if any) for a given configuration cfg.
// Position information is interpreted relative to the file set fset.
return cfg.fprint(output, fset, node, make(map[ast.Node]int))
}
-
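// Illustrative sketch (not part of the original sources): pretty-print a
// parsed file through the Config.Fprint method documented above. It assumes
// the pre-Go1 API of this package (os.Error, uint mode flags) and the io,
// go/ast, and go/token imports of the surrounding file; examplePrint is a
// hypothetical helper name.
func examplePrint(output io.Writer, fset *token.FileSet, file *ast.File) os.Error {
	cfg := &Config{Mode: TabIndent | UseSpaces, Tabwidth: 8}
	_, err := cfg.Fprint(output, fset, file)
	return err
}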
// Fprint "pretty-prints" an AST node to output.
// It calls Config.Fprint with default settings.
//
"time"
)
-
const (
dataDir = "testdata"
tabwidth = 8
)
-
var update = flag.Bool("update", false, "update golden files")
-
var fset = token.NewFileSet()
-
func lineString(text []byte, i int) string {
i0 := i
for i < len(text) && text[i] != '\n' {
return string(text[i0:i])
}
-
type checkMode uint
const (
rawFormat
)
-
func runcheck(t *testing.T, source, golden string, mode checkMode) {
// parse source
prog, err := parser.ParseFile(fset, source, nil, parser.ParseComments)
}
}
-
func check(t *testing.T, source, golden string, mode checkMode) {
// start a timer to produce a time-out signal
tc := make(chan int)
}
}
-
type entry struct {
source, golden string
mode checkMode
{"slow.input", "slow.golden", 0},
}
-
func TestFiles(t *testing.T) {
for i, e := range data {
source := filepath.Join(dataDir, e.source)
}
}
-
// TestLineComments, using a simple test case, checks that consecutive line
// comments are properly terminated with a newline even if the AST position
// information is incorrect.
var x int // x
var ()
-
// This comment SHOULD be associated with the next declaration.
func f0() {
const pi = 3.14 // pi
f0()
}
-
func _() {
// this comment should be properly indented
}
-
func _(x int) int {
if x < 0 { // the tab printed before this comment's // must not affect the remaining lines
return -x // this statement should be properly indented
return x
}
-
func typeswitch(x interface{}) {
switch v := x.(type) {
case bool, int, float:
aligned line */
}
-
func _() {
/*
freestanding comment
aligned line */
}
-
func _() {
/*
freestanding comment
*/
}
-
// Some interesting interspersed comments
func _( /* this */ x /* is */ /* an */ int) {
}
_ = []int{0, 1 /* don't introduce a newline after this comment - was issue 1365 */ }
}
-
// Comments immediately adjacent to punctuation (for which the go/printer
// may only have estimated position information) must remain after the punctuation.
func _() {
}
}
-
// Line comments with tabs
func _() {
var finput *bufio.Reader // input file
var lflag bool // -l - disable line directives
}
-
/* This comment is the last entry in this file. It must be printed and should be followed by a newline */
//
package main
-
// The SZ struct; it is empty.
type SZ struct{}
import _ "os"
import _ "os"
-
import _ "fmt"
import _ "fmt"
import _ "fmt"
var _ int
-
// printing of constant literals
const (
_ = "foobar"
bar`
)
-
func _() {
type _ int
type _ *int
var _ func() interface{}
}
-
// don't lose blank lines in grouped declarations
const (
_ int = 0
_
)
-
type (
_ int
_ struct{}
_ map[string]int
)
-
var (
_ int = 0
_ float = 1
_ bool
)
-
// don't lose blank lines in this struct
type _ struct {
String struct {
}
}
-
// no tabs for single or ungrouped decls
func _() {
const xxxxxx = 0
filterDelay delayTime // actual filter update interval in minutes; usually filterDelay == filterMin, but filterDelay may back off exponentially
)
-
// formatting of structs
type _ struct{}
r, s float // this line should be indented
}
-
// difficult cases
type _ struct {
bool // comment
text []byte // comment
}
-
// formatting of interfaces
type EI interface{}
gggggggggggg(x, y, z int) // hurray
}
-
// formatting of variable declarations
func _() {
type day struct {
)
}
-
// formatting of multi-line variable declarations
var a1, b1, c1 int // all on one line
a4, b4, c4 int // this line should be indented
)
-
func _() {
var privateKey2 = &Block{Type: "RSA PRIVATE KEY",
Headers: map[string]string{},
}
}
-
func _() {
var Universe = Scope{
Names: map[string]*Ident{
}
}
-
// alignment of map composite entries
var _ = map[int]int{
// small key sizes: always align even if size ratios are large
abcde: a, // align with previous line
}
-
func _() {
var _ = T{
a, // must introduce trailing comma
}
}
-
// formatting of function results
func _() func() {}
func _() func(int) { return nil }
func _() func(int) int { return nil }
func _() func(int) func(int) func() { return nil }
-
// formatting of consecutive single-line functions
func _() {}
func _() {}
return x
}
-
// making function declarations safe for new semicolon rules
func _() { /* multi-line func because of comment */
}
/* multi-line func because block is on multiple lines */
}
-
// ellipsis parameters
func _(...int)
func _(...*int)
func _(x ...map[string]int)
func _(x ...chan int)
-
// these parameter lists must remain multi-line since they are multi-line in the source
func _(bool,
int) {
p *int
)
-
func _() {
// no spaces around simple or parenthesized expressions
_ = (a + 0)
x < y || z > 42
}
-
func _() {
_ = a + b
_ = a + b + c
_ = token(matchType + xlength<<lengthShift + xoffset)
}
-
func f(x int, args ...int) {
f(0, args...)
f(1, args)
_ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
}
-
func _() {
_ = T{}
_ = struct{}{}
_ = map[int]T{}
}
-
// one-line structs/interfaces in composite literals (up to a threshold)
func _() {
_ = struct{}{}
_ = struct{ s struct{ int } }{struct{ int }{0}}
}
-
func _() {
// do not modify literals
_ = "tab1 tab2 tab3 end" // string contains 3 tabs
they must not be removed`
}
-
func _() {
// smart handling of indentation for multi-line raw strings
var _ = ``
}
}
-
func _() {
// one-line function literals (body is on a single line)
_ = func() {}
})
}
-
func _() {
_ = [][]int{
[]int{1},
_ = [][]int{{1}, {1, 2}, {1, 2, 3}}
}
-
// various multi-line expressions
func _() {
// do not add extra indentation to multi-line string lists
}
}
-
const _ = F1 +
`string = "%s";` +
`ptr = *;` +
`datafmt.T2 = s ["-" p "-"];`
-
const _ = `datafmt "datafmt";` +
`default = "%v";` +
`array = *;` +
`datafmt.T3 = s {" " a a / ","};`
-
const _ = `datafmt "datafmt";` +
`default = "%v";` +
`array = *;` +
`datafmt.T3 = s {" " a a / ","};`
-
func _() {
_ = F1 +
`string = "%s";` +
`datafmt.T3 = s {" " a a / ","};`
}
-
func _() {
// respect source lines in multi-line expressions
_ = a +
_ = "170141183460469231731687303715884105727" // prime
}
-
// Alignment after overlong lines
const (
_ = "991"
_ = "170141183460469231731687303715884105727" // prime
)
-
// Correct placement of operators and comments in multi-line expressions
func _() {
_ = a + // comment
_ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
}
-
// Correct placement of terminating comma/closing parentheses in multi-line calls.
func _() {
f(1,
)
}
-
// Align comments in multi-line lists of single-line expressions.
var txpix = [NCOL]draw.Color{
draw.Yellow, // yellow
draw.Color(0xBB005DFF), /* maroon */
}
-
func same(t, u *Time) bool {
// respect source lines in multi-line expressions
return t.Year == u.Year &&
t.Zone == u.Zone
}
-
func (p *parser) charClass() {
// respect source lines in multi-line expressions
if cc.negate && len(cc.ranges) == 2 &&
}
}
-
func addState(s []state, inst instr, match []int) {
// handle comments correctly in multi-line expressions
for i := 0; i < l; i++ {
c
}
-
// Don't introduce extra newlines in strangely formatted expression lists.
func f() {
// os.Open parameters should remain on two lines
p *int
)
-
func _() {
// no spaces around simple or parenthesized expressions
_ = (a + 0)
x < y || z > 42
}
-
func _() {
_ = a + b
_ = a + b + c
_ = token(matchType + xlength<<lengthShift + xoffset)
}
-
func f(x int, args ...int) {
f(0, args...)
f(1, args)
_ = f(x / *y, x < -1, x < <-1, x + +1, x - -1, x & &x, x & ^x)
}
-
func _() {
_ = T{}
_ = struct{}{}
_ = map[int]T{}
}
-
// one-line structs/interfaces in composite literals (up to a threshold)
func _() {
_ = struct{}{}
_ = struct{ s struct{ int } }{struct{ int }{0}}
}
-
func _() {
// do not modify literals
_ = "tab1 tab2 tab3 end" // string contains 3 tabs
they must not be removed`
}
-
func _() {
// smart handling of indentation for multi-line raw strings
var _ = ``
}
}
-
func _() {
// one-line function literals (body is on a single line)
_ = func() {}
})
}
-
func _() {
_ = [][]int{
[]int{1},
_ = [][]int{{1}, {1, 2}, {1, 2, 3}}
}
-
// various multi-line expressions
func _() {
// do not add extra indentation to multi-line string lists
}
}
-
const _ = F1 +
`string = "%s";` +
`ptr = *;` +
`datafmt.T2 = s ["-" p "-"];`
-
const _ = `datafmt "datafmt";` +
`default = "%v";` +
`array = *;` +
`datafmt.T3 = s {" " a a / ","};`
-
const _ = `datafmt "datafmt";` +
`default = "%v";` +
`array = *;` +
`datafmt.T3 = s {" " a a / ","};`
-
func _() {
_ = F1 +
`string = "%s";` +
`datafmt.T3 = s {" " a a / ","};`
}
-
func _() {
// respect source lines in multi-line expressions
_ = a +
_ = "170141183460469231731687303715884105727" // prime
}
-
// Alignment after overlong lines
const (
_ = "991"
_ = "170141183460469231731687303715884105727" // prime
)
-
// Correct placement of operators and comments in multi-line expressions
func _() {
_ = a + // comment
_ = "ba0408" + "7265717569726564" // field 71, encoding 2, string "required"
}
-
// Correct placement of terminating comma/closing parentheses in multi-line calls.
func _() {
f(1,
)
}
-
// Align comments in multi-line lists of single-line expressions.
var txpix = [NCOL]draw.Color{
draw.Yellow, // yellow
draw.Color(0xBB005DFF), /* maroon */
}
-
func same(t, u *Time) bool {
// respect source lines in multi-line expressions
return t.Year == u.Year &&
t.Zone == u.Zone
}
-
func (p *parser) charClass() {
// respect source lines in multi-line expressions
if cc.negate && len(cc.ranges) == 2 &&
}
}
-
func addState(s []state, inst instr, match []int) {
// handle comments correctly in multi-line expressions
for i := 0; i < l; i++ {
c
}
-
// Don't introduce extra newlines in strangely formatted expression lists.
func f() {
// os.Open parameters should remain on two lines
"go/token"
)
-
// The mode parameter to the Parse* functions is a set of flags (or 0).
// They control the amount of source code parsed and other optional
// parser functionality.
DeclarationErrors // report declaration errors
)
-
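// Illustrative sketch (not part of the original sources): parse a single file
// with the mode flags above; ParseComments keeps comments in the AST and
// DeclarationErrors reports declaration errors. It assumes this package's
// ParseFile entry point as used elsewhere in this tree; parseWithComments is
// a hypothetical helper name.
func parseWithComments(filename string) (*ast.File, os.Error) {
	fset := token.NewFileSet()
	return ParseFile(fset, filename, nil, ParseComments|DeclarationErrors)
}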
// The parser structure holds the parser's internal state.
type parser struct {
file *token.File
targetStack [][]*ast.Ident // stack of unresolved labels
}
-
// scannerMode returns the scanner mode bits given the parser's mode bits.
func scannerMode(mode uint) uint {
var m uint = scanner.InsertSemis
return m
}
-
func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode uint) {
p.file = fset.AddFile(filename, fset.Base(), len(src))
p.scanner.Init(p.file, src, p, scannerMode(mode))
p.openLabelScope()
}
-
// ----------------------------------------------------------------------------
// Scoping support
p.topScope = ast.NewScope(p.topScope)
}
-
func (p *parser) closeScope() {
p.topScope = p.topScope.Outer
}
-
func (p *parser) openLabelScope() {
p.labelScope = ast.NewScope(p.labelScope)
p.targetStack = append(p.targetStack, nil)
}
-
func (p *parser) closeLabelScope() {
// resolve labels
n := len(p.targetStack) - 1
p.labelScope = p.labelScope.Outer
}
-
func (p *parser) declare(decl interface{}, scope *ast.Scope, kind ast.ObjKind, idents ...*ast.Ident) {
for _, ident := range idents {
assert(ident.Obj == nil, "identifier already declared or resolved")
}
}
-
func (p *parser) shortVarDecl(idents []*ast.Ident) {
// Go spec: A short variable declaration may redeclare variables
// provided they were originally declared in the same block with
}
}
-
// The unresolved object is a sentinel to mark identifiers that have been added
// to the list of unresolved identifiers. The sentinel is only used for verifying
// internal consistency.
var unresolved = new(ast.Object)
-
func (p *parser) resolve(x ast.Expr) {
// nothing to do if x is not an identifier or the blank identifier
ident, _ := x.(*ast.Ident)
p.unresolved = append(p.unresolved, ident)
}
-
// ----------------------------------------------------------------------------
// Parsing support
fmt.Println(a...)
}
-
func trace(p *parser, msg string) *parser {
p.printTrace(msg, "(")
p.indent++
return p
}
-
// Usage pattern: defer un(trace(p, "..."));
func un(p *parser) {
p.indent--
p.printTrace(")")
}
-
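// Illustrative sketch of the usage pattern above inside a parse method
// ("Example" is a hypothetical production name; real uses of the pattern
// appear throughout this file, e.g. in parseIdentList below):
func (p *parser) parseExample() {
	if p.trace {
		defer un(trace(p, "Example"))
	}
	// ... production body elided ...
}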
// Advance to the next token.
func (p *parser) next0() {
// Because of one-token look-ahead, print the previous token
return
}
-
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. An empty line or non-comment
return
}
-
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
}
}
-
func (p *parser) error(pos token.Pos, msg string) {
p.Error(p.file.Position(pos), msg)
}
-
func (p *parser) errorExpected(pos token.Pos, msg string) {
msg = "expected " + msg
if pos == p.pos {
p.error(pos, msg)
}
-
func (p *parser) expect(tok token.Token) token.Pos {
pos := p.pos
if p.tok != tok {
return pos
}
-
func (p *parser) expectSemi() {
if p.tok != token.RPAREN && p.tok != token.RBRACE {
p.expect(token.SEMICOLON)
}
}
-
func assert(cond bool, msg string) {
if !cond {
panic("go/parser internal error: " + msg)
}
}
-
// ----------------------------------------------------------------------------
// Identifiers
return &ast.Ident{pos, name, nil}
}
-
func (p *parser) parseIdentList() (list []*ast.Ident) {
if p.trace {
defer un(trace(p, "IdentList"))
return
}
-
// ----------------------------------------------------------------------------
// Common productions
return
}
-
func (p *parser) parseLhsList() []ast.Expr {
list := p.parseExprList(true)
switch p.tok {
return list
}
-
func (p *parser) parseRhsList() []ast.Expr {
return p.parseExprList(false)
}
-
// ----------------------------------------------------------------------------
// Types
return typ
}
-
// If the result is an identifier, it is not resolved.
func (p *parser) parseTypeName() ast.Expr {
if p.trace {
return ident
}
-
func (p *parser) parseArrayType(ellipsisOk bool) ast.Expr {
if p.trace {
defer un(trace(p, "ArrayType"))
return &ast.ArrayType{lbrack, len, elt}
}
-
func (p *parser) makeIdentList(list []ast.Expr) []*ast.Ident {
idents := make([]*ast.Ident, len(list))
for i, x := range list {
return idents
}
-
func (p *parser) parseFieldDecl(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "FieldDecl"))
return field
}
-
func (p *parser) parseStructType() *ast.StructType {
if p.trace {
defer un(trace(p, "StructType"))
return &ast.StructType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
-
func (p *parser) parsePointerType() *ast.StarExpr {
if p.trace {
defer un(trace(p, "PointerType"))
return &ast.StarExpr{star, base}
}
-
func (p *parser) tryVarType(isParam bool) ast.Expr {
if isParam && p.tok == token.ELLIPSIS {
pos := p.pos
return p.tryIdentOrType(false)
}
-
func (p *parser) parseVarType(isParam bool) ast.Expr {
typ := p.tryVarType(isParam)
if typ == nil {
return typ
}
-
func (p *parser) parseVarList(isParam bool) (list []ast.Expr, typ ast.Expr) {
if p.trace {
defer un(trace(p, "VarList"))
return
}
-
func (p *parser) parseParameterList(scope *ast.Scope, ellipsisOk bool) (params []*ast.Field) {
if p.trace {
defer un(trace(p, "ParameterList"))
return
}
-
func (p *parser) parseParameters(scope *ast.Scope, ellipsisOk bool) *ast.FieldList {
if p.trace {
defer un(trace(p, "Parameters"))
return &ast.FieldList{lparen, params, rparen}
}
-
func (p *parser) parseResult(scope *ast.Scope) *ast.FieldList {
if p.trace {
defer un(trace(p, "Result"))
return nil
}
-
func (p *parser) parseSignature(scope *ast.Scope) (params, results *ast.FieldList) {
if p.trace {
defer un(trace(p, "Signature"))
return
}
-
func (p *parser) parseFuncType() (*ast.FuncType, *ast.Scope) {
if p.trace {
defer un(trace(p, "FuncType"))
return &ast.FuncType{pos, params, results}, scope
}
-
func (p *parser) parseMethodSpec(scope *ast.Scope) *ast.Field {
if p.trace {
defer un(trace(p, "MethodSpec"))
return spec
}
-
func (p *parser) parseInterfaceType() *ast.InterfaceType {
if p.trace {
defer un(trace(p, "InterfaceType"))
return &ast.InterfaceType{pos, &ast.FieldList{lbrace, list, rbrace}, false}
}
-
func (p *parser) parseMapType() *ast.MapType {
if p.trace {
defer un(trace(p, "MapType"))
return &ast.MapType{pos, key, value}
}
-
func (p *parser) parseChanType() *ast.ChanType {
if p.trace {
defer un(trace(p, "ChanType"))
return &ast.ChanType{pos, dir, value}
}
-
// If the result is an identifier, it is not resolved.
func (p *parser) tryIdentOrType(ellipsisOk bool) ast.Expr {
switch p.tok {
return nil
}
-
func (p *parser) tryType() ast.Expr {
typ := p.tryIdentOrType(false)
if typ != nil {
return typ
}
-
// ----------------------------------------------------------------------------
// Blocks
return
}
-
func (p *parser) parseBody(scope *ast.Scope) *ast.BlockStmt {
if p.trace {
defer un(trace(p, "Body"))
return &ast.BlockStmt{lbrace, list, rbrace}
}
-
func (p *parser) parseBlockStmt() *ast.BlockStmt {
if p.trace {
defer un(trace(p, "BlockStmt"))
return &ast.BlockStmt{lbrace, list, rbrace}
}
-
// ----------------------------------------------------------------------------
// Expressions
return &ast.FuncLit{typ, body}
}
-
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
// If lhs is set and the result is an identifier, it is not resolved.
return &ast.BadExpr{pos, p.pos}
}
-
func (p *parser) parseSelector(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "Selector"))
return &ast.SelectorExpr{x, sel}
}
-
func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "TypeAssertion"))
return &ast.TypeAssertExpr{x, typ}
}
-
func (p *parser) parseIndexOrSlice(x ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "IndexOrSlice"))
return &ast.IndexExpr{x, lbrack, low, rbrack}
}
-
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
if p.trace {
defer un(trace(p, "CallOrConversion"))
return &ast.CallExpr{fun, lparen, list, ellipsis, rparen}
}
-
func (p *parser) parseElement(keyOk bool) ast.Expr {
if p.trace {
defer un(trace(p, "Element"))
return x
}
-
func (p *parser) parseElementList() (list []ast.Expr) {
if p.trace {
defer un(trace(p, "ElementList"))
return
}
-
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
if p.trace {
defer un(trace(p, "LiteralValue"))
return &ast.CompositeLit{typ, lbrace, elts, rbrace}
}
-
// checkExpr checks that x is an expression (and not a type).
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
switch t := unparen(x).(type) {
return x
}
-
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
return true
}
-
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
return true
}
-
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
return x
}
-
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
return x
}
-
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
return x
}
-
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parsePrimaryExpr(lhs bool) ast.Expr {
if p.trace {
return x
}
-
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseUnaryExpr(lhs bool) ast.Expr {
if p.trace {
return p.parsePrimaryExpr(lhs)
}
-
// If lhs is set and the result is an identifier, it is not resolved.
func (p *parser) parseBinaryExpr(lhs bool, prec1 int) ast.Expr {
if p.trace {
return x
}
-
// If lhs is set and the result is an identifier, it is not resolved.
// TODO(gri): parseExpr may return a type or even a raw type ([..]int) -
// should reject when a type/raw type is obviously not allowed
return p.parseBinaryExpr(lhs, token.LowestPrec+1)
}
-
func (p *parser) parseRhs() ast.Expr {
return p.parseExpr(false)
}
-
// ----------------------------------------------------------------------------
// Statements
return &ast.ExprStmt{x[0]}
}
-
func (p *parser) parseCallExpr() *ast.CallExpr {
x := p.parseRhs()
if call, isCall := x.(*ast.CallExpr); isCall {
return nil
}
-
func (p *parser) parseGoStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "GoStmt"))
return &ast.GoStmt{pos, call}
}
-
func (p *parser) parseDeferStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "DeferStmt"))
return &ast.DeferStmt{pos, call}
}
-
func (p *parser) parseReturnStmt() *ast.ReturnStmt {
if p.trace {
defer un(trace(p, "ReturnStmt"))
return &ast.ReturnStmt{pos, x}
}
-
func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
if p.trace {
defer un(trace(p, "BranchStmt"))
return &ast.BranchStmt{pos, tok, label}
}
-
func (p *parser) makeExpr(s ast.Stmt) ast.Expr {
if s == nil {
return nil
return &ast.BadExpr{s.Pos(), s.End()}
}
-
func (p *parser) parseIfStmt() *ast.IfStmt {
if p.trace {
defer un(trace(p, "IfStmt"))
return &ast.IfStmt{pos, s, x, body, else_}
}
-
func (p *parser) parseTypeList() (list []ast.Expr) {
if p.trace {
defer un(trace(p, "TypeList"))
return
}
-
func (p *parser) parseCaseClause(exprSwitch bool) *ast.CaseClause {
if p.trace {
defer un(trace(p, "CaseClause"))
return &ast.CaseClause{pos, list, colon, body}
}
-
func isExprSwitch(s ast.Stmt) bool {
if s == nil {
return true
return false
}
-
func (p *parser) parseSwitchStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "SwitchStmt"))
return &ast.TypeSwitchStmt{pos, s1, s2, body}
}
-
func (p *parser) parseCommClause() *ast.CommClause {
if p.trace {
defer un(trace(p, "CommClause"))
return &ast.CommClause{pos, comm, colon, body}
}
-
func (p *parser) parseSelectStmt() *ast.SelectStmt {
if p.trace {
defer un(trace(p, "SelectStmt"))
return &ast.SelectStmt{pos, body}
}
-
func (p *parser) parseForStmt() ast.Stmt {
if p.trace {
defer un(trace(p, "ForStmt"))
return &ast.ForStmt{pos, s1, p.makeExpr(s2), s3, body}
}
-
func (p *parser) parseStmt() (s ast.Stmt) {
if p.trace {
defer un(trace(p, "Statement"))
return
}
-
// ----------------------------------------------------------------------------
// Declarations
type parseSpecFunction func(p *parser, doc *ast.CommentGroup, iota int) ast.Spec
-
func parseImportSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "ImportSpec"))
return spec
}
-
func parseConstSpec(p *parser, doc *ast.CommentGroup, iota int) ast.Spec {
if p.trace {
defer un(trace(p, "ConstSpec"))
return spec
}
-
func parseTypeSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "TypeSpec"))
return spec
}
-
func parseVarSpec(p *parser, doc *ast.CommentGroup, _ int) ast.Spec {
if p.trace {
defer un(trace(p, "VarSpec"))
return spec
}
-
func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
if p.trace {
defer un(trace(p, "GenDecl("+keyword.String()+")"))
return &ast.GenDecl{doc, pos, keyword, lparen, list, rparen}
}
-
func (p *parser) parseReceiver(scope *ast.Scope) *ast.FieldList {
if p.trace {
defer un(trace(p, "Receiver"))
return par
}
-
func (p *parser) parseFuncDecl() *ast.FuncDecl {
if p.trace {
defer un(trace(p, "FunctionDecl"))
return decl
}
-
func (p *parser) parseDecl() ast.Decl {
if p.trace {
defer un(trace(p, "Declaration"))
return p.parseGenDecl(p.tok, f)
}
-
func (p *parser) parseDeclList() (list []ast.Decl) {
if p.trace {
defer un(trace(p, "DeclList"))
return
}
-
// ----------------------------------------------------------------------------
// Source files
}
}
-
// Formatting of switch-statement headers.
func _() {
switch {
}
}
-
// Formatting of switch statement bodies.
func _() {
switch {
}
}
-
// Formatting of selected select statements.
func _() {
select {}
}
}
-
// Formatting of for-statement headers.
func _() {
for {
} // no parens printed
}
-
// Don't remove mandatory parentheses around composite literals in control clauses.
func _() {
// strip parentheses - no composite literals or composite literals don't start with a type name
}
}
-
// Extra empty lines inside functions. Do respect source code line
// breaks between statement boundaries but print at most one empty
// line at a time.
}
}
-
// Formatting around labels.
func _() {
L:
}
-
func _() {
// this comment should be indented
L: // no semicolon needed
}
-
func _() {
switch 0 {
case 0:
}
}
-
func _() {
f()
L1:
L3:
}
-
func _() {
// this comment should be indented
L:
}
-
func _() {
L:
_ = 0
}
-
func _() {
// this comment should be indented
L:
_ = 0
}
-
func _() {
for {
L1:
}
}
-
func _() {
// this comment should be indented
for {
}
}
-
func _() {
if true {
_ = 0
_ = 0
}
-
func _() {
for {
goto L
MoreCode()
}
-
func _() {
for {
goto L
MoreCode()
}
-
func _() {
for {
goto L
MoreCode()
}
-
func _() {
for {
goto AVeryLongLabelThatShouldNotAffectFormatting
"sort"
)
-
// An implementation of an ErrorHandler may be provided to the Scanner.
// If a syntax error is encountered and a handler was installed, Error
// is called with a position and an error message. The position points
Error(pos token.Position, msg string)
}
-
// ErrorVector implements the ErrorHandler interface. It maintains a list
// of errors which can be retrieved with GetErrorList and GetError. The
// zero value for an ErrorVector is an empty ErrorVector ready to use.
errors vector.Vector
}
-
// Reset resets an ErrorVector to no errors.
func (h *ErrorVector) Reset() { h.errors.Resize(0, 0) }
-
// ErrorCount returns the number of errors collected.
func (h *ErrorVector) ErrorCount() int { return h.errors.Len() }
-
// Within ErrorVector, an error is represented by an Error node. The
// position Pos, if valid, points to the beginning of the offending
// token, and the error condition is described by Msg.
Msg string
}
-
func (e *Error) String() string {
if e.Pos.Filename != "" || e.Pos.IsValid() {
// don't print "<unknown position>"
return e.Msg
}
-
// An ErrorList is a (possibly sorted) list of Errors.
type ErrorList []*Error
-
// ErrorList implements the sort Interface.
func (p ErrorList) Len() int { return len(p) }
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
func (p ErrorList) Less(i, j int) bool {
e := &p[i].Pos
f := &p[j].Pos
return false
}
-
func (p ErrorList) String() string {
switch len(p) {
case 0:
return fmt.Sprintf("%s (and %d more errors)", p[0].String(), len(p)-1)
}
-
// These constants control the construction of the ErrorList
// returned by GetErrors.
//
NoMultiples // sort error list and leave only the first error per line
)
-
// GetErrorList returns the list of errors collected by an ErrorVector.
// The construction of the ErrorList returned is controlled by the mode
// parameter. If there are no errors, the result is nil.
return list
}
-
// GetError is like GetErrorList, but it returns an os.Error instead
// so that a nil result can be assigned to an os.Error variable and
// remains nil.
return h.GetErrorList(mode)
}
-
// ErrorVector implements the ErrorHandler interface.
func (h *ErrorVector) Error(pos token.Position, msg string) {
h.errors.Push(&Error{pos, msg})
}
-
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
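// Illustrative sketch (not part of the original sources): collect scanner
// errors in an ErrorVector and report them via GetError and PrintError as
// described above. It assumes the Scanner API of this package and the io and
// go/token imports; scanAndReport is a hypothetical helper name.
func scanAndReport(w io.Writer, file *token.File, src []byte) os.Error {
	var S Scanner
	var errs ErrorVector
	S.Init(file, src, &errs, 0)
	for {
		_, tok, _ := S.Scan()
		if tok == token.EOF {
			break
		}
	}
	if err := errs.GetError(Sorted); err != nil {
		PrintError(w, err)
		return err
	}
	return nil
}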
"utf8"
)
-
// A Scanner holds the scanner's internal state while processing
// a given text. It can be allocated as part of another data
// structure but must be initialized via Init before use.
ErrorCount int // number of errors encountered
}
-
// Read the next Unicode char into S.ch.
// S.ch < 0 means end-of-file.
//
}
}
-
// The mode parameter to the Init function is a set of flags (or 0).
// They control scanner behavior.
//
S.next()
}
-
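// Illustrative sketch (not part of the original sources): initialize a
// Scanner for a source buffer and consume tokens until EOF, using the Init
// and Scan entry points of this package. It assumes a go/token import;
// scanAll is a hypothetical helper name.
func scanAll(src []byte) {
	fset := token.NewFileSet()
	file := fset.AddFile("example.go", fset.Base(), len(src))
	var S Scanner
	S.Init(file, src, nil /* no ErrorHandler */, ScanComments)
	for {
		_, tok, _ := S.Scan()
		if tok == token.EOF {
			break
		}
	}
}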
func (S *Scanner) error(offs int, msg string) {
if S.err != nil {
S.err.Error(S.file.Position(S.file.Pos(offs)), msg)
S.ErrorCount++
}
-
var prefix = []byte("//line ")
func (S *Scanner) interpretLineComment(text []byte) {
}
}
-
func (S *Scanner) scanComment() {
// initial '/' already consumed; S.ch == '/' || S.ch == '*'
offs := S.offset - 1 // position of initial '/'
S.error(offs, "comment not terminated")
}
-
func (S *Scanner) findLineEnd() bool {
// initial '/' already consumed
return false
}
-
func isLetter(ch int) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
-
func isDigit(ch int) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
-
func (S *Scanner) scanIdentifier() token.Token {
offs := S.offset
for isLetter(S.ch) || isDigit(S.ch) {
return token.Lookup(S.src[offs:S.offset])
}
-
func digitVal(ch int) int {
switch {
case '0' <= ch && ch <= '9':
return 16 // larger than any legal digit val
}
-
func (S *Scanner) scanMantissa(base int) {
for digitVal(S.ch) < base {
S.next()
}
}
-
func (S *Scanner) scanNumber(seenDecimalPoint bool) token.Token {
// digitVal(S.ch) < 10
tok := token.INT
return tok
}
-
func (S *Scanner) scanEscape(quote int) {
offs := S.offset
}
}
-
func (S *Scanner) scanChar() {
// '\'' opening already consumed
offs := S.offset - 1
}
}
-
func (S *Scanner) scanString() {
// '"' opening already consumed
offs := S.offset - 1
S.next()
}
-
func (S *Scanner) scanRawString() {
// '`' opening already consumed
offs := S.offset - 1
S.next()
}
-
func (S *Scanner) skipWhitespace() {
for S.ch == ' ' || S.ch == '\t' || S.ch == '\n' && !S.insertSemi || S.ch == '\r' {
S.next()
}
}
-
// Helper functions for scanning multi-byte tokens such as >> += >>= .
// Different routines recognize different length tok_i based on matches
// of ch_i. If a token ends in '=', the result is tok1 or tok3
return tok0
}
-
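// Illustrative sketch (not part of the original sources) of the scheme
// described above: a '>' resolves to GTR, GEQ ('>='), SHR ('>>'), or
// SHR_ASSIGN ('>>=') via switch4. The actual Scan dispatch is elided in this
// listing; scanGreater is a hypothetical helper name.
func (S *Scanner) scanGreater() token.Token {
	return S.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN)
}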
func (S *Scanner) switch3(tok0, tok1 token.Token, ch2 int, tok2 token.Token) token.Token {
if S.ch == '=' {
S.next()
return tok0
}
-
func (S *Scanner) switch4(tok0, tok1 token.Token, ch2 int, tok2, tok3 token.Token) token.Token {
if S.ch == '=' {
S.next()
return tok0
}
-
// Scan scans the next token and returns the token position,
// the token, and the literal string corresponding to the
// token. The source end is indicated by token.EOF.
"testing"
)
-
var fset = token.NewFileSet()
-
const /* class */ (
special = iota
literal
keyword
)
-
func tokenclass(tok token.Token) int {
switch {
case tok.IsLiteral():
return special
}
-
type elt struct {
tok token.Token
lit string
class int
}
-
var tokens = [...]elt{
// Special tokens
{token.COMMENT, "/* a comment */", special},
{token.VAR, "var", keyword},
}
-
const whitespace = " \t \n\n\n" // to separate tokens
type testErrorHandler struct {
h.t.Errorf("Error() called (msg = %s)", msg)
}
-
func newlineCount(s string) int {
n := 0
for i := 0; i < len(s); i++ {
return n
}
-
func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
pos := fset.Position(p)
if pos.Filename != expected.Filename {
}
}
-
// Verify that calling Scan() provides the correct results.
func TestScan(t *testing.T) {
// make source
}
}
-
func checkSemi(t *testing.T, line string, mode uint) {
var S Scanner
file := fset.AddFile("TestSemis", fset.Base(), len(line))
}
}
-
var lines = []string{
// # indicates a semicolon present in the source
// $ indicates an automatically inserted semicolon
"package main$",
}
-
func TestSemis(t *testing.T) {
for _, line := range lines {
checkSemi(t, line, AllowIllegalChars|InsertSemis)
{"\n//line c:\\dir\\File1.go:100\n line100", "c:\\dir\\File1.go", 100},
}
-
// Verify that comments of the form "//line filename:line" are interpreted correctly.
func TestLineComments(t *testing.T) {
if runtime.GOOS == "windows" {
}
}
-
// Verify that initializing the same scanner more than once works correctly.
func TestInit(t *testing.T) {
var s Scanner
}
}
-
func TestIllegalChars(t *testing.T) {
var s Scanner
}
}
-
func TestStdErrorHander(t *testing.T) {
const src = "@\n" + // illegal character, cause an error
"@ @\n" + // two errors on the same line
}
}
-
type errorCollector struct {
cnt int // number of errors encountered
msg string // last error message encountered
pos token.Position // last error position encountered
}
-
func (h *errorCollector) Error(pos token.Position, msg string) {
h.cnt++
h.msg = msg
h.pos = pos
}
-
func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
var s Scanner
var h errorCollector
}
}
-
var errors = []struct {
src string
tok token.Token
{"\"abc\x80def\"", token.STRING, 4, "illegal UTF-8 encoding"},
}
-
func TestScanErrors(t *testing.T) {
for _, e := range errors {
checkError(t, e.src, e.tok, e.pos, e.err)
"sync"
)
-
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
Column int // column number, starting at 1 (character count)
}
-
// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }
-
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
return s
}
-
// Pos is a compact encoding of a source position within a file set.
// It can be converted into a Position for a more convenient, but much
// larger, representation.
//
type Pos int
-
// The zero value for Pos is NoPos; there is no file and line information
// associated with it, and NoPos.IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
//
const NoPos Pos = 0
-
// IsValid returns true if the position is valid.
func (p Pos) IsValid() bool {
return p != NoPos
}
-
func searchFiles(a []*File, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
}
-
func (s *FileSet) file(p Pos) *File {
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
return f
return nil
}
-
// File returns the file which contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
return
}
-
func (f *File) position(p Pos) (pos Position) {
offset := int(p) - f.base
pos.Offset = offset
return
}
-
// Position converts a Pos in the fileset into a general Position.
func (s *FileSet) Position(p Pos) (pos Position) {
if p != NoPos {
return
}
-
type lineInfo struct {
offset int
filename string
line int
}
-
// AddLineInfo adds alternative file and line number information for
// a given file offset. The offset must be larger than the offset for
// the previously added alternative line info and smaller than the
f.set.mutex.Unlock()
}
-
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
infos []lineInfo
}
-
// Name returns the file name of file f as registered with AddFile.
func (f *File) Name() string {
return f.name
}
-
// Base returns the base offset of file f as registered with AddFile.
func (f *File) Base() int {
return f.base
}
-
// Size returns the size of file f as registered with AddFile.
func (f *File) Size() int {
return f.size
}
-
// LineCount returns the number of lines in file f.
func (f *File) LineCount() int {
f.set.mutex.RLock()
return n
}
-
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
f.set.mutex.Unlock()
}
-
// SetLines sets the line offsets for a file and returns true if successful.
// The line offsets are the offsets of the first character of each line;
// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
return true
}
-
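// Illustrative sketch (not part of the original sources): the content
// "ab\nc\n" has the line offsets {0, 3} mentioned above; SetLines installs
// them explicitly.
func _() {
	fset := NewFileSet()
	f := fset.AddFile("example.go", fset.Base(), len("ab\nc\n"))
	if !f.SetLines([]int{0, 3}) {
		panic("unexpected SetLines failure")
	}
}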
// SetLinesForContent sets the line offsets for the given file content.
func (f *File) SetLinesForContent(content []byte) {
var lines []int
f.set.mutex.Unlock()
}
-
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
return Pos(f.base + offset)
}
-
// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
return int(p) - f.base
}
-
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
return f.Position(p).Line
}
-
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
return
}
-
func searchInts(a []int, x int) int {
// This function body is a manually inlined version of:
//
return i - 1
}
-
func searchLineInfos(a []lineInfo, x int) int {
return sort.Search(len(a), func(i int) bool { return a[i].offset > x }) - 1
}
-
// info returns the file name, line, and column number for a file offset.
func (f *File) info(offset int) (filename string, line, column int) {
filename = f.name
return
}
-
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
last *File // cache of last file looked up
}
-
// NewFileSet creates a new file set.
func NewFileSet() *FileSet {
s := new(FileSet)
return s
}
-
// Base returns the minimum base offset that must be provided to
// AddFile when adding the next file.
//
}
-
// AddFile adds a new file with a given filename, base offset, and file size
// to the file set s and returns the file. Multiple files may have the same
// name. The base offset must not be smaller than the FileSet's Base(), and
return f
}
-
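// Illustrative sketch (not part of the original sources): register a file,
// derive its line table from the content, and convert a byte offset into a
// Pos and then a Position using the API above; offsetPosition is a
// hypothetical helper name.
func offsetPosition(src []byte, offset int) Position {
	fset := NewFileSet()
	f := fset.AddFile("example.go", fset.Base(), len(src))
	f.SetLinesForContent(src)
	return fset.Position(f.Pos(offset)) // offset must be <= f.Size()
}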
// Files returns the files added to the file set.
func (s *FileSet) Files() <-chan *File {
ch := make(chan *File)
"testing"
)
-
func checkPos(t *testing.T, msg string, p, q Position) {
if p.Filename != q.Filename {
t.Errorf("%s: expected filename = %q; got %q", msg, q.Filename, p.Filename)
}
}
-
func TestNoPos(t *testing.T) {
if NoPos.IsValid() {
t.Errorf("NoPos should not be valid")
checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
}
-
var tests = []struct {
filename string
source []byte // may be nil
{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
}
-
func linecol(lines []int, offs int) (int, int) {
prevLineOffs := 0
for line, lineOffs := range lines {
return len(lines), offs - prevLineOffs + 1
}
-
func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
for offs := 0; offs < f.Size(); offs++ {
p := f.Pos(offs)
}
}
-
func makeTestSource(size int, lines []int) []byte {
src := make([]byte, size)
for _, offs := range lines {
return src
}
-
func TestPositions(t *testing.T) {
const delta = 7 // a non-zero base offset increment
fset := NewFileSet()
}
}
-
func TestLineInfo(t *testing.T) {
fset := NewFileSet()
f := fset.AddFile("foo", fset.Base(), 500)
}
}
-
func TestFiles(t *testing.T) {
fset := NewFileSet()
for i, test := range tests {
import "strconv"
-
// Token is the set of lexical tokens of the Go programming language.
type Token int
keyword_end
)
-
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
VAR: "var",
}
-
// String returns the string corresponding to the token tok.
// For operators, delimiters, and keywords the string is the actual
// token character sequence (e.g., for the token ADD, the string is
return s
}
-
// A set of constants for precedence-based expression parsing.
// Non-operators have lowest precedence, followed by operators
// starting with precedence 1 up to unary operators. The highest
HighestPrec = 7
)
-
// Precedence returns the operator precedence of the binary
// operator op. If op is not a binary operator, the result
// is LowestPrecedence.
return LowestPrec
}
-
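// Illustrative sketch (not part of the original sources): the precedence
// values that drive precedence-based expression parsing; multiplicative
// operators bind tighter than additive ones, which bind tighter than ||.
func _() {
	_ = MUL.Precedence() // 5: * / % << >> & &^
	_ = ADD.Precedence() // 4: + - | ^
	_ = LOR.Precedence() // 1: ||
}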
var keywords map[string]Token
func init() {
}
}
-
// Lookup maps an identifier to its keyword token or IDENT (if not a keyword).
//
func Lookup(ident []byte) Token {
return IDENT
}
-
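// Illustrative sketch (not part of the original sources): Lookup separates
// keywords from ordinary identifiers; this version of Lookup takes a []byte.
// isKeyword is a hypothetical helper name.
func isKeyword(name string) bool {
	return Lookup([]byte(name)) != IDENT // e.g. "func" -> FUNC, "foo" -> IDENT
}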
// Predicates
// IsLiteral returns true for tokens corresponding to identifiers
import "go/ast"
-
func (tc *typechecker) openScope() *ast.Scope {
tc.topScope = ast.NewScope(tc.topScope)
return tc.topScope
}
-
func (tc *typechecker) closeScope() {
tc.topScope = tc.topScope.Outer
}
-
// declInScope declares an object of a given kind and name in scope and sets the object's Decl and N fields.
// It returns the newly allocated object. If an object with the same name already exists in scope, an error
// is reported and the object is not inserted.
return obj
}
-
// decl is the same as declInScope(tc.topScope, ...)
func (tc *typechecker) decl(kind ast.ObjKind, name *ast.Ident, decl interface{}, n int) *ast.Object {
return tc.declInScope(tc.topScope, kind, name, decl, n)
}
-
// find returns the object with the given name if visible in the current scope hierarchy.
// If no such object is found, an error is reported and a bad object is returned instead.
func (tc *typechecker) find(name *ast.Ident) (obj *ast.Object) {
return
}
-
// findField returns the object with the given name if visible in the type's scope.
// If no such object is found, an error is reported and a bad object is returned instead.
func (tc *typechecker) findField(typ *Type, name *ast.Ident) (obj *ast.Object) {
import "go/ast"
-
// A Type represents a Go type.
type Type struct {
Form Form
Expr ast.Expr // corresponding AST expression
}
-
// NewType creates a new type of a given form.
func NewType(form Form) *Type {
return &Type{Form: form, Scope: ast.NewScope(nil)}
}
-
// Form describes the form of a type.
type Form int
Tuple
)
-
var formStrings = [...]string{
BadType: "badType",
Unresolved: "unresolved",
Tuple: "tuple",
}
-
func (form Form) String() string { return formStrings[form] }
-
// The list of basic type id's.
const (
Bool = iota
// TODO(gri) ideal types are missing
)
-
var BasicTypes = map[uint]string{
Bool: "bool",
Byte: "byte",
"os"
)
-
// TODO(gri) don't report errors for objects/types that are marked as bad.
const debug = true // set for debugging output
-
// An importer takes an import path and returns the data describing the
// respective package's exported interface. The data format is TBD.
//
type Importer func(path string) ([]byte, os.Error)
-
// CheckPackage typechecks a package and augments the AST by setting
// *ast.Object, *ast.Type, and *ast.Scope fields accordingly. If an
// importer is provided, it is used to handle imports, otherwise they
return tc.GetError(scanner.Sorted)
}
-
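// Illustrative sketch (not part of the original sources): typecheck a parsed
// package without resolving imports. It assumes the CheckPackage signature
// implied by CheckFile below (file set, *ast.Package, Importer);
// checkNoImports is a hypothetical helper name.
func checkNoImports(fset *token.FileSet, pkg *ast.Package) os.Error {
	return CheckPackage(fset, pkg, nil /* no Importer: imports are ignored */)
}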
// CheckFile typechecks a single file, but otherwise behaves like
// CheckPackage. If the complete package consists of more than just
// one file, the file may not typecheck without errors.
return CheckPackage(fset, pkg, importer)
}
-
// ----------------------------------------------------------------------------
// Typechecker state
iota int // current value of iota
}
-
func (tc *typechecker) Errorf(pos token.Pos, format string, args ...interface{}) {
tc.Error(tc.fset.Position(pos), fmt.Sprintf(format, args...))
}
-
func assert(pred bool) {
if !pred {
panic("internal error")
}
}
-
/*
Typechecking is done in several phases:
pkg.Scope = tc.topScope
}
-
func (tc *typechecker) declGlobal(global ast.Decl) {
switch d := global.(type) {
case *ast.BadDecl:
}
}
-
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
return x
}
-
func (tc *typechecker) bindMethod(method *ast.FuncDecl) {
// a method is declared in the receiver base type's scope
var scope *ast.Scope
tc.declInScope(scope, ast.Fun, method.Name, method, 0)
}
-
func (tc *typechecker) resolve(obj *ast.Object) {
// check for declaration cycles
if tc.cyclemap[obj] {
}
}
-
func (tc *typechecker) checkBlock(body []ast.Stmt, ftype *Type) {
tc.openScope()
defer tc.closeScope()
}
}
-
// ----------------------------------------------------------------------------
// Types
return x
}
-
func (tc *typechecker) declFields(scope *ast.Scope, fields *ast.FieldList, ref bool) (n uint) {
if fields != nil {
for _, f := range fields.List {
return n
}
-
func (tc *typechecker) declSignature(typ *Type, recv, params, results *ast.FieldList) {
assert((typ.Form == Method) == (recv != nil))
typ.Params = ast.NewScope(nil)
typ.N = tc.declFields(typ.Params, results, true)
}
-
func (tc *typechecker) typeFor(def *Type, x ast.Expr, ref bool) (typ *Type) {
x = unparen(x)
return
}
-
// ----------------------------------------------------------------------------
// TODO(gri) implement these place holders
func (tc *typechecker) declConst(*ast.Object) {
}
-
func (tc *typechecker) declVar(*ast.Object) {
}
-
func (tc *typechecker) checkStmt(ast.Stmt) {
}
"testing"
)
-
const testDir = "./testdata" // location of test packages
var fset = token.NewFileSet()
trace = flag.Bool("trace", false, "print package names")
)
-
// ERROR comments must be of the form /* ERROR "rx" */, where rx is
// a regular expression that matches the expected error message.
var errRx = regexp.MustCompile(`^/\* *ERROR *"([^"]*)" *\*/$`)
return
}
-
func testFilter(f *os.FileInfo) bool {
return strings.HasSuffix(f.Name, ".src") && f.Name[0] != '.'
}
-
func checkError(t *testing.T, expected, found *scanner.Error) {
rx, err := regexp.Compile(expected.Msg)
if err != nil {
}
}
-
func TestTypeCheck(t *testing.T) {
flag.Parse()
pkgRx, err := regexp.Compile(*pkgPat)
// The Universe scope contains all predeclared identifiers.
var Universe *ast.Scope
-
func def(obj *ast.Object) {
alt := Universe.Insert(obj)
if alt != nil {
}
}
-
func init() {
Universe = ast.NewScope(nil)
"strconv"
)
-
const debug = false
-
type checker struct {
fset *token.FileSet
scanner.ErrorVector
types map[ast.Expr]Type
}
-
func (c *checker) errorf(pos token.Pos, format string, args ...interface{}) string {
msg := fmt.Sprintf(format, args...)
c.Error(c.fset.Position(pos), msg)
return msg
}
-
// collectFields collects struct fields (tok = token.STRUCT), interface methods
// (tok = token.INTERFACE), and function arguments/results (tok = token.FUNC).
func (c *checker) collectFields(tok token.Token, list *ast.FieldList, cycleOk bool) (fields ObjList, tags []string, isVariadic bool) {
return
}
-
// makeType makes a new type for an AST type specification x or returns
// the type referred to by a type name x. If cycleOk is set, a type may
// refer to itself directly or indirectly; otherwise cycles are errors.
panic(fmt.Sprintf("unreachable (%T)", x))
}
-
// checkObj type checks an object.
func (c *checker) checkObj(obj *ast.Object, ref bool) {
if obj.Type != nil {
}
}
-
// Check typechecks a package.
// It augments the AST by assigning types to all ast.Objects and returns a map
// of types for all expression nodes in statements, and a scanner.ErrorList if
"testing"
)
-
// The test filenames do not end in .go so that they are invisible
// to gofmt since they contain comments that must not change their
// positions relative to surrounding tokens.
{"test0", []string{"testdata/test0.src"}},
}
-
var fset = token.NewFileSet()
-
// TODO(gri) This functionality should be in token.FileSet.
func getFile(filename string) *token.File {
for f := range fset.Files() {
return nil
}
-
// TODO(gri) This functionality should be in token.FileSet.
func getPos(filename string, offset int) token.Pos {
if f := getFile(filename); f != nil {
return token.NoPos
}
-
// TODO(gri) Need to revisit parser interface. We should be able to use parser.ParseFiles
// or a similar function instead.
func parseFiles(t *testing.T, testname string, filenames []string) (map[string]*ast.File, os.Error) {
return files, errors
}
-
// ERROR comments must be of the form /* ERROR "rx" */, where rx is
// a regular expression that matches the expected error message.
//
return errors
}
-
func eliminate(t *testing.T, expected map[token.Pos]string, errors os.Error) {
if errors == nil {
return
}
}
-
func check(t *testing.T, testname string, testfiles []string) {
// TODO(gri) Eventually all these different phases should be
// subsumed into a single function call that takes
}
}
-
func TestCheck(t *testing.T) {
// For easy debugging w/o changing the testing code,
// if there is a local test file, only test that file.
"strconv"
)
-
// TODO(gri) Consider changing the API so Const is an interface
// and operations on consts don't have to type switch.
val interface{}
}
-
// Representation of complex values.
type cmplx struct {
re, im *big.Rat
}
-
func assert(cond bool) {
if !cond {
panic("go/types internal error: assertion failed")
}
}
-
// MakeConst makes an ideal constant from a literal
// token and the corresponding literal string.
func MakeConst(tok token.Token, lit string) Const {
panic("unreachable")
}
-
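// Illustrative sketch (not part of the original sources): build two ideal
// integer constants from their literal strings and add them via BinaryOp,
// using the MakeConst and BinaryOp entry points shown in this file.
func _() {
	x := MakeConst(token.INT, "3")
	y := MakeConst(token.INT, "4")
	_ = x.BinaryOp(token.ADD, y) // the ideal constant 7
}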
// MakeZero returns the zero constant for the given type.
func MakeZero(typ *Type) Const {
// TODO(gri) fix this
return Const{0}
}
-
// Match attempts to match the internal constant representations of x and y.
// If the attempt is successful, the result is the values of x and y,
// if necessary converted to have the same internal representation; otherwise
return
}
-
// Convert attempts to convert the constant x to a given type.
// If the attempt is successful, the result is the new constant;
// otherwise the result is invalid.
return x
}
-
func (x Const) String() string {
switch x := x.val.(type) {
case bool:
panic("unreachable")
}
-
func (x Const) UnaryOp(op token.Token) Const {
panic("unimplemented")
}
-
func (x Const) BinaryOp(op token.Token, y Const) Const {
var z interface{}
switch x := x.val.(type) {
return Const{z}
}
-
func binaryBoolOp(x bool, op token.Token, y bool) interface{} {
switch op {
case token.EQL:
panic("unreachable")
}
-
func binaryIntOp(x *big.Int, op token.Token, y *big.Int) interface{} {
var z big.Int
switch op {
panic("unreachable")
}
-
func binaryFloatOp(x *big.Rat, op token.Token, y *big.Rat) interface{} {
var z big.Rat
switch op {
panic("unreachable")
}
-
func binaryCmplxOp(x cmplx, op token.Token, y cmplx) interface{} {
a, b := x.re, x.im
c, d := y.re, y.im
panic("unreachable")
}
-
func binaryStringOp(x string, op token.Token, y string) interface{} {
switch op {
case token.ADD:
"strings"
)
-
func readGopackHeader(buf *bufio.Reader) (name string, size int, err os.Error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 64+12+6+6+8+10+2)
return
}
-
type dataReader struct {
*bufio.Reader
io.Closer
}
-
// ExportData returns a readCloser positioned at the beginning of the
// export data section of the given object/archive file, or an error.
// It is the caller's responsibility to close the readCloser.
"strconv"
)
-
const trace = false // set to true for debugging
var (
pkgExts = [...]string{".a", ".5", ".6", ".8"}
)
-
// findPkg returns the filename and package id for an import path.
// If no file was found, an empty filename is returned.
func findPkg(path string) (filename, id string) {
return
}
-
// gcParser parses the exports inside a gc compiler-produced
// object/archive file and populates its scope with the results.
type gcParser struct {
imports map[string]*ast.Object // package id -> package object
}
-
func (p *gcParser) init(filename, id string, src io.Reader, imports map[string]*ast.Object) {
p.scanner.Init(src)
p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) }
p.imports = imports
}
-
func (p *gcParser) next() {
p.tok = p.scanner.Scan()
switch p.tok {
}
}
-
// GcImporter implements the ast.Importer signature.
func GcImporter(imports map[string]*ast.Object, path string) (pkg *ast.Object, err os.Error) {
if path == "unsafe" {
return
}
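// Minimal usage sketch: the shared imports map caches previously imported
// packages, and the returned *ast.Object describes the package named by the
// import path. The helper name and the path "fmt" are illustrative only.
func exampleGcImport() (*ast.Object, os.Error) {
	imports := make(map[string]*ast.Object)
	return GcImporter(imports, "fmt")
}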
-
// ----------------------------------------------------------------------------
// Error handling
err os.Error
}
-
func (e importError) String() string {
return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err)
}
-
func (p *gcParser) error(err interface{}) {
if s, ok := err.(string); ok {
err = os.NewError(s)
panic(importError{p.scanner.Pos(), err.(os.Error)})
}
-
func (p *gcParser) errorf(format string, args ...interface{}) {
p.error(fmt.Sprintf(format, args...))
}
-
func (p *gcParser) expect(tok int) string {
lit := p.lit
if p.tok != tok {
return lit
}
-
func (p *gcParser) expectSpecial(tok string) {
sep := 'x' // not white space
i := 0
}
}
-
func (p *gcParser) expectKeyword(keyword string) {
lit := p.expect(scanner.Ident)
if lit != keyword {
}
}
-
// ----------------------------------------------------------------------------
// Import declarations
return pkg
}
-
// dotIdentifier = ( ident | '·' ) { ident | int | '·' } .
func (p *gcParser) parseDotIdent() string {
ident := ""
return ident
}
-
// ExportedName = ImportPath "." dotIdentifier .
//
func (p *gcParser) parseExportedName(kind ast.ObjKind) *ast.Object {
return obj
}
-
// ----------------------------------------------------------------------------
// Types
return obj.Type.(Type)
}
-
// ArrayType = "[" int_lit "]" Type .
//
func (p *gcParser) parseArrayType() Type {
return &Array{Len: n, Elt: elt}
}
-
// MapType = "map" "[" Type "]" Type .
//
func (p *gcParser) parseMapType() Type {
return &Map{Key: key, Elt: elt}
}
-
// Name = identifier | "?" .
//
func (p *gcParser) parseName() (name string) {
return
}
-
// Field = Name Type [ ":" string_lit ] .
//
func (p *gcParser) parseField() (fld *ast.Object, tag string) {
return
}
-
// StructType = "struct" "{" [ FieldList ] "}" .
// FieldList = Field { ";" Field } .
//
return &Struct{Fields: fields, Tags: tags}
}
-
// Parameter = ( identifier | "?" ) [ "..." ] Type [ ":" string_lit ] .
//
func (p *gcParser) parseParameter() (par *ast.Object, isVariadic bool) {
return
}
-
// Parameters = "(" [ ParameterList ] ")" .
// ParameterList = { Parameter "," } Parameter .
//
return
}
-
// Signature = Parameters [ Result ] .
// Result = Type | Parameters .
//
return &Func{Params: params, Results: results, IsVariadic: isVariadic}
}
-
// MethodSpec = identifier Signature .
//
func (p *gcParser) parseMethodSpec() *ast.Object {
return ast.NewObj(ast.Fun, "_")
}
-
// InterfaceType = "interface" "{" [ MethodList ] "}" .
// MethodList = MethodSpec { ";" MethodSpec } .
//
return &Interface{Methods: methods}
}
-
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
//
func (p *gcParser) parseChanType() Type {
return &Chan{Dir: dir, Elt: elt}
}
-
// Type =
// BasicType | TypeName | ArrayType | SliceType | StructType |
// PointerType | FuncType | InterfaceType | MapType | ChanType |
return nil
}
-
// ----------------------------------------------------------------------------
// Declarations
pkg.Name = name
}
-
// int_lit = [ "+" | "-" ] { "0" ... "9" } .
//
func (p *gcParser) parseInt() (sign, val string) {
return
}
-
// number = int_lit [ "p" int_lit ] .
//
func (p *gcParser) parseNumber() Const {
return Const{mant}
}
-
// ConstDecl = "const" ExportedName [ Type ] "=" Literal .
// Literal = bool_lit | int_lit | float_lit | complex_lit | string_lit .
// bool_lit = "true" | "false" .
obj.Data = x
}
-
// TypeDecl = "type" ExportedName Type .
//
func (p *gcParser) parseTypeDecl() {
}
}
-
// VarDecl = "var" ExportedName Type .
//
func (p *gcParser) parseVarDecl() {
obj.Type = p.parseType()
}
-
// FuncDecl = "func" ExportedName Signature .
//
func (p *gcParser) parseFuncDecl() {
obj.Type = p.parseSignature()
}
-
// MethodDecl = "func" Receiver identifier Signature .
// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" .
//
p.parseSignature()
}
-
// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
//
func (p *gcParser) parseDecl() {
p.expect('\n')
}
-
// ----------------------------------------------------------------------------
// Export
"time"
)
-
var gcName, gcPath string // compiler name and path
func init() {
gcPath, _ = exec.LookPath(gcName)
}
-
func compile(t *testing.T, dirname, filename string) {
cmd := exec.Command(gcPath, filename)
cmd.Dir = dirname
t.Logf("%s", string(out))
}
-
// Use the same global imports map for all tests. The effect is
// as if all tested packages were imported into a single package.
var imports = make(map[string]*ast.Object)
return true
}
-
const maxTime = 3e9 // maximum allotted testing time in ns
func testDir(t *testing.T, dir string, endTime int64) (nimports int) {
return
}
-
func TestGcImport(t *testing.T) {
compile(t, "testdata", "exports.go")
"go/ast"
)
-
const (
C0 int = 0
C1 = 3.14159265
C7 = `bar\n`
)
-
type (
T1 int
T2 [10]int
T28 func(T28) T28
)
-
var (
V0 int
V1 = -991.0
)
-
func F1() {}
func F2(x int) {}
func F3() int { return 0 }
func F4() float32 { return 0 }
func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
-
func (p *T1) M1()
"sort"
)
-
// All types implement the Type interface.
type Type interface {
isType()
}
-
// All concrete types embed ImplementsType which
// ensures that all types implement the Type interface.
type ImplementsType struct{}
func (t *ImplementsType) isType() {}
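// Illustrative only: a hypothetical Tuple type showing the embedding pattern
// used by every concrete type in this package. It is not part of the actual
// type set.
type Tuple struct {
	ImplementsType
	Elts []Type // element types
}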
-
// A Bad type is a non-nil placeholder type when we don't know a type.
type Bad struct {
ImplementsType
Msg string // for better error reporting/debugging
}
-
// A Basic represents an (unnamed) basic type.
type Basic struct {
ImplementsType
// TODO(gri) need a field specifying the exact basic type
}
-
// An Array represents an array type [Len]Elt.
type Array struct {
ImplementsType
Elt Type
}
-
// A Slice represents a slice type []Elt.
type Slice struct {
ImplementsType
Elt Type
}
-
// A Struct represents a struct type struct{...}.
// Anonymous fields are represented by objects with empty names.
type Struct struct {
// - there is no scope for fast lookup (but the parser creates one)
}
-
// A Pointer represents a pointer type *Base.
type Pointer struct {
ImplementsType
Base Type
}
-
// A Func represents a function type func(...) (...).
// Unnamed parameters are represented by objects with empty names.
type Func struct {
IsVariadic bool // true if the last parameter's type is of the form ...T
}
-
// An Interface represents an interface type interface{...}.
type Interface struct {
ImplementsType
Methods ObjList // interface methods sorted by name; or nil
}
-
// A Map represents a map type map[Key]Elt.
type Map struct {
ImplementsType
Key, Elt Type
}
-
// A Chan represents a channel type chan Elt, <-chan Elt, or chan<- Elt.
type Chan struct {
ImplementsType
Elt Type
}
-
// A Name represents a named type as declared in a type declaration.
type Name struct {
ImplementsType
// TODO(gri) need to remember fields and methods.
}
-
// If typ is a pointer type, Deref returns the pointer's base type;
// otherwise it returns typ.
func Deref(typ Type) Type {
return typ
}
-
// Underlying returns the underlying type of a type.
func Underlying(typ Type) Type {
if typ, ok := typ.(*Name); ok {
return typ
}
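// Sketch of the intended behavior of Deref and Underlying, using the concrete
// types declared above; the identifiers are local to the example.
func exampleDeref() {
	elem := &Basic{}
	ptr := &Pointer{Base: elem}
	_ = Deref(ptr)  // yields elem, the pointer's base type
	_ = Deref(elem) // not a pointer: yields elem unchanged
	named := &Name{Underlying: elem}
	_ = Underlying(named) // yields elem, the named type's underlying type
}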
-
// An ObjList represents an ordered (in some fashion) list of objects.
type ObjList []*ast.Object
// Sort sorts an object list by object name.
func (list ObjList) Sort() { sort.Sort(list) }
-
// identicalTypes returns true if both lists a and b have the
// same length and corresponding objects have identical types.
func identicalTypes(a, b ObjList) bool {
return false
}
-
// Identical returns true if two types are identical.
func Identical(x, y Type) bool {
if x == y {
import "go/ast"
-
var (
scope *ast.Scope // current scope to use for initialization
Universe *ast.Scope
Unsafe *ast.Object // package unsafe
)
-
func define(kind ast.ObjKind, name string) *ast.Object {
obj := ast.NewObj(kind, name)
if scope.Insert(obj) != nil {
return obj
}
-
func defType(name string) *Name {
obj := define(ast.Typ, name)
typ := &Name{Underlying: &Basic{}, Obj: obj}
return typ
}
-
func defConst(name string) {
obj := define(ast.Con, name)
_ = obj // TODO(gri) fill in other properties
}
-
func defFun(name string) {
obj := define(ast.Fun, name)
_ = obj // TODO(gri) fill in other properties
}
-
var (
Bool,
Int,
String *Name
)
-
func init() {
scope = ast.NewScope(nil)
Universe = scope
}
}
-
func TestNesting(t *testing.T) {
type RT struct {
A string
}
}
-
func TestBadRecursiveType(t *testing.T) {
type Rec ***Rec
var rec Rec
deb.wireType[id] = wire
}
-
// Value:
// SingletonValue | StructValue
func (deb *debugger) value(indent tab, id typeId) {
return 0
}
-
// NonNilInterfaceValue:
// ConcreteTypeName TypeSequence InterfaceContents
// ConcreteTypeName:
}
}
-
// ArrayValue:
// uint(n) FieldValue*n
func (deb *debugger) arrayValue(indent tab, wire *wireType) {
return false
}
-
func TestCGIBasicGet(t *testing.T) {
if skipTest(t) {
return
}
}
-
func TestDirUnix(t *testing.T) {
if skipTest(t) || runtime.GOOS == "windows" {
return
return send(req, c.Transport)
}
-
// send issues an HTTP request. Caller should close resp.Body when done reading from it.
func send(req *Request, t RoundTripper) (resp *Response, err os.Error) {
if t == nil {
"strconv"
)
-
// hello world, the web server
var helloRequests = expvar.NewInt("hello-requests")
w.Write([]byte("oops"))
}
-
var webroot = flag.String("root", "/home/rsc", "web root directory")
func main() {
encodeOpaque
)
-
type URLEscapeError string
func (e URLEscapeError) String() string {
return true
}
-
// URLUnescape unescapes a string in ``URL encoded'' form,
// converting %AB into the byte 0xAB and '+' into ' ' (space).
// It returns an error if any % is not followed
return sa
}
-
func sortedByFirstByte(data []byte) []int {
// total byte counts
var count [256]int
return sa
}
-
func initGroups(sa []int, data []byte) []int {
// label contiguous same-letter groups with the same group number
inv := make([]int, len(data))
return inv
}
-
type suffixSortable struct {
sa []int
inv []int
func (x *suffixSortable) Less(i, j int) bool { return x.inv[x.sa[i]+x.h] < x.inv[x.sa[j]+x.h] }
func (x *suffixSortable) Swap(i, j int) { x.sa[i], x.sa[j] = x.sa[j], x.sa[i] }
-
func (x *suffixSortable) updateGroups(offset int) {
bounds := make([]int, 0, 4)
group := x.inv[x.sa[0]+x.h]
"sort"
)
-
// Index implements a suffix array for fast substring search.
type Index struct {
data []byte
sa []int // suffix array for data
}
-
// New creates a new Index for data.
// Index creation time is O(N*log(N)) for N = len(data).
func New(data []byte) *Index {
return &Index{data, qsufsort(data)}
}
-
// Bytes returns the data over which the index was created.
// It must not be modified.
//
return x.data
}
-
func (x *Index) at(i int) []byte {
return x.data[x.sa[i]:]
}
-
// lookupAll returns a slice into the matching region of the index.
// The runtime is O(log(N)*len(s)).
func (x *Index) lookupAll(s []byte) []int {
return x.sa[i:j]
}
-
// Lookup returns an unsorted list of at most n indices where the byte string s
// occurs in the indexed data. If n < 0, all occurrences are returned.
// The result is nil if s is empty, s is not found, or n == 0.
return
}
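// Typical use of the index, based on the New and Lookup signatures above;
// the data, pattern, and helper name are illustrative only.
func exampleLookup() []int {
	x := New([]byte("banana"))
	return x.Lookup([]byte("an"), -1) // offsets of every occurrence of "an"
}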
-
// FindAllIndex returns a sorted list of non-overlapping matches of the
// regular expression r, where a match is a pair of indices specifying
// the matched slice of x.Bytes(). If n < 0, all matches are returned
"testing"
)
-
type testCase struct {
name string // name of test case
source string // source to index
patterns []string // patterns to lookup
}
-
var testCases = []testCase{
{
"empty string",
},
}
-
// find all occurrences of s in source; report at most n occurrences
func find(src, s string, n int) []int {
var res vector.IntVector
return res
}
-
func testLookup(t *testing.T, tc *testCase, x *Index, s string, n int) {
res := x.Lookup([]byte(s), n)
exp := find(tc.source, s, n)
}
}
-
func testFindAllIndex(t *testing.T, tc *testCase, x *Index, rx *regexp.Regexp, n int) {
res := x.FindAllIndex(rx, n)
exp := rx.FindAllStringIndex(tc.source, n)
}
}
-
func testLookups(t *testing.T, tc *testCase, x *Index, n int) {
for _, pat := range tc.patterns {
testLookup(t, tc, x, pat, n)
}
}
-
// index is used to hide the sort.Interface
type index Index
func (x *index) Swap(i, j int) { x.sa[i], x.sa[j] = x.sa[j], x.sa[i] }
func (a *index) at(i int) []byte { return a.data[a.sa[i]:] }
-
func testConstruction(t *testing.T, tc *testCase, x *Index) {
if !sort.IsSorted((*index)(x)) {
t.Errorf("testConstruction failed %s", tc.name)
}
}
-
func TestIndex(t *testing.T) {
for _, tc := range testCases {
x := New([]byte(tc.source))
os.Remove(filename) // ignore error
}
-
func TestReadDir(t *testing.T) {
dirname := "rumpelstilzchen"
_, err := ReadDir(dirname)
UnmarshalJSON([]byte) os.Error
}
-
// An UnmarshalTypeError describes a JSON value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
return m
}
-
// literalInterface is like literal but returns an interface value.
func (d *decodeState) literalInterface() interface{} {
// All bytes inside literal return scanContinue op code.
package math
-
// The original C code, the long comment, and the constants
// below are from FreeBSD's /usr/src/lib/msun/src/e_acosh.c
// and came with this notice. The go code is a simplified
package math
-
/*
Floating-point arcsine and arccosine.
package math
-
// The original C code, the long comment, and the constants
// below are from FreeBSD's /usr/src/lib/msun/src/s_asinh.c
// and came with this notice. The go code is a simplified
package math
-
// The original C code, the long comment, and the constants
// below are from FreeBSD's /usr/src/lib/msun/src/e_atanh.c
// and came with this notice. The go code is a simplified
package math
-
/*
Floating-point error function and complementary error function.
*/
package math
-
// The original C code, the long comment, and the constants
// below are from FreeBSD's /usr/src/lib/msun/src/e_exp.c
// and came with this notice. The go code is a simplified
package math
-
// The original C code, the long comment, and the constants
// below are from FreeBSD's /usr/src/lib/msun/src/s_expm1.c
// and came with this notice. The go code is a simplified
package math
-
// Floor returns the greatest integer value less than or equal to x.
//
// Special cases are:
package math
-
/*
Floating-point mod function.
*/
package math
-
// The original C code, the long comment, and the constants
// below are from FreeBSD's /usr/src/lib/msun/src/s_log1p.c
// and came with this notice. The go code is a simplified
package math
-
/*
Floating-point sine and cosine.
package math
-
/*
Floating-point hyperbolic sine and cosine.
package math
-
/*
Floating point tangent.
*/
package math
-
/*
Floating-point hyperbolic tangent.
return p.dispositionParams["name"]
}
-
// FileName returns the filename parameter of the Part's
// Content-Disposition header.
func (p *Part) FileName() string {
return nil, &DNSError{Error: "no answer from server", Name: name, Server: server, IsTimeout: true}
}
-
// Do a lookup for a single name, which must be rooted
// (otherwise answer will not find the answers).
func tryOneName(cfg *dnsConfig, name string, qtype uint16) (cname string, addrs []dnsRR, err os.Error) {
extra []dnsRR
}
-
func (dns *dnsMsg) Pack() (msg []byte, ok bool) {
var dh dnsHeader
}
}
-
// ioSrv executes net io requests.
type ioSrv struct {
submchan chan anOpIface // submit io requests
return ii[:c-1], nil
}
-
// If the ifindex is zero, interfaceTable returns mappings of all
// network interfaces. Otherwise it returns a mapping of a specific
// interface.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-
// TODO(cw): ListenPacket test, Read() test, ipv6 test &
// Dial()/Listen() level tests
var env map[string]string
var once sync.Once
-
func copyenv() {
env = make(map[string]string)
for _, s := range Envs {
func epipecheck(file *File, e syscall.Error) {
}
-
// DevNull is the name of the operating system's ``null device.''
// On Unix-like systems, it is "/dev/null"; on Windows, "NUL".
const DevNull = "/dev/null"
return NewFile(p[0], "|0"), NewFile(p[1], "|1"), nil
}
-
// not supported on Plan 9
// Link creates a hard link.
"unsafe"
)
-
type Event struct {
Mask uint32 // Mask of events
Cookie uint32 // Unique cookie associating related events (for rename(2))
isClosed bool // Set to true when Close() is first called
}
-
// NewWatcher creates and returns a new inotify instance using inotify_init(2)
func NewWatcher() (*Watcher, os.Error) {
fd, errno := syscall.InotifyInit()
return w, nil
}
-
// Close closes an inotify watcher instance
// It sends a message to the reader goroutine to quit and removes all watches
// associated with the inotify instance
return nil
}
-
// Watch adds path to the watched file set, watching all events.
func (w *Watcher) Watch(path string) os.Error {
return w.AddWatch(path, IN_ALL_EVENTS)
}
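// Usage sketch, assuming the Watcher delivers events on the Event channel
// mentioned in the readEvents comment below; error handling is abbreviated
// and the helper name is illustrative only.
func exampleWatch() os.Error {
	watcher, err := NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()
	if err := watcher.Watch("/tmp"); err != nil {
		return err
	}
	ev := <-watcher.Event // block until the first event arrives
	_ = ev.String()
	return nil
}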
-
// RemoveWatch removes path from the watched file set.
func (w *Watcher) RemoveWatch(path string) os.Error {
watch, ok := w.watches[path]
return nil
}
-
// readEvents reads from the inotify file descriptor, converts the
// received events into Event objects and sends them via the Event channel
func (w *Watcher) readEvents() {
}
}
-
// String formats the event e in the form
// "filename: 0xEventMask = IN_ACCESS|IN_ATTRIB_|..."
func (e *Event) String() string {
}
}
-
func TestInotifyClose(t *testing.T) {
watcher, _ := NewWatcher()
watcher.Close()
return output
}
-
func TestHostname(t *testing.T) {
// There is no other way to fetch the hostname on Windows but via the winapi.
// On Plan 9 it can be taken from #c/sysname, as Hostname() does.
package os
-
// MkdirAll creates a directory named path,
// along with any necessary parents, and returns nil,
// or else returns an error.
var Args []string // provided by runtime
var Envs []string // provided by runtime
-
// Getuid returns the numeric user id of the caller.
func Getuid() int { return syscall.Getuid() }
return nil, &PathError{"stat", name, Ebadstat}
}
-
// Stat returns a FileInfo structure describing the named file and an error, if any.
func Stat(name string) (fi *FileInfo, err Error) {
d, err := dirstat(name)
package os
-
// Hostname returns the host name reported by the kernel.
func Hostname() (name string, err Error) {
f, err := Open("/proc/sys/kernel/hostname")
package os
-
func Hostname() (name string, err Error) {
f, err := Open("#c/sysname")
if err != nil {
import "syscall"
-
// Time returns the current time, in whole seconds and
// fractional nanoseconds, plus an Error if any. The current
// time is thus 1e9*sec+nsec, in nanoseconds. The zero of
}
}
-
func TestAll(t *testing.T) {
testType(t, 1, TypeOf((int8)(0)), "int8")
testType(t, 2, TypeOf((*int8)(nil)).Elem(), "int8")
}
}
-
func check2ndField(x interface{}, offs uintptr, t *testing.T) {
s := ValueOf(x)
f := s.Type().Field(1)
BothDir = RecvDir | SendDir
)
-
// arrayType represents a fixed array type.
type arrayType struct {
commonType `reflect:"array"`
fields []structField
}
-
/*
* The compiler knows the exact layout of all the data structures above.
* The compiler does not know about the data structures and methods below.
return c.rwc.Close()
}
-
// DialHTTP connects to an HTTP RPC server at the specified network address
// listening on the default HTTP RPC path.
func DialHTTP(network, address string) (*Client, os.Error) {
return c.rwc.Close()
}
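// Client-side sketch: dial the default HTTP RPC path and make a synchronous
// call via the client's Call method. The address, service method, argument,
// and reply are placeholders.
func exampleDialHTTP() os.Error {
	client, err := DialHTTP("tcp", "localhost:1234")
	if err != nil {
		return err
	}
	var reply int
	return client.Call("Arith.Multiply", 7, &reply)
}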
-
// ServeConn runs the server on a single connection.
// ServeConn blocks, serving the connection until the client hangs up.
// The caller typically invokes ServeConn in a go statement.
"utf8"
)
-
// TODO(gri): Consider changing this to use the new (token) Position package.
// A source position is represented by a Position value.
Column int // column number, starting at 1 (character count per line)
}
-
// IsValid returns true if the position is valid.
func (pos *Position) IsValid() bool { return pos.Line > 0 }
-
func (pos Position) String() string {
s := pos.Filename
if pos.IsValid() {
return s
}
-
// Predefined mode bits to control recognition of tokens. For instance,
// to configure a Scanner such that it only recognizes (Go) identifiers,
// integers, and skips comments, set the Scanner's Mode field to:
GoTokens = ScanIdents | ScanFloats | ScanChars | ScanStrings | ScanRawStrings | ScanComments | SkipComments
)
-
// The result of Scan is one of the following tokens or a Unicode character.
const (
EOF = -(iota + 1)
skipComment
)
-
var tokenString = map[int]string{
EOF: "EOF",
Ident: "Ident",
Comment: "Comment",
}
-
// TokenString returns a (visible) string for a token or Unicode character.
func TokenString(tok int) string {
if s, found := tokenString[tok]; found {
return fmt.Sprintf("%q", string(tok))
}
-
// GoWhitespace is the default value for the Scanner's Whitespace field.
// Its value selects Go's white space characters.
const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' '
-
const bufLen = 1024 // at least utf8.UTFMax
// A Scanner implements reading of Unicode characters and tokens from an io.Reader.
Position
}
-
// Init initializes a Scanner with a new source and returns s.
// Error is set to nil, ErrorCount is set to 0, Mode is set to GoTokens,
// and Whitespace is set to GoWhitespace.
return s
}
-
// TODO(gri): The code for next() and the internal scanner state could benefit
// from a rethink. While next() is optimized for the common ASCII
// case, the "corrections" needed for proper position tracking undo
return ch
}
-
// Next reads and returns the next Unicode character.
// It returns EOF at the end of the source. It reports
// a read error by calling s.Error, if not nil; otherwise
return ch
}
-
// Peek returns the next Unicode character in the source without advancing
// the scanner. It returns EOF if the scanner's position is at the last
// character of the source.
return s.ch
}
-
func (s *Scanner) error(msg string) {
s.ErrorCount++
if s.Error != nil {
fmt.Fprintf(os.Stderr, "%s: %s\n", s.Position, msg)
}
-
func (s *Scanner) scanIdentifier() int {
ch := s.next() // read character after first '_' or letter
for ch == '_' || unicode.IsLetter(ch) || unicode.IsDigit(ch) {
return ch
}
-
func digitVal(ch int) int {
switch {
case '0' <= ch && ch <= '9':
return 16 // larger than any legal digit val
}
-
func isDecimal(ch int) bool { return '0' <= ch && ch <= '9' }
-
func (s *Scanner) scanMantissa(ch int) int {
for isDecimal(ch) {
ch = s.next()
return ch
}
-
func (s *Scanner) scanFraction(ch int) int {
if ch == '.' {
ch = s.scanMantissa(s.next())
return ch
}
-
func (s *Scanner) scanExponent(ch int) int {
if ch == 'e' || ch == 'E' {
ch = s.next()
return ch
}
-
func (s *Scanner) scanNumber(ch int) (int, int) {
// isDecimal(ch)
if ch == '0' {
return Int, ch
}
-
func (s *Scanner) scanDigits(ch, base, n int) int {
for n > 0 && digitVal(ch) < base {
ch = s.next()
return ch
}
-
func (s *Scanner) scanEscape(quote int) int {
ch := s.next() // read character after '\\'
switch ch {
return ch
}
-
func (s *Scanner) scanString(quote int) (n int) {
ch := s.next() // read character after quote
for ch != quote {
return
}
-
func (s *Scanner) scanRawString() {
ch := s.next() // read character after '`'
for ch != '`' {
}
}
-
func (s *Scanner) scanChar() {
if s.scanString('\'') != 1 {
s.error("illegal char literal")
}
}
-
func (s *Scanner) scanComment(ch int) int {
// ch == '/' || ch == '*'
if ch == '/' {
return ch
}
-
// Scan reads the next token or Unicode character from source and returns it.
// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set.
// It returns EOF at the end of the source. It reports scanner errors (read and
return tok
}
-
// Pos returns the position of the character immediately after
// the character or token returned by the last call to Next or Scan.
func (s *Scanner) Pos() (pos Position) {
return
}
-
// TokenText returns the string corresponding to the most recently scanned token.
// Valid after calling Scan().
func (s *Scanner) TokenText() string {
"utf8"
)
-
// A StringReader delivers its data one string segment at a time via Read.
type StringReader struct {
data []string
step int
}
-
func (r *StringReader) Read(p []byte) (n int, err os.Error) {
if r.step < len(r.data) {
s := r.data[r.step]
return
}
-
func readRuneSegments(t *testing.T, segments []string) {
got := ""
want := strings.Join(segments, "")
}
}
-
var segmentList = [][]string{
{},
{""},
{"Hello", ", ", "", "World", "!"},
}
-
func TestNext(t *testing.T) {
for _, s := range segmentList {
readRuneSegments(t, s)
}
}
-
type token struct {
tok int
text string
{'(', "("},
}
-
func makeSource(pattern string) *bytes.Buffer {
var buf bytes.Buffer
for _, k := range tokenList {
return &buf
}
-
func checkTok(t *testing.T, s *Scanner, line, got, want int, text string) {
if got != want {
t.Fatalf("tok = %s, want %s for %q", TokenString(got), TokenString(want), text)
}
}
-
func countNewlines(s string) int {
n := 0
for _, ch := range s {
return n
}
-
func testScan(t *testing.T, mode uint) {
s := new(Scanner).Init(makeSource(" \t%s\n"))
s.Mode = mode
checkTok(t, s, line, tok, EOF, "")
}
-
func TestScan(t *testing.T) {
testScan(t, GoTokens)
testScan(t, GoTokens&^SkipComments)
}
-
func TestPosition(t *testing.T) {
src := makeSource("\t\t\t\t%s\n")
s := new(Scanner).Init(src)
}
}
-
func TestScanZeroMode(t *testing.T) {
src := makeSource("%s\n")
str := src.String()
}
}
-
func testScanSelectedMode(t *testing.T, mode uint, class int) {
src := makeSource("%s\n")
s := new(Scanner).Init(src)
}
}
-
func TestScanSelectedMask(t *testing.T) {
testScanSelectedMode(t, 0, 0)
testScanSelectedMode(t, ScanIdents, Ident)
testScanSelectedMode(t, ScanComments, Comment)
}
-
func TestScanNext(t *testing.T) {
s := new(Scanner).Init(bytes.NewBufferString("if a == bcd /* comment */ {\n\ta += c\n} // line comment ending in eof"))
checkTok(t, s, 1, s.Scan(), Ident, "if")
}
}
-
func TestScanWhitespace(t *testing.T) {
var buf bytes.Buffer
var ws uint64
}
}
-
func testError(t *testing.T, src, msg string, tok int) {
s := new(Scanner).Init(bytes.NewBufferString(src))
errorCalled := false
}
}
-
func TestError(t *testing.T) {
testError(t, "\x00", "illegal character NUL", 0)
testError(t, "\xff", "illegal UTF-8 encoding", utf8.RuneError)
testError(t, `"abc`+"\xff"+`def"`, "illegal UTF-8 encoding", String)
}
-
func checkPos(t *testing.T, got, want Position) {
if got.Offset != want.Offset || got.Line != want.Line || got.Column != want.Column {
t.Errorf("got offset, line, column = %d, %d, %d; want %d, %d, %d",
}
}
-
func checkNextPos(t *testing.T, s *Scanner, offset, line, column, char int) {
if ch := s.Next(); ch != char {
t.Errorf("ch = %s, want %s", TokenString(ch), TokenString(char))
checkPos(t, s.Pos(), want)
}
-
func checkScanPos(t *testing.T, s *Scanner, offset, line, column, char int) {
want := Position{Offset: offset, Line: line, Column: column}
checkPos(t, s.Pos(), want)
checkPos(t, s.Position, want)
}
-
func TestPos(t *testing.T) {
// corner case: empty source
s := new(Scanner).Init(bytes.NewBufferString(""))
return i
}
-
// Convenience wrappers for common cases.
// SearchInts searches for x in a sorted slice of ints and returns the index
return Search(len(a), func(i int) bool { return a[i] >= x })
}
-
// SearchFloat64s searches for x in a sorted slice of float64s and returns the index
// as specified by Search. The slice must be sorted in ascending order.
//
return Search(len(a), func(i int) bool { return a[i] >= x })
}
-
// SearchStrings searches for x in a sorted slice of strings and returns the index
// as specified by Search. The slice must be sorted in ascending order.
//
return Search(len(a), func(i int) bool { return a[i] >= x })
}
-
// Search returns the result of applying SearchInts to the receiver and x.
func (p IntSlice) Search(x int) int { return SearchInts(p, x) }
-
// Search returns the result of applying SearchFloat64s to the receiver and x.
func (p Float64Slice) Search(x float64) int { return SearchFloat64s(p, x) }
-
// Search returns the result of applying SearchStrings to the receiver and x.
func (p StringSlice) Search(x string) int { return SearchStrings(p, x) }
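// Typical use of the wrappers above: report whether x is present in the
// sorted slice a. The helper name is illustrative only.
func exampleContains(a []int, x int) bool {
	i := SearchInts(a, x)          // a must be sorted in ascending order
	return i < len(a) && a[i] == x // otherwise i is the insertion index
}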
import "testing"
-
func f(a []int, x int) func(int) bool {
return func(i int) bool {
return a[i] >= x
}
}
-
var data = []int{0: -10, 1: -5, 2: 0, 3: 1, 4: 2, 5: 3, 6: 5, 7: 7, 8: 11, 9: 100, 10: 100, 11: 100, 12: 1000, 13: 10000}
var tests = []struct {
{"overflow", 2e9, func(i int) bool { return false }, 2e9},
}
-
func TestSearch(t *testing.T) {
for _, e := range tests {
i := Search(e.n, e.f)
}
}
-
// log2 computes the binary logarithm of x, rounded up to the next integer.
// (log2(0) == 0, log2(1) == 0, log2(2) == 1, log2(3) == 2, etc.)
//
return n
}
-
func TestSearchEfficiency(t *testing.T) {
n := 100
step := 1
}
}
-
// Smoke tests for convenience wrappers - not comprehensive.
var fdata = []float64{0: -3.14, 1: 0, 2: 1, 3: 2, 4: 1000.7}
{"StringSlice.Search", StringSlice(sdata).Search("x"), 3},
}
-
func TestSearchWrappers(t *testing.T) {
for _, e := range wrappertests {
if e.result != e.i {
}
}
-
// Abstract exhaustive test: all sizes up to 100,
// all possible return values. If there are any small
// corner cases, this test exercises them.
func Sort(data Interface) { quickSort(data, 0, data.Len()) }
-
func IsSorted(data Interface) bool {
n := data.Len()
for i := n - 1; i > 0; i-- {
return true
}
-
// Convenience types for common cases
// IntSlice attaches the methods of Interface to []int, sorting in increasing order.
// Sort is a convenience method.
func (p IntSlice) Sort() { Sort(p) }
-
// Float64Slice attaches the methods of Interface to []float64, sorting in increasing order.
type Float64Slice []float64
// Sort is a convenience method.
func (p Float64Slice) Sort() { Sort(p) }
-
// StringSlice attaches the methods of Interface to []string, sorting in increasing order.
type StringSlice []string
// Sort is a convenience method.
func (p StringSlice) Sort() { Sort(p) }
-
// Convenience wrappers for common cases
// Ints sorts a slice of ints in increasing order.
// Strings sorts a slice of strings in increasing order.
func Strings(a []string) { Sort(StringSlice(a)) }
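// Usage sketch for the convenience wrappers above; the slice contents and
// the helper name are illustrative only.
func exampleSort() {
	a := []int{42, 7, 11}
	Ints(a)                   // a is now [7 11 42]
	_ = IsSorted(IntSlice(a)) // true
}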
-
// IntsAreSorted tests whether a slice of ints is sorted in increasing order.
func IntsAreSorted(a []int) bool { return IsSorted(IntSlice(a)) }
// Float64sAreSorted tests whether a slice of float64s is sorted in increasing order.
"testing"
)
-
var ints = [...]int{74, 59, 238, -784, 9845, 959, 905, 0, 0, 42, 7586, -5467984, 7586}
var float64s = [...]float64{74.3, 59.0, 238.2, -784.0, 2.3, 9845.768, -959.7485, 905, 7.8, 7.8}
var strings = [...]string{"", "Hello", "foo", "bar", "foo", "f00", "%*&^*&^&", "***"}
func (e *NumError) String() string { return `parsing "` + e.Num + `": ` + e.Error.String() }
-
func computeIntsize() uint {
siz := uint(8)
for 1<<siz != 0 {
// returns its result in an int64.
func Atoi64(s string) (i int64, err os.Error) { return Btoi64(s, 10) }
-
// Atoui is like Atoui64 but returns its result as a uint.
func Atoui(s string) (i uint, err os.Error) {
i1, e1 := Atoui64(s)
return
}
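// Usage sketch for the string-to-integer helpers above; the inputs and the
// helper name are illustrative only.
func exampleAtoi() (int64, uint) {
	i, err := Atoi64("-42") // base 10, result as int64
	if err != nil {
		return 0, 0
	}
	u, err := Atoui("42") // like Atoui64, but result as uint
	if err != nil {
		return 0, 0
	}
	return i, u
}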
-
// UnreadByte moves the reading position back by one byte.
// It is an error to call UnreadByte if nothing has been
// read yet.
return string(b)
}
-
// ToUpper returns a copy of the string s with all Unicode letters mapped to their upper case.
func ToUpper(s string) string { return Map(unicode.ToUpper, s) }
}
}
-
type ExplodeTest struct {
s string
n int
}
}
-
// Test case for any function which accepts and returns a single string.
type StringTest struct {
in, out string
<-c
}
-
func HammerMutex(m *Mutex, loops int, cdone chan bool) {
for i := 0; i < loops; i++ {
m.Lock()
return err
}
-
// Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child.
// If a dup or exec fails, write the errno int to pipe.
// (Pipe is close-on-exec so if exec succeeds, it will be closed.)
//sys read(fd int, buf *byte, nbuf int) (n int, errno int)
//sys write(fd int, buf *byte, nbuf int) (n int, errno int)
-
/*
* Unimplemented
*/
//sys read(fd int, buf *byte, nbuf int) (n int, errno int)
//sys write(fd int, buf *byte, nbuf int) (n int, errno int)
-
/*
* Unimplemented
*/
return int(n)
}
-
func Read(fd int, p []byte) (n int, err Error) {
return Pread(fd, p, -1)
}
return
}
-
//sys sleep(millisecs int32) (err Error)
func Sleep(nsec int64) (err Error) {
return sleep(int32((nsec + 999) / 1e6)) // round up to microsecond
"unsafe"
)
-
var (
Stdin = 0
Stdout = 1
// Types
-
// Error table
var errors = [...]string{
1: "operation not permitted",
// Types
-
// Error table
var errors = [...]string{
1: "operation not permitted",
// Types
-
// Error table
var errors = [...]string{
1: "operation not permitted",
// Types
-
// Error table
var errors = [...]string{
1: "operation not permitted",
// Types
-
// Error table
var errors = [...]string{
1: "operation not permitted",
// Types
-
// Error table
var errors = [...]string{
1: "operation not permitted",
// Types
-
// Error table
var errors = [...]string{
1: "operation not permitted",
O_SYNC = 0x00000
O_ASYNC = 0x00000
-
S_IFMT = 0x1f000
S_IFIFO = 0x1000
S_IFCHR = 0x2000
"utf8"
)
-
// ----------------------------------------------------------------------------
// Filter implementation
htab bool // true if the cell is terminated by an htab ('\t')
}
-
// A Writer is a filter that inserts padding around tab-delimited
// columns in its input to align them in the output.
//
widths []int // list of column widths in runes - re-used during formatting
}
-
func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
-
// Reset the current state.
func (b *Writer) reset() {
b.buf.Reset()
b.addLine()
}
-
// Internal representation (current state):
//
// - all text written is appended to buf; tabs and line breaks are stripped away
// | | |
// buf start of incomplete cell pos
-
// Formatting can be controlled with these flags.
const (
// Ignore html tags and treat entities (starting with '&'
Debug
)
-
// A Writer must be initialized with a call to Init. The first parameter (output)
// specifies the filter output. The remaining parameters control the formatting:
//
return b
}
-
// debugging support (keep code around)
func (b *Writer) dump() {
pos := 0
print("\n")
}
-
// local error wrapper so we can distinguish os.Errors we want to return
// as errors from genuine panics (which we don't want to return as errors)
type osError struct {
err os.Error
}
-
func (b *Writer) write0(buf []byte) {
n, err := b.output.Write(buf)
if n != len(buf) && err == nil {
}
}
-
func (b *Writer) writeN(src []byte, n int) {
for n > len(src) {
b.write0(src)
b.write0(src[0:n])
}
-
var (
newline = []byte{'\n'}
tabs = []byte("\t\t\t\t\t\t\t\t")
)
-
func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
if b.padbytes[0] == '\t' || useTabs {
// padding is done with tabs
b.writeN(b.padbytes[0:], cellw-textw)
}
-
var vbar = []byte{'|'}
func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
return
}
-
// Format the text between line0 and line1 (excluding line1); pos
// is the buffer position corresponding to the beginning of line0.
// Returns the buffer position corresponding to the beginning of
return b.writeLines(pos, line0, line1)
}
-
// Append text to current cell.
func (b *Writer) append(text []byte) {
b.buf.Write(text)
b.cell.size += len(text)
}
-
// Update the cell width.
func (b *Writer) updateWidth() {
b.cell.width += utf8.RuneCount(b.buf.Bytes()[b.pos:b.buf.Len()])
b.pos = b.buf.Len()
}
-
// To escape a text segment, bracket it with Escape characters.
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
// does not terminate a cell and constitutes a single character of
//
const Escape = '\xff'
-
// Start escaped mode.
func (b *Writer) startEscape(ch byte) {
switch ch {
}
}
-
// Terminate escaped mode. If the escaped text was an HTML tag, its width
// is assumed to be zero for formatting purposes; if it was an HTML entity,
// its width is assumed to be one. In all other cases, the width is the
b.endChar = 0
}
-
// Terminate the current cell by adding it to the list of cells of the
// current line. Returns the number of cells in that line.
//
return len(*line)
}
-
func handlePanic(err *os.Error) {
if e := recover(); e != nil {
*err = e.(osError).err // re-panics if it's not a local osError
}
}
-
// Flush should be called after the last call to Write to ensure
// that any data buffered in the Writer is written to output. Any
// incomplete escape sequence at the end is simply considered
return
}
-
var hbar = []byte("---\n")
// Write writes buf to the writer b.
return
}
-
// NewWriter allocates and initializes a new tabwriter.Writer.
// The parameters are the same as for the Init function.
//
"testing"
)
-
type buffer struct {
a []byte
}
-
func (b *buffer) init(n int) { b.a = make([]byte, n)[0:0] }
-
func (b *buffer) clear() { b.a = b.a[0:0] }
-
func (b *buffer) Write(buf []byte) (written int, err os.Error) {
n := len(b.a)
m := len(buf)
return len(buf), nil
}
-
func (b *buffer) String() string { return string(b.a) }
-
func write(t *testing.T, testname string, w *Writer, src string) {
written, err := io.WriteString(w, src)
if err != nil {
}
}
-
func verify(t *testing.T, testname string, w *Writer, b *buffer, src, expected string) {
err := w.Flush()
if err != nil {
}
}
-
func check(t *testing.T, testname string, minwidth, tabwidth, padding int, padchar byte, flags uint, src, expected string) {
var b buffer
b.init(1000)
verify(t, title, &w, &b, src, expected)
}
-
var tests = []struct {
testname string
minwidth, tabwidth, padding int
},
}
-
func Test(t *testing.T) {
for _, e := range tests {
check(t, e.testname, e.minwidth, e.tabwidth, e.padding, e.padchar, e.flags, e.src, e.expected)
out: "77",
},
-
// Repeated
&Test{
in: "{.section Pdata }\n" +
"pointedToString\n",
},
-
// Interface values
&Test{
return r.r.Read(p[0 : (len(p)+1)/2])
}
-
// DataErrReader returns a Reader that returns the final
// error with the last data read, instead of by itself with
// zero bytes of data.
return *short
}
-
// Insert final newline if needed and tabs after internal newlines.
func tabify(s string) string {
n := len(s)
error bool
}
-
func (d *data) read(n int) []byte {
if len(d.p) < n {
d.p = nil
return p[0], true
}
-
// Make a string by stopping at the first NUL
func byteString(p []byte) string {
for i := 0; i < len(p); i++ {
package unicode
-
var TurkishCase = _TurkishCase
var _TurkishCase = SpecialCase{
CaseRange{0x0049, 0x0049, d{0, 0x131 - 0x49, 0}},
`
-
func printCategories() {
if *tablelist == "" {
return
// If there is no entry for a script name, there are no such points.
var FoldScript = map[string]*RangeTable{}
-
// Range entries: 3391 16-bit, 659 32-bit, 4050 total.
// Range bytes: 20346 16-bit, 7908 32-bit, 28254 total.
f(ws)
}
-
/*
Draft75Handler is an interface to a WebSocket based on the
(soon obsolete) draft-hixie-thewebsocketprotocol-75.
return nil, false
}
-
// RawToken is like Token but does not verify that
// start and end elements match and does not translate
// name space prefixes to their corresponding URLs.
}
}
-
// The last three tests (respectively one for characters in attribute
// names and two for character entities) pass not because of code
// changed for issue 1259, but instead pass with the given messages
{"<doc>&\xef\xbf\xbe;</doc>", "invalid character entity &;"},
}
-
func TestDisallowedCharacters(t *testing.T) {
for i, tt := range characterTests {